Dataset columns: repo (string, lengths 1 to 191), file (string, lengths 23 to 351), code (string, lengths 0 to 5.32M), file_length (int64, 0 to 5.32M), avg_line_length (float64, 0 to 2.9k), max_line_length (int64, 0 to 288k), extension_type (string, 1 class)
repo: twitter_nlp
file: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tests/TestSpacePipe.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.pipe.tests; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import java.io.*; import cc.mallet.fst.tests.TestCRF; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.pipe.tsf.*; import cc.mallet.types.InstanceList; /** * Unit Test for class TestSpacePipe.java * * * Created: Thu Feb 26 14:56:55 2004 * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: TestSpacePipe.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $ */ public class TestSpacePipe extends TestCase { public TestSpacePipe (String name){ super(name); } public static String spacePipeOutput = "name: array:0\ninput: TokenSequence edu.umass.cs.mallet.base.types.TokenSequence@1f6226\nToken#0:f feature(r@1_&_e@2_&_e@3)=1.0 feature(<START2>@-3_&_<START1>@-2_&_<START0>@-1)=1.0 feature(f_&_r@1_&_e@2)=1.0 feature(<START1>@-2_&_<START0>@-1_&_f)=1.0 feature(f_&_r@1)=1.0 feature(<START0>@-1_&_f)=1.0 feature(r@1)=1.0 feature(f)=1.0\nToken#1:r feature(e@1_&_e@2_&_s@3)=1.0 feature(<START1>@-3_&_<START0>@-2_&_f@-1)=1.0 feature(r_&_e@1_&_e@2)=1.0 feature(<START0>@-2_&_f@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(f@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#2:e feature(e@1_&_s@2_&_o@3)=1.0 feature(<START0>@-3_&_f@-2_&_r@-1)=1.0 feature(e_&_e@1_&_s@2)=1.0 feature(f@-2_&_r@-1_&_e)=1.0 feature(e_&_e@1)=1.0 feature(r@-1_&_e)=1.0 feature(e@1)=1.0 feature(e)=1.0\nToken#3:e feature(s@1_&_o@2_&_f@3)=1.0 feature(f@-3_&_r@-2_&_e@-1)=1.0 feature(e_&_s@1_&_o@2)=1.0 feature(r@-2_&_e@-1_&_e)=1.0 feature(e_&_s@1)=1.0 feature(e@-1_&_e)=1.0 feature(s@1)=1.0 feature(e)=1.0\nToken#4:s feature(o@1_&_f@2_&_t@3)=1.0 feature(r@-3_&_e@-2_&_e@-1)=1.0 feature(s_&_o@1_&_f@2)=1.0 feature(e@-2_&_e@-1_&_s)=1.0 feature(s_&_o@1)=1.0 feature(e@-1_&_s)=1.0 feature(o@1)=1.0 feature(s)=1.0\nToken#5:o feature(f@1_&_t@2_&_w@3)=1.0 feature(e@-3_&_e@-2_&_s@-1)=1.0 feature(o_&_f@1_&_t@2)=1.0 feature(e@-2_&_s@-1_&_o)=1.0 feature(o_&_f@1)=1.0 feature(s@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#6:f feature(t@1_&_w@2_&_a@3)=1.0 feature(e@-3_&_s@-2_&_o@-1)=1.0 feature(f_&_t@1_&_w@2)=1.0 feature(s@-2_&_o@-1_&_f)=1.0 feature(f_&_t@1)=1.0 feature(o@-1_&_f)=1.0 feature(t@1)=1.0 feature(f)=1.0\nToken#7:t feature(w@1_&_a@2_&_r@3)=1.0 feature(s@-3_&_o@-2_&_f@-1)=1.0 feature(t_&_w@1_&_a@2)=1.0 feature(o@-2_&_f@-1_&_t)=1.0 feature(t_&_w@1)=1.0 feature(f@-1_&_t)=1.0 feature(w@1)=1.0 feature(t)=1.0\nToken#8:w feature(a@1_&_r@2_&_e@3)=1.0 feature(o@-3_&_f@-2_&_t@-1)=1.0 feature(w_&_a@1_&_r@2)=1.0 feature(f@-2_&_t@-1_&_w)=1.0 feature(w_&_a@1)=1.0 feature(t@-1_&_w)=1.0 feature(a@1)=1.0 feature(w)=1.0\nToken#9:a feature(r@1_&_e@2_&_i@3)=1.0 feature(f@-3_&_t@-2_&_w@-1)=1.0 feature(a_&_r@1_&_e@2)=1.0 feature(t@-2_&_w@-1_&_a)=1.0 feature(a_&_r@1)=1.0 feature(w@-1_&_a)=1.0 feature(r@1)=1.0 feature(a)=1.0\nToken#10:r feature(e@1_&_i@2_&_s@3)=1.0 feature(t@-3_&_w@-2_&_a@-1)=1.0 feature(r_&_e@1_&_i@2)=1.0 feature(w@-2_&_a@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(a@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#11:e feature(i@1_&_s@2_&_a@3)=1.0 feature(w@-3_&_a@-2_&_r@-1)=1.0 
feature(e_&_i@1_&_s@2)=1.0 feature(a@-2_&_r@-1_&_e)=1.0 feature(e_&_i@1)=1.0 feature(r@-1_&_e)=1.0 feature(i@1)=1.0 feature(e)=1.0\nToken#12:i feature(s@1_&_a@2_&_m@3)=1.0 feature(a@-3_&_r@-2_&_e@-1)=1.0 feature(i_&_s@1_&_a@2)=1.0 feature(r@-2_&_e@-1_&_i)=1.0 feature(i_&_s@1)=1.0 feature(e@-1_&_i)=1.0 feature(s@1)=1.0 feature(i)=1.0\nToken#13:s feature(a@1_&_m@2_&_a@3)=1.0 feature(r@-3_&_e@-2_&_i@-1)=1.0 feature(s_&_a@1_&_m@2)=1.0 feature(e@-2_&_i@-1_&_s)=1.0 feature(s_&_a@1)=1.0 feature(i@-1_&_s)=1.0 feature(a@1)=1.0 feature(s)=1.0\nToken#14:a feature(m@1_&_a@2_&_t@3)=1.0 feature(e@-3_&_i@-2_&_s@-1)=1.0 feature(a_&_m@1_&_a@2)=1.0 feature(i@-2_&_s@-1_&_a)=1.0 feature(a_&_m@1)=1.0 feature(s@-1_&_a)=1.0 feature(m@1)=1.0 feature(a)=1.0\nToken#15:m feature(a@1_&_t@2_&_t@3)=1.0 feature(i@-3_&_s@-2_&_a@-1)=1.0 feature(m_&_a@1_&_t@2)=1.0 feature(s@-2_&_a@-1_&_m)=1.0 feature(m_&_a@1)=1.0 feature(a@-1_&_m)=1.0 feature(a@1)=1.0 feature(m)=1.0\nToken#16:a feature(t@1_&_t@2_&_e@3)=1.0 feature(s@-3_&_a@-2_&_m@-1)=1.0 feature(a_&_t@1_&_t@2)=1.0 feature(a@-2_&_m@-1_&_a)=1.0 feature(a_&_t@1)=1.0 feature(m@-1_&_a)=1.0 feature(t@1)=1.0 feature(a)=1.0\nToken#17:t feature(t@1_&_e@2_&_r@3)=1.0 feature(a@-3_&_m@-2_&_a@-1)=1.0 feature(t_&_t@1_&_e@2)=1.0 feature(m@-2_&_a@-1_&_t)=1.0 feature(t_&_t@1)=1.0 feature(a@-1_&_t)=1.0 feature(t@1)=1.0 feature(t)=1.0\nToken#18:t feature(e@1_&_r@2_&_o@3)=1.0 feature(m@-3_&_a@-2_&_t@-1)=1.0 feature(t_&_e@1_&_r@2)=1.0 feature(a@-2_&_t@-1_&_t)=1.0 feature(t_&_e@1)=1.0 feature(t@-1_&_t)=1.0 feature(e@1)=1.0 feature(t)=1.0\nToken#19:e feature(r@1_&_o@2_&_f@3)=1.0 feature(a@-3_&_t@-2_&_t@-1)=1.0 feature(e_&_r@1_&_o@2)=1.0 feature(t@-2_&_t@-1_&_e)=1.0 feature(e_&_r@1)=1.0 feature(t@-1_&_e)=1.0 feature(r@1)=1.0 feature(e)=1.0\nToken#20:r feature(o@1_&_f@2_&_t@3)=1.0 feature(t@-3_&_t@-2_&_e@-1)=1.0 feature(r_&_o@1_&_f@2)=1.0 feature(t@-2_&_e@-1_&_r)=1.0 feature(r_&_o@1)=1.0 feature(e@-1_&_r)=1.0 feature(o@1)=1.0 feature(r)=1.0\nToken#21:o feature(f@1_&_t@2_&_h@3)=1.0 feature(t@-3_&_e@-2_&_r@-1)=1.0 feature(o_&_f@1_&_t@2)=1.0 feature(e@-2_&_r@-1_&_o)=1.0 feature(o_&_f@1)=1.0 feature(r@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#22:f feature(t@1_&_h@2_&_e@3)=1.0 feature(e@-3_&_r@-2_&_o@-1)=1.0 feature(f_&_t@1_&_h@2)=1.0 feature(r@-2_&_o@-1_&_f)=1.0 feature(f_&_t@1)=1.0 feature(o@-1_&_f)=1.0 feature(t@1)=1.0 feature(f)=1.0\nToken#23:t feature(h@1_&_e@2_&_u@3)=1.0 feature(r@-3_&_o@-2_&_f@-1)=1.0 feature(t_&_h@1_&_e@2)=1.0 feature(o@-2_&_f@-1_&_t)=1.0 feature(t_&_h@1)=1.0 feature(f@-1_&_t)=1.0 feature(h@1)=1.0 feature(t)=1.0\nToken#24:h feature(e@1_&_u@2_&_s@3)=1.0 feature(o@-3_&_f@-2_&_t@-1)=1.0 feature(h_&_e@1_&_u@2)=1.0 feature(f@-2_&_t@-1_&_h)=1.0 feature(h_&_e@1)=1.0 feature(t@-1_&_h)=1.0 feature(e@1)=1.0 feature(h)=1.0\nToken#25:e feature(u@1_&_s@2_&_e@3)=1.0 feature(f@-3_&_t@-2_&_h@-1)=1.0 feature(e_&_u@1_&_s@2)=1.0 feature(t@-2_&_h@-1_&_e)=1.0 feature(e_&_u@1)=1.0 feature(h@-1_&_e)=1.0 feature(u@1)=1.0 feature(e)=1.0\nToken#26:u feature(s@1_&_e@2_&_r@3)=1.0 feature(t@-3_&_h@-2_&_e@-1)=1.0 feature(u_&_s@1_&_e@2)=1.0 feature(h@-2_&_e@-1_&_u)=1.0 feature(u_&_s@1)=1.0 feature(e@-1_&_u)=1.0 feature(s@1)=1.0 feature(u)=1.0\nToken#27:s feature(e@1_&_r@2_&_s@3)=1.0 feature(h@-3_&_e@-2_&_u@-1)=1.0 feature(s_&_e@1_&_r@2)=1.0 feature(e@-2_&_u@-1_&_s)=1.0 feature(s_&_e@1)=1.0 feature(u@-1_&_s)=1.0 feature(e@1)=1.0 feature(s)=1.0\nToken#28:e feature(r@1_&_s@2_&_'@3)=1.0 feature(e@-3_&_u@-2_&_s@-1)=1.0 feature(e_&_r@1_&_s@2)=1.0 feature(u@-2_&_s@-1_&_e)=1.0 feature(e_&_r@1)=1.0 
feature(s@-1_&_e)=1.0 feature(r@1)=1.0 feature(e)=1.0\nToken#29:r feature(s@1_&_'@2_&_f@3)=1.0 feature(u@-3_&_s@-2_&_e@-1)=1.0 feature(r_&_s@1_&_'@2)=1.0 feature(s@-2_&_e@-1_&_r)=1.0 feature(r_&_s@1)=1.0 feature(e@-1_&_r)=1.0 feature(s@1)=1.0 feature(r)=1.0\nToken#30:s feature('@1_&_f@2_&_r@3)=1.0 feature(s@-3_&_e@-2_&_r@-1)=1.0 feature(s_&_'@1_&_f@2)=1.0 feature(e@-2_&_r@-1_&_s)=1.0 feature(s_&_'@1)=1.0 feature(r@-1_&_s)=1.0 feature('@1)=1.0 feature(s)=1.0\nToken#31:' feature(f@1_&_r@2_&_e@3)=1.0 feature(e@-3_&_r@-2_&_s@-1)=1.0 feature('_&_f@1_&_r@2)=1.0 feature(r@-2_&_s@-1_&_')=1.0 feature('_&_f@1)=1.0 feature(s@-1_&_')=1.0 feature(f@1)=1.0 feature(')=1.0\nToken#32:f feature(r@1_&_e@2_&_e@3)=1.0 feature(r@-3_&_s@-2_&_'@-1)=1.0 feature(f_&_r@1_&_e@2)=1.0 feature(s@-2_&_'@-1_&_f)=1.0 feature(f_&_r@1)=1.0 feature('@-1_&_f)=1.0 feature(r@1)=1.0 feature(f)=1.0\nToken#33:r feature(e@1_&_e@2_&_d@3)=1.0 feature(s@-3_&_'@-2_&_f@-1)=1.0 feature(r_&_e@1_&_e@2)=1.0 feature('@-2_&_f@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(f@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#34:e feature(e@1_&_d@2_&_o@3)=1.0 feature('@-3_&_f@-2_&_r@-1)=1.0 feature(e_&_e@1_&_d@2)=1.0 feature(f@-2_&_r@-1_&_e)=1.0 feature(e_&_e@1)=1.0 feature(r@-1_&_e)=1.0 feature(e@1)=1.0 feature(e)=1.0\nToken#35:e feature(d@1_&_o@2_&_m@3)=1.0 feature(f@-3_&_r@-2_&_e@-1)=1.0 feature(e_&_d@1_&_o@2)=1.0 feature(r@-2_&_e@-1_&_e)=1.0 feature(e_&_d@1)=1.0 feature(e@-1_&_e)=1.0 feature(d@1)=1.0 feature(e)=1.0\nToken#36:d feature(o@1_&_m@2_&_t@3)=1.0 feature(r@-3_&_e@-2_&_e@-1)=1.0 feature(d_&_o@1_&_m@2)=1.0 feature(e@-2_&_e@-1_&_d)=1.0 feature(d_&_o@1)=1.0 feature(e@-1_&_d)=1.0 feature(o@1)=1.0 feature(d)=1.0\nToken#37:o feature(m@1_&_t@2_&_o@3)=1.0 feature(e@-3_&_e@-2_&_d@-1)=1.0 feature(o_&_m@1_&_t@2)=1.0 feature(e@-2_&_d@-1_&_o)=1.0 feature(o_&_m@1)=1.0 feature(d@-1_&_o)=1.0 feature(m@1)=1.0 feature(o)=1.0\nToken#38:m feature(t@1_&_o@2_&_r@3)=1.0 feature(e@-3_&_d@-2_&_o@-1)=1.0 feature(m_&_t@1_&_o@2)=1.0 feature(d@-2_&_o@-1_&_m)=1.0 feature(m_&_t@1)=1.0 feature(o@-1_&_m)=1.0 feature(t@1)=1.0 feature(m)=1.0\nToken#39:t feature(o@1_&_r@2_&_u@3)=1.0 feature(d@-3_&_o@-2_&_m@-1)=1.0 feature(t_&_o@1_&_r@2)=1.0 feature(o@-2_&_m@-1_&_t)=1.0 feature(t_&_o@1)=1.0 feature(m@-1_&_t)=1.0 feature(o@1)=1.0 feature(t)=1.0\nToken#40:o feature(r@1_&_u@2_&_n@3)=1.0 feature(o@-3_&_m@-2_&_t@-1)=1.0 feature(o_&_r@1_&_u@2)=1.0 feature(m@-2_&_t@-1_&_o)=1.0 feature(o_&_r@1)=1.0 feature(t@-1_&_o)=1.0 feature(r@1)=1.0 feature(o)=1.0\nToken#41:r feature(u@1_&_n@2_&_,@3)=1.0 feature(m@-3_&_t@-2_&_o@-1)=1.0 feature(r_&_u@1_&_n@2)=1.0 feature(t@-2_&_o@-1_&_r)=1.0 feature(r_&_u@1)=1.0 feature(o@-1_&_r)=1.0 feature(u@1)=1.0 feature(r)=1.0\nToken#42:u feature(n@1_&_,@2_&_c@3)=1.0 feature(t@-3_&_o@-2_&_r@-1)=1.0 feature(u_&_n@1_&_,@2)=1.0 feature(o@-2_&_r@-1_&_u)=1.0 feature(u_&_n@1)=1.0 feature(r@-1_&_u)=1.0 feature(n@1)=1.0 feature(u)=1.0\nToken#43:n feature(,@1_&_c@2_&_o@3)=1.0 feature(o@-3_&_r@-2_&_u@-1)=1.0 feature(n_&_,@1_&_c@2)=1.0 feature(r@-2_&_u@-1_&_n)=1.0 feature(n_&_,@1)=1.0 feature(u@-1_&_n)=1.0 feature(,@1)=1.0 feature(n)=1.0\nToken#44:, feature(c@1_&_o@2_&_p@3)=1.0 feature(r@-3_&_u@-2_&_n@-1)=1.0 feature(,_&_c@1_&_o@2)=1.0 feature(u@-2_&_n@-1_&_,)=1.0 feature(,_&_c@1)=1.0 feature(n@-1_&_,)=1.0 feature(c@1)=1.0 feature(,)=1.0\nToken#45:c feature(o@1_&_p@2_&_y@3)=1.0 feature(u@-3_&_n@-2_&_,@-1)=1.0 feature(c_&_o@1_&_p@2)=1.0 feature(n@-2_&_,@-1_&_c)=1.0 feature(c_&_o@1)=1.0 feature(,@-1_&_c)=1.0 feature(o@1)=1.0 feature(c)=1.0\nToken#46:o 
feature(p@1_&_y@2_&_,@3)=1.0 feature(n@-3_&_,@-2_&_c@-1)=1.0 feature(o_&_p@1_&_y@2)=1.0 feature(,@-2_&_c@-1_&_o)=1.0 feature(o_&_p@1)=1.0 feature(c@-1_&_o)=1.0 feature(p@1)=1.0 feature(o)=1.0\nToken#47:p feature(y@1_&_,@2_&_d@3)=1.0 feature(,@-3_&_c@-2_&_o@-1)=1.0 feature(p_&_y@1_&_,@2)=1.0 feature(c@-2_&_o@-1_&_p)=1.0 feature(p_&_y@1)=1.0 feature(o@-1_&_p)=1.0 feature(y@1)=1.0 feature(p)=1.0\nToken#48:y feature(,@1_&_d@2_&_i@3)=1.0 feature(c@-3_&_o@-2_&_p@-1)=1.0 feature(y_&_,@1_&_d@2)=1.0 feature(o@-2_&_p@-1_&_y)=1.0 feature(y_&_,@1)=1.0 feature(p@-1_&_y)=1.0 feature(,@1)=1.0 feature(y)=1.0\nToken#49:, feature(d@1_&_i@2_&_s@3)=1.0 feature(o@-3_&_p@-2_&_y@-1)=1.0 feature(,_&_d@1_&_i@2)=1.0 feature(p@-2_&_y@-1_&_,)=1.0 feature(,_&_d@1)=1.0 feature(y@-1_&_,)=1.0 feature(d@1)=1.0 feature(,)=1.0\nToken#50:d feature(i@1_&_s@2_&_t@3)=1.0 feature(p@-3_&_y@-2_&_,@-1)=1.0 feature(d_&_i@1_&_s@2)=1.0 feature(y@-2_&_,@-1_&_d)=1.0 feature(d_&_i@1)=1.0 feature(,@-1_&_d)=1.0 feature(i@1)=1.0 feature(d)=1.0\nToken#51:i feature(s@1_&_t@2_&_r@3)=1.0 feature(y@-3_&_,@-2_&_d@-1)=1.0 feature(i_&_s@1_&_t@2)=1.0 feature(,@-2_&_d@-1_&_i)=1.0 feature(i_&_s@1)=1.0 feature(d@-1_&_i)=1.0 feature(s@1)=1.0 feature(i)=1.0\nToken#52:s feature(t@1_&_r@2_&_i@3)=1.0 feature(,@-3_&_d@-2_&_i@-1)=1.0 feature(s_&_t@1_&_r@2)=1.0 feature(d@-2_&_i@-1_&_s)=1.0 feature(s_&_t@1)=1.0 feature(i@-1_&_s)=1.0 feature(t@1)=1.0 feature(s)=1.0\nToken#53:t feature(r@1_&_i@2_&_b@3)=1.0 feature(d@-3_&_i@-2_&_s@-1)=1.0 feature(t_&_r@1_&_i@2)=1.0 feature(i@-2_&_s@-1_&_t)=1.0 feature(t_&_r@1)=1.0 feature(s@-1_&_t)=1.0 feature(r@1)=1.0 feature(t)=1.0\nToken#54:r feature(i@1_&_b@2_&_u@3)=1.0 feature(i@-3_&_s@-2_&_t@-1)=1.0 feature(r_&_i@1_&_b@2)=1.0 feature(s@-2_&_t@-1_&_r)=1.0 feature(r_&_i@1)=1.0 feature(t@-1_&_r)=1.0 feature(i@1)=1.0 feature(r)=1.0\nToken#55:i feature(b@1_&_u@2_&_t@3)=1.0 feature(s@-3_&_t@-2_&_r@-1)=1.0 feature(i_&_b@1_&_u@2)=1.0 feature(t@-2_&_r@-1_&_i)=1.0 feature(i_&_b@1)=1.0 feature(r@-1_&_i)=1.0 feature(b@1)=1.0 feature(i)=1.0\nToken#56:b feature(u@1_&_t@2_&_e@3)=1.0 feature(t@-3_&_r@-2_&_i@-1)=1.0 feature(b_&_u@1_&_t@2)=1.0 feature(r@-2_&_i@-1_&_b)=1.0 feature(b_&_u@1)=1.0 feature(i@-1_&_b)=1.0 feature(u@1)=1.0 feature(b)=1.0\nToken#57:u feature(t@1_&_e@2_&_,@3)=1.0 feature(r@-3_&_i@-2_&_b@-1)=1.0 feature(u_&_t@1_&_e@2)=1.0 feature(i@-2_&_b@-1_&_u)=1.0 feature(u_&_t@1)=1.0 feature(b@-1_&_u)=1.0 feature(t@1)=1.0 feature(u)=1.0\nToken#58:t feature(e@1_&_,@2_&_s@3)=1.0 feature(i@-3_&_b@-2_&_u@-1)=1.0 feature(t_&_e@1_&_,@2)=1.0 feature(b@-2_&_u@-1_&_t)=1.0 feature(t_&_e@1)=1.0 feature(u@-1_&_t)=1.0 feature(e@1)=1.0 feature(t)=1.0\nToken#59:e feature(,@1_&_s@2_&_t@3)=1.0 feature(b@-3_&_u@-2_&_t@-1)=1.0 feature(e_&_,@1_&_s@2)=1.0 feature(u@-2_&_t@-1_&_e)=1.0 feature(e_&_,@1)=1.0 feature(t@-1_&_e)=1.0 feature(,@1)=1.0 feature(e)=1.0\nToken#60:, feature(s@1_&_t@2_&_u@3)=1.0 feature(u@-3_&_t@-2_&_e@-1)=1.0 feature(,_&_s@1_&_t@2)=1.0 feature(t@-2_&_e@-1_&_,)=1.0 feature(,_&_s@1)=1.0 feature(e@-1_&_,)=1.0 feature(s@1)=1.0 feature(,)=1.0\nToken#61:s feature(t@1_&_u@2_&_d@3)=1.0 feature(t@-3_&_e@-2_&_,@-1)=1.0 feature(s_&_t@1_&_u@2)=1.0 feature(e@-2_&_,@-1_&_s)=1.0 feature(s_&_t@1)=1.0 feature(,@-1_&_s)=1.0 feature(t@1)=1.0 feature(s)=1.0\nToken#62:t feature(u@1_&_d@2_&_y@3)=1.0 feature(e@-3_&_,@-2_&_s@-1)=1.0 feature(t_&_u@1_&_d@2)=1.0 feature(,@-2_&_s@-1_&_t)=1.0 feature(t_&_u@1)=1.0 feature(s@-1_&_t)=1.0 feature(u@1)=1.0 feature(t)=1.0\nToken#63:u feature(d@1_&_y@2_&_,@3)=1.0 feature(,@-3_&_s@-2_&_t@-1)=1.0 
feature(u_&_d@1_&_y@2)=1.0 feature(s@-2_&_t@-1_&_u)=1.0 feature(u_&_d@1)=1.0 feature(t@-1_&_u)=1.0 feature(d@1)=1.0 feature(u)=1.0\nToken#64:d feature(y@1_&_,@2_&_c@3)=1.0 feature(s@-3_&_t@-2_&_u@-1)=1.0 feature(d_&_y@1_&_,@2)=1.0 feature(t@-2_&_u@-1_&_d)=1.0 feature(d_&_y@1)=1.0 feature(u@-1_&_d)=1.0 feature(y@1)=1.0 feature(d)=1.0\nToken#65:y feature(,@1_&_c@2_&_h@3)=1.0 feature(t@-3_&_u@-2_&_d@-1)=1.0 feature(y_&_,@1_&_c@2)=1.0 feature(u@-2_&_d@-1_&_y)=1.0 feature(y_&_,@1)=1.0 feature(d@-1_&_y)=1.0 feature(,@1)=1.0 feature(y)=1.0\nToken#66:, feature(c@1_&_h@2_&_a@3)=1.0 feature(u@-3_&_d@-2_&_y@-1)=1.0 feature(,_&_c@1_&_h@2)=1.0 feature(d@-2_&_y@-1_&_,)=1.0 feature(,_&_c@1)=1.0 feature(y@-1_&_,)=1.0 feature(c@1)=1.0 feature(,)=1.0\nToken#67:c feature(h@1_&_a@2_&_n@3)=1.0 feature(d@-3_&_y@-2_&_,@-1)=1.0 feature(c_&_h@1_&_a@2)=1.0 feature(y@-2_&_,@-1_&_c)=1.0 feature(c_&_h@1)=1.0 feature(,@-1_&_c)=1.0 feature(h@1)=1.0 feature(c)=1.0\nToken#68:h feature(a@1_&_n@2_&_g@3)=1.0 feature(y@-3_&_,@-2_&_c@-1)=1.0 feature(h_&_a@1_&_n@2)=1.0 feature(,@-2_&_c@-1_&_h)=1.0 feature(h_&_a@1)=1.0 feature(c@-1_&_h)=1.0 feature(a@1)=1.0 feature(h)=1.0\nToken#69:a feature(n@1_&_g@2_&_e@3)=1.0 feature(,@-3_&_c@-2_&_h@-1)=1.0 feature(a_&_n@1_&_g@2)=1.0 feature(c@-2_&_h@-1_&_a)=1.0 feature(a_&_n@1)=1.0 feature(h@-1_&_a)=1.0 feature(n@1)=1.0 feature(a)=1.0\nToken#70:n feature(g@1_&_e@2_&_a@3)=1.0 feature(c@-3_&_h@-2_&_a@-1)=1.0 feature(n_&_g@1_&_e@2)=1.0 feature(h@-2_&_a@-1_&_n)=1.0 feature(n_&_g@1)=1.0 feature(a@-1_&_n)=1.0 feature(g@1)=1.0 feature(n)=1.0\nToken#71:g feature(e@1_&_a@2_&_n@3)=1.0 feature(h@-3_&_a@-2_&_n@-1)=1.0 feature(g_&_e@1_&_a@2)=1.0 feature(a@-2_&_n@-1_&_g)=1.0 feature(g_&_e@1)=1.0 feature(n@-1_&_g)=1.0 feature(e@1)=1.0 feature(g)=1.0\nToken#72:e feature(a@1_&_n@2_&_d@3)=1.0 feature(a@-3_&_n@-2_&_g@-1)=1.0 feature(e_&_a@1_&_n@2)=1.0 feature(n@-2_&_g@-1_&_e)=1.0 feature(e_&_a@1)=1.0 feature(g@-1_&_e)=1.0 feature(a@1)=1.0 feature(e)=1.0\nToken#73:a feature(n@1_&_d@2_&_i@3)=1.0 feature(n@-3_&_g@-2_&_e@-1)=1.0 feature(a_&_n@1_&_d@2)=1.0 feature(g@-2_&_e@-1_&_a)=1.0 feature(a_&_n@1)=1.0 feature(e@-1_&_a)=1.0 feature(n@1)=1.0 feature(a)=1.0\nToken#74:n feature(d@1_&_i@2_&_m@3)=1.0 feature(g@-3_&_e@-2_&_a@-1)=1.0 feature(n_&_d@1_&_i@2)=1.0 feature(e@-2_&_a@-1_&_n)=1.0 feature(n_&_d@1)=1.0 feature(a@-1_&_n)=1.0 feature(d@1)=1.0 feature(n)=1.0\nToken#75:d feature(i@1_&_m@2_&_p@3)=1.0 feature(e@-3_&_a@-2_&_n@-1)=1.0 feature(d_&_i@1_&_m@2)=1.0 feature(a@-2_&_n@-1_&_d)=1.0 feature(d_&_i@1)=1.0 feature(n@-1_&_d)=1.0 feature(i@1)=1.0 feature(d)=1.0\nToken#76:i feature(m@1_&_p@2_&_r@3)=1.0 feature(a@-3_&_n@-2_&_d@-1)=1.0 feature(i_&_m@1_&_p@2)=1.0 feature(n@-2_&_d@-1_&_i)=1.0 feature(i_&_m@1)=1.0 feature(d@-1_&_i)=1.0 feature(m@1)=1.0 feature(i)=1.0\nToken#77:m feature(p@1_&_r@2_&_o@3)=1.0 feature(n@-3_&_d@-2_&_i@-1)=1.0 feature(m_&_p@1_&_r@2)=1.0 feature(d@-2_&_i@-1_&_m)=1.0 feature(m_&_p@1)=1.0 feature(i@-1_&_m)=1.0 feature(p@1)=1.0 feature(m)=1.0\nToken#78:p feature(r@1_&_o@2_&_v@3)=1.0 feature(d@-3_&_i@-2_&_m@-1)=1.0 feature(p_&_r@1_&_o@2)=1.0 feature(i@-2_&_m@-1_&_p)=1.0 feature(p_&_r@1)=1.0 feature(m@-1_&_p)=1.0 feature(r@1)=1.0 feature(p)=1.0\nToken#79:r feature(o@1_&_v@2_&_e@3)=1.0 feature(i@-3_&_m@-2_&_p@-1)=1.0 feature(r_&_o@1_&_v@2)=1.0 feature(m@-2_&_p@-1_&_r)=1.0 feature(r_&_o@1)=1.0 feature(p@-1_&_r)=1.0 feature(o@1)=1.0 feature(r)=1.0\nToken#80:o feature(v@1_&_e@2_&_t@3)=1.0 feature(m@-3_&_p@-2_&_r@-1)=1.0 feature(o_&_v@1_&_e@2)=1.0 feature(p@-2_&_r@-1_&_o)=1.0 feature(o_&_v@1)=1.0 
feature(r@-1_&_o)=1.0 feature(v@1)=1.0 feature(o)=1.0\nToken#81:v feature(e@1_&_t@2_&_h@3)=1.0 feature(p@-3_&_r@-2_&_o@-1)=1.0 feature(v_&_e@1_&_t@2)=1.0 feature(r@-2_&_o@-1_&_v)=1.0 feature(v_&_e@1)=1.0 feature(o@-1_&_v)=1.0 feature(e@1)=1.0 feature(v)=1.0\nToken#82:e feature(t@1_&_h@2_&_e@3)=1.0 feature(r@-3_&_o@-2_&_v@-1)=1.0 feature(e_&_t@1_&_h@2)=1.0 feature(o@-2_&_v@-1_&_e)=1.0 feature(e_&_t@1)=1.0 feature(v@-1_&_e)=1.0 feature(t@1)=1.0 feature(e)=1.0\nToken#83:t feature(h@1_&_e@2_&_s@3)=1.0 feature(o@-3_&_v@-2_&_e@-1)=1.0 feature(t_&_h@1_&_e@2)=1.0 feature(v@-2_&_e@-1_&_t)=1.0 feature(t_&_h@1)=1.0 feature(e@-1_&_t)=1.0 feature(h@1)=1.0 feature(t)=1.0\nToken#84:h feature(e@1_&_s@2_&_o@3)=1.0 feature(v@-3_&_e@-2_&_t@-1)=1.0 feature(h_&_e@1_&_s@2)=1.0 feature(e@-2_&_t@-1_&_h)=1.0 feature(h_&_e@1)=1.0 feature(t@-1_&_h)=1.0 feature(e@1)=1.0 feature(h)=1.0\nToken#85:e feature(s@1_&_o@2_&_f@3)=1.0 feature(e@-3_&_t@-2_&_h@-1)=1.0 feature(e_&_s@1_&_o@2)=1.0 feature(t@-2_&_h@-1_&_e)=1.0 feature(e_&_s@1)=1.0 feature(h@-1_&_e)=1.0 feature(s@1)=1.0 feature(e)=1.0\nToken#86:s feature(o@1_&_f@2_&_t@3)=1.0 feature(t@-3_&_h@-2_&_e@-1)=1.0 feature(s_&_o@1_&_f@2)=1.0 feature(h@-2_&_e@-1_&_s)=1.0 feature(s_&_o@1)=1.0 feature(e@-1_&_s)=1.0 feature(o@1)=1.0 feature(s)=1.0\nToken#87:o feature(f@1_&_t@2_&_w@3)=1.0 feature(h@-3_&_e@-2_&_s@-1)=1.0 feature(o_&_f@1_&_t@2)=1.0 feature(e@-2_&_s@-1_&_o)=1.0 feature(o_&_f@1)=1.0 feature(s@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#88:f feature(t@1_&_w@2_&_a@3)=1.0 feature(e@-3_&_s@-2_&_o@-1)=1.0 feature(f_&_t@1_&_w@2)=1.0 feature(s@-2_&_o@-1_&_f)=1.0 feature(f_&_t@1)=1.0 feature(o@-1_&_f)=1.0 feature(t@1)=1.0 feature(f)=1.0\nToken#89:t feature(w@1_&_a@2_&_r@3)=1.0 feature(s@-3_&_o@-2_&_f@-1)=1.0 feature(t_&_w@1_&_a@2)=1.0 feature(o@-2_&_f@-1_&_t)=1.0 feature(t_&_w@1)=1.0 feature(f@-1_&_t)=1.0 feature(w@1)=1.0 feature(t)=1.0\nToken#90:w feature(a@1_&_r@2_&_e@3)=1.0 feature(o@-3_&_f@-2_&_t@-1)=1.0 feature(w_&_a@1_&_r@2)=1.0 feature(f@-2_&_t@-1_&_w)=1.0 feature(w_&_a@1)=1.0 feature(t@-1_&_w)=1.0 feature(a@1)=1.0 feature(w)=1.0\nToken#91:a feature(r@1_&_e@2_&_.@3)=1.0 feature(f@-3_&_t@-2_&_w@-1)=1.0 feature(a_&_r@1_&_e@2)=1.0 feature(t@-2_&_w@-1_&_a)=1.0 feature(a_&_r@1)=1.0 feature(w@-1_&_a)=1.0 feature(r@1)=1.0 feature(a)=1.0\nToken#92:r feature(e@1_&_.@2_&_m@3)=1.0 feature(t@-3_&_w@-2_&_a@-1)=1.0 feature(r_&_e@1_&_.@2)=1.0 feature(w@-2_&_a@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(a@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#93:e feature(.@1_&_m@2_&_o@3)=1.0 feature(w@-3_&_a@-2_&_r@-1)=1.0 feature(e_&_.@1_&_m@2)=1.0 feature(a@-2_&_r@-1_&_e)=1.0 feature(e_&_.@1)=1.0 feature(r@-1_&_e)=1.0 feature(.@1)=1.0 feature(e)=1.0\nToken#94:. 
feature(m@1_&_o@2_&_r@3)=1.0 feature(a@-3_&_r@-2_&_e@-1)=1.0 feature(._&_m@1_&_o@2)=1.0 feature(r@-2_&_e@-1_&_.)=1.0 feature(._&_m@1)=1.0 feature(e@-1_&_.)=1.0 feature(m@1)=1.0 feature(.)=1.0\nToken#95:m feature(o@1_&_r@2_&_e@3)=1.0 feature(r@-3_&_e@-2_&_.@-1)=1.0 feature(m_&_o@1_&_r@2)=1.0 feature(e@-2_&_.@-1_&_m)=1.0 feature(m_&_o@1)=1.0 feature(.@-1_&_m)=1.0 feature(o@1)=1.0 feature(m)=1.0\nToken#96:o feature(r@1_&_e@2_&_p@3)=1.0 feature(e@-3_&_.@-2_&_m@-1)=1.0 feature(o_&_r@1_&_e@2)=1.0 feature(.@-2_&_m@-1_&_o)=1.0 feature(o_&_r@1)=1.0 feature(m@-1_&_o)=1.0 feature(r@1)=1.0 feature(o)=1.0\nToken#97:r feature(e@1_&_p@2_&_r@3)=1.0 feature(.@-3_&_m@-2_&_o@-1)=1.0 feature(r_&_e@1_&_p@2)=1.0 feature(m@-2_&_o@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(o@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#98:e feature(p@1_&_r@2_&_e@3)=1.0 feature(m@-3_&_o@-2_&_r@-1)=1.0 feature(e_&_p@1_&_r@2)=1.0 feature(o@-2_&_r@-1_&_e)=1.0 feature(e_&_p@1)=1.0 feature(r@-1_&_e)=1.0 feature(p@1)=1.0 feature(e)=1.0\nToken#99:p feature(r@1_&_e@2_&_c@3)=1.0 feature(o@-3_&_r@-2_&_e@-1)=1.0 feature(p_&_r@1_&_e@2)=1.0 feature(r@-2_&_e@-1_&_p)=1.0 feature(p_&_r@1)=1.0 feature(e@-1_&_p)=1.0 feature(r@1)=1.0 feature(p)=1.0\nToken#100:r feature(e@1_&_c@2_&_i@3)=1.0 feature(r@-3_&_e@-2_&_p@-1)=1.0 feature(r_&_e@1_&_c@2)=1.0 feature(e@-2_&_p@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(p@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#101:e feature(c@1_&_i@2_&_s@3)=1.0 feature(e@-3_&_p@-2_&_r@-1)=1.0 feature(e_&_c@1_&_i@2)=1.0 feature(p@-2_&_r@-1_&_e)=1.0 feature(e_&_c@1)=1.0 feature(r@-1_&_e)=1.0 feature(c@1)=1.0 feature(e)=1.0\nToken#102:c feature(i@1_&_s@2_&_e@3)=1.0 feature(p@-3_&_r@-2_&_e@-1)=1.0 feature(c_&_i@1_&_s@2)=1.0 feature(r@-2_&_e@-1_&_c)=1.0 feature(c_&_i@1)=1.0 feature(e@-1_&_c)=1.0 feature(i@1)=1.0 feature(c)=1.0\nToken#103:i feature(s@1_&_e@2_&_l@3)=1.0 feature(r@-3_&_e@-2_&_c@-1)=1.0 feature(i_&_s@1_&_e@2)=1.0 feature(e@-2_&_c@-1_&_i)=1.0 feature(i_&_s@1)=1.0 feature(c@-1_&_i)=1.0 feature(s@1)=1.0 feature(i)=1.0\nToken#104:s feature(e@1_&_l@2_&_y@3)=1.0 feature(e@-3_&_c@-2_&_i@-1)=1.0 feature(s_&_e@1_&_l@2)=1.0 feature(c@-2_&_i@-1_&_s)=1.0 feature(s_&_e@1)=1.0 feature(i@-1_&_s)=1.0 feature(e@1)=1.0 feature(s)=1.0\nToken#105:e feature(l@1_&_y@2_&_,@3)=1.0 feature(c@-3_&_i@-2_&_s@-1)=1.0 feature(e_&_l@1_&_y@2)=1.0 feature(i@-2_&_s@-1_&_e)=1.0 feature(e_&_l@1)=1.0 feature(s@-1_&_e)=1.0 feature(l@1)=1.0 feature(e)=1.0\nToken#106:l feature(y@1_&_,@2_&_i@3)=1.0 feature(i@-3_&_s@-2_&_e@-1)=1.0 feature(l_&_y@1_&_,@2)=1.0 feature(s@-2_&_e@-1_&_l)=1.0 feature(l_&_y@1)=1.0 feature(e@-1_&_l)=1.0 feature(y@1)=1.0 feature(l)=1.0\nToken#107:y feature(,@1_&_i@2_&_t@3)=1.0 feature(s@-3_&_e@-2_&_l@-1)=1.0 feature(y_&_,@1_&_i@2)=1.0 feature(e@-2_&_l@-1_&_y)=1.0 feature(y_&_,@1)=1.0 feature(l@-1_&_y)=1.0 feature(,@1)=1.0 feature(y)=1.0\nToken#108:, feature(i@1_&_t@2_&_r@3)=1.0 feature(e@-3_&_l@-2_&_y@-1)=1.0 feature(,_&_i@1_&_t@2)=1.0 feature(l@-2_&_y@-1_&_,)=1.0 feature(,_&_i@1)=1.0 feature(y@-1_&_,)=1.0 feature(i@1)=1.0 feature(,)=1.0\nToken#109:i feature(t@1_&_r@2_&_e@3)=1.0 feature(l@-3_&_y@-2_&_,@-1)=1.0 feature(i_&_t@1_&_r@2)=1.0 feature(y@-2_&_,@-1_&_i)=1.0 feature(i_&_t@1)=1.0 feature(,@-1_&_i)=1.0 feature(t@1)=1.0 feature(i)=1.0\nToken#110:t feature(r@1_&_e@2_&_f@3)=1.0 feature(y@-3_&_,@-2_&_i@-1)=1.0 feature(t_&_r@1_&_e@2)=1.0 feature(,@-2_&_i@-1_&_t)=1.0 feature(t_&_r@1)=1.0 feature(i@-1_&_t)=1.0 feature(r@1)=1.0 feature(t)=1.0\nToken#111:r feature(e@1_&_f@2_&_e@3)=1.0 feature(,@-3_&_i@-2_&_t@-1)=1.0 
feature(r_&_e@1_&_f@2)=1.0 feature(i@-2_&_t@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(t@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#112:e feature(f@1_&_e@2_&_r@3)=1.0 feature(i@-3_&_t@-2_&_r@-1)=1.0 feature(e_&_f@1_&_e@2)=1.0 feature(t@-2_&_r@-1_&_e)=1.0 feature(e_&_f@1)=1.0 feature(r@-1_&_e)=1.0 feature(f@1)=1.0 feature(e)=1.0\nToken#113:f feature(e@1_&_r@2_&_s@3)=1.0 feature(t@-3_&_r@-2_&_e@-1)=1.0 feature(f_&_e@1_&_r@2)=1.0 feature(r@-2_&_e@-1_&_f)=1.0 feature(f_&_e@1)=1.0 feature(e@-1_&_f)=1.0 feature(e@1)=1.0 feature(f)=1.0\nToken#114:e feature(r@1_&_s@2_&_t@3)=1.0 feature(r@-3_&_e@-2_&_f@-1)=1.0 feature(e_&_r@1_&_s@2)=1.0 feature(e@-2_&_f@-1_&_e)=1.0 feature(e_&_r@1)=1.0 feature(f@-1_&_e)=1.0 feature(r@1)=1.0 feature(e)=1.0\nToken#115:r feature(s@1_&_t@2_&_o@3)=1.0 feature(e@-3_&_f@-2_&_e@-1)=1.0 feature(r_&_s@1_&_t@2)=1.0 feature(f@-2_&_e@-1_&_r)=1.0 feature(r_&_s@1)=1.0 feature(e@-1_&_r)=1.0 feature(s@1)=1.0 feature(r)=1.0\nToken#116:s feature(t@1_&_o@2_&_f@3)=1.0 feature(f@-3_&_e@-2_&_r@-1)=1.0 feature(s_&_t@1_&_o@2)=1.0 feature(e@-2_&_r@-1_&_s)=1.0 feature(s_&_t@1)=1.0 feature(r@-1_&_s)=1.0 feature(t@1)=1.0 feature(s)=1.0\nToken#117:t feature(o@1_&_f@2_&_o@3)=1.0 feature(e@-3_&_r@-2_&_s@-1)=1.0 feature(t_&_o@1_&_f@2)=1.0 feature(r@-2_&_s@-1_&_t)=1.0 feature(t_&_o@1)=1.0 feature(s@-1_&_t)=1.0 feature(o@1)=1.0 feature(t)=1.0\nToken#118:o feature(f@1_&_o@2_&_u@3)=1.0 feature(r@-3_&_s@-2_&_t@-1)=1.0 feature(o_&_f@1_&_o@2)=1.0 feature(s@-2_&_t@-1_&_o)=1.0 feature(o_&_f@1)=1.0 feature(t@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#119:f feature(o@1_&_u@2_&_r@3)=1.0 feature(s@-3_&_t@-2_&_o@-1)=1.0 feature(f_&_o@1_&_u@2)=1.0 feature(t@-2_&_o@-1_&_f)=1.0 feature(f_&_o@1)=1.0 feature(o@-1_&_f)=1.0 feature(o@1)=1.0 feature(f)=1.0\nToken#120:o feature(u@1_&_r@2_&_k@3)=1.0 feature(t@-3_&_o@-2_&_f@-1)=1.0 feature(o_&_u@1_&_r@2)=1.0 feature(o@-2_&_f@-1_&_o)=1.0 feature(o_&_u@1)=1.0 feature(f@-1_&_o)=1.0 feature(u@1)=1.0 feature(o)=1.0\nToken#121:u feature(r@1_&_k@2_&_i@3)=1.0 feature(o@-3_&_f@-2_&_o@-1)=1.0 feature(u_&_r@1_&_k@2)=1.0 feature(f@-2_&_o@-1_&_u)=1.0 feature(u_&_r@1)=1.0 feature(o@-1_&_u)=1.0 feature(r@1)=1.0 feature(u)=1.0\nToken#122:r feature(k@1_&_i@2_&_n@3)=1.0 feature(f@-3_&_o@-2_&_u@-1)=1.0 feature(r_&_k@1_&_i@2)=1.0 feature(o@-2_&_u@-1_&_r)=1.0 feature(r_&_k@1)=1.0 feature(u@-1_&_r)=1.0 feature(k@1)=1.0 feature(r)=1.0\nToken#123:k feature(i@1_&_n@2_&_d@3)=1.0 feature(o@-3_&_u@-2_&_r@-1)=1.0 feature(k_&_i@1_&_n@2)=1.0 feature(u@-2_&_r@-1_&_k)=1.0 feature(k_&_i@1)=1.0 feature(r@-1_&_k)=1.0 feature(i@1)=1.0 feature(k)=1.0\nToken#124:i feature(n@1_&_d@2_&_s@3)=1.0 feature(u@-3_&_r@-2_&_k@-1)=1.0 feature(i_&_n@1_&_d@2)=1.0 feature(r@-2_&_k@-1_&_i)=1.0 feature(i_&_n@1)=1.0 feature(k@-1_&_i)=1.0 feature(n@1)=1.0 feature(i)=1.0\nToken#125:n feature(d@1_&_s@2_&_o@3)=1.0 feature(r@-3_&_k@-2_&_i@-1)=1.0 feature(n_&_d@1_&_s@2)=1.0 feature(k@-2_&_i@-1_&_n)=1.0 feature(n_&_d@1)=1.0 feature(i@-1_&_n)=1.0 feature(d@1)=1.0 feature(n)=1.0\nToken#126:d feature(s@1_&_o@2_&_f@3)=1.0 feature(k@-3_&_i@-2_&_n@-1)=1.0 feature(d_&_s@1_&_o@2)=1.0 feature(i@-2_&_n@-1_&_d)=1.0 feature(d_&_s@1)=1.0 feature(n@-1_&_d)=1.0 feature(s@1)=1.0 feature(d)=1.0\nToken#127:s feature(o@1_&_f@2_&_f@3)=1.0 feature(i@-3_&_n@-2_&_d@-1)=1.0 feature(s_&_o@1_&_f@2)=1.0 feature(n@-2_&_d@-1_&_s)=1.0 feature(s_&_o@1)=1.0 feature(d@-1_&_s)=1.0 feature(o@1)=1.0 feature(s)=1.0\nToken#128:o feature(f@1_&_f@2_&_r@3)=1.0 feature(n@-3_&_d@-2_&_s@-1)=1.0 feature(o_&_f@1_&_f@2)=1.0 feature(d@-2_&_s@-1_&_o)=1.0 
feature(o_&_f@1)=1.0 feature(s@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#129:f feature(f@1_&_r@2_&_e@3)=1.0 feature(d@-3_&_s@-2_&_o@-1)=1.0 feature(f_&_f@1_&_r@2)=1.0 feature(s@-2_&_o@-1_&_f)=1.0 feature(f_&_f@1)=1.0 feature(o@-1_&_f)=1.0 feature(f@1)=1.0 feature(f)=1.0\nToken#130:f feature(r@1_&_e@2_&_e@3)=1.0 feature(s@-3_&_o@-2_&_f@-1)=1.0 feature(f_&_r@1_&_e@2)=1.0 feature(o@-2_&_f@-1_&_f)=1.0 feature(f_&_r@1)=1.0 feature(f@-1_&_f)=1.0 feature(r@1)=1.0 feature(f)=1.0\nToken#131:r feature(e@1_&_e@2_&_d@3)=1.0 feature(o@-3_&_f@-2_&_f@-1)=1.0 feature(r_&_e@1_&_e@2)=1.0 feature(f@-2_&_f@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(f@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#132:e feature(e@1_&_d@2_&_o@3)=1.0 feature(f@-3_&_f@-2_&_r@-1)=1.0 feature(e_&_e@1_&_d@2)=1.0 feature(f@-2_&_r@-1_&_e)=1.0 feature(e_&_e@1)=1.0 feature(r@-1_&_e)=1.0 feature(e@1)=1.0 feature(e)=1.0\nToken#133:e feature(d@1_&_o@2_&_m@3)=1.0 feature(f@-3_&_r@-2_&_e@-1)=1.0 feature(e_&_d@1_&_o@2)=1.0 feature(r@-2_&_e@-1_&_e)=1.0 feature(e_&_d@1)=1.0 feature(e@-1_&_e)=1.0 feature(d@1)=1.0 feature(e)=1.0\nToken#134:d feature(o@1_&_m@2_&_,@3)=1.0 feature(r@-3_&_e@-2_&_e@-1)=1.0 feature(d_&_o@1_&_m@2)=1.0 feature(e@-2_&_e@-1_&_d)=1.0 feature(d_&_o@1)=1.0 feature(e@-1_&_d)=1.0 feature(o@1)=1.0 feature(d)=1.0\nToken#135:o feature(m@1_&_,@2_&_f@3)=1.0 feature(e@-3_&_e@-2_&_d@-1)=1.0 feature(o_&_m@1_&_,@2)=1.0 feature(e@-2_&_d@-1_&_o)=1.0 feature(o_&_m@1)=1.0 feature(d@-1_&_o)=1.0 feature(m@1)=1.0 feature(o)=1.0\nToken#136:m feature(,@1_&_f@2_&_o@3)=1.0 feature(e@-3_&_d@-2_&_o@-1)=1.0 feature(m_&_,@1_&_f@2)=1.0 feature(d@-2_&_o@-1_&_m)=1.0 feature(m_&_,@1)=1.0 feature(o@-1_&_m)=1.0 feature(,@1)=1.0 feature(m)=1.0\nToken#137:, feature(f@1_&_o@2_&_r@3)=1.0 feature(d@-3_&_o@-2_&_m@-1)=1.0 feature(,_&_f@1_&_o@2)=1.0 feature(o@-2_&_m@-1_&_,)=1.0 feature(,_&_f@1)=1.0 feature(m@-1_&_,)=1.0 feature(f@1)=1.0 feature(,)=1.0\nToken#138:f feature(o@1_&_r@2_&_t@3)=1.0 feature(o@-3_&_m@-2_&_,@-1)=1.0 feature(f_&_o@1_&_r@2)=1.0 feature(m@-2_&_,@-1_&_f)=1.0 feature(f_&_o@1)=1.0 feature(,@-1_&_f)=1.0 feature(o@1)=1.0 feature(f)=1.0\nToken#139:o feature(r@1_&_t@2_&_h@3)=1.0 feature(m@-3_&_,@-2_&_f@-1)=1.0 feature(o_&_r@1_&_t@2)=1.0 feature(,@-2_&_f@-1_&_o)=1.0 feature(o_&_r@1)=1.0 feature(f@-1_&_o)=1.0 feature(r@1)=1.0 feature(o)=1.0\nToken#140:r feature(t@1_&_h@2_&_e@3)=1.0 feature(,@-3_&_f@-2_&_o@-1)=1.0 feature(r_&_t@1_&_h@2)=1.0 feature(f@-2_&_o@-1_&_r)=1.0 feature(r_&_t@1)=1.0 feature(o@-1_&_r)=1.0 feature(t@1)=1.0 feature(r)=1.0\nToken#141:t feature(h@1_&_e@2_&_u@3)=1.0 feature(f@-3_&_o@-2_&_r@-1)=1.0 feature(t_&_h@1_&_e@2)=1.0 feature(o@-2_&_r@-1_&_t)=1.0 feature(t_&_h@1)=1.0 feature(r@-1_&_t)=1.0 feature(h@1)=1.0 feature(t)=1.0\nToken#142:h feature(e@1_&_u@2_&_s@3)=1.0 feature(o@-3_&_r@-2_&_t@-1)=1.0 feature(h_&_e@1_&_u@2)=1.0 feature(r@-2_&_t@-1_&_h)=1.0 feature(h_&_e@1)=1.0 feature(t@-1_&_h)=1.0 feature(e@1)=1.0 feature(h)=1.0\nToken#143:e feature(u@1_&_s@2_&_e@3)=1.0 feature(r@-3_&_t@-2_&_h@-1)=1.0 feature(e_&_u@1_&_s@2)=1.0 feature(t@-2_&_h@-1_&_e)=1.0 feature(e_&_u@1)=1.0 feature(h@-1_&_e)=1.0 feature(u@1)=1.0 feature(e)=1.0\nToken#144:u feature(s@1_&_e@2_&_r@3)=1.0 feature(t@-3_&_h@-2_&_e@-1)=1.0 feature(u_&_s@1_&_e@2)=1.0 feature(h@-2_&_e@-1_&_u)=1.0 feature(u_&_s@1)=1.0 feature(e@-1_&_u)=1.0 feature(s@1)=1.0 feature(u)=1.0\nToken#145:s feature(e@1_&_r@2_&_s@3)=1.0 feature(h@-3_&_e@-2_&_u@-1)=1.0 feature(s_&_e@1_&_r@2)=1.0 feature(e@-2_&_u@-1_&_s)=1.0 feature(s_&_e@1)=1.0 feature(u@-1_&_s)=1.0 feature(e@1)=1.0 
feature(s)=1.0\nToken#146:e feature(r@1_&_s@2_&_o@3)=1.0 feature(e@-3_&_u@-2_&_s@-1)=1.0 feature(e_&_r@1_&_s@2)=1.0 feature(u@-2_&_s@-1_&_e)=1.0 feature(e_&_r@1)=1.0 feature(s@-1_&_e)=1.0 feature(r@1)=1.0 feature(e)=1.0\nToken#147:r feature(s@1_&_o@2_&_f@3)=1.0 feature(u@-3_&_s@-2_&_e@-1)=1.0 feature(r_&_s@1_&_o@2)=1.0 feature(s@-2_&_e@-1_&_r)=1.0 feature(r_&_s@1)=1.0 feature(e@-1_&_r)=1.0 feature(s@1)=1.0 feature(r)=1.0\nToken#148:s feature(o@1_&_f@2_&_t@3)=1.0 feature(s@-3_&_e@-2_&_r@-1)=1.0 feature(s_&_o@1_&_f@2)=1.0 feature(e@-2_&_r@-1_&_s)=1.0 feature(s_&_o@1)=1.0 feature(r@-1_&_s)=1.0 feature(o@1)=1.0 feature(s)=1.0\nToken#149:o feature(f@1_&_t@2_&_h@3)=1.0 feature(e@-3_&_r@-2_&_s@-1)=1.0 feature(o_&_f@1_&_t@2)=1.0 feature(r@-2_&_s@-1_&_o)=1.0 feature(o_&_f@1)=1.0 feature(s@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#150:f feature(t@1_&_h@2_&_e@3)=1.0 feature(r@-3_&_s@-2_&_o@-1)=1.0 feature(f_&_t@1_&_h@2)=1.0 feature(s@-2_&_o@-1_&_f)=1.0 feature(f_&_t@1)=1.0 feature(o@-1_&_f)=1.0 feature(t@1)=1.0 feature(f)=1.0\nToken#151:t feature(h@1_&_e@2_&_s@3)=1.0 feature(s@-3_&_o@-2_&_f@-1)=1.0 feature(t_&_h@1_&_e@2)=1.0 feature(o@-2_&_f@-1_&_t)=1.0 feature(t_&_h@1)=1.0 feature(f@-1_&_t)=1.0 feature(h@1)=1.0 feature(t)=1.0\nToken#152:h feature(e@1_&_s@2_&_o@3)=1.0 feature(o@-3_&_f@-2_&_t@-1)=1.0 feature(h_&_e@1_&_s@2)=1.0 feature(f@-2_&_t@-1_&_h)=1.0 feature(h_&_e@1)=1.0 feature(t@-1_&_h)=1.0 feature(e@1)=1.0 feature(h)=1.0\nToken#153:e feature(s@1_&_o@2_&_f@3)=1.0 feature(f@-3_&_t@-2_&_h@-1)=1.0 feature(e_&_s@1_&_o@2)=1.0 feature(t@-2_&_h@-1_&_e)=1.0 feature(e_&_s@1)=1.0 feature(h@-1_&_e)=1.0 feature(s@1)=1.0 feature(e)=1.0\nToken#154:s feature(o@1_&_f@2_&_t@3)=1.0 feature(t@-3_&_h@-2_&_e@-1)=1.0 feature(s_&_o@1_&_f@2)=1.0 feature(h@-2_&_e@-1_&_s)=1.0 feature(s_&_o@1)=1.0 feature(e@-1_&_s)=1.0 feature(o@1)=1.0 feature(s)=1.0\nToken#155:o feature(f@1_&_t@2_&_w@3)=1.0 feature(h@-3_&_e@-2_&_s@-1)=1.0 feature(o_&_f@1_&_t@2)=1.0 feature(e@-2_&_s@-1_&_o)=1.0 feature(o_&_f@1)=1.0 feature(s@-1_&_o)=1.0 feature(f@1)=1.0 feature(o)=1.0\nToken#156:f feature(t@1_&_w@2_&_a@3)=1.0 feature(e@-3_&_s@-2_&_o@-1)=1.0 feature(f_&_t@1_&_w@2)=1.0 feature(s@-2_&_o@-1_&_f)=1.0 feature(f_&_t@1)=1.0 feature(o@-1_&_f)=1.0 feature(t@1)=1.0 feature(f)=1.0\nToken#157:t feature(w@1_&_a@2_&_r@3)=1.0 feature(s@-3_&_o@-2_&_f@-1)=1.0 feature(t_&_w@1_&_a@2)=1.0 feature(o@-2_&_f@-1_&_t)=1.0 feature(t_&_w@1)=1.0 feature(f@-1_&_t)=1.0 feature(w@1)=1.0 feature(t)=1.0\nToken#158:w feature(a@1_&_r@2_&_e@3)=1.0 feature(o@-3_&_f@-2_&_t@-1)=1.0 feature(w_&_a@1_&_r@2)=1.0 feature(f@-2_&_t@-1_&_w)=1.0 feature(w_&_a@1)=1.0 feature(t@-1_&_w)=1.0 feature(a@1)=1.0 feature(w)=1.0\nToken#159:a feature(r@1_&_e@2_&_.@3)=1.0 feature(f@-3_&_t@-2_&_w@-1)=1.0 feature(a_&_r@1_&_e@2)=1.0 feature(t@-2_&_w@-1_&_a)=1.0 feature(a_&_r@1)=1.0 feature(w@-1_&_a)=1.0 feature(r@1)=1.0 feature(a)=1.0\nToken#160:r feature(e@1_&_.@2_&_<END0>@3)=1.0 feature(t@-3_&_w@-2_&_a@-1)=1.0 feature(r_&_e@1_&_.@2)=1.0 feature(w@-2_&_a@-1_&_r)=1.0 feature(r_&_e@1)=1.0 feature(a@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#161:e feature(.@1_&_<END0>@2_&_<END1>@3)=1.0 feature(w@-3_&_a@-2_&_r@-1)=1.0 feature(e_&_.@1_&_<END0>@2)=1.0 feature(a@-2_&_r@-1_&_e)=1.0 feature(e_&_.@1)=1.0 feature(r@-1_&_e)=1.0 feature(.@1)=1.0 feature(e)=1.0\nToken#162:. 
feature(<END0>@1_&_<END1>@2_&_<END2>@3)=1.0 feature(a@-3_&_r@-2_&_e@-1)=1.0 feature(._&_<END0>@1_&_<END1>@2)=1.0 feature(r@-2_&_e@-1_&_.)=1.0 feature(._&_<END0>@1)=1.0 feature(e@-1_&_.)=1.0 feature(<END0>@1)=1.0 feature(.)=1.0\n\ntarget: start (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\n\nname: array:1\ninput: TokenSequence edu.umass.cs.mallet.base.types.TokenSequence@58f9d3\nToken#0:t feature(h@1_&_e@2_&_f@3)=1.0 feature(<START2>@-3_&_<START1>@-2_&_<START0>@-1)=1.0 feature(t_&_h@1_&_e@2)=1.0 feature(<START1>@-2_&_<START0>@-1_&_t)=1.0 feature(t_&_h@1)=1.0 feature(<START0>@-1_&_t)=1.0 feature(h@1)=1.0 feature(t)=1.0\nToken#1:h feature(e@1_&_f@2_&_r@3)=1.0 feature(<START1>@-3_&_<START0>@-2_&_t@-1)=1.0 feature(h_&_e@1_&_f@2)=1.0 feature(<START0>@-2_&_t@-1_&_h)=1.0 feature(h_&_e@1)=1.0 feature(t@-1_&_h)=1.0 feature(e@1)=1.0 feature(h)=1.0\nToken#2:e feature(f@1_&_r@2_&_e@3)=1.0 feature(<START0>@-3_&_t@-2_&_h@-1)=1.0 feature(e_&_f@1_&_r@2)=1.0 feature(t@-2_&_h@-1_&_e)=1.0 feature(e_&_f@1)=1.0 feature(h@-1_&_e)=1.0 feature(f@1)=1.0 feature(e)=1.0\nToken#3:f feature(r@1_&_e@2_&_e@3)=1.0 feature(t@-3_&_h@-2_&_e@-1)=1.0 feature(f_&_r@1_&_e@2)=1.0 feature(h@-2_&_e@-1_&_f)=1.0 feature(f_&_r@1)=1.0 feature(e@-1_&_f)=1.0 feature(r@1)=1.0 feature(f)=1.0\nToken#4:r feature(e@1_&_e@2_&_d@3)=1.0 feature(h@-3_&_e@-2_&_f@-1)=1.0 feature(r_&_e@1_&_e@2)=1.0 feature(e@-2_&_f@-1_&_r)=1.0 feature(r_&_e@1)=1.0 
feature(f@-1_&_r)=1.0 feature(e@1)=1.0 feature(r)=1.0\nToken#5:e feature(e@1_&_d@2_&_o@3)=1.0 feature(e@-3_&_f@-2_&_r@-1)=1.0 feature(e_&_e@1_&_d@2)=1.0 feature(f@-2_&_r@-1_&_e)=1.0 feature(e_&_e@1)=1.0 feature(r@-1_&_e)=1.0 feature(e@1)=1.0 feature(e)=1.0\nToken#6:e feature(d@1_&_o@2_&_m@3)=1.0 feature(f@-3_&_r@-2_&_e@-1)=1.0 feature(e_&_d@1_&_o@2)=1.0 feature(r@-2_&_e@-1_&_e)=1.0 feature(e_&_d@1)=1.0 feature(e@-1_&_e)=1.0 feature(d@1)=1.0 feature(e)=1.0\nToken#7:d feature(o@1_&_m@2_&_t@3)=1.0 feature(r@-3_&_e@-2_&_e@-1)=1.0 feature(d_&_o@1_&_m@2)=1.0 feature(e@-2_&_e@-1_&_d)=1.0 feature(d_&_o@1)=1.0 feature(e@-1_&_d)=1.0 feature(o@1)=1.0 feature(d)=1.0\nToken#8:o feature(m@1_&_t@2_&_o@3)=1.0 feature(e@-3_&_e@-2_&_d@-1)=1.0 feature(o_&_m@1_&_t@2)=1.0 feature(e@-2_&_d@-1_&_o)=1.0 feature(o_&_m@1)=1.0 feature(d@-1_&_o)=1.0 feature(m@1)=1.0 feature(o)=1.0\nToken#9:m feature(t@1_&_o@2_&_r@3)=1.0 feature(e@-3_&_d@-2_&_o@-1)=1.0 feature(m_&_t@1_&_o@2)=1.0 feature(d@-2_&_o@-1_&_m)=1.0 feature(m_&_t@1)=1.0 feature(o@-1_&_m)=1.0 feature(t@1)=1.0 feature(m)=1.0\nToken#10:t feature(o@1_&_r@2_&_u@3)=1.0 feature(d@-3_&_o@-2_&_m@-1)=1.0 feature(t_&_o@1_&_r@2)=1.0 feature(o@-2_&_m@-1_&_t)=1.0 feature(t_&_o@1)=1.0 feature(m@-1_&_t)=1.0 feature(o@1)=1.0 feature(t)=1.0\nToken#11:o feature(r@1_&_u@2_&_n@3)=1.0 feature(o@-3_&_m@-2_&_t@-1)=1.0 feature(o_&_r@1_&_u@2)=1.0 feature(m@-2_&_t@-1_&_o)=1.0 feature(o_&_r@1)=1.0 feature(t@-1_&_o)=1.0 feature(r@1)=1.0 feature(o)=1.0\nToken#12:r feature(u@1_&_n@2_&_t@3)=1.0 feature(m@-3_&_t@-2_&_o@-1)=1.0 feature(r_&_u@1_&_n@2)=1.0 feature(t@-2_&_o@-1_&_r)=1.0 feature(r_&_u@1)=1.0 feature(o@-1_&_r)=1.0 feature(u@1)=1.0 feature(r)=1.0\nToken#13:u feature(n@1_&_t@2_&_h@3)=1.0 feature(t@-3_&_o@-2_&_r@-1)=1.0 feature(u_&_n@1_&_t@2)=1.0 feature(o@-2_&_r@-1_&_u)=1.0 feature(u_&_n@1)=1.0 feature(r@-1_&_u)=1.0 feature(n@1)=1.0 feature(u)=1.0\nToken#14:n feature(t@1_&_h@2_&_e@3)=1.0 feature(o@-3_&_r@-2_&_u@-1)=1.0 feature(n_&_t@1_&_h@2)=1.0 feature(r@-2_&_u@-1_&_n)=1.0 feature(n_&_t@1)=1.0 feature(u@-1_&_n)=1.0 feature(t@1)=1.0 feature(n)=1.0\nToken#15:t feature(h@1_&_e@2_&_p@3)=1.0 feature(r@-3_&_u@-2_&_n@-1)=1.0 feature(t_&_h@1_&_e@2)=1.0 feature(u@-2_&_n@-1_&_t)=1.0 feature(t_&_h@1)=1.0 feature(n@-1_&_t)=1.0 feature(h@1)=1.0 feature(t)=1.0\nToken#16:h feature(e@1_&_p@2_&_r@3)=1.0 feature(u@-3_&_n@-2_&_t@-1)=1.0 feature(h_&_e@1_&_p@2)=1.0 feature(n@-2_&_t@-1_&_h)=1.0 feature(h_&_e@1)=1.0 feature(t@-1_&_h)=1.0 feature(e@1)=1.0 feature(h)=1.0\nToken#17:e feature(p@1_&_r@2_&_o@3)=1.0 feature(n@-3_&_t@-2_&_h@-1)=1.0 feature(e_&_p@1_&_r@2)=1.0 feature(t@-2_&_h@-1_&_e)=1.0 feature(e_&_p@1)=1.0 feature(h@-1_&_e)=1.0 feature(p@1)=1.0 feature(e)=1.0\nToken#18:p feature(r@1_&_o@2_&_g@3)=1.0 feature(t@-3_&_h@-2_&_e@-1)=1.0 feature(p_&_r@1_&_o@2)=1.0 feature(h@-2_&_e@-1_&_p)=1.0 feature(p_&_r@1)=1.0 feature(e@-1_&_p)=1.0 feature(r@1)=1.0 feature(p)=1.0\nToken#19:r feature(o@1_&_g@2_&_r@3)=1.0 feature(h@-3_&_e@-2_&_p@-1)=1.0 feature(r_&_o@1_&_g@2)=1.0 feature(e@-2_&_p@-1_&_r)=1.0 feature(r_&_o@1)=1.0 feature(p@-1_&_r)=1.0 feature(o@1)=1.0 feature(r)=1.0\nToken#20:o feature(g@1_&_r@2_&_a@3)=1.0 feature(e@-3_&_p@-2_&_r@-1)=1.0 feature(o_&_g@1_&_r@2)=1.0 feature(p@-2_&_r@-1_&_o)=1.0 feature(o_&_g@1)=1.0 feature(r@-1_&_o)=1.0 feature(g@1)=1.0 feature(o)=1.0\nToken#21:g feature(r@1_&_a@2_&_m@3)=1.0 feature(p@-3_&_r@-2_&_o@-1)=1.0 feature(g_&_r@1_&_a@2)=1.0 feature(r@-2_&_o@-1_&_g)=1.0 feature(g_&_r@1)=1.0 feature(o@-1_&_g)=1.0 feature(r@1)=1.0 feature(g)=1.0\nToken#22:r 
feature(a@1_&_m@2_&_,@3)=1.0 feature(r@-3_&_o@-2_&_g@-1)=1.0 feature(r_&_a@1_&_m@2)=1.0 feature(o@-2_&_g@-1_&_r)=1.0 feature(r_&_a@1)=1.0 feature(g@-1_&_r)=1.0 feature(a@1)=1.0 feature(r)=1.0\nToken#23:a feature(m@1_&_,@2_&_f@3)=1.0 feature(o@-3_&_g@-2_&_r@-1)=1.0 feature(a_&_m@1_&_,@2)=1.0 feature(g@-2_&_r@-1_&_a)=1.0 feature(a_&_m@1)=1.0 feature(r@-1_&_a)=1.0 feature(m@1)=1.0 feature(a)=1.0\nToken#24:m feature(,@1_&_f@2_&_o@3)=1.0 feature(g@-3_&_r@-2_&_a@-1)=1.0 feature(m_&_,@1_&_f@2)=1.0 feature(r@-2_&_a@-1_&_m)=1.0 feature(m_&_,@1)=1.0 feature(a@-1_&_m)=1.0 feature(,@1)=1.0 feature(m)=1.0\nToken#25:, feature(f@1_&_o@2_&_r@3)=1.0 feature(r@-3_&_a@-2_&_m@-1)=1.0 feature(,_&_f@1_&_o@2)=1.0 feature(a@-2_&_m@-1_&_,)=1.0 feature(,_&_f@1)=1.0 feature(m@-1_&_,)=1.0 feature(f@1)=1.0 feature(,)=1.0\nToken#26:f feature(o@1_&_r@2_&_a@3)=1.0 feature(a@-3_&_m@-2_&_,@-1)=1.0 feature(f_&_o@1_&_r@2)=1.0 feature(m@-2_&_,@-1_&_f)=1.0 feature(f_&_o@1)=1.0 feature(,@-1_&_f)=1.0 feature(o@1)=1.0 feature(f)=1.0\nToken#27:o feature(r@1_&_a@2_&_n@3)=1.0 feature(m@-3_&_,@-2_&_f@-1)=1.0 feature(o_&_r@1_&_a@2)=1.0 feature(,@-2_&_f@-1_&_o)=1.0 feature(o_&_r@1)=1.0 feature(f@-1_&_o)=1.0 feature(r@1)=1.0 feature(o)=1.0\nToken#28:r feature(a@1_&_n@2_&_y@3)=1.0 feature(,@-3_&_f@-2_&_o@-1)=1.0 feature(r_&_a@1_&_n@2)=1.0 feature(f@-2_&_o@-1_&_r)=1.0 feature(r_&_a@1)=1.0 feature(o@-1_&_r)=1.0 feature(a@1)=1.0 feature(r)=1.0\nToken#29:a feature(n@1_&_y@2_&_p@3)=1.0 feature(f@-3_&_o@-2_&_r@-1)=1.0 feature(a_&_n@1_&_y@2)=1.0 feature(o@-2_&_r@-1_&_a)=1.0 feature(a_&_n@1)=1.0 feature(r@-1_&_a)=1.0 feature(n@1)=1.0 feature(a)=1.0\nToken#30:n feature(y@1_&_p@2_&_u@3)=1.0 feature(o@-3_&_r@-2_&_a@-1)=1.0 feature(n_&_y@1_&_p@2)=1.0 feature(r@-2_&_a@-1_&_n)=1.0 feature(n_&_y@1)=1.0 feature(a@-1_&_n)=1.0 feature(y@1)=1.0 feature(n)=1.0\nToken#31:y feature(p@1_&_u@2_&_r@3)=1.0 feature(r@-3_&_a@-2_&_n@-1)=1.0 feature(y_&_p@1_&_u@2)=1.0 feature(a@-2_&_n@-1_&_y)=1.0 feature(y_&_p@1)=1.0 feature(n@-1_&_y)=1.0 feature(p@1)=1.0 feature(y)=1.0\nToken#32:p feature(u@1_&_r@2_&_p@3)=1.0 feature(a@-3_&_n@-2_&_y@-1)=1.0 feature(p_&_u@1_&_r@2)=1.0 feature(n@-2_&_y@-1_&_p)=1.0 feature(p_&_u@1)=1.0 feature(y@-1_&_p)=1.0 feature(u@1)=1.0 feature(p)=1.0\nToken#33:u feature(r@1_&_p@2_&_o@3)=1.0 feature(n@-3_&_y@-2_&_p@-1)=1.0 feature(u_&_r@1_&_p@2)=1.0 feature(y@-2_&_p@-1_&_u)=1.0 feature(u_&_r@1)=1.0 feature(p@-1_&_u)=1.0 feature(r@1)=1.0 feature(u)=1.0\nToken#34:r feature(p@1_&_o@2_&_s@3)=1.0 feature(y@-3_&_p@-2_&_u@-1)=1.0 feature(r_&_p@1_&_o@2)=1.0 feature(p@-2_&_u@-1_&_r)=1.0 feature(r_&_p@1)=1.0 feature(u@-1_&_r)=1.0 feature(p@1)=1.0 feature(r)=1.0\nToken#35:p feature(o@1_&_s@2_&_e@3)=1.0 feature(p@-3_&_u@-2_&_r@-1)=1.0 feature(p_&_o@1_&_s@2)=1.0 feature(u@-2_&_r@-1_&_p)=1.0 feature(p_&_o@1)=1.0 feature(r@-1_&_p)=1.0 feature(o@1)=1.0 feature(p)=1.0\nToken#36:o feature(s@1_&_e@2_&_.@3)=1.0 feature(u@-3_&_r@-2_&_p@-1)=1.0 feature(o_&_s@1_&_e@2)=1.0 feature(r@-2_&_p@-1_&_o)=1.0 feature(o_&_s@1)=1.0 feature(p@-1_&_o)=1.0 feature(s@1)=1.0 feature(o)=1.0\nToken#37:s feature(e@1_&_.@2_&_<END0>@3)=1.0 feature(r@-3_&_p@-2_&_o@-1)=1.0 feature(s_&_e@1_&_.@2)=1.0 feature(p@-2_&_o@-1_&_s)=1.0 feature(s_&_e@1)=1.0 feature(o@-1_&_s)=1.0 feature(e@1)=1.0 feature(s)=1.0\nToken#38:e feature(.@1_&_<END0>@2_&_<END1>@3)=1.0 feature(p@-3_&_o@-2_&_s@-1)=1.0 feature(e_&_.@1_&_<END0>@2)=1.0 feature(o@-2_&_s@-1_&_e)=1.0 feature(e_&_.@1)=1.0 feature(s@-1_&_e)=1.0 feature(.@1)=1.0 feature(e)=1.0\nToken#39:. 
feature(<END0>@1_&_<END1>@2_&_<END2>@3)=1.0 feature(o@-3_&_s@-2_&_e@-1)=1.0 feature(._&_<END0>@1_&_<END1>@2)=1.0 feature(s@-2_&_e@-1_&_.)=1.0 feature(._&_<END0>@1)=1.0 feature(e@-1_&_.)=1.0 feature(<END0>@1)=1.0 feature(.)=1.0\n\ntarget: start (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nstart (0)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\nnotstart (1)\n\n"; public void testSpacePipe () { Pipe p = new SerialPipes (new Pipe[] { new CharSequence2TokenSequence ("."), new TokenSequenceLowercase (), new TestCRF.TestCRFTokenSequenceRemoveSpaces (), new TokenText (), new OffsetConjunctions (false, new int[][] {{0}, {1},{-1,0},{0,1}, {-2,-1,0}, {0,1,2}, {-3,-2,-1}, {1,2,3}, }), new PrintInputAndTarget(), }); // Print to a string ByteArrayOutputStream out = new ByteArrayOutputStream (); PrintStream oldOut = System.out; System.setOut (new PrintStream (out)); InstanceList lst = new InstanceList (p); lst.addThruPipe (new ArrayIterator (new String[] { TestCRF.data[0], TestCRF.data[1], })); System.setOut (oldOut); assertEquals (spacePipeOutput, out.toString()); } /** * @return a <code>TestSuite</code> */ public static TestSuite suite() { return new TestSuite (TestSpacePipe.class); } public static void main(String[] args) { junit.textui.TestRunner.run(suite()); } }// TestSpacePipe
file_length: 46,823 | avg_line_length: 591.708861 | max_line_length: 44,625 | extension_type: java
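The TestSpacePipe test above builds its feature pipeline with SerialPipes and feeds raw strings through it via InstanceList.addThruPipe. A stripped-down sketch of that same pattern (omitting the test-only TestCRF.TestCRFTokenSequenceRemoveSpaces step, using a shorter conjunction list, and a made-up input string; the SpacePipeSketch class name is likewise made up) looks like this:

import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.pipe.tsf.*;
import cc.mallet.types.InstanceList;

public class SpacePipeSketch {
    public static void main (String[] args) {
        // Same pipeline shape as the test: split into one token per character
        // (regex "."), lowercase, add token-text and offset-conjunction
        // features, then print each instance.
        Pipe p = new SerialPipes (new Pipe[] {
            new CharSequence2TokenSequence ("."),
            new TokenSequenceLowercase (),
            new TokenText (),
            new OffsetConjunctions (false, new int[][] {{0}, {1}, {-1, 0}}),
            new PrintInputAndTarget (),
        });
        InstanceList lst = new InstanceList (p);
        // "some input text" is an arbitrary example, not the test's data.
        lst.addThruPipe (new ArrayIterator (new String[] { "some input text" }));
    }
}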
repo: twitter_nlp
file: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/LexiconMembership.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org. For further
   information, see the file `LICENSE' included with this distribution. */

/**
   Tests membership of the token text in the provided list of words.
   The lexicon words are provided in a file, one word per line.

   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/

package cc.mallet.pipe.tsf;

import java.io.*;

import cc.mallet.pipe.*;
import cc.mallet.types.*;

public class LexiconMembership extends Pipe implements Serializable
{
    String name;
    gnu.trove.THashSet lexicon;
    boolean ignoreCase;

    public LexiconMembership (String name, Reader lexiconReader, boolean ignoreCase)
    {
        this.name = name;
        this.lexicon = new gnu.trove.THashSet ();
        this.ignoreCase = ignoreCase;
        LineNumberReader reader = new LineNumberReader (lexiconReader);
        String line;
        while (true) {
            try {
                line = reader.readLine();
            } catch (IOException e) {
                throw new IllegalStateException ();
            }
            if (line == null) {
                break;
            } else {
                // System.out.println(name + " : " + (ignoreCase ? line.toLowerCase().intern() : line.intern()) );
                lexicon.add (ignoreCase ? line.toLowerCase() : line);
            }
        }
        if (lexicon.size() == 0)
            throw new IllegalArgumentException ("Empty lexicon");
    }

    public LexiconMembership (String name, File lexiconFile, boolean ignoreCase) throws FileNotFoundException
    {
        this (name, new BufferedReader (new FileReader (lexiconFile)), ignoreCase);
    }

    public LexiconMembership (File lexiconFile, boolean ignoreCase) throws FileNotFoundException
    {
        this (lexiconFile.getName(), lexiconFile, ignoreCase);
    }

    public LexiconMembership (File lexiconFile) throws FileNotFoundException
    {
        this (lexiconFile.getName(), lexiconFile, true);
    }

    public Instance pipe (Instance carrier)
    {
        TokenSequence ts = (TokenSequence) carrier.getData();
        for (int i = 0; i < ts.size(); i++) {
            Token t = ts.get(i);
            String s = t.getText();
            String conS = s;
            // dealing with ([a-z]+), ([a-z]+, [a-z]+), [a-z]+.
            if (conS.startsWith("("))
                conS = conS.substring(1);
            if (conS.endsWith(")") || conS.endsWith("."))
                conS = conS.substring(0, conS.length()-1);
            if (lexicon.contains (ignoreCase ? s.toLowerCase() : s))
                t.setFeatureValue (name, 1.0);
            if (conS.compareTo(s) != 0) {
                if (lexicon.contains (ignoreCase ? conS.toLowerCase() : conS))
                    t.setFeatureValue (name, 1.0);
            }
        }
        return carrier;
    }

    // Serialization

    private static final long serialVersionUID = 1;
    private static final int CURRENT_SERIAL_VERSION = 0;

    private void writeObject (ObjectOutputStream out) throws IOException {
        out.writeInt (CURRENT_SERIAL_VERSION);
        out.writeObject (name);
        out.writeObject (lexicon);
        out.writeBoolean (ignoreCase);
    }

    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
        int version = in.readInt ();
        this.name = (String) in.readObject();
        this.lexicon = (gnu.trove.THashSet) in.readObject();
        this.ignoreCase = in.readBoolean();
    }
}
file_length: 3,342 | avg_line_length: 28.584071 | max_line_length: 106 | extension_type: java
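A minimal usage sketch for the lexicon pipe above, based on its File constructor; the lexicon file name, feature name, and wrapper class are made up for illustration, and in practice the pipe would sit inside a SerialPipes chain with other token-sequence pipes:

import java.io.File;
import java.io.FileNotFoundException;

import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.tsf.LexiconMembership;

public class LexiconMembershipSketch {
    public static void main (String[] args) throws FileNotFoundException {
        // "cities.txt" is a hypothetical lexicon: one word per line.
        // Every token whose text appears in the file (case-insensitively,
        // since ignoreCase is true) gets the binary feature "IN_CITY_LEXICON".
        Pipe cityLexicon = new LexiconMembership ("IN_CITY_LEXICON", new File ("cities.txt"), true);
    }
}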
repo: twitter_nlp
file: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TokenTextCharNGrams.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org. For further
   information, see the file `LICENSE' included with this distribution. */

/**
   Add character n-grams of the token text as features with value 1.0.

   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/

package cc.mallet.pipe.tsf;

import java.io.*;
import java.util.regex.Pattern;

import cc.mallet.pipe.*;
import cc.mallet.types.*;

public class TokenTextCharNGrams extends Pipe implements Serializable
{
    static char startBorderChar = '>';
    static char endBorderChar = '<';

    String prefix;
    int[] gramSizes;
    boolean distinguishBorders = false;

    public TokenTextCharNGrams (String prefix, int[] gramSizes, boolean distinguishBorders)
    {
        this.prefix = prefix;
        this.gramSizes = gramSizes;
        this.distinguishBorders = distinguishBorders;
    }

    public TokenTextCharNGrams (String prefix, int[] gramSizes)
    {
        this.prefix = prefix;
        this.gramSizes = gramSizes;
    }

    public TokenTextCharNGrams ()
    {
        this ("CHARBIGRAM=", new int[] {2});
    }

    public Instance pipe (Instance carrier)
    {
        TokenSequence ts = (TokenSequence) carrier.getData();
        for (int i = 0; i < ts.size(); i++) {
            Token t = ts.get(i);
            String s = t.getText();
            if (distinguishBorders)
                s = startBorderChar + s + endBorderChar;
            int slen = s.length();
            for (int j = 0; j < gramSizes.length; j++) {
                int size = gramSizes[j];
                for (int k = 0; k < (slen - size)+1; k++)
                    t.setFeatureValue ((prefix + s.substring (k, k+size)), 1.0);
            }
        }
        return carrier;
    }

    // Serialization
    // Version 0 : Initial (Saved prefix & gram sizes)
    // Version 1 : Save distinguishBorders

    private static final long serialVersionUID = 1;
    private static final int CURRENT_SERIAL_VERSION = 1;

    private void writeObject (ObjectOutputStream out) throws IOException {
        out.writeInt (CURRENT_SERIAL_VERSION);
        out.writeObject (prefix);
        out.writeInt (gramSizes.length);
        for (int i = 0; i < gramSizes.length; i++)
            out.writeInt (gramSizes[i]);
        out.writeBoolean (distinguishBorders);
    }

    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
        int version = in.readInt ();
        prefix = (String) in.readObject();
        int gsl = in.readInt ();
        if (gsl > 0) {
            gramSizes = new int[gsl];
            for (int i = 0; i < gsl; i++)
                gramSizes[i] = in.readInt();
        }
        if (version >= 1) {
            distinguishBorders = in.readBoolean ();
        }
    }
}
file_length: 2,732 | avg_line_length: 25.278846 | max_line_length: 92 | extension_type: java
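A minimal usage sketch for the n-gram pipe above; the prefix and gram sizes shown are illustrative choices, not the class defaults (its no-argument constructor uses "CHARBIGRAM=" and bigrams only), and the wrapper class name is made up:

import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.tsf.TokenTextCharNGrams;

public class CharNGramSketch {
    public static void main (String[] args) {
        // Emit character bigrams and trigrams of each token's text as binary
        // features prefixed "CHARNGRAM=".  With distinguishBorders = true the
        // token is wrapped in '>' and '<' so word-initial and word-final
        // n-grams differ from word-internal ones.
        Pipe charNGrams = new TokenTextCharNGrams ("CHARNGRAM=", new int[] {2, 3}, true);
    }
}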
repo: twitter_nlp
file: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TokenText.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org. For further
   information, see the file `LICENSE' included with this distribution. */

/**
   Add the token text as a feature with value 1.0.

   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/

package cc.mallet.pipe.tsf;

import java.io.*;
import java.util.regex.Pattern;

import cc.mallet.pipe.*;
import cc.mallet.types.*;

public class TokenText extends Pipe implements Serializable
{
    String prefix;
    Pattern matchingRegex;

    public TokenText (String prefix, Pattern matchingRegex)
    {
        this.prefix = prefix;
        this.matchingRegex = matchingRegex;
    }

    public TokenText (String prefix)
    {
        this.prefix = prefix;
        this.matchingRegex = null;
    }

    public TokenText ()
    {
    }

    public Instance pipe (Instance carrier)
    {
        TokenSequence ts = (TokenSequence) carrier.getData();
        for (int i = 0; i < ts.size(); i++) {
            Token t = ts.get(i);
            if (matchingRegex == null || matchingRegex.matcher(t.getText()).matches()) {
                t.setFeatureValue (prefix == null ? t.getText() : (prefix + t.getText()), 1.0);
            }
        }
        return carrier;
    }

    // Serialization

    private static final long serialVersionUID = 1;
    private static final int CURRENT_SERIAL_VERSION = 1;

    private void writeObject (ObjectOutputStream out) throws IOException {
        out.writeInt (CURRENT_SERIAL_VERSION);
        out.writeObject (prefix);
        out.writeObject (matchingRegex);
    }

    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
        int version = in.readInt ();
        prefix = (String) in.readObject ();
        if (version > 0)
            matchingRegex = (Pattern) in.readObject();
    }
}
file_length: 1,955 | avg_line_length: 24.076923 | max_line_length: 92 | extension_type: java
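A minimal usage sketch for TokenText based on the two constructors above; the "W=" and "WORD=" prefixes and the letters-only regex are illustrative, as is the wrapper class:

import java.util.regex.Pattern;

import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.tsf.TokenText;

public class TokenTextSketch {
    public static void main (String[] args) {
        // Add each token's text as a feature prefixed with "W=",
        // e.g. the token "free" gets the feature "W=free".
        Pipe word = new TokenText ("W=");

        // Same, but only tokens consisting entirely of letters get the feature.
        Pipe lettersOnly = new TokenText ("WORD=", Pattern.compile ("\\p{Alpha}+"));
    }
}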
repo: twitter_nlp
file: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/OffsetFeatureConjunction.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org. For further
   information, see the file `LICENSE' included with this distribution. */

/**
   Create a new feature from the conjunction of features at given offsets that
   match given regular expressions.  This can be seen as hand-coding a few of
   the conjunctions that you'd get from {@link OffsetConjunctions}.
   <P>
   For example, creating a pipe with
   <TT>new OffsetFeatureConjunction ("TIME", new String[] { "number", "W=:", "number" }, new int[] { 0, 1, 2 })</TT>
   will create a feature that is true whenever all of (a) a feature at the
   current timestep matches "number", (b) a feature at the next timestep
   matches "W=:", and (c) a feature 2 timesteps from now matches "number",
   so that you have a simple time detector.
   <P>
   If the conjunction passes, then either the first timestep (that is, the one
   all the offsets were computed from), or all matching timesteps, get the
   feature "TIME" --- depending on the value of the field tagAllTimesteps.

   @author Charles Sutton <a href="mailto:[email protected]">[email protected]</a>
   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/

package cc.mallet.pipe.tsf;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.regex.Pattern;

import cc.mallet.pipe.Pipe;
import cc.mallet.types.Instance;
import cc.mallet.types.Token;
import cc.mallet.types.TokenSequence;
import cc.mallet.util.PropertyList;

public class OffsetFeatureConjunction extends Pipe implements Serializable
{
    private String thisFeatureName;
    private Pattern[] featurePatterns;
    private int[] offsets;
    private boolean[] isNonNegated;
    private boolean tagAllTimesteps;

    /**
     * Create a Pipe for adding conjunctions of specified features.
     * @param thisFeatureName Name of this conjunction feature.
     * @param featureNames String giving name for each subfeature i.
     * @param offsets For each subfeature i, which offset from the current timestep
     *   must i appear at.
     * @param isNonNegated If element i is false, then the negation of the
     *   feature is added to the conjunction.
     */
    public OffsetFeatureConjunction (String thisFeatureName, String[] featureNames,
                                     int[] offsets, boolean[] isNonNegated, boolean tagAllTimesteps)
    {
        this.thisFeatureName = thisFeatureName;
        this.featurePatterns = patternify (featureNames);
        this.offsets = offsets;
        this.isNonNegated = isNonNegated;
        this.tagAllTimesteps = tagAllTimesteps;
    }

    private static boolean[] trueArray (int length)
    {
        boolean[] ret = new boolean[length];
        for (int i = 0; i < length; i++)
            ret[i] = true;
        return ret;
    }

    private Pattern[] patternify (String[] regex)
    {
        Pattern[] retval = new Pattern [regex.length];
        for (int i = 0; i < regex.length; i++) {
            retval [i] = Pattern.compile (regex[i]);
        }
        return retval;
    }

    public OffsetFeatureConjunction (String thisFeatureName, String[] featureNames,
                                     int[] offsets, boolean tagAllTimesteps)
    {
        this (thisFeatureName, featureNames, offsets, trueArray(featureNames.length), tagAllTimesteps);
    }

    public OffsetFeatureConjunction (String thisFeatureName, String[] featureNames, int[] offsets)
    {
        this (thisFeatureName, featureNames, offsets, trueArray(featureNames.length), false);
    }

    public boolean isTagAllTimesteps () { return tagAllTimesteps; }

    public String getFeatureName () { return thisFeatureName; }

    public Pattern[] getFeaturePatterns () { return featurePatterns; }

    public int[] getOffsets () { return offsets; }

    public boolean[] getNonNegated () { return isNonNegated; }

    public Instance pipe (Instance carrier)
    {
        TokenSequence ts = (TokenSequence) carrier.getData();
        int tsSize = ts.size();
        for (int t = 0; t < tsSize; t++) {
            // Check whether the conjunction is true at time step t
            boolean passes = true;
            for (int fnum = 0; fnum < featurePatterns.length; fnum++) {
                int pos = t + offsets[fnum];
                if (!(pos >= 0 && pos < tsSize)) {
                    passes = false;
                    break;
                }
                boolean featurePresent = hasMatchingFeature (ts.get(pos), featurePatterns [fnum]);
                if (featurePresent != isNonNegated [fnum]) {
                    passes = false;
                    break;
                }
            }
            if (passes) {
                if (tagAllTimesteps) {
                    for (int fnum = 0; fnum < featurePatterns.length; fnum++) {
                        int pos = t + offsets[fnum];
                        ts.get(pos).setFeatureValue (thisFeatureName, 1.0);
                    }
                } else {
                    ts.get(t).setFeatureValue (thisFeatureName, 1.0);
                }
            }
        }
        return carrier;
    }

    private boolean hasMatchingFeature (Token token, Pattern pattern)
    {
        PropertyList.Iterator iter = token.getFeatures ().iterator ();
        while (iter.hasNext()) {
            iter.next();
            if (pattern.matcher (iter.getKey()).matches ()) {
                if (iter.getNumericValue() == 1.0) {
                    return true;
                }
            }
        }
        return false;
    }

    // Serialization

    private static final long serialVersionUID = 1;
    private static final int CURRENT_SERIAL_VERSION = 1;
    private static final int NULL_INTEGER = -1;

    private void writeObject (ObjectOutputStream out) throws IOException {
        out.writeInt (CURRENT_SERIAL_VERSION);
        out.writeObject (thisFeatureName);
        out.writeBoolean (tagAllTimesteps);
        int size;
        size = (featurePatterns == null) ? NULL_INTEGER : featurePatterns.length;
        out.writeInt(size);
        if (size != NULL_INTEGER) {
            for (int i = 0; i < size; i++) {
                out.writeObject (featurePatterns[i]);
                out.writeInt (offsets[i]);
                out.writeBoolean (isNonNegated[i]);
            }
        }
    }

    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
        int size;
        int version = in.readInt ();
        thisFeatureName = (String) in.readObject();
        if (version >= 1)
            tagAllTimesteps = in.readBoolean ();
        size = in.readInt();
        if (size == NULL_INTEGER) {
            featurePatterns = null;
            offsets = null;
            isNonNegated = null;
        } else {
            featurePatterns = new Pattern[size];
            offsets = new int[size];
            isNonNegated = new boolean[size];
            for (int i = 0; i < size; i++) {
                featurePatterns[i] = (Pattern) in.readObject();
                offsets[i] = in.readInt();
                isNonNegated[i] = in.readBoolean();
            }
        }
    }
}
6,608
29.040909
144
java
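A hedged sketch of the time-detector example from the class javadoc, reusing the driver and imports from the TokenText sketch above (plus java.util.regex.Pattern and the RegexMatches pipe from this dump); the feature names and regexes follow the TestOffsetFeatureConjunctions class near the end of this section.

Pipe p = new SerialPipes(new Pipe[] {
    new SimpleTaggerSentence2TokenSequence(),
    new TokenText(),
    new RegexMatches("digits", Pattern.compile("[0-9]+")),
    new RegexMatches("ampm", Pattern.compile("[aApP][mM]")),
    // Fires on "4 PM": "digits" at offset 0 and "ampm" at offset 1;
    // tagAllTimesteps=true tags both matching tokens with "time".
    new OffsetFeatureConjunction("time",
        new String[] { "digits", "ampm" }, new int[] { 0, 1 }, true)
});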
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/CountMatchesAlignedWithOffsets.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Count the number of times the provided regular expression matches the token text, and add a feature with the provided name having value equal to the count. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.util.regex.Pattern; import java.util.regex.Matcher; import cc.mallet.pipe.*; import cc.mallet.types.*; public class CountMatchesAlignedWithOffsets extends Pipe { Pattern regex; String feature; int[] offsets; boolean normalizeByMatchCount = false; public CountMatchesAlignedWithOffsets (String featureName, Pattern regex, int[] offsets, boolean normalizeByMatchCount) { this.feature = featureName; this.regex = regex; this.offsets = offsets; this.normalizeByMatchCount = normalizeByMatchCount; } public CountMatchesAlignedWithOffsets (String featureName, Pattern regex, int[] offsets) { this (featureName, regex, offsets, false); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int countMatches; int countAlignedMatches; for (int i = 0; i < ts.size(); i++) { countMatches = 0; countAlignedMatches = 0; Token t = ts.get(i); Matcher matcher = regex.matcher (t.getText()); while (matcher.find ()) { countMatches++; int position = matcher.start(); for (int j = 0; j < offsets.length; j++) { int offset = i + offsets[j]; if (offset >= 0 && offset < ts.size()) { String offsetText = ts.get(offset).getText(); if (offsetText.length() > position) { Matcher offsetMatcher = regex.matcher (offsetText.substring(position)); if (offsetMatcher.lookingAt()) countAlignedMatches++; } } } } if (countAlignedMatches > 0) t.setFeatureValue (feature, (normalizeByMatchCount ? ((double)countAlignedMatches)/countMatches : (double)countAlignedMatches)); } return carrier; } }
2,480
27.517241
90
java
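A hypothetical construction sketch (feature name, regex and offsets are assumptions) for wiring this pipe into a SerialPipes like the one above: it counts digit matches in the current token whose character position also starts a digit match in the tokens one position to either side.

new CountMatchesAlignedWithOffsets("ALIGNED-DIGITS",
    Pattern.compile("[0-9]"), new int[] { -1, 1 })
// or, normalized by the total number of matches in the current token:
new CountMatchesAlignedWithOffsets("ALIGNED-DIGITS",
    Pattern.compile("[0-9]"), new int[] { -1, 1 }, true)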
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/SequencePrintingPipe.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.pipe.tsf; import java.io.PrintWriter; import cc.mallet.pipe.Pipe; import cc.mallet.types.*; import cc.mallet.util.Maths; import cc.mallet.util.PropertyList; /** * Created: Jul 6, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: SequencePrintingPipe.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $ */ public class SequencePrintingPipe extends Pipe { private PrintWriter writer; public SequencePrintingPipe (PrintWriter writer) { this.writer = writer; } public Instance pipe (Instance carrier) { Sequence data = (Sequence) carrier.getData (); Sequence target = (Sequence) carrier.getTarget (); if (data.size () != target.size ()) throw new IllegalArgumentException ("Trying to print into SimpleTagger format, where data and target lengths do not match\n" +"data.length = "+data.size()+", target.length = "+target.size ()); int N = data.size (); if (data instanceof TokenSequence) { throw new UnsupportedOperationException ("Not yet implemented."); } else if (data instanceof FeatureVectorSequence) { FeatureVectorSequence fvs = (FeatureVectorSequence) data; Alphabet dict = (fvs.size() > 0) ? fvs.getFeatureVector (0).getAlphabet () : null; for (int i = 0; i < N; i++) { Object label = target.get (i); writer.print (label); FeatureVector fv = fvs.getFeatureVector (i); for (int loc = 0; loc < fv.numLocations (); loc++) { writer.print (' '); String fname = dict.lookupObject (fv.indexAtLocation (loc)).toString (); double value = fv.valueAtLocation (loc); if (!Maths.almostEquals (value, 1.0)) { throw new IllegalArgumentException ("Printing to SimpleTagger format: FeatureVector not binary at time slice "+i+" fv:"+fv); } writer.print (fname); } writer.println (); } } else { throw new IllegalArgumentException ("Don't know how to print data of type "+data); } writer.println (); return carrier; } }
2,527
32.263158
136
java
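A short sketch, assuming a FeatureVectorSequence fvs and an equally long LabelSequence lbls are already in hand (as in the TestSequencePrintingPipe class at the end of this section), with java.io.PrintWriter, java.io.StringWriter and cc.mallet.types.Instance imported: the pipe writes one "label feature feature ..." line per position, in SimpleTagger format.

StringWriter sw = new StringWriter();
PrintWriter w = new PrintWriter(sw);
Pipe printer = new SequencePrintingPipe(w);
printer.instanceFrom(new Instance(fvs, lbls, null, null)); // fvs, lbls assumed defined
w.close();
System.out.print(sw); // e.g. "LABEL0 feature0 feature1" per time step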
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/RegexMatches.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Add feature with value 1.0 if the entire token text matches the provided regular expression. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.util.regex.Pattern; import java.util.regex.Matcher; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; public class RegexMatches extends Pipe implements Serializable { Pattern regex; String feature; public RegexMatches (String featureName, Pattern regex) { this.feature = featureName; this.regex = regex; } // Too dangerous with both arguments having the same type //public RegexMatches (String regex, String feature) { //this (Pattern.compile (regex), feature); //} public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); for (int i = 0; i < ts.size(); i++) { Token t = ts.get(i); String s = t.getText(); String conS=s; //dealing with ([a-z]+), ([a-z]+, [a-z]+), [a-z]+. if(conS.startsWith("(")) conS = conS.substring(1); if(conS.endsWith(")") || conS.endsWith(".")) conS = conS.substring(0, conS.length()-1); if (regex.matcher (s).matches ()) t.setFeatureValue (feature, 1.0); if(conS.compareTo(s) != 0) { if (regex.matcher (conS).matches ()) t.setFeatureValue (feature, 1.0); } } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(regex); out.writeObject(feature); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); regex = (Pattern) in.readObject(); feature = (String) in.readObject(); } }
2,309
25.551724
92
java
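A brief construction sketch; the feature name and pattern are illustrative. The pipe sets the feature to 1.0 only when the entire token text (or the token with surrounding parentheses or a trailing period stripped) matches the regex.

// Marks capitalized words such as "Boston" or "(Boston)" with CAPITALIZED=1.0.
new RegexMatches("CAPITALIZED", Pattern.compile("[A-Z][a-z]+"))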
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TrieLexiconMembership.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Tests membership of the token text in the provided list of phrases. The lexicon words are provided in a file, one space-separated phrase per line. @author Wei Lee and Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> Modifications by @author Kedar Bellare <a href="mailto:[email protected]">[email protected]</a> for joint extraction. */ package cc.mallet.pipe.tsf; import java.io.*; import java.util.*; import cc.mallet.pipe.*; import cc.mallet.types.*; public class TrieLexiconMembership extends Pipe implements Serializable { // Perhaps give it your own tokenizer? String name; // perhaps make this an array of names boolean ignoreCase; TrieLexicon lexicon; public TrieLexiconMembership(String name, Reader lexiconReader, boolean ignoreCase) { this.name = name; this.lexicon = new TrieLexicon(name, ignoreCase); LineNumberReader reader = new LineNumberReader(lexiconReader); String line; while (true) { try { line = reader.readLine(); } catch (IOException e) { throw new IllegalStateException(); } if (line == null) { break; } else { lexicon.add(line); } } if (lexicon.size() == 0) throw new IllegalArgumentException("Empty lexicon"); } public TrieLexiconMembership(String name, Reader lexiconReader, boolean ignoreCase, boolean includeDelims, String delim) { this.name = name; this.lexicon = new TrieLexicon(name, ignoreCase); LineNumberReader reader = new LineNumberReader(lexiconReader); String line; while (true) { try { line = reader.readLine(); } catch (IOException e) { throw new IllegalStateException(); } if (line == null) { break; } else { lexicon.add(line, includeDelims, delim); } } if (lexicon.size() == 0) throw new IllegalArgumentException("Empty lexicon"); } public TrieLexiconMembership(String name, File lexiconFile, boolean ignoreCase) throws FileNotFoundException { this(name, new BufferedReader(new FileReader(lexiconFile)), ignoreCase); } public TrieLexiconMembership(String name, File lexiconFile, boolean ignoreCase, boolean includeDelims, String delim) throws FileNotFoundException { this(name, new BufferedReader(new FileReader(lexiconFile)), ignoreCase, includeDelims, delim); } public TrieLexiconMembership(File lexiconFile, boolean ignoreCase) throws FileNotFoundException { this(lexiconFile.getName(), lexiconFile, ignoreCase); } public TrieLexiconMembership(File lexiconFile) throws FileNotFoundException { this(lexiconFile.getName(), lexiconFile, true); } public Instance pipe(Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); lexicon.addFeatures(ts); return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(name); out.writeObject(lexicon); out.writeBoolean(ignoreCase); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); this.name = (String) in.readObject(); this.lexicon = (TrieLexicon) in.readObject(); this.ignoreCase = in.readBoolean(); } private static class TrieLexicon implements 
Serializable { static final String END_OF_WORD_TOKEN = "end_of_word"; String name; boolean ignoreCase; Hashtable lex; int size; public TrieLexicon(String name, boolean ignoreCase) { this.name = name; this.ignoreCase = ignoreCase; this.lex = new Hashtable(); this.size = 0; } public void add(String word) { add(word, false, " "); } public void add(String word, boolean includeDelims, String delim) { boolean newWord = false; StringTokenizer st = new StringTokenizer(word, delim, includeDelims); Hashtable currentLevel = lex; while (st.hasMoreTokens()) { String token = st.nextToken(); if (ignoreCase) token = token.toLowerCase(); if (!currentLevel.containsKey(token)) { currentLevel.put(token, new Hashtable()); newWord = true; } currentLevel = (Hashtable) currentLevel.get(token); } currentLevel.put(END_OF_WORD_TOKEN, ""); if (newWord) size++; } public void addFeatures(TokenSequence ts) { int i = 0; while (i < ts.size()) { int j = endOfWord(ts, i); if (j == -1) { i++; } else { for (; i <= j; i++) { Token t = ts.get(i); t.setFeatureValue(name, 1.0); } } } } private int endOfWord(TokenSequence ts, int start) { if (start < 0 || start >= ts.size()) { System.err .println("Lexicon.lastIndexOf: error - out of TokenSequence boundaries"); return -1; } Hashtable currentLevel = lex; int end = -1; for (int i = start; i < ts.size(); i++) { Token t = ts.get(i); String s = t.getText(); if (ignoreCase) s = s.toLowerCase(); currentLevel = (Hashtable) currentLevel.get(s); if (currentLevel == null) { return end; } if (currentLevel.containsKey(END_OF_WORD_TOKEN)) { end = i; } } return end; } public int size() { return size; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(name); out.writeObject(lex); out.writeBoolean(ignoreCase); out.writeInt(size); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); this.name = (String) in.readObject(); this.lex = (Hashtable) in.readObject(); this.ignoreCase = in.readBoolean(); this.size = in.readInt(); } } }
6,300
25.812766
105
java
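A minimal sketch, assuming an in-memory lexicon via java.io.StringReader rather than the usual lexicon file; the feature name and phrases are illustrative. Multi-token phrases are matched as a whole, and every token in a matched span receives the feature.

// One space-separated phrase per line; matching here is case-insensitive.
Reader lexicon = new StringReader("new york\nsan francisco\nboston");
Pipe cityPipe = new TrieLexiconMembership("CITY", lexicon, true);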
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/OffsetPropertyConjunctions.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Create new features from all possible conjunctions with other (possibly position-offset) features. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.Pipe; import cc.mallet.types.Instance; import cc.mallet.types.Token; import cc.mallet.types.TokenSequence; import cc.mallet.util.PropertyList; public class OffsetPropertyConjunctions extends Pipe implements Serializable { int[][] conjunctions; boolean includeOriginalSingletons; String propertyKey; // To include all the old previous singleton features, pass {{0}} // For a conjunction at the current time step, pass {{0,0}} // For a conjunction of current and previous, pass {{0,-1}} // For a conjunction of the current and next two, pass {{0,1,2}} private OffsetPropertyConjunctions (boolean includeOriginalSingletons, String propertyKey, int[][] conjunctions) { this.conjunctions = conjunctions; this.includeOriginalSingletons = includeOriginalSingletons; this.propertyKey = propertyKey; } public OffsetPropertyConjunctions (boolean includeOriginalSingletons, int[][] conjunctions) { this (includeOriginalSingletons, null, conjunctions); } public OffsetPropertyConjunctions (int[][] conjunctions) { this (true, conjunctions); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int tsSize = ts.size(); PropertyList[] oldfs = new PropertyList[ts.size()]; PropertyList[] newfs = new PropertyList[ts.size()]; for (int i = 0; i < tsSize; i++) oldfs[i] = ts.get(i).getFeatures (); if (includeOriginalSingletons) for (int i = 0; i < tsSize; i++) newfs[i] = ts.get(i).getFeatures (); for (int i = 0; i < ts.size(); i++) { //System.out.println ("OffsetPropertyConjunctions: ts index="+i+", conjunction ="); conjunctionList: for (int j = 0; j < conjunctions.length; j++) { // Make sure that the offsets in the conjunction are all available at this position for (int k = 0; k < conjunctions[j].length; k++) { if (conjunctions[j][k] + i < 0 || conjunctions[j][k] + i > tsSize-1 || oldfs[i+conjunctions[j][k]] == null) continue conjunctionList; //System.out.print (" "+conjunctions[j][k]); } //System.out.print ("\n"); // Add the features for this conjunction if (conjunctions[j].length == 1) { int offset = conjunctions[j][0]; if (offset == 0 && includeOriginalSingletons) throw new IllegalArgumentException ("Original singletons already there."); PropertyList.Iterator iter = oldfs[i+offset].iterator(); while (iter.hasNext()) { iter.next(); if (propertyKey != null && !propertyKey.equals(iter.getKey())) continue; String key = iter.getKey() + (offset==0 ? 
"" : "@"+offset); newfs[i] = PropertyList.add (key, iter.getNumericValue(), newfs[i]); } } else if (conjunctions[j].length == 2) { //System.out.println ("token="+ts.getToken(i).getText()+" conjunctionIndex="+j); int offset0 = conjunctions[j][0]; int offset1 = conjunctions[j][1]; PropertyList.Iterator iter0 = oldfs[i+offset0].iterator(); int iter0i = -1; while (iter0.hasNext()) { iter0i++; iter0.next(); if (propertyKey != null && !propertyKey.equals(iter0.getKey())) continue; PropertyList.Iterator iter1 = oldfs[i+offset1].iterator(); int iter1i = -1; while (iter1.hasNext()) { iter1i++; iter1.next(); if (propertyKey != null && !propertyKey.equals(iter1.getKey())) continue; // Avoid redundant doubling of feature space; include only upper triangle //System.out.println ("off0="+offset0+" off1="+offset1+" iter0i="+iter0i+" iter1i="+iter1i); if (offset0 == offset1 && iter1i <= iter0i) continue; //System.out.println (">off0="+offset0+" off1="+offset1+" iter0i="+iter0i+" iter1i="+iter1i); String key = iter0.getKey() + (offset0==0 ? "" : "@"+offset0) +"&"+iter1.getKey() + (offset1==0 ? "" : "@"+offset1); newfs[i] = PropertyList.add (key, iter0.getNumericValue() * iter1.getNumericValue(), newfs[i]); } } } else if (conjunctions[j].length == 3) { int offset0 = conjunctions[j][0]; int offset1 = conjunctions[j][1]; int offset2 = conjunctions[j][2]; PropertyList.Iterator iter0 = oldfs[i+offset0].iterator(); int iter0i = -1; while (iter0.hasNext()) { iter0i++; iter0.next(); if (propertyKey != null && !propertyKey.equals(iter0.getKey())) continue; PropertyList.Iterator iter1 = oldfs[i+offset1].iterator(); int iter1i = -1; while (iter1.hasNext()) { iter1i++; iter1.next(); if (propertyKey != null && !propertyKey.equals(iter1.getKey())) continue; // Avoid redundant doubling of feature space; include only upper triangle if (offset0 == offset1 && iter1i <= iter0i) continue; PropertyList.Iterator iter2 = oldfs[i+offset2].iterator(); int iter2i = -1; while (iter2.hasNext()) { iter2i++; iter2.next(); if (propertyKey != null && !propertyKey.equals(iter2.getKey())) continue; // Avoid redundant doubling of feature space; include only upper triangle if (offset1 == offset2 && iter2i <= iter1i) continue; String key = iter0.getKey() + (offset0==0 ? "" : "@"+offset0) +"&"+iter1.getKey() + (offset1==0 ? "" : "@"+offset1) +"&"+iter2.getKey() + (offset2==0 ? "" : "@"+offset2); newfs[i] = PropertyList.add (key, iter0.getNumericValue() * iter1.getNumericValue() * iter2.getNumericValue(), newfs[i]); } } } } else { throw new UnsupportedOperationException ("Conjunctions of length 4 or more not yet implemented."); } } } // Put the new PropertyLists in place for (int i = 0; i < ts.size(); i++) ts.get(i).setFeatures (newfs[i]); return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private static final int NULL_INTEGER = -1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); int size1, size2; size1 = (conjunctions == null) ? NULL_INTEGER : conjunctions.length; out.writeInt(size1); if (size1 != NULL_INTEGER) { for (int i = 0; i <size1; i++) { size2 = (conjunctions[i] == null) ? 
NULL_INTEGER: conjunctions[i].length; out.writeInt(size2); if (size2 != NULL_INTEGER) { for (int j = 0; j <size2; j++) { out.writeInt(conjunctions[i][j]); } } } } out.writeBoolean(includeOriginalSingletons); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int size1, size2; int version = in.readInt (); size1 = in.readInt(); if (size1 == NULL_INTEGER) { conjunctions = null; } else { conjunctions = new int[size1][]; for (int i = 0; i < size1; i++) { size2 = in.readInt(); if (size2 == NULL_INTEGER) { conjunctions[i] = null; } else { conjunctions[i] = new int[size2]; for (int j = 0; j < size2; j++) { conjunctions[i][j] = in.readInt(); } } } } includeOriginalSingletons = in.readBoolean(); } }
7,873
34.468468
113
java
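A hedged construction sketch using the offset conventions documented in the comments above: {0,-1} conjoins each current-token feature with each previous-token feature, {0,1} does the same with the next token, and the original singleton features are kept.

new OffsetPropertyConjunctions(true, new int[][] { {0, -1}, {0, 1} })
// e.g. produces a feature like "W=4&W=at@-1" when TokenText("W=") ran earlier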
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/FeaturesInWindow.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Create new features from features (matching a regex within a window +/- the current position). For example, <br><code> FeaturesInWindow p = new FeaturesInWindow("PREV-", -1, 1, Pattern.compile("POS-.*"), true) </code> <br> will create a pipe that adds a feature to the current position for each feature in the previous starting with "POS-". So if the previous position has "POS-NN" we add "PREV-POS-NN". The last argument to the constructor is currently ignored. The alternative constructor matches all patterns, so: <br><code> FeaturesInWindow p = new FeaturesInWindow(s, l, r); </code> <br> is equivalent to <br><code> FeaturesInWindow p = new FeaturesInWindow("PREV-", -1, 1, Pattern.compile(".*"), true); </code> <br> but more efficient, since we don't actually check using the Pattern. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import java.util.regex.*; import cc.mallet.pipe.Pipe; import cc.mallet.types.Instance; import cc.mallet.types.Token; import cc.mallet.types.TokenSequence; import cc.mallet.util.PropertyList; public class FeaturesInWindow extends Pipe implements Serializable { String namePrefix, namePrefixLeft; int leftBoundary; int rightBoundary; Pattern featureRegex; boolean includeBeginEndBoundaries; boolean includeCurrentToken = false; private static final int maxWindowSize = 20; private static final PropertyList[] startfs = new PropertyList[maxWindowSize]; private static final PropertyList[] endfs = new PropertyList[maxWindowSize]; static { initStartEndFs (); } private static void initStartEndFs () { for (int i = 0; i < maxWindowSize; i++) { startfs[i] = PropertyList.add ("<START"+i+">", 1.0, null); endfs[i] = PropertyList.add ("<END"+i+">", 1.0, null); } } /** @param namePrefix what to prepend to feature names * @param leftBoundaryOffset left boundary of the window (e.g. -1 means * include the previous word * @param rightBoundaryOffset right boundary for this window (e.g. 
1 means * include the current position, but not the next * @param featureRegex add only for features matching this (null = always match * @param includeBeginEndBoundaries ignored */ public FeaturesInWindow (String namePrefix, int leftBoundaryOffset, int rightBoundaryOffset, Pattern featureRegex, boolean includeBeginEndBoundaries) { this.namePrefix = namePrefix; this.leftBoundary = leftBoundaryOffset; this.rightBoundary = rightBoundaryOffset; this.featureRegex = featureRegex; this.includeBeginEndBoundaries = includeBeginEndBoundaries; } /** equivalent to <br> <code> FeaturesInWindow((namePrefix, leftBoundaryOffset, rightBoundaryOffset, null, true); </code> */ public FeaturesInWindow (String namePrefix, int leftBoundaryOffset, int rightBoundaryOffset) { this (namePrefix, leftBoundaryOffset, rightBoundaryOffset, null, true); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int tsSize = ts.size(); PropertyList[] newFeatures = new PropertyList[tsSize]; for (int i = 0; i < tsSize; i++) { Token t = ts.get (i); PropertyList pl = t.getFeatures(); newFeatures[i] = pl; for (int position = i + leftBoundary; position < i + rightBoundary; position++) { if (position == i && !includeCurrentToken) continue; PropertyList pl2; if (position < 0) pl2 = startfs[-position]; else if (position >= tsSize) pl2 = endfs[position-tsSize]; else pl2 = ts.get(position).getFeatures (); PropertyList.Iterator pl2i = pl2.iterator(); while (pl2i.hasNext()) { pl2i.next(); String key = pl2i.getKey(); if (featureRegex == null || featureRegex.matcher(key).matches()) { newFeatures[i] = PropertyList.add ((namePrefixLeft == null || position-i>0 ? namePrefix : namePrefixLeft)+key, pl2i.getNumericValue(), newFeatures[i]); } } } } for (int i = 0; i < tsSize; i++) { // Put the new PropertyLists in place ts.get (i).setFeatures (newFeatures[i]); } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (namePrefix); out.writeInt (leftBoundary); out.writeInt (rightBoundary); out.writeObject (featureRegex); out.writeBoolean (includeBeginEndBoundaries); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); namePrefix = (String) in.readObject(); leftBoundary = in.readInt (); rightBoundary = in.readInt (); featureRegex = (Pattern) in.readObject(); includeBeginEndBoundaries = in.readBoolean(); } }
5,415
33.062893
116
java
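The javadoc example restated as a sketch that reuses the earlier driver: it copies every POS-* feature of the previous token onto the current one under the prefix PREV- (the POS-* features themselves are assumed to come from an earlier pipe). The second line shows the unfiltered window form.

new FeaturesInWindow("PREV-", -1, 1, Pattern.compile("POS-.*"), true)
// all features from a window of two tokens on each side, no regex filter:
new FeaturesInWindow("WINDOW=", -2, 3)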
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/FeaturesOfFirstMention.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Create new features from features (matching a regex within a window +/- the current position. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import java.util.regex.*; import cc.mallet.pipe.Pipe; import cc.mallet.types.Instance; import cc.mallet.types.Token; import cc.mallet.types.TokenSequence; import cc.mallet.util.PropertyList; public class FeaturesOfFirstMention extends Pipe implements Serializable { String namePrefix; String firstMentionName; // If not null, add this feature if this token is the first mention Pattern featureRegex; // Matching tokentext are candidates for FIRSTMENTION features Pattern filterRegex; // Matching features from the FIRSTMENTION will be included boolean includeFiltered; // If false, then EXCLUDE feature names that match the pattern public FeaturesOfFirstMention (String namePrefix, String firstMentionName, Pattern featureRegex) { this.namePrefix = namePrefix; this.firstMentionName = firstMentionName; this.featureRegex = featureRegex; } public FeaturesOfFirstMention (String namePrefix, Pattern featureRegex, Pattern featureFilterRegex, boolean includeFiltered) { this (namePrefix, null, featureRegex); this.filterRegex = featureFilterRegex; this.includeFiltered = includeFiltered; } public FeaturesOfFirstMention (String namePrefix, Pattern featureRegex) { this (namePrefix, null, featureRegex); } public FeaturesOfFirstMention (String namePrefix) { this (namePrefix, null); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int tsSize = ts.size(); for (int i = tsSize-1; i >= 0; i--) { Token t = ts.get (i); String text = t.getText(); if (featureRegex != null && !featureRegex.matcher(text).matches()) continue; for (int j = 0; j < i; j++) { if (ts.get(j).getText().equals(text)) { PropertyList.Iterator iter = ts.get(j).getFeatures().iterator(); while (iter.hasNext()) { iter.next(); String key = iter.getKey(); if (filterRegex == null || (filterRegex.matcher(key).matches() ^ !includeFiltered)) t.setFeatureValue (namePrefix+key, iter.getNumericValue()); } break; } if (firstMentionName != null) t.setFeatureValue (firstMentionName, 1.0); } } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (namePrefix); out.writeObject (featureRegex); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); namePrefix = (String) in.readObject(); featureRegex = (Pattern) in.readObject(); } }
3,386
30.95283
97
java
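A brief construction sketch (the prefix is an assumption): later tokens whose text is identical to an earlier token copy that first mention's features under the given prefix.

new FeaturesOfFirstMention("FIRSTMENTION=")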
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/CountMatches.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Count the number of times the provided regular expression matches the token text, and add a feature with the provided name having value equal to the count. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.util.regex.Pattern; import java.util.regex.Matcher; import cc.mallet.pipe.*; import cc.mallet.types.*; public class CountMatches extends Pipe { public static final int INTEGER_COUNT = 0; public static final int BINARY_COUNT = 1; public static final int NORMALIZED_COUNT = 2; public static final int OVER_MAX = 3; Pattern regex; String feature; boolean normalizeByCharLength = false; boolean countIsBinary = false; public CountMatches (String featureName, Pattern regex, int countType) { this.feature = featureName; this.regex = regex; if (countType == BINARY_COUNT) countIsBinary = true; else if (countType == NORMALIZED_COUNT) normalizeByCharLength = true; else if (countType >= OVER_MAX) throw new IllegalArgumentException ("Bad countType."); } public CountMatches (String featureName, Pattern regex) { this (featureName, regex, INTEGER_COUNT); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int count; for (int i = 0; i < ts.size(); i++) { count = 0; Token t = ts.get(i); Matcher matcher = regex.matcher (t.getText()); while (matcher.find ()) { count++; if (countIsBinary) break; } if (count > 0) t.setFeatureValue (feature, (normalizeByCharLength ? ((double)count)/t.getText().length() : (double)count)); } return carrier; } }
2,135
26.384615
90
java
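Construction sketches for the three count modes; feature names and regexes are illustrative.

new CountMatches("NUM-DIGITS", Pattern.compile("[0-9]"))                                     // raw match count
new CountMatches("HAS-DASH", Pattern.compile("-"), CountMatches.BINARY_COUNT)                // 1.0 if any match
new CountMatches("DIGIT-FRACTION", Pattern.compile("[0-9]"), CountMatches.NORMALIZED_COUNT)  // count / token length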
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TokenTextNGrams.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Add the token text as a feature with value 1.0. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; public class TokenTextNGrams extends Pipe implements Serializable { static char startBorderChar = '>'; static char endBorderChar = '<'; String prefix; int[] gramSizes; boolean distinguishBorders = false; public TokenTextNGrams (String prefix, int[] gramSizes) { this.prefix=prefix; this.gramSizes = gramSizes; } public TokenTextNGrams () { this ("CHARBIGRAM=", new int[] {2}); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); for (int i = 0; i < ts.size(); i++) { Token t = ts.get(i); String s = t.getText(); if (distinguishBorders) s = startBorderChar + s + endBorderChar; int slen = s.length(); for (int j = 0; j < gramSizes.length; j++) { int size = gramSizes[j]; for (int k = 0; k < slen - size; k++) t.setFeatureValue (s.substring (k, k+size), 1.0);//original was substring(k, size), changed by Fuchun } } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); } }
1,996
24.935065
106
java
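A brief sketch of the default and an explicit configuration; note that, as dumped, the pipe names each feature by the raw n-gram text (the prefix argument is stored but not prepended).

new TokenTextNGrams()                                    // character bigrams per token
new TokenTextNGrams("CHARTRIGRAM=", new int[] { 2, 3 })  // bigrams and trigrams per token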
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TokenTextCharPrefix.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Add the token text as a feature with value 1.0. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; public class TokenTextCharPrefix extends Pipe implements Serializable { String prefix; int prefixLength; public TokenTextCharPrefix (String prefix, int prefixLength) { this.prefix=prefix; this.prefixLength = prefixLength; } public TokenTextCharPrefix () { this ("PREFIX=", 2); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); for (int i = 0; i < ts.size(); i++) { Token t = ts.get(i); String s = t.getText(); if (s.length() > prefixLength) t.setFeatureValue ((prefix + s.substring (0, prefixLength)), 1.0); } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (prefix); out.writeInt (prefixLength); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); prefix = (String) in.readObject(); prefixLength = in.readInt (); } }
1,796
24.309859
92
java
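A one-line construction sketch; the arguments shown are the same defaults used by the no-argument constructor.

new TokenTextCharPrefix("PREFIX=", 2) // token "Meet" gets feature "PREFIX=Me"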
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TokenFirstPosition.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Add a feature that is true if the token is the first in the sequence. @author David Mimno */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; public class TokenFirstPosition extends Pipe implements Serializable { String featureName; public TokenFirstPosition (String featureName) { this.featureName = featureName; } public TokenFirstPosition () { } public Instance pipe (Instance instance) { TokenSequence sequence = (TokenSequence) instance.getData(); Token token = sequence.get(0); token.setFeatureValue(featureName, 1.0); return instance; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (featureName); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); featureName = (String) in.readObject (); } }
1,503
24.931034
92
java
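A one-line sketch; the feature name is an assumption. Only the token at index 0 of each sequence receives the feature.

new TokenFirstPosition("FIRST-TOKEN")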
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/Target2BIOFormat.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; /** Creates a {@link LabelSequence} out of a {@link TokenSequence} that is the target of an {@link Instance}. Labels are constructed out of each Token in the TokenSequence to conform with BIO format (Begin, Inside, Outside of Segment). Prepends a "B-" to Tokens that leave a background state and an "I-" to tags that have the same label as the previous Token. NOTE: This class assumes that subsequent identical tags belong to the same Segment. This means that you cannot have B B I, only B I I. */ public class Target2BIOFormat extends Pipe implements Serializable { String backgroundTag; public Target2BIOFormat () { super (null, new LabelAlphabet()); backgroundTag = "O"; } /** @param background represents Tokens that are not part of a target Segment. */ public Target2BIOFormat (String background) { super (null, new LabelAlphabet()); this.backgroundTag = background; } public Instance pipe (Instance carrier) { Object target = carrier.getTarget(); if (target instanceof TokenSequence) { Alphabet v = getTargetAlphabet (); TokenSequence ts = (TokenSequence) target; int indices[] = new int[ts.size()]; String previousString = this.backgroundTag; for (int i = 0; i < ts.size(); i++) { String s = ts.get (i).getText (); String tag = s; if (!tag.equals (this.backgroundTag)) { if (tag.equals (previousString)) tag = "I-" + tag; else tag = "B-" + tag; } indices[i] = v.lookupIndex (tag); previousString = s; } LabelSequence ls = new LabelSequence ((LabelAlphabet)getTargetAlphabet(), indices); carrier.setTarget(ls); } else { throw new IllegalArgumentException ("Unrecognized target type."); } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (backgroundTag); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); backgroundTag = (String) in.readObject (); } }
2,822
27.806122
92
java
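A brief sketch; the background tag "O" matches the class default. This pipe rewrites the target tag sequence, so it sits alongside the feature pipes rather than altering token features.

// Target tags  O PER PER O LOC  become  O B-PER I-PER O B-LOC.
new Target2BIOFormat("O")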
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/CountMatchesMatching.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Count the number of times the provided regular expression matches the token text, and add a feature with the provided name having value equal to the count. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.util.regex.Pattern; import java.util.regex.Matcher; import java.util.logging.*; import cc.mallet.pipe.*; import cc.mallet.types.*; import cc.mallet.util.MalletLogger; public class CountMatchesMatching extends Pipe { private static Logger logger = MalletLogger.getLogger(CountMatchesMatching.class.getName()); String feature; Pattern regex; Pattern moreSpecificRegex; boolean normalizeByRegexMatches = false; public CountMatchesMatching (String featureName, Pattern regex, Pattern moreSpecificRegex, boolean normalizeByRegexMatches) { this.feature = featureName; this.regex = regex; this.moreSpecificRegex = regex; this.normalizeByRegexMatches = normalizeByRegexMatches; } public CountMatchesMatching (String featureName, Pattern regex, Pattern moreSpecificRegex) { this (featureName, regex, moreSpecificRegex, false); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int count; int moreSpecificCount; for (int i = 0; i < ts.size(); i++) { count = 0; moreSpecificCount = 0; Token t = ts.get(i); Matcher matcher = regex.matcher (t.getText()); while (matcher.find()) { count++; logger.info ("CountMatchesMatching found >"+matcher.group()+"<"); Matcher moreSpecificMatcher = moreSpecificRegex.matcher (t.getText().substring(matcher.start())); if (moreSpecificMatcher.lookingAt ()) { moreSpecificCount++; logger.info ("CountMatchesMatching sound >"+moreSpecificMatcher.group()+"<"); } } if (moreSpecificCount > 0) t.setFeatureValue (feature, (normalizeByRegexMatches ? ((double)moreSpecificCount)/count : moreSpecificCount)); } return carrier; } }
2,489
29.365854
101
java
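A hypothetical construction sketch (feature name and patterns are assumptions): count matches of the broad pattern that also begin a match of the more specific pattern. Note that the constructor, as dumped, assigns the broad regex to both pattern fields, so the third argument is effectively ignored.

new CountMatchesMatching("NUM-WITH-DECIMAL",
    Pattern.compile("[0-9]+"), Pattern.compile("[0-9]+\\.[0-9]+"))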
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/OffsetConjunctions.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Create new features from all possible conjunctions with other (possibly position-offset) features. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import java.util.regex.*; import cc.mallet.pipe.Pipe; import cc.mallet.types.Instance; import cc.mallet.types.Token; import cc.mallet.types.TokenSequence; import cc.mallet.util.PropertyList; public class OffsetConjunctions extends Pipe implements Serializable { int[][] conjunctions; boolean includeOriginalSingletons; // boolean includeBeginEndBoundaries; Pattern featureRegex; static final int maxWindowSize = 50; static final PropertyList[] startfs = new PropertyList[maxWindowSize]; static final PropertyList[] endfs = new PropertyList[maxWindowSize]; static { initStartEndFs (); } private static void initStartEndFs () { for (int i = 0; i < maxWindowSize; i++) { startfs[i] = PropertyList.add ("<START"+i+">", 1.0, null); endfs[i] = PropertyList.add ("<END"+i+">", 1.0, null); } } // To include all the old previous singleton features, pass {{0}} // For a conjunction at the current time step, pass {{0,0}} // For a conjunction of current and previous, pass {{0,-1}} // For a conjunction of the current and next two, pass {{0,1,2}} public OffsetConjunctions (boolean includeOriginalSingletons, Pattern featureRegex, int[][] conjunctions) { this.conjunctions = conjunctions; this.featureRegex = featureRegex; this.includeOriginalSingletons = includeOriginalSingletons; } public OffsetConjunctions (boolean includeOriginalSingletons, int[][] conjunctions) { this (includeOriginalSingletons, null, conjunctions); } public OffsetConjunctions (int[][] conjunctions) { this (true, conjunctions); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); int tsSize = ts.size(); PropertyList[] oldfs = null; PropertyList[] newfs = null; try { oldfs = new PropertyList[ts.size()]; } catch (Exception e) { System.err.println("Exception allocating oldfs: " + e); } try { newfs = new PropertyList[ts.size()]; } catch (Exception e) { System.err.println("Exception allocating newfs: " + e); } for (int i = 0; i < tsSize; i++) oldfs[i] = ts.get(i).getFeatures (); if (includeOriginalSingletons) for (int i = 0; i < tsSize; i++) newfs[i] = ts.get(i).getFeatures (); for (int i = 0; i < tsSize; i++) { for (int j = 0; j < conjunctions.length; j++) { // allow conjunction offsets of length n - awc PropertyList.Iterator[] iters = getOffsetIters (conjunctions, j, tsSize, i, oldfs); if (iters == null) continue; int[] iterIndices = new int[iters.length]; for (int ii=0; ii < iterIndices.length; ii++) iterIndices[ii] = -1; newfs[i] = makeConjunctions (iters, 0, conjunctions, j, tsSize, newfs[i], i, oldfs, iterIndices); } } // Put the new PropertyLists in place for (int i = 0; i < ts.size(); i++) ts.get(i).setFeatures (newfs[i]); return carrier; } /** Recursively makes conjunctions by iterating through features at each offset * @param iters iterate over the PropertyLists at each offset * @param currIndex which offset we're currently on, e..g 1 in the list [0,1,2] * @param conjunctions 
list of conjunctions * @param j which offset list we're currently on, e.g. [0,1,2] in the list [[0,1],[0,1,2]] * @param tsSize size of token sequence * @param newfs new features * @param tsi token sequence index * @param oldfs old features * @param iterIndices counter to keep track how far in each iterator in "iters" * @return new features */ private PropertyList makeConjunctions (PropertyList.Iterator[] iters, int currIndex, int[][] conjunctions, int j, int tsSize, PropertyList newfs, int tsi, PropertyList[] oldfs, int[] iterIndices) { if (iters.length == currIndex) { // base case: add feature for current conjunction of iters // avoid redundant doubling of feature space; include only upper triangle if (redundant (conjunctions, j, iterIndices)) { return newfs; } String newFeature = ""; double newValue = 1.0; for (int i=0; i < iters.length; i++) { String s = iters[i].getKey(); if (featureRegex != null && !featureRegex.matcher(s).matches()) return newfs; newFeature += (i==0 ? "" : "_&_") + s + (conjunctions[j][i]==0 ? "" : ("@" + conjunctions[j][i])); newValue *= iters[i].getNumericValue(); } //System.err.println ("Adding new feature " + newFeature); newfs = PropertyList.add (newFeature, newValue, newfs); } else { // recursive step while (iters[currIndex].hasNext()) { iters[currIndex].next(); iterIndices[currIndex]++; newfs = makeConjunctions (iters, currIndex+1, conjunctions, j, tsSize, newfs, tsi, oldfs, iterIndices); } // reset iterator at currIndex iters[currIndex] = getOffsetIter (conjunctions, j, currIndex, tsSize, tsi, oldfs); iterIndices[currIndex] = -1; } return newfs; } /** Is the current feature redundant? The current feature is * determined by the current values in iterIndices, which tells us * where we are in each PropertyList.Iterator. We do this test to * ensure we only include the upper triange of conjunctions. 
* @param conjunctions conjunction array * @param j which offset we're on * @param iterIndices counters for each PropertyList.Iterator * @return true if feature is redundant */ private boolean redundant (int[][] conjunctions, int j, int[] iterIndices) { for (int i=1; i < iterIndices.length; i++) { if (conjunctions[j][i-1] == conjunctions[j][i] && iterIndices[i] <= iterIndices[i-1]) return true; } return false; } /** Get iterators for each token in this offset */ private PropertyList.Iterator[] getOffsetIters (int [][] conjunctions, int j, int tsSize, int tsi, PropertyList[] oldfs) { PropertyList.Iterator[] iters = new PropertyList.Iterator[conjunctions[j].length]; // get iterators for offsets for (int iteri=0; iteri < iters.length; iteri++) { iters[iteri] = getOffsetIter (conjunctions, j, iteri, tsSize, tsi, oldfs); if (iters[iteri]==null) return null; } return iters; } private PropertyList.Iterator getOffsetIter (int [][] conjunctions, int j, int iteri, int tsSize, int tsi, PropertyList[] oldfs) { PropertyList.Iterator iter; if (tsi+conjunctions[j][iteri] < 0) iter = startfs[-(tsi+conjunctions[j][iteri])-1].iterator(); else if (conjunctions[j][iteri]+tsi > tsSize-1) iter = endfs[tsi+conjunctions[j][iteri]-tsSize].iterator(); else if (oldfs[conjunctions[j][iteri]+tsi] == null) iter = null; else iter = oldfs[tsi+conjunctions[j][iteri]].iterator(); return iter; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private static final int NULL_INTEGER = -1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); int size1, size2; size1 = (conjunctions == null) ? NULL_INTEGER : conjunctions.length; out.writeInt(size1); if (size1 != NULL_INTEGER) { for (int i = 0; i <size1; i++) { size2 = (conjunctions[i] == null) ? NULL_INTEGER: conjunctions[i].length; out.writeInt(size2); if (size2 != NULL_INTEGER) { for (int j = 0; j <size2; j++) { out.writeInt(conjunctions[i][j]); } } } } out.writeBoolean(includeOriginalSingletons); out.writeObject(featureRegex); //add by fuchun } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int size1, size2; int version = in.readInt (); size1 = in.readInt(); // Deserialization doesn't call the unnamed class initializer, so do it here if (startfs[0] == null) initStartEndFs (); if (size1 == NULL_INTEGER) { conjunctions = null; } else { conjunctions = new int[size1][]; for (int i = 0; i < size1; i++) { size2 = in.readInt(); if (size2 == NULL_INTEGER) { conjunctions[i] = null; } else { conjunctions[i] = new int[size2]; for (int j = 0; j < size2; j++) { conjunctions[i][j] = in.readInt(); } } } } includeOriginalSingletons = in.readBoolean(); featureRegex = (Pattern) in.readObject();//add by fuchun } }
8,906
32.996183
107
java
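A sketch mirroring the TestOffsetConjunctions class later in this section: {0,0} conjoins pairs of features at the current position, {0,1} conjoins current with next, and {-1,0,1} builds three-way conjunctions across a window.

new OffsetConjunctions(new int[][] { {0, 0}, {0, 1}, {-1, 0, 1} })
// e.g. with per-character tokens "a b c d ...", token "c" gets "b@-1_&_c_&_d@1"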
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/TokenTextCharSuffix.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Add the token text as a feature with value 1.0. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf; import java.io.*; import cc.mallet.pipe.*; import cc.mallet.types.*; public class TokenTextCharSuffix extends Pipe implements Serializable { String prefix; int suffixLength; public TokenTextCharSuffix (String prefix, int suffixLength) { this.prefix=prefix; this.suffixLength = suffixLength; } public TokenTextCharSuffix () { this ("SUFFIX=", 2); } public Instance pipe (Instance carrier) { TokenSequence ts = (TokenSequence) carrier.getData(); for (int i = 0; i < ts.size(); i++) { Token t = ts.get(i); String s = t.getText(); int slen = s.length(); if (slen > suffixLength) t.setFeatureValue ((prefix + s.substring (slen - suffixLength, slen)), 1.0); } return carrier; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (prefix); out.writeInt (suffixLength); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); prefix = (String) in.readObject(); suffixLength = in.readInt (); } }
1,826
24.375
92
java
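A one-line construction sketch; the three-character suffix is an assumption (the no-argument constructor uses "SUFFIX=" with length 2).

new TokenTextCharSuffix("SUFFIX=", 3) // token "running" gets feature "SUFFIX=ing"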
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/tests/TestOffsetFeatureConjunctions.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.pipe.tsf.tests; import junit.framework.*; import java.util.regex.Pattern; import java.io.IOException; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.PrintInputAndTarget; import cc.mallet.pipe.SerialPipes; import cc.mallet.pipe.SimpleTaggerSentence2TokenSequence; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.pipe.tsf.OffsetFeatureConjunction; import cc.mallet.pipe.tsf.RegexMatches; import cc.mallet.pipe.tsf.TokenText; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Token; import cc.mallet.types.TokenSequence; import cc.mallet.types.tests.TestSerializable; /** * $Id: TestOffsetFeatureConjunctions.java,v 1.1 2007/10/22 21:37:57 mccallum Exp $ */ public class TestOffsetFeatureConjunctions extends TestCase { public TestOffsetFeatureConjunctions (String name) { super (name); } private static String[] doc1 = { "Meet\nme\nat\n4\nPM\ntomorrow" }; public static void testMultiTag () { Pipe mtPipe = new SerialPipes (new Pipe[] { new SimpleTaggerSentence2TokenSequence (), new TokenText (), new RegexMatches ("digits", Pattern.compile ("[0-9]+")), new RegexMatches ("ampm", Pattern.compile ("[aApP][mM]")), new OffsetFeatureConjunction ("time", new String[] { "digits", "ampm" }, new int[] { 0, 1 }, true), new PrintInputAndTarget (), }); Pipe noMtPipe = new SerialPipes (new Pipe[] { new SimpleTaggerSentence2TokenSequence (), new TokenText (), new RegexMatches ("digits", Pattern.compile ("[0-9]+")), new RegexMatches ("ampm", Pattern.compile ("[aApP][mM]")), new OffsetFeatureConjunction ("time", new String[] { "digits", "ampm" }, new int[] { 0, 1 }, false), new PrintInputAndTarget (), }); InstanceList mtLst = new InstanceList (mtPipe); InstanceList noMtLst = new InstanceList (noMtPipe); mtLst.addThruPipe (new ArrayIterator (doc1)); noMtLst.addThruPipe (new ArrayIterator (doc1)); Instance mtInst = mtLst.get (0); Instance noMtInst = noMtLst.get (0); TokenSequence mtTs = (TokenSequence) mtInst.getData (); TokenSequence noMtTs = (TokenSequence) noMtInst.getData (); assertEquals (6, mtTs.size ()); assertEquals (6, noMtTs.size ()); assertEquals (1.0, mtTs.get (3).getFeatureValue ("time"), 1e-15); assertEquals (1.0, noMtTs.get (3).getFeatureValue ("time"), 1e-15); assertEquals (1.0, mtTs.get (4).getFeatureValue ("time"), 1e-15); assertEquals (0.0, noMtTs.get (4).getFeatureValue ("time"), 1e-15); } public static void testMultiTagSerialization () throws IOException, ClassNotFoundException { Pipe origPipe = new SerialPipes (new Pipe[] { new SimpleTaggerSentence2TokenSequence (), new TokenText (), new RegexMatches ("digits", Pattern.compile ("[0-9]+")), new RegexMatches ("ampm", Pattern.compile ("[aApP][mM]")), new OffsetFeatureConjunction ("time", new String[] { "digits", "ampm" }, new int[] { 0, 1 }, true), new PrintInputAndTarget (), }); Pipe mtPipe = (Pipe) TestSerializable.cloneViaSerialization (origPipe); InstanceList mtLst = new InstanceList (mtPipe); mtLst.addThruPipe (new ArrayIterator (doc1)); Instance mtInst = mtLst.get (0); TokenSequence mtTs = (TokenSequence) mtInst.getData (); assertEquals (6, mtTs.size ()); assertEquals (1.0, 
mtTs.get (3).getFeatureValue ("time"), 1e-15); assertEquals (1.0, mtTs.get (4).getFeatureValue ("time"), 1e-15); } /** * @return a <code>TestSuite</code> */ public static TestSuite suite () { return new TestSuite (TestOffsetFeatureConjunctions.class); } public static void main (String[] args) { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestOffsetFeatureConjunctions (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
4,760
34.007353
92
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/tests/TestOffsetConjunctions.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.pipe.tsf.tests; import junit.framework.*; import java.util.regex.*; import cc.mallet.pipe.*; import cc.mallet.pipe.tsf.*; import cc.mallet.types.*; public class TestOffsetConjunctions extends TestCase { public TestOffsetConjunctions (String name) { super (name); } public void testOne () { String input = "abcdefghijklmnopqrstuvwxyz"; Pipe p = new SerialPipes (new Pipe[] { new CharSequence2TokenSequence ("."), //new PrintInput("1:"), new TokenSequenceLowercase (), //new PrintInput("2:"), new TokenText (), //new PrintInput("3:"), new RegexMatches ("V", Pattern.compile("[aeiou]")), //new PrintInput("4:"), new OffsetConjunctions (new int[][] {{0,0}, {0,1}, {-1,0,1}, {-1}, {-2}}), new PrintInput("5:"), }); Instance carrier = p.instanceFrom(new Instance (input, null, null, null)); TokenSequence ts = (TokenSequence) carrier.getData(); assertTrue (ts.size() == 26); assertTrue (ts.get(0).getFeatureValue("a_&_b@1") == 1.0); assertTrue (ts.get(0).getFeatureValue("V_&_a") == 1.0); assertTrue (ts.get(2).getFeatureValue("b@-1_&_c_&_d@1") == 1.0); } public static Test suite () { return new TestSuite (TestOffsetConjunctions.class); } protected void setUp () { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } }
1,884
25.180556
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/pipe/tsf/tests/TestSequencePrintingPipe.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.pipe.tsf.tests; import junit.framework.*; import java.io.PrintWriter; import java.io.StringWriter; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.SerialPipes; import cc.mallet.pipe.tsf.SequencePrintingPipe; import cc.mallet.types.*; /** * Created: Jul 8, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestSequencePrintingPipe.java,v 1.1 2007/10/22 21:37:57 mccallum Exp $ */ public class TestSequencePrintingPipe extends TestCase { public TestSequencePrintingPipe (String name) { super (name); } public static Test suite () { return new TestSuite (TestSequencePrintingPipe.class); } public static void testPrinting () { Alphabet dict = dictOfSize (3); FeatureVector[] vecs = new FeatureVector[] { new FeatureVector (dict, new int[] { 0, 1 }), new FeatureVector (dict, new int[] { 0, 2 }), new FeatureVector (dict, new int[] { 2 }), new FeatureVector (dict, new int[] { 1, 2 }), }; LabelAlphabet ld = labelDictOfSize (3); LabelSequence lbls = new LabelSequence (ld, new int [] { 0, 2, 0, 1}); FeatureVectorSequence fvs = new FeatureVectorSequence (vecs); StringWriter sw = new StringWriter (); PrintWriter w = new PrintWriter (sw); Pipe p = new SequencePrintingPipe (w); // pipe the instance p.instanceFrom(new Instance (fvs, lbls, null, null)); // Do a second one FeatureVectorSequence fvs2 = new FeatureVectorSequence (new FeatureVector[] { new FeatureVector (dict, new int[] { 1 }), new FeatureVector (dict, new int[] { 0 }), }); LabelSequence lbls2 = new LabelSequence (ld, new int[] { 2, 1 }); p.instanceFrom(new Instance (fvs2, lbls2, null, null)); w.close(); assertEquals ("LABEL0 feature0 feature1\n" + "LABEL2 feature0 feature2\n" + "LABEL0 feature2\n" + "LABEL1 feature1 feature2\n" + "\n" + "LABEL2 feature1\n" + "LABEL1 feature0\n\n", sw.toString()); } private static Alphabet dictOfSize (int n) { Alphabet dict = new Alphabet (); for (int i = 0; i < n; i++) { dict.lookupIndex ("feature"+i); } return dict; } private static LabelAlphabet labelDictOfSize (int n) { LabelAlphabet dict = new LabelAlphabet (); for (int i = 0; i < n; i++) { dict.lookupIndex ("LABEL"+i); } return dict; } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestSequencePrintingPipe (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
3,260
28.116071
87
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/evaluation/Evaluator.java
package it.poliba.sisinflab.LODRec.evaluation; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectFloatHashMap; import gnu.trove.set.TIntSet; import gnu.trove.set.hash.TIntHashSet; public class Evaluator { private float evalRatingThresh; private float relUnknownItems; private TIntObjectHashMap<TIntFloatHashMap> validationRatings; private HashSet<Integer> items; private int topK; private String workingDir; private String evalFile; private String itemsMetadataFile; private String itemsMetadataEvalFile; private String trainRatingFile; private String testRatingFile; private float negRatingThresh; private int topN; private List<Integer> topKList; private String recDir; private TIntObjectHashMap<TIntFloatHashMap> itemSim; private TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures; private DecimalFormat df = new DecimalFormat(); private Map<Integer, Map<Integer, Float>> testRatings; private Map<Integer, Map<Integer, Float>> trainRatings; private static final float MIN_SIM = 0.0001f; private Map<Integer, Float> mapItemPopularity; private Map<Integer, List<Integer>> recommendations; private BufferedWriter out; private boolean process_triple_list = false; private Set<Integer> users; private boolean WRITE_HEADER = false; private static Logger logger = LogManager .getLogger(Evaluator.class.getName()); public Evaluator(String validationRatingFile, int topK, float thresh, float relUnknownItems) { this.evalRatingThresh = thresh; this.relUnknownItems = relUnknownItems; this.topK = topK; this.readValidationData(validationRatingFile); } public Evaluator(String workingDir, String evalFile, String itemMetadataFile, String itemMetadataEvalFile, String trainRatingFile, String testRatingFile, float ratingThreshold, float negRatingThresh, float relUnknownItems, int topN) { logger.info("starting evaluation"); this.workingDir = workingDir; this.evalFile = this.evalFile + ".txt"; this.itemsMetadataFile = itemMetadataFile; this.itemsMetadataEvalFile = itemMetadataEvalFile; this.trainRatingFile = trainRatingFile; this.testRatingFile = testRatingFile; this.evalRatingThresh = ratingThreshold; this.negRatingThresh = negRatingThresh; this.relUnknownItems = relUnknownItems; this.topN = topN; init(); } private void init() { this.topKList = Arrays.asList(1, 5, 10, 25, 50, 100, 250); this.recDir = this.workingDir + "out/"; this.itemSim = new TIntObjectHashMap<TIntFloatHashMap>(); map_item_intFeatures = new TIntObjectHashMap<TIntFloatHashMap>(); loadItemIDsFromItemTextualFile(itemsMetadataFile); if (itemsMetadataEvalFile == null) itemsMetadataEvalFile = itemsMetadataFile; loadItemFeatureData(itemsMetadataEvalFile); computeItemSim(); logger.info(items.size() + " items loaded"); readTrainData(trainRatingFile); readTestData(testRatingFile); cmpItemPopularity(); logger.info(testRatingFile + " - " + testRatings.size()); df.setMaximumFractionDigits(5); } private void loadItemIDsFromItemTextualFile(String itemsFile) { BufferedReader reader; try { 
items = new HashSet<Integer>(); reader = new BufferedReader(new FileReader(itemsFile)); String line = ""; int item_id; while ((line = reader.readLine()) != null) { String[] parts = line.split("\t"); item_id = Integer.parseInt(parts[0]); this.items.add(item_id); } reader.close(); } catch (IOException e) { e.printStackTrace(); } } private void loadItemFeatureData(String file_name) { BufferedReader br; try { br = new BufferedReader(new FileReader(file_name)); String line = null; int count = 0; while ((line = br.readLine()) != null) { try { String[] vals = line.split("\t"); int id = Integer.parseInt(vals[0]); if (items.contains(id)) { map_item_intFeatures.put(id, new TIntFloatHashMap()); String[] values = vals[1].trim().split(" "); for (int i = 0; i < values.length; i++) { String[] pair = values[i].split(":"); int fId = Integer.parseInt(pair[0]); float fVal = Float.parseFloat(pair[1]); map_item_intFeatures.get(id).put(fId, fVal); } count++; } } catch (Exception ex) { // System.out.println(ex.getMessage()); // System.out.println(line); } } logger.info("item metadata loaded for evaluation - " + count + " items"); br.close(); } catch (IOException e) { e.printStackTrace(); } } private void computeItemSim() { List<Integer> sortedItems = new ArrayList<Integer>(); sortedItems.addAll(items); Collections.sort(sortedItems); int id1, id2; for (int i = 0; i < sortedItems.size() - 1; i++) { id1 = sortedItems.get(i); this.itemSim.put(id1, new TIntFloatHashMap()); for (int j = i + 1; j < sortedItems.size(); j++) { id2 = sortedItems.get(j); float val = 0; if (map_item_intFeatures.containsKey(id1) & map_item_intFeatures.containsKey(id2)) val = cmpJaccardSim(this.map_item_intFeatures.get(id1) .keySet(), this.map_item_intFeatures.get(id2) .keySet()); if (val > MIN_SIM) { itemSim.get(id1).put(id2, val); } } } } private float cmpJaccardSim(TIntSet tIntSet, TIntSet tIntSet2) { TIntSet inters = new TIntHashSet(); inters.addAll(tIntSet); inters.retainAll(tIntSet2); if (inters.size() == 0) return 0; else return (inters.size() / (float) (tIntSet.size() + tIntSet2.size() - inters .size())); } public void readTestData(String filename) { try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); testRatings = new HashMap<Integer, Map<Integer, Float>>(); boolean add = false; Set<Integer> tmp = new HashSet(); while (line != null) { try { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); if (items != null) { if (items.contains(i)) { add = true; } else add = false; } else { add = true; tmp.add(i); } if (add) { float rel = Float.parseFloat(str[2].trim()); if (!testRatings.containsKey(u)) testRatings.put(u, new HashMap()); testRatings.get(u).put(i, rel); } } catch (Exception ex) { System.out.println(ex.getMessage()); } line = reader.readLine(); } if (items == null) { items = new HashSet<Integer>(); items.addAll(tmp); } } catch (IOException e) { } } public void readTrainData(String filename) { try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); trainRatings = new HashMap<Integer, Map<Integer, Float>>(); while (line != null) { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); if (items.contains(i)) { float rel = Float.parseFloat(str[2].trim()); if (!trainRatings.containsKey(u)) trainRatings.put(u, new HashMap()); trainRatings.get(u).put(i, rel); } line = reader.readLine(); } } catch (IOException 
e) { System.out.println(e.getMessage()); } } public void cmpItemPopularity() { float rel; mapItemPopularity = new HashMap<Integer, Float>(); for (int u : trainRatings.keySet()) { for (int i : trainRatings.get(u).keySet()) { rel = trainRatings.get(u).get(i); if (rel >= this.evalRatingThresh) { if (!mapItemPopularity.containsKey(i)) mapItemPopularity.put(i, 1f); else mapItemPopularity.put(i, mapItemPopularity.get(i) + 1); } } } // Map<Integer, Set<Integer>> tmp = new HashMap<Integer, // Set<Integer>>(); for (int i : mapItemPopularity.keySet()) { // System.out.println("item " + i + ". n.ratings " // + mapItemPopularity.get(i) + ". norm pop " // + mapItemPopularity.get(i) // / (float) (trainRatings.keySet().size())); // int n = (mapItemPopularity.get(i)).intValue(); // // if (!tmp.containsKey(n)) // tmp.put(n, new HashSet()); // tmp.get(n).add(i); float f = mapItemPopularity.get(i) / (float) (trainRatings.keySet().size()); mapItemPopularity.put(i, mapItemPopularity.get(i) / (float) (trainRatings.keySet().size())); } // List<Integer> vals=new ArrayList<Integer>(); // vals.addAll(tmp.keySet()); // Collections.sort(vals,Collections.reverseOrder()); // for(int v:vals){ // System.out.print(v+": "); // for(int ii:tmp.get(v)) // System.out.print(ii+", "); // // System.out.println(); // } } public void readValidationData(String filename) { validationRatings = new TIntObjectHashMap<TIntFloatHashMap>(); items = new HashSet<Integer>(); try { @SuppressWarnings("resource") BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = null; while ((line=reader.readLine()) != null) { try { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); float r = Float.parseFloat(str[2].trim()); if (!validationRatings.containsKey(u)) validationRatings.put(u, new TIntFloatHashMap()); validationRatings.get(u).put(i, r); items.add(i); } catch (Exception ex) { System.out.println(ex.getMessage()); } } } catch (IOException e) { e.printStackTrace(); } } public TObjectFloatHashMap<String> eval(Map<Integer, List<Integer>> recommendations) { TObjectFloatHashMap<String> res = new TObjectFloatHashMap<String>(); int count = 0; float precision = 0, ndcg = 0; for (int uid : recommendations.keySet()) { TIntHashSet relevItems = new TIntHashSet(); List<Integer> sortedRecc = null; if (validationRatings.containsKey(uid)) { for (int i : validationRatings.get(uid).keys()) { if (validationRatings.get(uid).get(i) >= evalRatingThresh) relevItems.add(i); } List<Float> idealRanking = new ArrayList<Float>(); Iterator<Integer> it = items.iterator(); while(it.hasNext()) { int i = it.next(); if (validationRatings.get(uid).containsKey(i)) idealRanking.add(validationRatings.get(uid).get(i)); else idealRanking.add(relUnknownItems); } Collections.sort(idealRanking, Collections.reverseOrder()); if (relevItems.size() > 0) { sortedRecc = recommendations.get(uid); int hits = 0; for (int i = 0; i < topK && i < sortedRecc.size(); i++) { if (relevItems.contains(sortedRecc.get(i))) { hits++; } } float prec = hits / (float) topK; precision += prec; float rel = 0, dcg = 0, idcg = 0; int p = 0; for (int j = 0; j < topK && j < idealRanking.size(); j++) { p = j + 1; idcg += ((Math.pow(2, idealRanking.get(j)) - 1) / (Math .log(p + 1) / Math.log(2))); } p = 0; int item_id; for (int i = 0; i < topK && i < sortedRecc.size(); i++) { rel = 0; item_id = sortedRecc.get(i); p = (i + 1); if (validationRatings.get(uid).containsKey(item_id)) rel = validationRatings.get(uid).get(item_id); else rel 
= relUnknownItems; dcg += ((Math.pow(2, rel) - 1) / (Math.log(p + 1) / Math .log(2))); } float tmp_ndcg = dcg / idcg; ndcg += tmp_ndcg; count++; } } } res.put("P", precision/count); res.put("NDCG", ndcg/count); return res; } public void evalDir(String dir) { this.recDir = this.workingDir + dir; System.out.println("starting evaluation for " + this.recDir); File directory = new File(this.recDir); int inf = -1, sup = -1; File[] files = directory.listFiles(); for (int i = 0; i < files.length; i++) { String filename; filename = files[i].getName(); if (!filename.contains("user_scores")) { this.loadRecommendations(this.recDir + filename); eval(this.recDir + filename, -1, -1); // eval(this.reccDir + filename, -1,25 ); // eval(this.reccDir + filename, -1, 50); // eval(this.reccDir + filename, -1, 100); // // if (cmpBinsBasedEvaluation) { // for (int j = 0; j < bins.size() - 1; j++) { // inf = bins.get(j); // sup = bins.get(j + 1); // eval(this.reccDir + filename, inf, sup); // } // inf = bins.get(bins.size() - 1); // sup = Integer.MAX_VALUE; // eval(this.reccDir + filename, inf, sup); // // } } } } public void eval(String recFile, String evalFile) { this.evalFile = "./" + evalFile; this.recDir = this.workingDir; this.loadRecommendations(recFile); System.out.println("loaded recc from " + recFile); eval(recFile, -1, -1); } public void loadRecommendations(String fileReccData) { try { int k = topKList.get(topKList.size() - 1); try { recommendations = new HashMap<Integer, List<Integer>>(); BufferedReader reader = new BufferedReader(new FileReader( fileReccData)); String line = reader.readLine(); process_triple_list = false; if (line != null) { if (line.split("\t").length == 3) process_triple_list = true; } reader = new BufferedReader(new FileReader(fileReccData)); Map<Integer, Map<Float, Set<Integer>>> tmp = new HashMap(); while (line != null) { if (!process_triple_list) processeRecList(line); else processeTripleList(line, tmp); line = reader.readLine(); } if (tmp.size() > 0) { for (int u : tmp.keySet()) { List<Float> scores = new ArrayList<Float>(); scores.addAll(tmp.get(u).keySet()); Collections.sort(scores, Collections.reverseOrder()); List<Integer> sortedRecc = new ArrayList(); int c = 0; float score; for (int j = 0; j < scores.size() & c < k; j++) { score = scores.get(j); Set<Integer> items = tmp.get(u).get(score); for (int i : items) { sortedRecc.add(i); c++; } } recommendations.put(u, sortedRecc); } } } catch (IOException ex) { ex.printStackTrace(); } } catch (Exception e) { e.printStackTrace(); recommendations = new HashMap<Integer, List<Integer>>(); } } private void processeRecList(String line) { line = line.replace("[", "").replace("]", ""); String[] str = line.split("\t"); int uid = Integer.parseInt(str[0].trim()); if (str.length > 1) { List<Integer> sortedRecc = new ArrayList(); Set<Integer> trainItems = new HashSet(); if (trainRatings.containsKey(uid)) trainItems = this.trainRatings.get(uid).keySet(); String pairsSep = " "; if (!str[1].contains(pairsSep)) pairsSep = ","; String[] pairs = str[1].split(pairsSep); int c = 0; for (String pair : pairs) { c++; if (c > topN) break; String id = pair.substring(0, pair.indexOf(":")); int iditem = Integer.parseInt(id.trim()); if (!trainItems.contains(iditem)) sortedRecc.add(iditem); } recommendations.put(uid, sortedRecc); } } private void processeTripleList(String line, Map<Integer, Map<Float, Set<Integer>>> tmp) { // user item score triples String[] str; if (line.contains(" ")) str = line.split(" "); else str = line.split("\t"); int u = 
Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); float score = Float.parseFloat(str[2].trim()); if (!tmp.containsKey(u)) tmp.put(u, new HashMap()); if (!tmp.get(u).containsKey(score)) tmp.get(u).put(score, new HashSet()); tmp.get(u).get(score).add(i); } public void eval(String fileReccData, int min, int max) { try { users = new HashSet<Integer>(); out = new BufferedWriter(new FileWriter(evalFile, true)); BufferedWriter user_scores = new BufferedWriter(new FileWriter( fileReccData + "_user_scores", false)); float mrr = 0; String header = ""; Map<Integer, Set<Integer>> mapRecommendedItemsInCatalog = new HashMap(); Map<Integer, Float> mapPrecision = new HashMap<Integer, Float>(); Map<Integer, Float> mapRecall = new HashMap<Integer, Float>(); Map<Integer, Float> mapBADPrecision = new HashMap<Integer, Float>(); Map<Integer, Float> mapBADRecall = new HashMap<Integer, Float>(); Map<Integer, Float> mapNDCG = new HashMap<Integer, Float>(); Map<Integer, Float> mapEBN = new HashMap<Integer, Float>(); Map<Integer, Float> mapILD = new HashMap<Integer, Float>(); int count = 0; Set<Integer> usersWithBADItemsInTestSet = new HashSet(); for (int k : topKList) { mapPrecision.put(k, 0f); mapRecall.put(k, 0f); mapBADPrecision.put(k, 0f); mapBADRecall.put(k, 0f); mapNDCG.put(k, 0f); mapEBN.put(k, 0f); mapILD.put(k, 0f); mapRecommendedItemsInCatalog.put(k, new HashSet()); } for (int uid : recommendations.keySet()) { int n_train_ratings = 0; if (trainRatings.containsKey(uid)) n_train_ratings = trainRatings.get(uid).size(); if ((min == -1 & max == -1) || (n_train_ratings >= min & n_train_ratings < max)) { Set<Integer> relevItems = new HashSet<Integer>(); List<Integer> sortedRecc = null; Set<Integer> BADItems = new HashSet<Integer>(); // & trainRatings.containsKey(uid) --> can be removed // actually if (testRatings.containsKey(uid) & trainRatings.containsKey(uid)) { for (int i : testRatings.get(uid).keySet()) { if (testRatings.get(uid).get(i) >= evalRatingThresh) relevItems.add(i); if (testRatings.get(uid).get(i) <= negRatingThresh) BADItems.add(i); } if (relevItems.size() > 0) { users.add(uid); TIntFloatHashMap user_prof = build_user_content_profile(uid); sortedRecc = recommendations.get(uid); boolean found = false; float rr = 0; for (int i = 0; i < sortedRecc.size() & !found; i++) { if (relevItems.contains(sortedRecc.get(i))) { rr = 1 / (float) (i + 1); found = true; } } mrr += rr; user_scores.append("" + uid); List<Float> idealRanking = new ArrayList<Float>(); for (int i : items) { if (testRatings.get(uid).containsKey(i)) idealRanking.add(testRatings.get(uid) .get(i)); else // we do not know the real relevance for // this // item and assign it default value idealRanking.add(relUnknownItems); } Collections.sort(idealRanking, Collections.reverseOrder()); for (int k : topKList) { // ----computation of precision, recall int hits = 0; float nov = 0, pop = 0, ild = 0; Set<Integer> topNitems = new HashSet(); int id1, id2, id, idd; float sim = 0; int ild_count = 0; for (int i = 0; i < k && i < sortedRecc.size(); i++) { if (relevItems.contains(sortedRecc.get(i))) { hits++; } if (mapItemPopularity .containsKey(sortedRecc.get(i))) { pop = mapItemPopularity.get(sortedRecc .get(i)); nov += (-1) * pop * (Math.log(pop) / Math.log(2)); } id = sortedRecc.get(i); for (int j = i + 1; j < k && j < sortedRecc.size(); j++) { idd = sortedRecc.get(j); if (idd > id) { id1 = id; id2 = idd; } else { id2 = id; id1 = idd; } sim = 0; if (itemSim.contains(id1)) { if (itemSim.get(id1).contains(id2)) sim = 
itemSim.get(id1).get(id2); } ild += (1 - sim); ild_count++; } topNitems.add(sortedRecc.get(i)); } mapRecommendedItemsInCatalog.get(k).addAll( topNitems); ild /= (float) ild_count; float prec = hits / (float) k; float rec = hits / (float) relevItems.size(); float BADPrec = 0; float BADRec = 0; if (BADItems.size() > 0) { if (!usersWithBADItemsInTestSet .contains(uid)) usersWithBADItemsInTestSet.add(uid); hits = 0; for (int i = 0; i < k && i < sortedRecc.size(); i++) { if (BADItems .contains(sortedRecc.get(i))) { hits++; } } BADPrec = hits / (float) k; BADRec = hits / (float) BADItems.size(); } float rel = 0, dcg = 0, idcg = 0; int p = 0; for (int j = 0; j < k && j < idealRanking.size(); j++) { p = j + 1; idcg += ((Math.pow(2, idealRanking.get(j)) - 1) / (Math .log(p + 1) / Math.log(2))); } p = 0; int item_id; for (int i = 0; i < k && i < sortedRecc.size(); i++) { rel = 0; item_id = sortedRecc.get(i); p = (i + 1); if (testRatings.get(uid).containsKey( item_id)) rel = testRatings.get(uid).get(item_id); else rel = relUnknownItems; dcg += ((Math.pow(2, rel) - 1) / (Math .log(p + 1) / Math.log(2))); } float ndcg = dcg / idcg; mapPrecision.put(k, mapPrecision.get(k) + prec); mapRecall.put(k, mapRecall.get(k) + rec); mapBADPrecision.put(k, mapBADPrecision.get(k) + BADPrec); mapBADRecall.put(k, mapBADRecall.get(k) + BADRec); mapNDCG.put(k, mapNDCG.get(k) + ndcg); mapEBN.put(k, mapEBN.get(k) + nov); mapILD.put(k, mapILD.get(k) + ild); user_scores.append(" " + k + " " + prec + " " + rec + " " + ndcg + " " + nov + " " + ild + " " + BADPrec + " " + BADRec); } user_scores.append("\n"); // ----------------------------------------- count++; } } } } Date d = new Date(); mrr = mrr / (float) count; // String strBin = "_allUsers"; // // if (max > count) // // max = count; // // if (min != -1 || max != -1) // strBin = "users_Min" + min + "_Max" + max; String sep = "\t"; header = "date" + sep + "name" + sep + "thresh" + sep + "badThresh" + sep + "relevUnknownValues" + sep + "n.users" + sep + "n.items" + sep + "MRR" + sep; String outline = d.toGMTString() + sep + fileReccData + sep + this.evalRatingThresh + sep + this.negRatingThresh + sep + this.relUnknownItems + sep + count + sep + items.size() + sep + formatVal(mrr) + sep; for (int k : topKList) { float prec = mapPrecision.get(k) / count; float rec = mapRecall.get(k) / count; float ndcg = mapNDCG.get(k) / count; float nov = mapEBN.get(k) / count; float uNov = mapILD.get(k) / count; float BADP = mapBADPrecision.get(k) / usersWithBADItemsInTestSet.size(); float BADR = mapBADRecall.get(k) / usersWithBADItemsInTestSet.size(); float item_cat_cov = mapRecommendedItemsInCatalog.get(k).size() / (float) items.size(); header += "P@" + k + sep + "R@" + k + sep + "nDCG@" + k + sep + "EBN@" + k + sep + "ILD@" + k + sep + "ItemCov@" + k + sep + "BAD_P@" + k + sep + "BAD_R@" + k + sep; outline += formatVal(prec) + sep + formatVal(rec) + sep + formatVal(ndcg) + sep + formatVal(nov) + sep + formatVal(uNov) + sep + formatVal(item_cat_cov) + sep + formatVal(BADP) + sep + formatVal(BADR) + sep; } if (WRITE_HEADER) out.append(header + "\n"); out.append(outline + "\n"); System.out.println(header + "\n" + outline); // System.out.println("num users:" + count + ". 
num items:" // + items.size()); out.flush(); out.close(); user_scores.flush(); user_scores.close(); cmpDatasetStats(); } catch (IOException ex) { ex.printStackTrace(); } } private void cmpDatasetStats() { int c = 0; for (int u : trainRatings.keySet()) { if (users.contains(u)) c += trainRatings.get(u).size(); } System.out.println("train set - ratings:" + c); c = 0; for (int u : testRatings.keySet()) { if (users.contains(u)) c += testRatings.get(u).size(); } System.out .println("test set - users:" + users.size() + " ratings:" + c); } private String formatVal(float val) { if (Float.isNaN(val)) val = 0; return df.format(val).replace(".", ","); } private TIntFloatHashMap build_user_content_profile(int uid) { TIntFloatHashMap user_prof = new TIntFloatHashMap(); int c = 0; for (int i : trainRatings.get(uid).keySet()) { if (trainRatings.get(uid).get(i) >= evalRatingThresh) { c++; if (map_item_intFeatures.containsKey(i)) { for (int j : this.map_item_intFeatures.get(i).keys()) { float val = this.map_item_intFeatures.get(i).get(j); user_prof.adjustOrPutValue(j, val, val); } } } } for (int i : user_prof.keys()) user_prof.adjustValue(i, 1 / (float) (c)); return user_prof; } }
25,686
25.37269
87
java
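For readability, here is a small self-contained sketch (illustrative only, not part of lodreclib; the class and method names are assumed) of the DCG/nDCG@k computation that the eval methods above perform inline: DCG accumulates (2^rel - 1) / log2(position + 1) over the top-k recommended items, and nDCG normalises by the DCG of the ideal, descending-relevance ranking.

import java.util.List;

// Minimal sketch of the nDCG@k formula used inline in Evaluator.eval (names assumed)
public class NdcgSketch {
    // rels: relevance of each recommended item, in ranking order
    // idealRels: all known relevances sorted in descending order
    static float ndcgAtK(List<Float> rels, List<Float> idealRels, int k) {
        double dcg = 0, idcg = 0;
        for (int i = 0; i < k && i < rels.size(); i++)
            // position p = i + 1, so the discount is log2(p + 1) = log2(i + 2)
            dcg += (Math.pow(2, rels.get(i)) - 1) / (Math.log(i + 2) / Math.log(2));
        for (int i = 0; i < k && i < idealRels.size(); i++)
            idcg += (Math.pow(2, idealRels.get(i)) - 1) / (Math.log(i + 2) / Math.log(2));
        return idcg == 0 ? 0f : (float) (dcg / idcg);
    }
}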
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/learning/LibLinearLearner.java
package it.poliba.sisinflab.LODRec.learning; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.util.List; import java.util.Map; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import de.bwaldvogel.liblinear.FeatureNode; import de.bwaldvogel.liblinear.Linear; import de.bwaldvogel.liblinear.Model; import de.bwaldvogel.liblinear.Parameter; import de.bwaldvogel.liblinear.Problem; import de.bwaldvogel.liblinear.SolverType; import gnu.trove.iterator.TDoubleIterator; import gnu.trove.iterator.TIntIterator; import gnu.trove.list.array.TDoubleArrayList; import gnu.trove.list.array.TIntArrayList; import gnu.trove.map.hash.TObjectFloatHashMap; import it.poliba.sisinflab.LODRec.evaluation.Evaluator; import it.poliba.sisinflab.LODRec.recommender.Recommender; public class LibLinearLearner { private String workingDir; private String modelFileName; private String trainingFile; private String validationFile; private String user_path_index_file; private int num_features; private TDoubleArrayList listC; private TDoubleArrayList listEps; private TDoubleArrayList listP; private TIntArrayList listSolverType; boolean silent; private String evalMetric; private float relUnknownItems; private float evalRatingThresh; private String validationRatingFile; private FeatureNode[][] XTrain; private double[] yTrain; private int topN = 100; private static Logger logger = LogManager.getLogger(LibLinearLearner.class.getName()); public LibLinearLearner(String workingDir, String validationRatingFile, float evalRatingThresh, boolean silentLearning, String listStrSolverType, String listStrC, String listStrEps, String listStrP, String evalMetric, float relUnknownItems){ this.workingDir = workingDir; this.evalRatingThresh = evalRatingThresh; this.relUnknownItems = relUnknownItems; this.evalMetric = evalMetric; this.silent = silentLearning; this.validationRatingFile = validationRatingFile; modelFileName = workingDir + "bestModel"; listC = new TDoubleArrayList(); String[] parts = listStrC.split(","); for (int i = 0; i < parts.length; i++) { double val = Double.parseDouble(parts[i]); listC.add(val); } listEps = new TDoubleArrayList(); parts = listStrEps.split(","); for (int i = 0; i < parts.length; i++) { double val = Double.parseDouble(parts[i]); listEps.add(val); } listP = new TDoubleArrayList(); parts = listStrP.split(","); for (int i = 0; i < parts.length; i++) { double val = Double.parseDouble(parts[i]); listP.add(val); } listSolverType = new TIntArrayList(); parts = listStrSolverType.split(","); for (int i = 0; i < parts.length; i++) { int val = Integer.parseInt(parts[i]); listSolverType.add(val); } init(); } private void init() { this.user_path_index_file = workingDir + "user_path_index"; countNumFeatures(); this.trainingFile = workingDir + "train"; this.validationFile = workingDir + "validation"; } private void countNumFeatures(){ try { int count = 0; BufferedReader br = new BufferedReader(new FileReader(user_path_index_file)); while(br.readLine()!=null) count++; num_features = count; br.close(); } catch(Exception e){ e.printStackTrace(); num_features = 0; } } private void loadTrainDataset(String file){ logger.info("Loading dataset"); int nRows = computeTrainRows(file); try{ BufferedReader br = new BufferedReader(new FileReader(file)); XTrain = new FeatureNode[nRows][]; yTrain = new double[nRows]; String line = null; int j = 0; FeatureNode[] fn = null; while((line=br.readLine()) != null){ String[] vals = line.split(" "); fn = new 
FeatureNode[vals.length-3]; for(int i = 3; i < vals.length; i++){ String[] ss = vals[i].split(":"); int key = Integer.parseInt(ss[0]); double value = Double.parseDouble(ss[1]); fn[i-3] = new FeatureNode(key, value); } XTrain[j] = fn; yTrain[j] = Double.parseDouble(vals[0]); j++; } br.close(); } catch(Exception e){ e.printStackTrace(); } } private int computeTrainRows(String trainFile) { int nRows = 0; try { BufferedReader br = new BufferedReader(new FileReader(trainFile)); while ((br.readLine()) != null) nRows++; br.close(); } catch (IOException e) { e.printStackTrace(); } return nRows; } private Problem createProblem(){ logger.info("Creating problem"); Problem problem = new Problem(); problem.l = XTrain.length;// - 1; // number of training examples problem.n = num_features + 1; // number of features problem.x = XTrain; // feature nodes problem.y = yTrain; // target values logger.info("Number of training examples: " + problem.l); logger.info("Number of features: " + problem.n); return problem; } public void train(){ try { double bestPerf = 0, bestC = 0, bestEps = 0; Model bestModel = null; int bestModelType = 0; Model model; Recommender pred; TObjectFloatHashMap<String> evalRes = new TObjectFloatHashMap<String>(); String[] str = evalMetric.split("@"); int topK = Integer.parseInt(str[1]); String metric = str[0]; Evaluator evaluator = new Evaluator(validationRatingFile, topK, evalRatingThresh, relUnknownItems); if (silent) Linear.setDebugOutput(null); loadTrainDataset(trainingFile); Problem problem = createProblem(); logger.info("Start learning process"); TIntIterator itS = listSolverType.iterator(); while(itS.hasNext()) { int s = itS.next(); if (s != 11 & s != 12 & s != 13) { listP = new TDoubleArrayList(); listP.add(0.1); } TDoubleIterator itC = listC.iterator(); while(itC.hasNext()) { double c = itC.next(); TDoubleIterator itE = listEps.iterator(); while(itE.hasNext()) { double e = itE.next(); TDoubleIterator itP = listP.iterator(); while(itP.hasNext()) { double p = itP.next(); SolverType solver = SolverType.getById(s); Parameter parameter = new Parameter(solver, c, e, p); logger.info("solver: " + s + ", c:" + c + ", eps:" + e+ ", p:" + p); model = Linear.train(problem, parameter); pred = new Recommender(model, topN); Map<Integer, List<Integer>> validRec = pred .computeRec(this.validationFile); evalRes = evaluator.eval(validRec); float perf = evalRes.get(metric); if (perf >= bestPerf) { bestPerf = perf; bestModel = model; bestModelType = parameter.getSolverType() .getId(); bestC = parameter.getC(); bestEps = parameter.getEps(); } } } } } logger.info("-----------------------------------------------------------------"); logger.info("BEST MODEL " + bestModelType + ". C: " + bestC + ". Eps: " + bestEps + " . Metric " + evalMetric + ": " + bestPerf); File modelFile = new File(modelFileName); bestModel.save(modelFile); } catch(Exception e) { e.printStackTrace(); } } }
7,602
24.009868
87
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/learning/RankLibLearner.java
package it.poliba.sisinflab.LODRec.learning; import java.util.ArrayList; import java.util.List; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import ciir.umass.edu.features.FeatureManager; import ciir.umass.edu.learning.CoorAscent; import ciir.umass.edu.learning.RANKER_TYPE; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.learning.Ranker; import ciir.umass.edu.learning.RankerTrainer; import ciir.umass.edu.learning.boosting.AdaRank; import ciir.umass.edu.learning.boosting.RankBoost; import ciir.umass.edu.learning.tree.LambdaMART; import ciir.umass.edu.learning.tree.RFRanker; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.metric.MetricScorerFactory; import ciir.umass.edu.utilities.MyThreadPool; import gnu.trove.map.hash.TIntObjectHashMap; public class RankLibLearner { private String modelFileName; private String trainingFile; private String validationFile; TIntObjectHashMap<RANKER_TYPE> map_rankers; String trainMetric; private float evalRatingThresh; protected RANKER_TYPE rankerType; int nThreads; boolean useSparseRepresentation = true; private boolean silent; private static Logger logger = LogManager.getLogger(RankLibLearner.class.getName()); public RankLibLearner(String workingDir, int nThreads, int rankerType, float evalRatingThresh, String trainMetric, boolean silent, int nIteration, double tolerance, int nThreshold, int nTrees, int nTreeLeaves, float learningRate, int minLeafSupport, int maxSelCount, int nMaxIteration, int nRestart, boolean regularized, int nRoundToStopEarly, int nBag, float featureSamplingRate, float subSamplingRate) { this.nThreads = nThreads; this.evalRatingThresh = evalRatingThresh; this.trainMetric = trainMetric; this.silent = silent; trainingFile = workingDir + "train"; validationFile = workingDir + "validation"; modelFileName = workingDir + "bestModel"; init(); this.rankerType = map_rankers.get(rankerType); if(this.rankerType.compareTo(RANKER_TYPE.RANKBOOST)==0){ if(nIteration >= 0) RankBoost.nIteration=nIteration; if(nThreshold >= 0) RankBoost.nThreshold = nThreshold; }else if(this.rankerType.compareTo(RANKER_TYPE.ADARANK)==0){ if(nIteration >= 0) AdaRank.nIteration=nIteration; if(tolerance >= 0) AdaRank.tolerance = tolerance; AdaRank.maxSelCount = maxSelCount; }else if(this.rankerType.compareTo(RANKER_TYPE.COOR_ASCENT)==0){ CoorAscent.nMaxIteration=nMaxIteration; if(tolerance >= 0) CoorAscent.tolerance = tolerance; CoorAscent.nRestart = nRestart; CoorAscent.regularized = regularized; }else if(this.rankerType.compareTo(RANKER_TYPE.LAMBDAMART)==0){ if(nTrees >= 0) LambdaMART.nTrees=nTrees; if(nTreeLeaves >= 0) LambdaMART.nTreeLeaves=nTreeLeaves; if(nThreshold >= 0) LambdaMART.nThreshold = nThreshold; LambdaMART.learningRate=learningRate; LambdaMART.nRoundToStopEarly=nRoundToStopEarly; LambdaMART.minLeafSupport = minLeafSupport; }else if(this.rankerType.compareTo(RANKER_TYPE.RANDOM_FOREST)==0){ RFRanker.nBag=nBag; if(nTrees >= 0) RFRanker.nTrees = nTrees; if(nTreeLeaves >= 0) RFRanker.nTreeLeaves=nTreeLeaves; if(nThreshold >= 0) RFRanker.nThreshold = nThreshold; RFRanker.featureSamplingRate = featureSamplingRate; RFRanker.learningRate = learningRate; RFRanker.minLeafSupport = minLeafSupport; RFRanker.subSamplingRate = subSamplingRate; } } public void init() { map_rankers = new TIntObjectHashMap<RANKER_TYPE>(); map_rankers.put(1, RANKER_TYPE.RANKBOOST); map_rankers.put(2, RANKER_TYPE.ADARANK); map_rankers.put(3, RANKER_TYPE.COOR_ASCENT); map_rankers.put(4, RANKER_TYPE.LAMBDAMART); map_rankers.put(5, 
RANKER_TYPE.RANDOM_FOREST); } public void train() { MetricScorer trainScorer = new MetricScorerFactory().createScorer(trainMetric, evalRatingThresh); MyThreadPool.init(nThreads); List<RankList> train = new ArrayList<RankList>(); List<RankList> validation = new ArrayList<RankList>(); int[] features = loadData(trainingFile, validationFile, train, validation); logger.info("Start learning process"); Ranker ranker = new RankerTrainer().train(rankerType, train, validation, features, trainScorer, silent); logger.info("ranker: " + ranker.name() + " - " + trainMetric + " = " + ranker.getScoreOnValidationData()); ranker.save(modelFileName); logger.info("Model saved to: " + modelFileName); MyThreadPool.getInstance().shutdown(); } public int[] loadData(String trainFile, String validFile, List<RankList> train, List<RankList> validation) { int[] features = null; List<RankList> tmp = readInput(trainFile); // read input train.addAll(tmp); tmp = readInput(validFile); // read input validation.addAll(tmp); features = FeatureManager.getFeatureFromSampleVector(train); return features; } public List<RankList> readInput(String inputFile) { return FeatureManager.readInput(inputFile, false, useSparseRepresentation); } }
5,188
26.026042
85
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/tree/NNode.java
package it.poliba.sisinflab.LODRec.tree; import java.util.ArrayList; public class NNode { private String value; private ArrayList<NNode> childs; public NNode(){ this.value = null; this.childs = new ArrayList<NNode>(); } public NNode(String value){ this.value = value; this.childs = new ArrayList<NNode>(); } public void setValue(String value){ this.value = value; } public String getValue(){ return value; } public boolean hasChilds(){ return childs != null && childs.size() > 0; } public void addChilds(ArrayList<NNode> childs){ this.childs = childs; } public ArrayList<NNode> getChilds(){ return childs; } }
757
13.862745
48
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/tree/NTree.java
package it.poliba.sisinflab.LODRec.tree; public class NTree { private NNode root; public NTree(){ root = null; } public boolean isEmpty(){ return root == null; } public void addRoot(NNode root){ this.root = root; } public NNode getRoot(){ return root; } public void print() { print(root, 0); } private void print(NNode v, int level) { if (v == null) return; for (int i = 0; i < level - 1; i++) System.out.print(" "); if (level > 0) System.out.print(" |--"); System.out.println(v.getValue()); for (NNode children : v.getChilds()) { print(children, level + 1); } } }
781
14.959184
46
java
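A minimal usage sketch (not part of the repository; the demo class name and property labels are hypothetical) showing how the NNode and NTree classes above fit together to build and print a small property tree; it assumes it sits in the same package as NNode and NTree.

package it.poliba.sisinflab.LODRec.tree;

import java.util.ArrayList;

// Hypothetical demo of the NNode/NTree classes above
public class NTreeDemo {
    public static void main(String[] args) {
        NNode root = new NNode("root");
        ArrayList<NNode> children = new ArrayList<NNode>();
        children.add(new NNode("dbo:director"));   // hypothetical property label
        children.add(new NNode("dbo:starring"));   // hypothetical property label
        root.addChilds(children);

        NTree tree = new NTree();
        tree.addRoot(root);
        tree.print();   // prints root, then " |--dbo:director" and " |--dbo:starring"
    }
}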
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sparqlDataExtractor/MultiPropQueryExecutor.java
package it.poliba.sisinflab.LODRec.sparqlDataExtractor; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TObjectCharHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.fileManager.TextFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.itemManager.PropertyIndexedItemTree; import it.poliba.sisinflab.LODRec.tree.NNode; import it.poliba.sisinflab.LODRec.tree.NTree; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; import java.util.ArrayList; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import com.hp.hpl.jena.query.Query; import com.hp.hpl.jena.query.QueryExecution; import com.hp.hpl.jena.query.QueryExecutionFactory; import com.hp.hpl.jena.query.QueryFactory; import com.hp.hpl.jena.query.QuerySolution; import com.hp.hpl.jena.query.ResultSet; import com.hp.hpl.jena.query.ResultSetFactory; import com.hp.hpl.jena.rdf.model.Model; /** * This class is part of the LOD Recommender * * This class is used by RDFTripleExtractor for multi-threading RDF triples extraction * * @author Vito Mastromarino */ public class MultiPropQueryExecutor implements Runnable { private String uri; // uri resource private NTree props; // properties map private TObjectIntHashMap<String> props_index; // properties index private String graphURI; // graph uri private String endpoint; // endpoint address private SynchronizedCounter counter; // synchronized counter for metadata index private TObjectIntHashMap<String> metadata_index; // metadata index private TextFileManager textWriter; private Model model; // local dataset model private ItemTree itemTree; // item tree private boolean inverseProps; // directed property private ItemFileManager fileManager; private boolean caching; private static Logger logger = LogManager.getLogger(MultiPropQueryExecutor.class.getName()); /** * Constuctor */ public MultiPropQueryExecutor(String uri, int uri_id, NTree props, TObjectIntHashMap<String> props_index, String graphURI, String endpoint, SynchronizedCounter counter, TObjectIntHashMap<String> metadata_index, TextFileManager textWriter, ItemFileManager fileManager, boolean inverseProps, boolean caching){ this.uri = uri; this.props = props; this.props_index = props_index; this.graphURI = graphURI; this.endpoint = endpoint; this.counter = counter; this.textWriter = textWriter; this.metadata_index = metadata_index; this.model = null; this.fileManager = fileManager; this.inverseProps = inverseProps; this.itemTree = new PropertyIndexedItemTree(uri_id); this.caching = caching; } /** * Constuctor for local dataset query */ public MultiPropQueryExecutor(String uri, int uri_id, NTree props, TObjectIntHashMap<String> props_index, String graphURI, String endpoint, SynchronizedCounter counter, TObjectIntHashMap<String> metadata_index, TextFileManager textWriter, ItemFileManager fileManager, boolean inverseProps, boolean caching, Model model){ this(uri, uri_id, props, props_index, graphURI, endpoint, counter, metadata_index, textWriter, fileManager, inverseProps, caching); this.model = model; } /** * Start RDF triple extraction */ public void run(){ logger.info(uri + ": start data extraction"); long start = System.currentTimeMillis(); NNode root = props.getRoot(); // execute query if(caching) execWithCaching(root, "", uri); else exec(root, "", uri); 
if(itemTree.size()>0){ // text file writing if(textWriter != null) textWriter.write(itemTree.serialize()); // binary file writing if(fileManager != null) fileManager.write(itemTree); } long stop = System.currentTimeMillis(); logger.info(uri + ": data extraction terminated in [sec]: " + ((stop - start) / 1000)); } /** * Execute RDF triple extraction */ private void exec(NNode node, String list_props, String uri){ if(node.hasChilds()){ ArrayList<String> props = new ArrayList<String>(); for (NNode children : node.getChilds()) props.add(children.getValue()); THashMap<String, TObjectCharHashMap<String>> res = new THashMap<String, TObjectCharHashMap<String>>(); res.putAll(runQuery(uri, props)); if(res.size()>0){ for(NNode n : node.getChilds()){ String p = n.getValue(); if(res.containsKey(p)){ String p_index; for(String uri_res : res.get(p).keySet()){ p_index = String.valueOf(props_index.get(p)); if(inverseProps){ if(res.get(p).get(uri_res)=='s') p_index = String.valueOf(props_index.get("inv_" + p)); } if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); exec(n, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, extractKey(uri_res)); exec(n, p_index, uri_res); } } } } } } } /** * Execute RDF triple extraction with caching */ private void execWithCaching(NNode node, String list_props, String uri){ if(node.hasChilds()){ ArrayList<String> props = new ArrayList<String>(); THashMap<String, TObjectCharHashMap<String>> res = new THashMap<String, TObjectCharHashMap<String>>(); for (NNode children : node.getChilds()){ String id_prop = String.valueOf(props_index.get(children.getValue())); if(!RDFTripleExtractor.cache.containsKey(uri)) props.add(children.getValue()); // get prop results from cache else{ logger.debug("Cache: " + uri); if(RDFTripleExtractor.cache.get(uri).containsKey(id_prop)){ for(String uri_res : RDFTripleExtractor.cache.get(uri).get(id_prop)){ if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + id_prop, extractKey(uri_res)); execWithCaching(children, list_props + "-" + id_prop, uri_res); } else{ itemTree.addBranches(id_prop, extractKey(uri_res)); execWithCaching(children, id_prop, uri_res); } } } if(inverseProps){ id_prop = String.valueOf(props_index.get("inv_" + children.getValue())); if(RDFTripleExtractor.cache.get(uri).containsKey(id_prop)){ for(String uri_res : RDFTripleExtractor.cache.get(uri).get(id_prop)){ if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + id_prop, extractKey(uri_res)); execWithCaching(children, list_props + "-" + id_prop, uri_res); } else{ itemTree.addBranches(id_prop, extractKey(uri_res)); execWithCaching(children, id_prop, uri_res); } } } } } } if(props.size()>0){ res.putAll(runQuery(uri, props)); if(res.size()>0){ RDFTripleExtractor.cache.putIfAbsent(uri, new ConcurrentHashMap<String, CopyOnWriteArrayList<String>>()); for(NNode n : node.getChilds()){ String p = n.getValue(); if(res.containsKey(p)){ String p_index; for(String uri_res : res.get(p).keySet()){ p_index = String.valueOf(props_index.get(p)); if(inverseProps){ if(res.get(p).get(uri_res)=='s') p_index = String.valueOf(props_index.get("inv_" + p)); } RDFTripleExtractor.cache.get(uri).putIfAbsent(p_index, new CopyOnWriteArrayList<String>()); RDFTripleExtractor.cache.get(uri).get(p_index).add(uri_res); if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(n, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, 
extractKey(uri_res)); execWithCaching(n, p_index, uri_res); } } } } } } } } /** * Run SPARQL query * @param uri uri resource * @param props list of properties * @return results map: uri-s (if uri is a subject), uri-o (if uri is an object) */ private THashMap<String, TObjectCharHashMap<String>> runQuery(String uri, ArrayList<String> props){ THashMap<String, TObjectCharHashMap<String>> results = new THashMap<String, TObjectCharHashMap<String>>(); Query query; String q; q = "SELECT * WHERE { "; for(String p : props) q += "{{?s <" + p + "> <" + uri + ">. FILTER isIRI(?s). } UNION " + "{<" + uri + "> <" + p + "> ?o. FILTER isIRI(?o). } BIND (<" + p + "> AS ?p).} UNION "; q = q.substring(0, q.length()-6) + "}"; logger.debug(q); try { query = QueryFactory.create(q); results = executeQuery(query); } catch (Exception e) { e.printStackTrace(); } return results; } /** * Execute SPARQL query * @param query sparql query * @param p property * @return results map: prop[uri-s] (if uri is a subject), prop[uri-o] (if uri is an object) */ private THashMap<String, TObjectCharHashMap<String>> executeQuery(Query query) { THashMap<String, TObjectCharHashMap<String>> results = new THashMap<String, TObjectCharHashMap<String>>(); QueryExecution qexec = null; if(model==null){ if(graphURI == null) qexec = QueryExecutionFactory.sparqlService(endpoint, query); // remote query else qexec = QueryExecutionFactory.sparqlService(endpoint, query, graphURI); // remote query } else qexec = QueryExecutionFactory.create(query, model); // local query try{ ResultSet res = ResultSetFactory.copyResults(qexec.execSelect()) ; QuerySolution qs; String n; String p; while (res.hasNext()) { qs = res.next(); p = qs.get("p").toString(); results.putIfAbsent(p, new TObjectCharHashMap<String>()); if (qs.get("o") == null) { // get subject n = qs.get("s").toString(); // consider only the type "yago" if (!p.contains("type")) results.get(p).put(n, 's'); // target as subject else { if (n.contains("yago")) results.get(p).put(n, 's'); // target as subject } } else { // get object n = qs.get("o").toString(); // consider only the type "yago" if (!p.contains("type")) results.get(p).put(n, 'o'); // target as object else { if (n.contains("yago")) results.get(p).put(n, 'o'); // target as object } } } } catch(Exception e){ e.printStackTrace(); } finally{ qexec.close(); } return results; } /** * Extract key from metadata index * @param s string to index * @return index of s */ private int extractKey(String s) { synchronized (metadata_index) { if(metadata_index.containsKey(s)){ return metadata_index.get(s); } else{ int id = counter.value(); metadata_index.put(s, id); return id; } } } }
11,374
25.392111
108
java
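As a worked illustration of the string concatenation in runQuery above (the resource and property URIs here are placeholders, and whitespace is added for readability), querying one resource for two properties yields a SPARQL query of roughly this shape, where each property contributes a subject-side and an object-side UNION branch and binds ?p to that property:

SELECT * WHERE {
  {{?s <http://example.org/prop1> <http://example.org/resource/X>. FILTER isIRI(?s). } UNION
   {<http://example.org/resource/X> <http://example.org/prop1> ?o. FILTER isIRI(?o). } BIND (<http://example.org/prop1> AS ?p).}
  UNION
  {{?s <http://example.org/prop2> <http://example.org/resource/X>. FILTER isIRI(?s). } UNION
   {<http://example.org/resource/X> <http://example.org/prop2> ?o. FILTER isIRI(?o). } BIND (<http://example.org/prop2> AS ?p).}
}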
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sparqlDataExtractor/RDFTripleExtractor.java
package it.poliba.sisinflab.LODRec.sparqlDataExtractor; import gnu.trove.map.hash.TObjectIntHashMap; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.fileManager.TextFileManager; import it.poliba.sisinflab.LODRec.tree.NTree; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; import it.poliba.sisinflab.LODRec.utils.TextFileUtils; import it.poliba.sisinflab.LODRec.utils.XMLUtils; import java.io.InputStream; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import com.hp.hpl.jena.query.Dataset; import com.hp.hpl.jena.query.ReadWrite; import com.hp.hpl.jena.rdf.model.Model; import com.hp.hpl.jena.tdb.TDBFactory; import com.hp.hpl.jena.tdb.TDBLoader; import com.hp.hpl.jena.tdb.base.file.Location; import com.hp.hpl.jena.tdb.sys.TDBInternal; import com.hp.hpl.jena.util.FileManager; /** * This class is part of the LOD Recommender * * This class extracts RDF triples * * @author Vito Mastromarino */ public class RDFTripleExtractor { private String workingDir; // working directory private int nThreads; // threads number private boolean jenatdb; // use jena tdb private String endpoint; // endpoint sparql address private String graphURI; // graph uri private String tdbDirectory; // local TDB directory private String datasetFile; // local dataset private Model model; // local dataset model private String inputFile; // input filename private String propsFile; // properties filename private boolean outputBinaryFormat; // output metadata format private String metadataFile; // output metadata filename private boolean outputTextFormat; // output text format private String textFile; // metadata text file private boolean inverseProps; // directed property private boolean caching; // caching private boolean append; // append to previous extraction private TObjectIntHashMap<String> URI_ID; private TObjectIntHashMap<String> props_index; // properties index private NTree props; // properties map private TObjectIntHashMap<String> metadata_index; // metadata index private String uriIdIndexFile; // uri-id index file private String propsIndexFile; // properties index file private String metadataIndexFile; // metadata index file private SynchronizedCounter counter; // synchronized counter for metadata index public static ConcurrentHashMap<String, ConcurrentHashMap<String, CopyOnWriteArrayList<String>>> cache; private static Logger logger = LogManager .getLogger(RDFTripleExtractor.class.getName()); /** * Constuctor */ public RDFTripleExtractor(String workingDir, String itemMetadataFile, String inputItemURIsFile, String endpoint, String graphURI, String tdbDirectory, String datasetFile, Boolean inverseProps, Boolean outputTextFormat, Boolean outputBinaryFormat, String propsFile, Boolean caching, Boolean append, int nThreads, boolean jenatdb) { this.workingDir=workingDir; this.metadataFile = itemMetadataFile; this.append = append; this.caching = caching; this.datasetFile = datasetFile; this.endpoint = endpoint; this.graphURI = graphURI; this.inputFile = inputItemURIsFile; this.tdbDirectory = tdbDirectory; this.outputBinaryFormat = outputBinaryFormat; this.outputTextFormat = outputTextFormat; this.propsFile = propsFile; this.inverseProps = inverseProps; this.nThreads = nThreads; this.jenatdb = jenatdb; init(); } private void init() { 
this.model = null; this.textFile = this.metadataFile; this.propsIndexFile = this.workingDir + "props_index"; this.metadataIndexFile = metadataFile + "_index"; this.uriIdIndexFile = workingDir + "input_uri_id"; // load input uri file loadInputFile(); // load properties file loadProps(); // load metadata index loadMetadataIndex(); // if jenatdb is true load local dataset if (jenatdb) { TDBloading(); logger.debug("Using TDB."); } // if caching is true initialize cache hash map if (caching) { cache = new ConcurrentHashMap<String, ConcurrentHashMap<String, CopyOnWriteArrayList<String>>>(); logger.debug("Caching enabled."); } } /** * Load jena TDB */ private void TDBloading(){ logger.info("TDB loading"); // create model from tdb Dataset dataset = TDBFactory.createDataset(tdbDirectory); // assume we want the default model, or we could get a named model here dataset.begin(ReadWrite.READ); model = dataset.getDefaultModel(); dataset.end() ; // if model is null load local dataset into jena TDB if(model == null) TDBloading(datasetFile); } /** * Load local dataset into jena TDB */ private void TDBloading(String fileDump){ logger.info("TDB creation"); // create tdb from .nt local file FileManager fm = FileManager.get(); fm.addLocatorClassLoader(RDFTripleExtractor.class.getClassLoader()); InputStream in = fm.open(fileDump); Location location = new Location (tdbDirectory); // load some initial data try{ TDBLoader.load(TDBInternal.getBaseDatasetGraphTDB(TDBFactory.createDatasetGraph(location)), in, true); } catch(Exception e){ logger.error("TDB loading error: " + e.getMessage()); } logger.info("TDB loading"); //create model from tdb Dataset dataset = TDBFactory.createDataset(tdbDirectory); // assume we want the default model, or we could get a named model here dataset.begin(ReadWrite.READ) ; model = dataset.getDefaultModel(); dataset.end(); } /** * Load properties from XML file */ private void loadProps(){ props = new NTree(); props_index = new TObjectIntHashMap<String>(); try { // load properties map from XML file XMLUtils.parseXMLFile(propsFile, props_index, props, inverseProps); logger.debug("Properties tree loading."); // write properties index file TextFileUtils.writeData(propsIndexFile, props_index); } catch (Exception e1) { // TODO Auto-generated catch block e1.printStackTrace(); } } /** * Load input items file */ private void loadInputFile(){ URI_ID = new TObjectIntHashMap<String>(); // load [uri: id] from input file TextFileUtils.loadInputURIs(inputFile, URI_ID, append, uriIdIndexFile); logger.debug("Input items loading: " + URI_ID.size() + " URIs loaded."); } /** * Load metadata index */ private void loadMetadataIndex(){ metadata_index = new TObjectIntHashMap<String>(); if(append){ TextFileUtils.loadIndex(metadataIndexFile, metadata_index); logger.debug("Metadata index loading: " + metadata_index.size() + " metadata loaded."); } counter = new SynchronizedCounter(metadata_index.size()); } /** * Run RDF triple extraction */ public void run(){ //nThreads = 4; logger.debug("Threads number: " + nThreads); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); logger.info("Resources to be queried: " + this.URI_ID.size()); try{ TextFileManager textWriter = null; if(outputTextFormat) textWriter = new TextFileManager(textFile, append); ItemFileManager fileManager = null; if(outputBinaryFormat){ if(append) fileManager = new ItemFileManager(metadataFile, ItemFileManager.APPEND); else fileManager = new ItemFileManager(metadataFile, ItemFileManager.WRITE); } for (String uri : 
this.URI_ID.keySet()) { Runnable worker; // create worker thread worker = new QueryExecutor(uri, URI_ID.get(uri), props, props_index, graphURI, endpoint, counter, metadata_index, textWriter, fileManager, inverseProps, caching, model); executor.execute(worker); } // This will make the executor accept no new threads // and finish all existing threads in the queue executor.shutdown(); // Wait until all threads are done executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); if(textWriter!=null) textWriter.close(); if(fileManager!=null) fileManager.close(); } catch(Exception e){ e.printStackTrace(); } // write metadata index file TextFileUtils.writeData(metadataIndexFile, metadata_index); } }
8,315
26.812709
105
java
lodreclib
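A minimal sketch of how the RDFTripleExtractor class above might be driven end to end. Only the constructor signature and the public run() method come from the class itself; every path, the SPARQL endpoint and the flag values below are illustrative assumptions.

import it.poliba.sisinflab.LODRec.sparqlDataExtractor.RDFTripleExtractor;

public class ExtractionExample {
    public static void main(String[] args) {
        RDFTripleExtractor extractor = new RDFTripleExtractor(
                "./data/",                   // workingDir (hypothetical)
                "./data/metadata",           // itemMetadataFile (output prefix)
                "./data/input_uris",         // inputItemURIsFile (uri/id pairs)
                "http://dbpedia.org/sparql", // endpoint
                null,                        // graphURI (null -> default graph)
                "./tdb/",                    // tdbDirectory (used only when jenatdb is true)
                "./dump.nt",                 // datasetFile loaded into TDB on first use
                true,                        // inverseProps
                true,                        // outputTextFormat
                true,                        // outputBinaryFormat
                "props.xml",                 // propsFile (property tree)
                true,                        // caching
                false,                       // append
                4,                           // nThreads
                false);                      // jenatdb (false -> query the remote endpoint)
        extractor.run();                     // one QueryExecutor worker per input URI
    }
}

With jenatdb set to true the extractor queries a local Jena TDB store under tdbDirectory instead of the endpoint, loading datasetFile into it the first time the store is empty.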
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sparqlDataExtractor/TreeQueryExecutor.java
package it.poliba.sisinflab.LODRec.sparqlDataExtractor; import gnu.trove.map.hash.TObjectCharHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.fileManager.TextFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.itemManager.PropertyIndexedItemTree; import it.poliba.sisinflab.LODRec.tree.NNode; import it.poliba.sisinflab.LODRec.tree.NTree; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import com.hp.hpl.jena.query.Query; import com.hp.hpl.jena.query.QueryExecution; import com.hp.hpl.jena.query.QueryExecutionFactory; import com.hp.hpl.jena.query.QueryFactory; import com.hp.hpl.jena.query.QuerySolution; import com.hp.hpl.jena.query.ResultSet; import com.hp.hpl.jena.query.ResultSetFactory; import com.hp.hpl.jena.rdf.model.Model; /** * This class is part of the LOD Recommender * * This class is used by RDFTripleExtractor for multi-threading RDF triples extraction * * @author Vito Mastromarino */ public class TreeQueryExecutor implements Runnable { private String uri; // uri resource private NTree props; // properties map private TObjectIntHashMap<String> props_index; // properties index private String graphURI; // graph uri private String endpoint; // endpoint address private SynchronizedCounter counter; // synchronized counter for metadata index private TObjectIntHashMap<String> metadata_index; // metadata index private TextFileManager textWriter; private Model model; // local dataset model private ItemTree itemTree; // item tree private boolean inverseProps; // directed property private ItemFileManager fileManager; private boolean caching; private static Logger logger = LogManager.getLogger(TreeQueryExecutor.class.getName()); /** * Constuctor */ public TreeQueryExecutor(String uri, int uri_id, NTree props, TObjectIntHashMap<String> props_index, String graphURI, String endpoint, SynchronizedCounter counter, TObjectIntHashMap<String> metadata_index, TextFileManager textWriter, ItemFileManager fileManager, boolean inverseProps, boolean caching){ this.uri = uri; this.props = props; this.props_index = props_index; this.graphURI = graphURI; this.endpoint = endpoint; this.counter = counter; this.textWriter = textWriter; this.metadata_index = metadata_index; this.model = null; this.fileManager = fileManager; this.inverseProps = inverseProps; this.itemTree = new PropertyIndexedItemTree(uri_id); this.caching = caching; } /** * Constuctor for local dataset query */ public TreeQueryExecutor(String uri, int uri_id, NTree props, TObjectIntHashMap<String> props_index, String graphURI, String endpoint, SynchronizedCounter counter, TObjectIntHashMap<String> metadata_index, TextFileManager textWriter, ItemFileManager fileManager, boolean inverseProps, boolean caching, Model model){ this(uri, uri_id, props, props_index, graphURI, endpoint, counter, metadata_index, textWriter, fileManager, inverseProps, caching); this.model = model; } /** * Start RDF triple extraction */ public void run(){ logger.info(uri + ": start data extraction"); long start = System.currentTimeMillis(); NNode root = props.getRoot(); // execute query if(caching) execWithCaching(root, "", uri); else exec(root, "", uri); if(itemTree.size()>0){ // text file writing if(textWriter != null) 
textWriter.write(itemTree.serialize()); // binary file writing if(fileManager != null) fileManager.write(itemTree); } long stop = System.currentTimeMillis(); logger.info(uri + ": data extraction terminated in [sec]: " + ((stop - start) / 1000)); } /** * Execute RDF triple extraction */ private void exec(NNode node, String list_props, String uri){ if(node.hasChilds()){ String p; for (NNode children : node.getChilds()) { p = children.getValue(); String p_index; TObjectCharHashMap<String> result = new TObjectCharHashMap<String>(); result.putAll(runQuery(uri, "<" + p + ">")); if(result.size()>0){ for(String uri_res : result.keySet()){ p_index = String.valueOf(props_index.get(p)); if(inverseProps){ if(result.get(uri_res)=='s') p_index = String.valueOf(props_index.get("inv_" + p)); } int id_res = extractKey(uri_res); if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index + "#" + id_res, id_res); exec(children, list_props + "-" + p_index + "#" + id_res, uri_res); } else{ itemTree.addBranches(p_index + "#" + id_res, id_res); exec(children, p_index + "#" + id_res, uri_res); } } } } } } /** * Execute RDF triple extraction with caching */ private void execWithCaching(NNode node, String list_props, String uri){ if(node.hasChilds()){ String p; for (NNode children : node.getChilds()) { p = children.getValue(); String p_index = String.valueOf(props_index.get(p)); TObjectCharHashMap<String> result = new TObjectCharHashMap<String>(); if(!RDFTripleExtractor.cache.containsKey(uri) || !RDFTripleExtractor.cache.get(uri).containsKey(p_index)){ result.putAll(runQuery(uri, "<" + p + ">")); if(result.size()>0){ RDFTripleExtractor.cache.putIfAbsent(uri, new ConcurrentHashMap<String, CopyOnWriteArrayList<String>>()); for(String uri_res : result.keySet()){ p_index = String.valueOf(props_index.get(p)); if(inverseProps){ if(result.get(uri_res)=='s') p_index = String.valueOf(props_index.get("inv_" + p)); } RDFTripleExtractor.cache.get(uri).putIfAbsent(p_index, new CopyOnWriteArrayList<String>()); RDFTripleExtractor.cache.get(uri).get(p_index).add(uri_res); if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(children, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, extractKey(uri_res)); execWithCaching(children, p_index, uri_res); } } } } // uri in cache else{ logger.debug("Cache: " + uri); for(String uri_res : RDFTripleExtractor.cache.get(uri).get(p_index)){ if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(children, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, extractKey(uri_res)); execWithCaching(children, p_index, uri_res); } } if(inverseProps){ p_index = String.valueOf(props_index.get("inv_" + p)); if(RDFTripleExtractor.cache.get(uri).containsKey(p_index)){ for(String uri_res : RDFTripleExtractor.cache.get(uri).get(p_index)){ if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(children, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, extractKey(uri_res)); execWithCaching(children, p_index, uri_res); } } } } } } } } /** * Run SPARQL query * @param uri uri resource * @param p property * @return results map: uri-s (if uri is a subject), uri-o (if uri is an object) */ private TObjectCharHashMap<String> runQuery(String uri, String p){ TObjectCharHashMap<String> results = new TObjectCharHashMap<String>(); Query query; String q; q = 
"SELECT * WHERE {{?s " + p + " <" + uri + ">. FILTER isIRI(?s). } UNION " + "{<" + uri + "> " + p + " ?o ." + " FILTER isIRI(?o). }} "; logger.debug(q); try { query = QueryFactory.create(q); results = executeQuery(query, p); } catch (Exception e) { e.printStackTrace(); } return results; } /** * Execute SPARQL query * @param query sparql query * @param p property * @return results map: uri-s (if uri is a subject), uri-o (if uri is an object) */ private TObjectCharHashMap<String> executeQuery(Query query, String p) { TObjectCharHashMap<String> results = new TObjectCharHashMap<String>(); QueryExecution qexec = null; if(model==null){ if(graphURI == null) qexec = QueryExecutionFactory.sparqlService(endpoint, query); // remote query else qexec = QueryExecutionFactory.sparqlService(endpoint, query, graphURI); // remote query } else qexec = QueryExecutionFactory.create(query, model); // local query try{ //ResultSet results = qexec.execSelect(); ResultSet res = ResultSetFactory.copyResults(qexec.execSelect()) ; QuerySolution qs; String n; while (res.hasNext()) { qs = res.next(); if (qs.get("o") == null) { // get subject n = qs.get("s").toString(); // consider only the type "yago" if (!p.contains("type")) results.put(n, 's'); // target as subject else { if (n.contains("yago")) results.put(n, 's'); // target as subject } } else { // get object n = qs.get("o").toString(); // consider only the type "yago" if (!p.contains("type")) results.put(n, 'o'); // target as object else { if (n.contains("yago")) results.put(n, 'o'); // target as object } } } } catch(Exception e){ e.printStackTrace(); } finally{ qexec.close(); } return results; } /** * Extract key from metadata index * @param s string to index * @return index of s */ private int extractKey(String s) { synchronized (metadata_index) { if(metadata_index.containsKey(s)){ return metadata_index.get(s); } else{ int id = counter.value(); metadata_index.put(s, id); return id; } } } }
10,480
25.534177
99
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sparqlDataExtractor/QueryExecutor.java
package it.poliba.sisinflab.LODRec.sparqlDataExtractor; import gnu.trove.map.hash.TObjectByteHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.fileManager.TextFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.itemManager.PropertyIndexedItemTree; import it.poliba.sisinflab.LODRec.tree.NNode; import it.poliba.sisinflab.LODRec.tree.NTree; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import com.hp.hpl.jena.query.Query; import com.hp.hpl.jena.query.QueryExecution; import com.hp.hpl.jena.query.QueryExecutionFactory; import com.hp.hpl.jena.query.QueryFactory; import com.hp.hpl.jena.query.QuerySolution; import com.hp.hpl.jena.query.ResultSet; import com.hp.hpl.jena.query.ResultSetFactory; import com.hp.hpl.jena.rdf.model.Model; /** * This class is part of the LOD Recommender * * This class is used by RDFTripleExtractor for multi-threading RDF triples * extraction * * @author Vito Mastromarino */ public class QueryExecutor implements Runnable { private String uri; // uri resource private NTree props; // properties map private TObjectIntHashMap<String> props_index; // properties index private String graphURI; // graph uri private String endpoint; // endpoint address private SynchronizedCounter counter; // synchronized counter for metadata index private TObjectIntHashMap<String> metadata_index; // metadata index private TextFileManager textWriter; private Model model; // local dataset model private ItemTree itemTree; // item tree private boolean inverseProps; // directed property private ItemFileManager fileManager; private boolean caching; private static Logger logger = LogManager.getLogger(QueryExecutor.class.getName()); /** * Constuctor */ public QueryExecutor(String uri, int uri_id, NTree props, TObjectIntHashMap<String> props_index, String graphURI, String endpoint, SynchronizedCounter counter, TObjectIntHashMap<String> metadata_index, TextFileManager textWriter, ItemFileManager fileManager, boolean inverseProps, boolean caching, Model model){ this.uri = uri; this.props = props; this.props_index = props_index; this.graphURI = graphURI; this.endpoint = endpoint; this.counter = counter; this.textWriter = textWriter; this.metadata_index = metadata_index; this.model = null; this.fileManager = fileManager; this.inverseProps = inverseProps; this.itemTree = new PropertyIndexedItemTree(uri_id); this.caching = caching; this.model = model; } /** * Start RDF triple extraction for selected uri */ public void run(){ logger.info(uri + ": start data extraction"); long start = System.currentTimeMillis(); NNode root = props.getRoot(); // execute query if(caching) execWithCaching(root, "", uri); else exec(root, "", uri); if(itemTree.size()>0){ // text file writing if(textWriter != null) textWriter.write(itemTree.serialize()); // binary file writing if(fileManager != null) fileManager.write(itemTree); } long stop = System.currentTimeMillis(); logger.info(uri + ": data extraction terminated in [sec]: " + ((stop - start) / 1000)); } /** * Execute RDF triple extraction */ private void exec(NNode node, String list_props, String uri){ if(node.hasChilds()){ String p; for (NNode children : node.getChilds()) { p = children.getValue(); String p_index; TObjectByteHashMap<String> result = new 
TObjectByteHashMap<String>(); result.putAll(runQuery(uri, "<" + p + ">")); if(result.size()>0){ for(String uri_res : result.keySet()){ p_index = String.valueOf(props_index.get(p)); if(inverseProps){ if(result.get(uri_res) == (byte) 1) p_index = String.valueOf(props_index.get("inv_" + p)); } if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); exec(children, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, extractKey(uri_res)); exec(children, p_index, uri_res); } } } } } } /** * Execute RDF triple extraction with caching */ private void execWithCaching(NNode node, String list_props, String uri){ if(node.hasChilds()){ String p; for (NNode children : node.getChilds()) { p = children.getValue(); String p_index = String.valueOf(props_index.get(p)); TObjectByteHashMap<String> result = new TObjectByteHashMap<String>(); if(!RDFTripleExtractor.cache.containsKey(uri) || !RDFTripleExtractor.cache.get(uri).containsKey(p_index)){ result.putAll(runQuery(uri, "<" + p + ">")); if(result.size()>0){ RDFTripleExtractor.cache.putIfAbsent(uri, new ConcurrentHashMap<String, CopyOnWriteArrayList<String>>()); for(String uri_res : result.keySet()){ p_index = String.valueOf(props_index.get(p)); if(inverseProps){ if(result.get(uri_res) == (byte) 1) p_index = String.valueOf(props_index.get("inv_" + p)); } RDFTripleExtractor.cache.get(uri).putIfAbsent(p_index, new CopyOnWriteArrayList<String>()); RDFTripleExtractor.cache.get(uri).get(p_index).add(uri_res); if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(children, list_props + "-" + p_index, uri_res); } else { itemTree.addBranches(p_index, extractKey(uri_res)); execWithCaching(children, p_index, uri_res); } } } } // uri in cache else{ logger.debug("Cache: " + uri); for(String uri_res : RDFTripleExtractor.cache.get(uri).get(p_index)){ if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(children, list_props + "-" + p_index, uri_res); } else { itemTree.addBranches(p_index, extractKey(uri_res)); execWithCaching(children, p_index, uri_res); } } if(inverseProps){ p_index = String.valueOf(props_index.get("inv_" + p)); if(RDFTripleExtractor.cache.get(uri).containsKey(p_index)){ for(String uri_res : RDFTripleExtractor.cache.get(uri).get(p_index)){ if(list_props.length()>0){ itemTree.addBranches(list_props + "-" + p_index, extractKey(uri_res)); execWithCaching(children, list_props + "-" + p_index, uri_res); } else{ itemTree.addBranches(p_index, extractKey(uri_res)); execWithCaching(children, p_index, uri_res); } } } } } } } } /** * Run SPARQL query * @param uri resource uri * @param p property * @return results map: uri-s (if uri is a subject), uri-o (if uri is an object) */ private TObjectByteHashMap<String> runQuery(String uri, String p){ TObjectByteHashMap<String> results = new TObjectByteHashMap<String>(); Query query; String q; q = "SELECT * WHERE {{?s " + p + " <" + uri + ">. FILTER isIRI(?s). } UNION " + "{<" + uri + "> " + p + " ?o ." + " FILTER isIRI(?o). 
}} "; logger.debug(q); try { query = QueryFactory.create(q); results = executeQuery(query, p); } catch (Exception e) { e.printStackTrace(); } return results; } /** * Execute SPARQL query * @param query sparql query * @param p property * @return results map: uri-s (if uri is a subject), uri-o (if uri is an object) */ private TObjectByteHashMap<String> executeQuery(Query query, String p) { TObjectByteHashMap<String> results = new TObjectByteHashMap<String>(); QueryExecution qexec = null; if(model==null){ if(graphURI == null) qexec = QueryExecutionFactory.sparqlService(endpoint, query); // remote query else qexec = QueryExecutionFactory.sparqlService(endpoint, query, graphURI); // remote query } else qexec = QueryExecutionFactory.create(query, model); // local query try{ //ResultSet res = qexec.execSelect(); ResultSet res = ResultSetFactory.copyResults(qexec.execSelect()) ; QuerySolution qs; String n; while (res.hasNext()) { qs = res.next(); if (qs.get("o") == null) { // get subject n = qs.get("s").toString(); // consider only the type "yago" if (!p.contains("type")) results.put(n, (byte) 1); // target as subject else { if (n.contains("yago")) results.put(n, (byte) 1); // target as subject } } else { // get object n = qs.get("o").toString(); // consider only the type "yago" if (!p.contains("type")) results.put(n, (byte) 0); // target as object else { if (n.contains("yago")) results.put(n, (byte) 0); // target as object } } } } catch(Exception e){ e.printStackTrace(); } finally{ //qexec.close(); } return results; } /** * Extract key from metadata index * @param s string to index * @return index of s */ private int extractKey(String s) { synchronized (metadata_index) { if(metadata_index.containsKey(s)){ return metadata_index.get(s); } else { int id = counter.value(); metadata_index.put(s, id); return id; } } } }
9,863
25.877384
91
java
lodreclib
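The core of QueryExecutor is the query template assembled in runQuery: a single UNION that retrieves both incoming neighbours (?s, resources pointing at the item, stored with byte 1 so the inverse property index can be applied) and outgoing neighbours (?o, stored with byte 0), with FILTER isIRI discarding literals. A tiny standalone snippet that prints the same query shape; the resource and property URIs are invented for illustration.

public class QueryShapeDemo {
    public static void main(String[] args) {
        // Hypothetical resource and property, for illustration only.
        String uri = "http://dbpedia.org/resource/The_Matrix";
        String p = "<http://dbpedia.org/ontology/director>";
        // Same template QueryExecutor.runQuery builds: both directions in one UNION,
        // keeping IRI results only.
        String q = "SELECT * WHERE {{?s " + p + " <" + uri + ">. FILTER isIRI(?s). } UNION "
                + "{<" + uri + "> " + p + " ?o ." + " FILTER isIRI(?o). }} ";
        System.out.println(q);
    }
}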
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/fileManager/ItemFileManager.java
package it.poliba.sisinflab.LODRec.fileManager; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntObjectHashMap; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.ObjectInputStream; import java.io.ObjectOutput; import java.io.ObjectOutputStream; public class ItemFileManager extends FileManager{ public ItemFileManager(String filename, int mode) { super(filename, mode); // TODO Auto-generated constructor stub } public ItemFileManager(String filename, THashMap<String, String> items_index) { super(filename, items_index); // TODO Auto-generated constructor stub } public synchronized void write(ItemTree i){ try{ // Serialize to a byte array ByteArrayOutputStream bos = new ByteArrayOutputStream() ; ObjectOutput out = new ObjectOutputStream(bos); out.writeObject(i); out.close(); // Get the bytes of the serialized object byte[] buf = bos.toByteArray(); // save the position and length of the serialized object file_index_writer.append(i.getItemId() + "\t" + position + "\t" + buf.length); file_index_writer.newLine(); // write to the file file.seek(position); file.write(buf); position += buf.length; } catch(Exception e){ e.printStackTrace(); } } public ItemTree read(String key){ String[] vals = file_index.get(key).split(":"); try{ ItemTree res = null; byte[] buf = new byte[Integer.parseInt(vals[1])]; file.seek(Long.parseLong(vals[0])); // seek to the objects data file.readFully(buf); // read the data ByteArrayInputStream bis = new ByteArrayInputStream(buf); ObjectInputStream ois = new ObjectInputStream(bis); res = (ItemTree) ois.readObject(); // deserialize data return res; } catch(Exception e){ e.printStackTrace(); } return null; } public TIntObjectHashMap<ItemTree> read(long n){ TIntObjectHashMap<ItemTree> res = new TIntObjectHashMap<ItemTree>(); long count = 0; for(String key : file_index.keySet()){ if(count < n){ ItemTree tmp = read(key); res.put(tmp.getItemId(), tmp); count++; } else break; } return res; } }
2,429
23.795918
87
java
lodreclib
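ItemFileManager pairs a random-access .dat file with a plain-text side index so that item trees can be written once and later fetched by item id. A sketch of the round trip under the hypothetical prefix "items"; the ids and the branch key are made up, while the WRITE/READ modes and the write/read/close calls come from the class above.

import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager;
import it.poliba.sisinflab.LODRec.itemManager.ItemTree;
import it.poliba.sisinflab.LODRec.itemManager.PropertyIndexedItemTree;

public class ItemFileRoundTrip {
    public static void main(String[] args) {
        // Write one serialized item tree; "items.dat" and "items_file_index" are created.
        ItemFileManager writer = new ItemFileManager("items", ItemFileManager.WRITE);
        ItemTree tree = new PropertyIndexedItemTree(42);  // 42: illustrative item id
        tree.addBranches("3#15", 15);                     // illustrative property/metadata ids
        writer.write(tree);
        writer.close();

        // Re-open the same prefix in READ mode and fetch the record by its item id.
        ItemFileManager reader = new ItemFileManager("items", ItemFileManager.READ);
        ItemTree back = reader.read("42");
        System.out.println(back.getItemId());
        reader.close();
    }
}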
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/fileManager/StringFileManager.java
package it.poliba.sisinflab.LODRec.fileManager; import gnu.trove.map.hash.THashMap; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.ObjectInputStream; import java.io.ObjectOutput; import java.io.ObjectOutputStream; import java.util.Set; public class StringFileManager extends FileManager { public StringFileManager(String filename, Set<Integer> entries_to_consider) { super(filename, entries_to_consider); // TODO Auto-generated constructor stub } public StringFileManager(String filename, int mode) { super(filename, mode); // TODO Auto-generated constructor stub } public StringFileManager(String filename, THashMap<String, String> items_path_index) { super(filename, items_path_index); // TODO Auto-generated constructor stub } public synchronized void write(String str){ try{ String[] vals = str.split("\t"); // Serialize to a byte array ByteArrayOutputStream bos = new ByteArrayOutputStream() ; ObjectOutput out = new ObjectOutputStream(bos); out.writeObject(vals[1]); out.close(); // Get the bytes of the serialized object byte[] buf = bos.toByteArray(); // save the position and length of the serialized object file_index_writer.append(vals[0] + "\t" + position + "\t" + buf.length); file_index_writer.newLine(); // write to the file file.seek(position); file.write(buf); position += buf.length; } catch(Exception e){ e.printStackTrace(); } } public String read(String key){ try{ String[] vals = file_index.get(key).split(":"); byte[] buf = new byte[Integer.parseInt(vals[1])]; file.seek(Long.parseLong(vals[0])); // seek to the objects data file.readFully(buf); // read the data ByteArrayInputStream bis = new ByteArrayInputStream(buf); ObjectInputStream ois = new ObjectInputStream(bis); String res = (String) ois.readObject(); return res; } catch(Exception e){ return null; } } public THashMap<String, String> read(long n){ THashMap<String,String> res = new THashMap<String,String>(); long count = 0; for(String key : file_index.keySet()){ if(count < n){ String tmp = read(key); res.put(key, tmp); count++; } else break; } return res; } }
2,531
24.069307
87
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/fileManager/FileManager.java
package it.poliba.sisinflab.LODRec.fileManager; import gnu.trove.map.hash.THashMap; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.RandomAccessFile; import java.util.Set; public class FileManager { public static int WRITE = 1; public static int READ = 2; public static int APPEND = 3; protected long position = 0; protected RandomAccessFile file; protected BufferedWriter file_index_writer; protected BufferedReader file_index_reader; protected THashMap<String, String> file_index; protected long index_size = 0; public FileManager(String filename, int mode){ switch(mode){ case 1:{ try{ File f = new File(filename + ".dat"); f.delete(); file = new RandomAccessFile(filename + ".dat", "rw"); file_index_writer = new BufferedWriter(new FileWriter(filename + "_file_index")); } catch(Exception e){ e.printStackTrace(); } break; } case 2:{ try{ file = new RandomAccessFile(filename + ".dat", "r"); file_index_reader = new BufferedReader(new FileReader(filename + "_file_index")); file_index = readFileIndex(); } catch(Exception e){ e.printStackTrace(); } break; } case 3:{ try{ file = new RandomAccessFile(filename + ".dat", "rw"); file_index_writer = new BufferedWriter(new FileWriter(filename + "_file_index", true)); setPosition(); } catch(Exception e){ e.printStackTrace(); } } } } public FileManager(String filename, Set<Integer> entries_to_consider){ try{ file = new RandomAccessFile(filename + ".dat", "r"); file_index_reader = new BufferedReader(new FileReader(filename + "_file_index")); file_index = readFileIndex(entries_to_consider); } catch(Exception e){ e.printStackTrace(); } } public FileManager(String filename, THashMap<String, String> file_index){ try{ file = new RandomAccessFile(filename + ".dat", "r"); file_index_reader = new BufferedReader(new FileReader(filename + "_file_index")); this.file_index = file_index; } catch(Exception e){ e.printStackTrace(); } } private void setPosition(){ try { position = file.length(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } public THashMap<String, String> getFileIndex(){ return file_index; } private THashMap<String, String> readFileIndex(){ THashMap<String,String> file_index = new THashMap<String,String>(); try{ String line = null; while((line=file_index_reader.readLine()) != null){ String[] vals = line.split("\t"); file_index.put(vals[0], vals[1]+":"+vals[2]); index_size++; } return file_index; } catch(Exception e){ e.printStackTrace(); } return null; } private THashMap<String, String> readFileIndex(Set<Integer> entries_to_consider){ THashMap<String,String> file_index = new THashMap<String,String>(); try{ String line = null; while((line=file_index_reader.readLine()) != null){ String[] vals = line.split("\t"); String[] items = vals[0].split("-"); int items1 = Integer.parseInt(items[0]); int items2 = Integer.parseInt(items[1]); if(entries_to_consider.contains(items1) || entries_to_consider.contains(items2)){ file_index.put(vals[0], vals[1]+":"+vals[2]); index_size++; } } return file_index; } catch(Exception e){ e.printStackTrace(); } return null; } public long getIndexSize(){ return index_size; } public Set<String> getKeysIndex(){ return file_index.keySet(); } public boolean containsKey(String key){ if(file_index.containsKey(key)) return true; else return false; } public void close(){ try{ file.close(); if(file_index_writer!=null){ file_index_writer.flush(); file_index_writer.close(); } 
if(file_index_reader!=null) file_index_reader.close(); } catch(Exception e){ e.printStackTrace(); } } }
4,336
19.951691
92
java
lodreclib
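FileManager is the base class the managers above build on: each record is appended to <prefix>.dat at a growing offset, while <prefix>_file_index keeps one tab-separated key, offset and length per line, later loaded into memory as "offset:length" strings. A small standalone sketch that lists the records of such an index; the file name is a hypothetical example.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class IndexInspector {
    public static void main(String[] args) throws IOException {
        // The real name is <prefix>_file_index; "items_file_index" is assumed here.
        try (BufferedReader in = new BufferedReader(new FileReader("items_file_index"))) {
            String line;
            while ((line = in.readLine()) != null) {
                String[] vals = line.split("\t"); // key, byte offset in <prefix>.dat, record length
                System.out.println("key=" + vals[0] + " offset=" + vals[1] + " bytes=" + vals[2]);
            }
        }
    }
}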
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/fileManager/TextFileManager.java
package it.poliba.sisinflab.LODRec.fileManager;

import java.io.BufferedWriter;
import java.io.FileWriter;

public class TextFileManager {

    private String filename;
    private BufferedWriter bw;
    private boolean append;

    public TextFileManager(String filename) {
        this(filename, false);
    }

    public TextFileManager(String filename, boolean append) {
        this.filename = filename;
        // set the append flag before opening the stream, otherwise the
        // file is always truncated regardless of the flag passed in
        this.append = append;
        openFile();
    }

    private void openFile() {
        try {
            bw = new BufferedWriter(new FileWriter(filename, append));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public synchronized void write(String str) {
        try {
            bw.append(str);
            bw.newLine();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public void close() {
        try {
            bw.flush();
            bw.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * @param args
     */
    public static void main(String[] args) {
        // TODO Auto-generated method stub
    }
}
1,019
12.246753
61
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/ItemPreProcessing.java
package it.poliba.sisinflab.LODRec.graphkernel; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; public class ItemPreProcessing { private TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures; private TIntObjectHashMap<String> featureStringIndex; private TObjectIntHashMap<String> featureStringInverseIndex; private HashSet<Integer> items; private int embeddingOption; private String entityMapFile; private String branchMapFile; private String itemMetadataFile; private int max_f; private int min_f; private boolean minmax_norm; private boolean idf; private boolean length_normaliz; // Euclidean length normalization -> // http://nlp.stanford.edu/IR-book/html/htmledition/dot-products-1.html#sec:inner public ItemPreProcessing(String itemMetadataFile, int embeddingOption, String entityMapFile, String branchMapFile, boolean minmax_norm, boolean idf, int max_f, int min_f, boolean length_normaliz) { this.itemMetadataFile = itemMetadataFile; this.embeddingOption = embeddingOption; this.entityMapFile = entityMapFile; this.branchMapFile = branchMapFile; this.min_f = min_f; this.max_f = max_f; this.minmax_norm = minmax_norm; this.idf = idf; this.length_normaliz = length_normaliz; if (embeddingOption == 1) itemMetadataFile = itemMetadataFile + entityMapFile; else if (embeddingOption == 2) itemMetadataFile = itemMetadataFile + branchMapFile; this.loadItemData(itemMetadataFile); } public ItemPreProcessing(String itemMetadataFile, boolean minmax_norm, boolean idf, int max_f, int min_f, boolean length_normaliz) { this.itemMetadataFile = itemMetadataFile; this.min_f = min_f; this.max_f = max_f; this.minmax_norm = minmax_norm; this.idf = idf; this.length_normaliz = length_normaliz; this.loadItemData(itemMetadataFile); } public void exec() { if (idf) this.computeIDF(); if (minmax_norm) this.min_max_normalize(); this.filterByFrequency(); if (length_normaliz) this.length_normaliz(); this.writeData(itemMetadataFile); } private void writeData(String filename) { try { BufferedWriter writer = new BufferedWriter(new FileWriter(filename)); TIntFloatHashMap m; StringBuffer buf; for (int id : items) { buf = new StringBuffer(); buf.append(id + "\t"); m = map_item_intFeatures.get(id); int[] fIDs = m.keys(); Arrays.sort(fIDs); for (int i = 0; i < fIDs.length; i++) { buf.append(fIDs[i] + ":" + m.get(fIDs[i]) + " "); } writer.append(buf); writer.newLine(); } writer.flush(); writer.close(); } catch (IOException ex) { System.out.println(ex.getMessage()); } } private void loadItemData(String file_name) { this.items = new HashSet<Integer>(); map_item_intFeatures = new TIntObjectHashMap<TIntFloatHashMap>(); featureStringIndex = new TIntObjectHashMap<String>(); featureStringInverseIndex = new TObjectIntHashMap<String>(); BufferedReader br; float avg = 0; try { br = new BufferedReader(new FileReader(file_name)); String line = null; while ((line = br.readLine()) != null) { try { String[] vals = line.split("\t"); int id = Integer.parseInt(vals[0]); if (!items.contains(id)) items.add(id); // System.out.println("count "+ items.size()); map_item_intFeatures.put(id, new TIntFloatHashMap()); String[] values = 
vals[1].trim().split(" "); for (int i = 0; i < values.length; i++) { String[] pair = values[i].split(":"); String fStr = pair[0]; int fId; if (featureStringInverseIndex.containsKey(fStr)) fId = featureStringInverseIndex.get(fStr); else { fId = featureStringInverseIndex.size() + 1; featureStringIndex.put(fId, fStr); featureStringInverseIndex.put(fStr, fId); } float fVal = Float.parseFloat(pair[1]); map_item_intFeatures.get(id).put(fId, fVal); } avg += map_item_intFeatures.get(id).size(); } catch (Exception ex) { System.out.println(ex.getMessage()); System.out.println(line); } } br.close(); } catch (IOException e) { e.printStackTrace(); } // num_features = featureStringIndex.size(); avg = avg / (float) map_item_intFeatures.keySet().size(); System.out .println("item data loading terminated. avg features (considering also collaborative features) per item: " + avg + ". n. features in the index: " + featureStringIndex.size()); } private void min_max_normalize() { System.out.println("computing MIN MAX normalization"); Map<Integer, Float> attribute_min_vals = new HashMap<Integer, Float>(); Map<Integer, Float> attribute_max_vals = new HashMap<Integer, Float>(); for (int i : map_item_intFeatures.keys()) { for (int j : map_item_intFeatures.get(i).keys()) { float v = map_item_intFeatures.get(i).get(j); if (!attribute_min_vals.containsKey(j)) attribute_min_vals.put(j, v); if (attribute_min_vals.get(j) > v) attribute_min_vals.put(j, v); if (!attribute_max_vals.containsKey(j)) attribute_max_vals.put(j, v); if (attribute_max_vals.get(j) < v) attribute_max_vals.put(j, v); } } for (int i : map_item_intFeatures.keys()) { for (int j : map_item_intFeatures.get(i).keys()) { float v = map_item_intFeatures.get(i).get(j); float max = attribute_max_vals.get(j); // float min = attribute_min_vals.get(j); float norm_val = (v) / (max); if (!Float.isNaN(norm_val)) map_item_intFeatures.get(i).put(j, norm_val); } } } private void length_normaliz() { System.out.println("computing euclidean length normalization"); float sum = 0, val = 0; for (int i : map_item_intFeatures.keys()) { sum = 0; for (int j : map_item_intFeatures.get(i).keys()) { val = map_item_intFeatures.get(i).get(j); sum += (val * val); } sum = (float) Math.sqrt(sum); for (int j : map_item_intFeatures.get(i).keys()) { val = map_item_intFeatures.get(i).get(j); map_item_intFeatures.get(i).put(j, val / sum); } } } private void computeIDF() { System.out.println("computing IDF"); Map<Integer, Integer> attribute_nnz_vals = new HashMap<Integer, Integer>(); for (int i : map_item_intFeatures.keys()) { for (int j : map_item_intFeatures.get(i).keys()) { if (!attribute_nnz_vals.containsKey(j)) attribute_nnz_vals.put(j, 1); else attribute_nnz_vals.put(j, attribute_nnz_vals.get(j) + 1); } } int n_items = map_item_intFeatures.keys().length; for (int i : map_item_intFeatures.keys()) { for (int j : map_item_intFeatures.get(i).keys()) { float v = map_item_intFeatures.get(i).get(j); // System.out.println(v); v = (float) (v * Math.log(n_items / (1 + attribute_nnz_vals.get(j)))); map_item_intFeatures.get(i).put(j, v); // System.out.println(v); // System.out.println(" ------ "); } } } private void filterByFrequency() { System.out.println("computing min max filtering"); float avg_nfeatures = 0; Map<Integer, Integer> attribute_nnz_vals = new HashMap<Integer, Integer>(); for (int i : map_item_intFeatures.keys()) { for (int j : map_item_intFeatures.get(i).keys()) { if (!attribute_nnz_vals.containsKey(j)) attribute_nnz_vals.put(j, 1); else attribute_nnz_vals.put(j, 
attribute_nnz_vals.get(j) + 1); } } HashSet<Integer> toRemove = new HashSet<Integer>(); for (int j : attribute_nnz_vals.keySet()) { int occ = attribute_nnz_vals.get(j); if (occ >= max_f || occ <= min_f) toRemove.add(j); } for (int i : map_item_intFeatures.keys()) { for (int j : toRemove) { if (map_item_intFeatures.get(i).containsKey(j)) map_item_intFeatures.get(i).remove(j); } avg_nfeatures += map_item_intFeatures.get(i).keys().length; } System.out.println("removed " + toRemove.size() + " features"); avg_nfeatures = avg_nfeatures / (float) map_item_intFeatures.size(); System.out .println("avg n. features (considering also collaborative features) per item after minmax filtering " + avg_nfeatures); } private void cmpItemFeatureStats() { Map<Integer, Float> attribute_min_vals = new HashMap<Integer, Float>(); Map<Integer, Float> attribute_max_vals = new HashMap<Integer, Float>(); Map<Integer, Integer> attribute_nnz_vals = new HashMap<Integer, Integer>(); Map<Integer, Set<Integer>> attribute_inverse_nnz_vals = new HashMap<Integer, Set<Integer>>(); for (int i : map_item_intFeatures.keys()) { for (int j : map_item_intFeatures.get(i).keys()) { float v = map_item_intFeatures.get(i).get(j); if (!attribute_nnz_vals.containsKey(j)) attribute_nnz_vals.put(j, 1); else attribute_nnz_vals.put(j, attribute_nnz_vals.get(j) + 1); // System.out.println(i+" "+" " + j + " " +v +" curr min: " + // attribute_min_vals.get(j)); if (!attribute_min_vals.containsKey(j)) attribute_min_vals.put(j, v); if (attribute_min_vals.get(j) > v) attribute_min_vals.put(j, v); if (!attribute_max_vals.containsKey(j)) attribute_max_vals.put(j, v); if (attribute_max_vals.get(j) < v) attribute_max_vals.put(j, v); } } for (int j : attribute_nnz_vals.keySet()) { int occ = attribute_nnz_vals.get(j); if (!attribute_inverse_nnz_vals.containsKey(occ)) attribute_inverse_nnz_vals.put(occ, new HashSet()); attribute_inverse_nnz_vals.get(occ).add(j); } List<Integer> listOcc = new ArrayList<Integer>(); listOcc.addAll(attribute_inverse_nnz_vals.keySet()); Collections.sort(listOcc, Collections.reverseOrder()); int topN = 250; int count = 0; for (int occ : listOcc) { for (int j : attribute_inverse_nnz_vals.get(occ)) { if (count >= topN) break; count++; System.out.print(j + " " + this.featureStringIndex.get(j) + ". nnz:" + " " + occ); if (attribute_min_vals.containsKey(j)) System.out.print(" min:" + attribute_min_vals.get(j)); if (attribute_max_vals.containsKey(j)) System.out.print(" max:" + attribute_max_vals.get(j)); System.out.println(); } } } }
10,553
27.069149
110
java
lodreclib
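ItemPreProcessing post-processes a textual feature file with one line per item (the item id, a tab, then space-separated feature:value pairs): optional IDF and min-max re-weighting, frequency filtering between min_f and max_f, and Euclidean length normalization, writing the result back with integer feature ids. A minimal sketch of invoking it directly; the file name and the thresholds are illustrative.

import it.poliba.sisinflab.LODRec.graphkernel.ItemPreProcessing;

public class PreProcessingExample {
    public static void main(String[] args) {
        ItemPreProcessing prep = new ItemPreProcessing(
                "metadata_map", // hypothetical feature file, rewritten in place by exec()
                false,          // minmax_norm
                true,           // idf
                100000,         // max_f: drop features occurring in >= max_f items
                1,              // min_f: drop features occurring in <= min_f items
                true);          // length_normaliz (Euclidean)
        prep.exec();
    }
}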
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/graphEmbedding/Tokenizer.java
package it.poliba.sisinflab.LODRec.graphkernel.graphEmbedding; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.utils.MemoryMonitor; import it.poliba.sisinflab.LODRec.utils.PropertyFileReader; import it.poliba.sisinflab.LODRec.utils.TextFileUtils; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Map; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; public class Tokenizer { private String items_file; private TIntObjectHashMap<String> props_index; // property index private TIntObjectHashMap<String> metadata_index; // metadata index private static Logger logger = LogManager.getLogger(Tokenizer.class.getName()); public Tokenizer(){ // load config file Map<String, String> prop = null; try { prop = PropertyFileReader.loadProperties("config.properties"); this.items_file = prop.get("itemsFile"); loadPropsIndex(); loadMetadataIndex(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } prop.clear(); } private void loadPropsIndex(){ props_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex("props_index", props_index); } private void loadMetadataIndex(){ metadata_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex("metadata_index", metadata_index); } private void tokenize(){ try{ BufferedWriter br = new BufferedWriter(new FileWriter("metadata_string_1")); ItemFileManager itemReader = new ItemFileManager(items_file, ItemFileManager.READ); ArrayList<String> items_id = new ArrayList<String>(itemReader.getKeysIndex()); THashMap<String, TIntIntHashMap> branches = null; ItemTree item = null; StringBuffer str = null; for(String item_id : items_id){ item = itemReader.read(item_id); branches = item.getBranches(); logger.info("Convert " + item_id); str = new StringBuffer(); str.append(item_id + "\t"); for(String s : branches.keySet()){ String prop = ""; String[] prop_vals = s.split("-"); if(prop_vals.length==1){ for(String ss: prop_vals){ String[] p = props_index.get(Integer.parseInt(ss)).split("/"); prop += p[p.length-1] + "--"; } for(int f : branches.get(s).keys()){ String[] lbl = metadata_index.get(f).split("/"); str.append(prop + lbl[lbl.length-1].replaceAll("[{'.,;:/}]", "_") + ":" + branches.get(s).get(f) + " "); } } } br.append(str); br.newLine(); } br.flush(); br.close(); itemReader.close(); } catch(Exception e){ e.printStackTrace(); } } /** * @param args * @throws IOException */ public static void main(String[] args) throws IOException { // TODO Auto-generated method stub Tokenizer ss = new Tokenizer(); long start = System.currentTimeMillis(); ss.tokenize(); long stop = System.currentTimeMillis(); logger.info("Conversion terminated in [sec]: " + ((stop - start) / 1000)); MemoryMonitor.stats(); } }
3,402
23.134752
111
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/graphEmbedding/ItemGraphEmbedder.java
package it.poliba.sisinflab.LODRec.graphkernel.graphEmbedding; import gnu.trove.iterator.TIntIntIterator; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectFloatHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.graphkernel.ItemPreProcessing; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.utils.TextFileUtils; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import java.util.Set; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; public class ItemGraphEmbedder { private static final String ENTITY_DELIM = "#E"; private static final String PROP_DELIM = "#P"; private String workingDir; private String itemsFile; private TIntObjectHashMap<String> metadata_index; // metadata index private TIntObjectHashMap<String> props_index; // property index private String entityMapFile; private String branchMapFile; private int max_branch_length = 4; private boolean onlyEntityBranches = true; private Map<Integer, Float> alphaParams; private static Logger logger = LogManager.getLogger(ItemGraphEmbedder.class .getName()); private int option = 1; private String trainRatingFile; private boolean collabFeatures = true; float thresh = 1; private boolean onlyCollabFeatures = false; // item -> user,rate private Map<Integer, Map<Integer, Float>> mapItemUserRatings; private String datasetFile; private int max_f; private int min_f; private boolean minmax_norm; private boolean idf; private boolean length_normaliz; // Euclidean length normalization -> // http://nlp.stanford.edu/IR-book/html/htmledition/dot-products-1.html#sec:inner private Map<Integer, Float> alpha_vals; public ItemGraphEmbedder(String workingDir, String itemMetadataFile, String entityMapFile, String branchMapFile, int embeddingOption, String trainRatingFile, int max_branch_length, boolean addCollabFeatures, boolean onlyEntityBranches, boolean minmax_norm, boolean idf, int max_f, int min_f, boolean length_normaliz, String listAlphaVals) { this.workingDir = workingDir; this.itemsFile = itemMetadataFile; this.entityMapFile = entityMapFile; this.branchMapFile = branchMapFile; this.option = embeddingOption; this.trainRatingFile = trainRatingFile; this.max_branch_length = max_branch_length; this.collabFeatures = addCollabFeatures; this.onlyEntityBranches = onlyEntityBranches; this.min_f = min_f; this.max_f = max_f; this.minmax_norm = minmax_norm; this.idf = idf; this.length_normaliz = length_normaliz; if (listAlphaVals!=null) { this.alpha_vals = new HashMap<Integer, Float>(); String[] parts = listAlphaVals.split(","); for (int i = 0; i < parts.length; i++) { float val = Float.parseFloat(parts[i]); alpha_vals.put((i + 1), val); } } init(); } public ItemGraphEmbedder(String workingDir, String itemMetadataFile, String entityMapFile, String branchMapFile, int embeddingOption, String trainRatingFile, int max_branch_length, boolean addCollabFeatures, boolean onlyEntityBranches, boolean minmax_norm, boolean idf, int max_f, int min_f, boolean length_normaliz) { this.workingDir = workingDir; this.itemsFile = itemMetadataFile; this.entityMapFile = entityMapFile; this.branchMapFile = branchMapFile; this.option = embeddingOption; 
this.trainRatingFile = trainRatingFile; this.max_branch_length = max_branch_length; this.collabFeatures = addCollabFeatures; this.onlyEntityBranches = onlyEntityBranches; this.min_f = min_f; this.max_f = max_f; this.minmax_norm = minmax_norm; this.idf = idf; this.length_normaliz = length_normaliz; init(); } private void init() { mapItemUserRatings = new HashMap<Integer, Map<Integer, Float>>(); this.branchMapFile = this.itemsFile + this.branchMapFile; this.entityMapFile = this.itemsFile + this.entityMapFile; if (this.collabFeatures) { mapItemUserRatings = this.loadRatingData(trainRatingFile); } alphaParams = new HashMap<Integer, Float>(); if (option == 4) loadPropsIndex(); if (option == 3 || option == 4) loadMetadataIndex(); if (alpha_vals != null) { for (int h : alpha_vals.keySet()) { alphaParams.put(h, alpha_vals.get(h)); } } else { for (int h = 1; h < 10; h++) { // if (h >= 8) // alphaParams.put(h, 0f); // else alphaParams.put(h, 1 / (1 + (float) Math.log(h))); } } System.out.println("ALPHA WEIGHTS"); for(int h:alphaParams.keySet()){ if(h>max_branch_length) break; System.out.println(h+":"+alphaParams.get(h)); } } public void computeMapping() { if (option == 1) { entity_based_mapping_from_textfile(); } else if (option == 2) { branch_based_mapping_from_textfile(); } // else if (option == 3) { // entity_based_mapping(); // // } else if (option == 4) { // branch_based_mapping(); // // } System.out.println("embedding terminated"); preprocess(); } private void preprocess() { ItemPreProcessing prep = new ItemPreProcessing(datasetFile, minmax_norm, idf, max_f, min_f, length_normaliz); prep.exec(); } private void loadPropsIndex() { props_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex(workingDir + "props_index", props_index); } private void loadMetadataIndex() { metadata_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex(itemsFile + "_index", metadata_index); } public Map<Integer, Map<Integer, Float>> loadRatingData(String filename) { Map<Integer, Map<Integer, Float>> ratings = new HashMap<Integer, Map<Integer, Float>>(); try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); while (line != null) { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); float rel = Float.parseFloat(str[2].trim()); if (rel >= thresh) { if (!ratings.containsKey(i)) ratings.put(i, new HashMap()); ratings.get(i).put(u, rel); } line = reader.readLine(); } } catch (IOException e) { } return ratings; } private void branch_based_mapping_from_textfile() { try { BufferedWriter writer = new BufferedWriter(new FileWriter( branchMapFile)); datasetFile = branchMapFile; BufferedReader reader = new BufferedReader(new FileReader( this.itemsFile)); StringBuffer str = null; TObjectFloatHashMap<String> res = null; String line = ""; int item_id; while ((line = reader.readLine()) != null) { line = line.replace(":", ""); res = new TObjectFloatHashMap<String>(); String[] parts = line.split("\t"); if (parts.length == 2) { item_id = Integer.parseInt(parts[0]); String[] branches = parts[1].split(" "); str = new StringBuffer(); str.append(item_id + "\t"); float val = 0; for (int i = 0; i < branches.length; i++) { String branch = cut_branch(branches[i]); if (!onlyEntityBranches) res.adjustOrPutValue(branch, 1, 1); int ind = 0; int j = 1; int ent_start = 0, ent_end = 0; while (ind != -1) { j++; ind = branch.indexOf(PROP_DELIM); if (ind != -1) { branch = branch.substring(ind + PROP_DELIM.length()); val 
= 1 / (float) j; if (!onlyEntityBranches) res.adjustOrPutValue( branch.replace(ENTITY_DELIM, "#") .replace(PROP_DELIM, "#"), val, val); String entityBranch = ""; ent_start = 0; ent_end = 0; while (ent_start != -1) { ent_start = branch.indexOf(ENTITY_DELIM, ent_end + PROP_DELIM.length()); ent_end = branch.indexOf(PROP_DELIM, ent_start + ENTITY_DELIM.length()); if (ent_end == -1) ent_end = branch.length(); if (ent_start != -1) { entityBranch += branch .substring( ent_start + ENTITY_DELIM .length(), ent_end); if (ent_end != branch.length()) entityBranch += "#"; } } res.adjustOrPutValue( entityBranch.replace(ENTITY_DELIM, "#") .replace(PROP_DELIM, "#"), val, val); } } } if (!onlyCollabFeatures) { for (String s : res.keySet()) { if(res.get(s)>0) str.append(s + ":" + res.get(s) + " "); } } if (collabFeatures & mapItemUserRatings.containsKey(item_id)) { for (int u : this.mapItemUserRatings.get(item_id) .keySet()) { str.append(u + ":" + 1 + " "); } } writer.append(str); writer.newLine(); } else System.out.println(line + " ---- no data"); } writer.flush(); writer.close(); reader.close(); } catch (Exception e) { e.printStackTrace(); } } private void entity_based_mapping_from_textfile() { try { BufferedWriter writer = new BufferedWriter(new FileWriter( this.entityMapFile)); datasetFile = entityMapFile; BufferedReader reader = new BufferedReader(new FileReader( this.itemsFile)); StringBuilder str = null; Map<String, Map<String, Float>> map = null; String line = ""; int item_id; while ((line = reader.readLine()) != null) { line = line.replace(":", ""); String[] parts = line.split("\t"); if (parts.length == 2) { item_id = Integer.parseInt(parts[0]); String[] branches = parts[1].split(" "); int h = 0; str = new StringBuilder(); str.append(item_id + "\t"); float val = 0, w = 0; map = new HashMap(); String ent, prefix; int start, end; for (int i = 0; i < branches.length; i++) { String branch = cut_branch(branches[i]); start = 0; while (start != -1) { start = branch.lastIndexOf(ENTITY_DELIM); end = branch.indexOf(PROP_DELIM, start + ENTITY_DELIM.length()); if (end == -1) end = branch.length(); if (start != -1) { ent = branch.substring( start + ENTITY_DELIM.length(), end); if (start != -1) prefix = branch.substring(0, start); else prefix = ent; if (!map.containsKey(ent)) map.put(ent, new HashMap<String, Float>()); h = branch.split(ENTITY_DELIM).length - 1; map.get(ent).put(prefix, alphaParams.get(h)); branch = prefix; } } } if (!onlyCollabFeatures) { for (String s : map.keySet()) { w = 0; for (String ss : map.get(s).keySet()) { w += map.get(s).get(ss); } if(w>0) str.append(s + ":" + w + " "); } } if (collabFeatures & mapItemUserRatings.containsKey(item_id)) { for (int u : this.mapItemUserRatings.get(item_id) .keySet()) { str.append(u + ":" + 1 + " "); } } writer.append(str); writer.newLine(); } else System.out.println(line + " ---- no data"); } writer.flush(); writer.close(); reader.close(); } catch (Exception e) { e.printStackTrace(); } } // cut the last part of the branch if it is longer than max_branch_length private String cut_branch(String branch) { int l = branch.split(PROP_DELIM).length - 1; if (l > max_branch_length) { String tmp = ""; int ind = PROP_DELIM.length(); for (int i = 0; i < max_branch_length; i++) { ind = branch.indexOf(PROP_DELIM, ind) + PROP_DELIM.length(); } tmp = branch.substring(0, ind - PROP_DELIM.length()); return tmp; } else return branch; } // da controllare private void branch_based_mapping() { try { BufferedWriter br = new BufferedWriter( new FileWriter(branchMapFile)); 
ItemFileManager itemReader = new ItemFileManager(itemsFile, ItemFileManager.READ); ArrayList<String> items_id = new ArrayList<String>( itemReader.getKeysIndex()); THashMap<String, TIntIntHashMap> branches = null; ItemTree item = null; StringBuilder str = null; TObjectIntHashMap<String> res = null; for (String item_id : items_id) { res = new TObjectIntHashMap<String>(); item = itemReader.read(item_id); branches = item.getBranches(); str = new StringBuilder(); str.append(item_id + "\t"); for (String s : branches.keySet()) { if (!isPrefix(s, branches.keySet())) { String b[] = s.split("-"); for (int i = 0; i < b.length; i++) { String path = ""; String features = ""; for (int j = i; j < b.length; j++) { String[] bb = b[j].split("#"); String[] p = props_index.get( Integer.parseInt(bb[0])).split("/"); String[] f = metadata_index.get( Integer.parseInt(bb[1])).split("/"); String clean_p = p[p.length - 1].replaceAll( "[{'.,;:/\\-}]", "_"); String clean_f = f[f.length - 1].replaceAll( "[{'.,;:/\\-}]", "_") + "-"; path += clean_p + "-" + clean_f; features += clean_f; } res.adjustOrPutValue( path.substring(0, path.length() - 1), 1, 1); res.adjustOrPutValue(features.substring(0, features.length() - 1), 1, 1); } } } for (String s : res.keySet()) str.append(s + ":" + res.get(s) + " "); br.append(str); br.newLine(); } br.flush(); br.close(); itemReader.close(); } catch (Exception e) { e.printStackTrace(); } } // da rivedere private void entity_based_mapping() { try { BufferedWriter br = new BufferedWriter( new FileWriter(entityMapFile)); ItemFileManager itemReader = new ItemFileManager(itemsFile, ItemFileManager.READ); ArrayList<String> items_id = new ArrayList<String>( itemReader.getKeysIndex()); THashMap<String, TIntIntHashMap> branches = null; ItemTree item = null; StringBuffer str = null; TObjectFloatHashMap<String> res = null; TIntIntHashMap resources = null; for (String item_id : items_id) { item = itemReader.read(item_id); branches = item.getBranches(); str = new StringBuffer(); str.append(item_id + "\t"); res = new TObjectFloatHashMap<String>(); float weight = 0; int h = 0; for (String prop : branches.keySet()) { resources = new TIntIntHashMap(); resources = branches.get(prop); h = prop.split("-").length; if (h > max_branch_length) { System.out.println(prop + " branch longer than " + max_branch_length); break; } TIntIntIterator it = resources.iterator(); int key, value = 0; while (it.hasNext()) { it.advance(); key = it.key(); value = it.value(); weight = value * alphaParams.get(h); String[] lbl = metadata_index.get(key).split("/"); String clean_lbl = lbl[lbl.length - 1].replaceAll( "[{'.,;:/}]", "_"); res.adjustOrPutValue(clean_lbl, weight, weight); } } for (String s : res.keySet()) str.append(s + ":" + res.get(s) + " "); br.append(str); br.newLine(); } br.flush(); br.close(); itemReader.close(); } catch (Exception e) { e.printStackTrace(); } } private boolean isPrefix(String s, Set<String> list) { // System.out.println(s + "-" + list); for (String ss : list) { if (!ss.contentEquals(s) && ss.startsWith(s)) { // System.out.println(s + " - " + ss); return true; } } return false; } }
15,928
23.282012
91
java
lodreclib
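ItemGraphEmbedder flattens the extracted item trees into sparse feature vectors, either one feature per reachable entity (embeddingOption 1) or one per branch (option 2), optionally adding a binary feature for every user who rated the item, and finally hands the resulting map file to ItemPreProcessing. A sketch using the constructor without explicit alpha weights; all paths and thresholds are assumptions.

import it.poliba.sisinflab.LODRec.graphkernel.graphEmbedding.ItemGraphEmbedder;

public class EmbeddingExample {
    public static void main(String[] args) {
        ItemGraphEmbedder embedder = new ItemGraphEmbedder(
                "./data/",         // workingDir
                "./data/metadata", // itemMetadataFile (textual item trees)
                "_entity_map",     // entityMapFile suffix, appended to itemMetadataFile
                "_branch_map",     // branchMapFile suffix
                1,                 // embeddingOption: 1 = entity based, 2 = branch based
                "./data/train",    // trainRatingFile (user, item, rating)
                4,                 // max_branch_length
                true,              // addCollabFeatures
                true,              // onlyEntityBranches
                false, true,       // minmax_norm, idf
                100000, 1,         // max_f, min_f
                true);             // length_normaliz
        embedder.computeMapping(); // writes the map file, then runs ItemPreProcessing on it
    }
}

When no listAlphaVals string is supplied, an entity found at depth h of a branch contributes with weight 1 / (1 + log h), as set up in init().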
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/heuristic/UserProfileSimilarityRecommender.java
package it.poliba.sisinflab.LODRec.graphkernel.heuristic; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; public class UserProfileSimilarityRecommender { private static final float POS_FB_VALUE = 1; private TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures; private String outFile; private boolean implicit; private BufferedWriter bw; Map<Integer, Map<Integer, Float>> trainRatings; private String trainRatingFile; private TIntObjectHashMap<TIntFloatHashMap> itemSim; private int topN; private HashSet<Integer> items; private int embeddingOption; private String entityMapFile; private String branchMapFile; private static final float MIN_SIM = 0f; private String itemMetadataFile; private Float evalRatingThresh; private int nThreads; private static Logger logger = LogManager .getLogger(UserProfileSimilarityRecommender.class.getName()); private void init() { if (embeddingOption == 1) itemMetadataFile = itemMetadataFile + entityMapFile; else if (embeddingOption == 2) itemMetadataFile = itemMetadataFile + branchMapFile; this.loadItemFeatureData(itemMetadataFile); trainRatings = this.loadRatingData(trainRatingFile); } public UserProfileSimilarityRecommender(int topN, String reccOutputFile, String itemMetadataFile, int embeddingOption, String entityMapFile, String branchMapFile, String trainRatingFile, boolean implicit, float evalRatingThresh,int nThreads) { this.topN = topN; this.outFile = reccOutputFile; this.itemMetadataFile = itemMetadataFile; this.embeddingOption = embeddingOption; this.entityMapFile = entityMapFile; this.branchMapFile = branchMapFile; this.trainRatingFile = trainRatingFile; this.implicit = implicit; this.evalRatingThresh = evalRatingThresh; this.nThreads=nThreads; init(); } private void loadItemFeatureData(String file_name) { int maxfID = 0; this.items = new HashSet<Integer>(); map_item_intFeatures = new TIntObjectHashMap<TIntFloatHashMap>(); BufferedReader br; float avg = 0; try { br = new BufferedReader(new FileReader(file_name)); String line = null; while ((line = br.readLine()) != null) { try { String[] vals = line.split("\t"); int id = Integer.parseInt(vals[0]); if (!items.contains(id)) items.add(id); map_item_intFeatures.put(id, new TIntFloatHashMap()); String[] values = vals[1].trim().split(" "); for (int i = 0; i < values.length; i++) { String[] pair = values[i].split(":"); int fId = Integer.parseInt(pair[0]); float fVal = Float.parseFloat(pair[1]); map_item_intFeatures.get(id).put(fId, fVal); if (fId > maxfID) maxfID = fId; } avg += map_item_intFeatures.get(id).size(); } catch (Exception ex) { System.out.println(ex.getMessage()); System.out.println(line); } } br.close(); } catch (IOException e) { e.printStackTrace(); } avg = avg / (float) map_item_intFeatures.keySet().size(); System.out .println("item data loading terminated. 
avg features (considering also collaborative features) per item: " + avg); } public void exec() { try { bw = new BufferedWriter(new FileWriter(outFile)); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); for (int u : trainRatings.keySet()) { Map<Integer, Float> userTrainRatings = trainRatings.get(u); Runnable worker = new UserProfileSimilarityRecommenderWorker(u,items, bw, map_item_intFeatures, topN, userTrainRatings, implicit,evalRatingThresh); // run the worker thread executor.execute(worker); } executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); bw.flush(); bw.close(); } catch (Exception e) { e.printStackTrace(); } } public Map<Integer, Map<Integer, Float>> loadRatingData(String filename) { Map<Integer, Map<Integer, Float>> ratings = new HashMap<Integer, Map<Integer, Float>>(); try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); while (line != null) { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); float rel = Float.parseFloat(str[2].trim()); if (items.contains(i)) { if (implicit) rel = POS_FB_VALUE; if (!ratings.containsKey(u)) ratings.put(u, new HashMap()); ratings.get(u).put(i, rel); } line = reader.readLine(); } } catch (IOException e) { } System.out.println(filename + " . loaded " + ratings.size() + " users"); return ratings; } }
5,117
25.246154
110
java
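UserProfileSimilarityRecommender above is driven entirely through its constructor and exec(): the constructor loads the item feature map and the training ratings, and exec() runs one UserProfileSimilarityRecommenderWorker per user on a fixed thread pool, writing one "user \t item:score item:score ..." line per user. A usage sketch; all file paths and parameter values below are illustrative placeholders, not defaults of the library:

import it.poliba.sisinflab.LODRec.graphkernel.heuristic.UserProfileSimilarityRecommender;

public class RunHeuristicRecommender {
    public static void main(String[] args) {
        // embeddingOption = 1 appends the entity map file to the item metadata prefix,
        // 2 appends the branch map file instead
        UserProfileSimilarityRecommender rec = new UserProfileSimilarityRecommender(
                10,                        // topN recommendations per user
                "out/recc.tsv",            // reccOutputFile
                "data/item_metadata_",     // itemMetadataFile (prefix)
                1,                         // embeddingOption
                "entity_map.tsv",          // entityMapFile
                "branch_map.tsv",          // branchMapFile
                "data/train_ratings.tsv",  // trainRatingFile (user \t item \t rating)
                true,                      // implicit feedback
                0.5f,                      // evalRatingThresh
                4);                        // nThreads
        rec.exec();
    }
}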
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/heuristic/UserProfileSimilarityRecommenderWorker.java
package it.poliba.sisinflab.LODRec.graphkernel.heuristic; import gnu.trove.iterator.TIntIterator; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.set.hash.TIntHashSet; import java.io.BufferedWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; public class UserProfileSimilarityRecommenderWorker implements Runnable { private TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures; private String outFile; private boolean implicit; private BufferedWriter bw; private Map<Integer, Float> userTrainRatings; private int u; private int topN; private HashSet<Integer> items; private static final float MIN_SIM = 0f; private Float evalRatingThresh; private static Logger logger = LogManager .getLogger(UserProfileSimilarityRecommenderWorker.class.getName()); public UserProfileSimilarityRecommenderWorker(int u, HashSet<Integer> items, BufferedWriter bw, TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures, int topN, Map<Integer, Float> userTrainRatings, boolean implicit, Float evalRatingThresh) { this.u = u; this.items = items; this.bw = bw; this.map_item_intFeatures = map_item_intFeatures; this.topN = topN; this.userTrainRatings = userTrainRatings; this.implicit = implicit; this.evalRatingThresh = evalRatingThresh; } private float cmpCosineSim(TIntFloatHashMap v1, TIntFloatHashMap v2) { TIntHashSet inters = new TIntHashSet(); inters.addAll(v1.keySet()); inters.retainAll(v2.keySet()); if (inters.size() == 0) return 0; else { int i = 0; TIntIterator it = inters.iterator(); float num = 0; float norm_v1 = 0; float norm_v2 = 0; while (it.hasNext()) { i = it.next(); num += v1.get(i) * v2.get(i); } for (int k1 : v1.keys()) norm_v1 += (v1.get(k1) * v1.get(k1)); for (int k2 : v2.keys()) norm_v2 += (v2.get(k2) * v2.get(k2)); return num / (float) (Math.sqrt(norm_v1) * Math.sqrt(norm_v2)); } } private void computeRecc(int u, Set<Integer> trainItems, TIntFloatHashMap user_prof) { Map<Double, Set<Integer>> map = new HashMap<Double, Set<Integer>>(); double pred; double[] prob_estimates; for (int id : items) { pred = 0; if (!trainItems.contains(id)) { if (map_item_intFeatures.containsKey(id)) { pred = this.cmpCosineSim(map_item_intFeatures.get(id), user_prof); if (pred > MIN_SIM) { if (!map.containsKey(pred)) map.put(pred, new HashSet()); map.get(pred).add(id); } } } } List<Double> scores = new ArrayList<Double>(); scores.addAll(map.keySet()); Collections.sort(scores, Collections.reverseOrder()); int c = 0; Iterator<Double> it = scores.iterator(); StringBuffer line = new StringBuffer(); line.append(u + "\t"); while (it.hasNext() & c < topN) { double s = it.next(); for (int i : map.get(s)) { if (c == topN) break; line.append(i + ":" + String.format("%.3f", s).replace(",", ".") + " "); c++; } } try { synchronized (bw) { bw.append(line); bw.newLine(); } } catch (IOException e) { e.printStackTrace(); } } @Override public void run() { TIntFloatHashMap user_prof = new TIntFloatHashMap(); int c = 0; for (int i : userTrainRatings.keySet()) { if (implicit || userTrainRatings.get(i) >= evalRatingThresh) { c++; for (int j : this.map_item_intFeatures.get(i).keys()) { float val = this.map_item_intFeatures.get(i).get(j); user_prof.adjustOrPutValue(j, val, val); } } } for (int i : user_prof.keys()) user_prof.adjustValue(i, 
1 / (float) (c)); computeRecc(u, userTrainRatings.keySet(), user_prof); } }
3,960
23.602484
73
java
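The core of the worker above is cmpCosineSim: it intersects the key sets of two sparse vectors, accumulates the dot product over the shared keys only, and divides by the product of the two norms. The same computation written over plain java.util maps (the original uses Trove's TIntFloatHashMap to keep the vectors compact):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class CosineSimSketch {

    // cosine similarity between two sparse vectors keyed by feature id
    static float cosine(Map<Integer, Float> v1, Map<Integer, Float> v2) {
        Set<Integer> shared = new HashSet<>(v1.keySet());
        shared.retainAll(v2.keySet());
        if (shared.isEmpty()) return 0f;
        float dot = 0f, n1 = 0f, n2 = 0f;
        for (int k : shared) dot += v1.get(k) * v2.get(k);
        for (float x : v1.values()) n1 += x * x;
        for (float x : v2.values()) n2 += x * x;
        return (float) (dot / (Math.sqrt(n1) * Math.sqrt(n2)));
    }

    public static void main(String[] args) {
        Map<Integer, Float> item = Map.of(1, 1f, 2, 2f);
        Map<Integer, Float> profile = Map.of(2, 1f, 3, 4f);
        System.out.println(cosine(item, profile)); // 2 / (sqrt(5) * sqrt(17)) ≈ 0.217
    }
}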
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/model/UserModelRecommenderWorker.java
package it.poliba.sisinflab.LODRec.graphkernel.model; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectFloatHashMap; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.evaluation.Evaluator; import it.poliba.sisinflab.LODRec.recommender.Recommender; import java.io.BufferedWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import de.bwaldvogel.liblinear.FeatureNode; import de.bwaldvogel.liblinear.Linear; import de.bwaldvogel.liblinear.Model; import de.bwaldvogel.liblinear.Parameter; import de.bwaldvogel.liblinear.Problem; import de.bwaldvogel.liblinear.SolverType; public class UserModelRecommenderWorker implements Runnable { private TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures; private boolean implicit; private int num_features; private List<Double> listC; private List<Double> listEps; private List<Integer> listSolverType; private TIntHashSet selectedFeatures; private float relUnknownItems; private BufferedWriter bw; private Map<Integer, Float> userTrainRatings; private Set<Integer> originalTrainItems; private Map<Integer, Float> userValRatings; private Evaluator trainEval; private Evaluator validEval; private boolean silent = true; private int topN; private HashSet<Integer> items; private boolean addNegValidationEx; private int timesRealFb; // = 5 -> era 3 private int nValidNegEx; // = 1000 private int minTrainEx; // = 100; private int u; private int topK; private String metric; // private int n_features; private static Logger logger = LogManager .getLogger(UserModelRecommenderWorker.class.getName()); public UserModelRecommenderWorker(int u, BufferedWriter bw, TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures, Evaluator trainEval, Evaluator validEval, boolean silent, int topN, int num_features, List<Double> listC, List<Double> listEps, List<Integer> listSolverType, Map<Integer, Float> userTrainRatings, Map<Integer, Float> userValRatings, boolean implicit, int nValidNegEx, boolean addNegValidationEx, int timesRealFb, int minTrainEx, HashSet<Integer> items, float relUnknownItems, int topK, String metric) { this.topK = topK; this.metric = metric; this.u = u; this.bw = bw; this.map_item_intFeatures = map_item_intFeatures; this.trainEval = trainEval; this.validEval = validEval; this.silent = silent; this.topN = topN; this.num_features = num_features; this.listC = listC; this.listEps = listEps; this.listSolverType = listSolverType; this.userTrainRatings = userTrainRatings; this.userValRatings = userValRatings; this.implicit = implicit; this.relUnknownItems = relUnknownItems; this.nValidNegEx = nValidNegEx; this.timesRealFb = timesRealFb; this.minTrainEx = minTrainEx; this.items = items; this.addNegValidationEx = addNegValidationEx; originalTrainItems = new HashSet<Integer>(); } public void run() { // Recommender pred; long start = System.currentTimeMillis(); Model model = train(); if (model != null) computeRecc(model); long stop = System.currentTimeMillis(); // logger.info("user " + u + " terminated in " + (stop - start) / 1000); } private void computeRecc(Model model) { Map<Double, Set<Integer>> map = new HashMap<Double, Set<Integer>>(); double pred; double[] prob_estimates; 
FeatureNode[] f; // System.out.println("train ratings for user " + u + " : " // + originalTrainItems.size()); // int count_pred=0; for (int id : items) { pred = 0; if (!originalTrainItems.contains(id)) { if (map_item_intFeatures.containsKey(id)) { // count_pred++; int[] fIDs = map_item_intFeatures.get(id).keys(); Arrays.sort(fIDs); int h = 0; for (int i = 0; i < fIDs.length; i++) { if (selectedFeatures.contains(fIDs[i])) { h++; } } f = new FeatureNode[h]; h = 0; for (int i = 0; i < fIDs.length; i++) { if (selectedFeatures.contains(fIDs[i])) { f[h] = new FeatureNode(fIDs[i], map_item_intFeatures.get(id).get(fIDs[i])); h++; } } // for (int i = 0; i < fIDs.length; i++) { // f[i] = new FeatureNode(fIDs[i], map_item_intFeatures // .get(id).get(fIDs[i])); // } if (model.isProbabilityModel() & implicit) { prob_estimates = new double[2]; Linear.predictProbability(model, f, prob_estimates); pred = prob_estimates[0]; } else pred = Linear.predict(model, f); if (!map.containsKey(pred)) map.put(pred, new HashSet()); // if (pred != 0) map.get(pred).add(id); } } } // System.out.println("n.pred :"+ count_pred); List<Double> scores = new ArrayList<Double>(); scores.addAll(map.keySet()); Collections.sort(scores, Collections.reverseOrder()); int c = 0; Iterator<Double> it = scores.iterator(); StringBuffer line = new StringBuffer(); line.append(u + "\t"); while (it.hasNext() & c < topN) { double s = it.next(); for (int i : map.get(s)) { if (c == topN) break; line.append(i + ":" + String.format("%.3f", s).replace(",", ".") + " "); c++; } } try { synchronized (bw) { bw.append(line); bw.newLine(); } } catch (IOException e) { e.printStackTrace(); } } public Model train() { Set<Integer> trainNegItems = new HashSet<Integer>(); Set<Integer> valNegItems = new HashSet<Integer>(); TObjectFloatHashMap<String> evalRes = new TObjectFloatHashMap<String>(); // System.out.println("n.user train ratings at the beginning " + // userTrainRatings.size()); originalTrainItems.addAll(userTrainRatings.keySet()); if (userValRatings == null) userValRatings = new HashMap<Integer, Float>(); if (implicit) { int negEx = timesRealFb * userTrainRatings.keySet().size(); if ((negEx + userTrainRatings.keySet().size()) < minTrainEx) negEx = (minTrainEx - userTrainRatings.keySet().size()); trainNegItems = selectNegativeItems(userTrainRatings.keySet(), items, negEx); valNegItems = selectNegativeItems(userValRatings.keySet(), items, this.nValidNegEx); } else if (userTrainRatings.keySet().size() < minTrainEx) { int negEx = (minTrainEx - userTrainRatings.keySet().size()); trainNegItems = selectNegativeItems(userTrainRatings.keySet(), items, negEx); } if (addNegValidationEx) { // System.out.println(); valNegItems = selectNegativeItems(userValRatings.keySet(), items, this.nValidNegEx); } // if(implicit) relUnknownItems = 0; for (int i : trainNegItems) userTrainRatings.put(i, relUnknownItems); for (int i : valNegItems) userValRatings.put(i, relUnknownItems); Model model = null; int nRows = userTrainRatings.keySet().size(); FeatureNode[][] XTrain = new FeatureNode[nRows][]; double[] yTrain = new double[nRows]; int[] trainUserIndex = new int[nRows]; int[] trainItemIndex = new int[nRows]; analyzeTrainingFeatures(userTrainRatings); buildDataset(u, userTrainRatings, XTrain, yTrain, trainUserIndex, trainItemIndex); // logger.info("Creating problem"); Problem problem = new Problem(); problem.l = XTrain.length;// number of training examples problem.n = num_features + 1; // number of features problem.x = XTrain; // feature nodes problem.y = yTrain; // target 
values // logger.info("Number of training examples: " + problem.l); // logger.info("Number of features: " + problem.n); nRows = userValRatings.keySet().size(); FeatureNode[][] XValid = new FeatureNode[nRows][]; double[] yValid = new double[nRows]; int[] validUserIndex = new int[nRows]; int[] validItemIndex = new int[nRows]; buildDataset(u, userValRatings, XValid, yValid, validUserIndex, validItemIndex); // logger.info("user " + u + " -- Number of training examples: " // + problem.l + ", Number of features: " + problem.n); Map<Integer, Float> trainMapPrec = new HashMap<Integer, Float>(); Map<Integer, Float> trainMapRec = new HashMap<Integer, Float>(); Map<Integer, Float> trainMapNDCG = new HashMap<Integer, Float>(); Map<Integer, Float> valMapPrec = new HashMap<Integer, Float>(); Map<Integer, Float> valMapRec = new HashMap<Integer, Float>(); Map<Integer, Float> valMapNDCG = new HashMap<Integer, Float>(); double bestPerf = 0, bestPerfTrain = 0, bestC = 0, bestEps=0; Model bestModel = null; int bestModelType = 0; long start=System.currentTimeMillis(); try { for (int solverType : listSolverType) { for (double c : listC) { for (double eps : listEps) { SolverType solver = SolverType.getById(solverType); Parameter parameter = new Parameter(solver, c, eps); if (silent) Linear.setDebugOutput(null); model = Linear.train(problem, parameter); Recommender pred = new Recommender(model, topN); // Map<Integer, List<Integer>> trainRecc = new // HashMap<Integer, List<Integer>>(); // trainRecc = pred.computeRecc(XTrain, trainUserIndex, // trainItemIndex, implicit); // // trainEval.eval(trainRecc, trainMapNDCG, trainMapPrec, // trainMapRec); Map<Integer, List<Integer>> validRecc = pred .computeRecc(XValid, validUserIndex, validItemIndex, implicit); evalRes = validEval.eval(validRecc); float perf = evalRes.get(metric); if (perf >= bestPerf) { bestPerf = perf; bestModel = model; bestModelType = solverType; bestC = c; bestEps = eps; } // System.out.println(" config -- model: " // + parameter.getSolverType() + ". C: " // + parameter.getC() + ", eps: " // + parameter.getEps() + ". validationset prec: " // + valMapPrec + ". validationset rec: " // + valMapRec); } } } } catch (Exception ex) { ex.printStackTrace(); return null; } if (bestModel == null) bestModel = model; long stop=System.currentTimeMillis(); // logger.info("best model - bestPerf_" + k_modeSel + ": " + bestPerf); // System.out // .println("-----------------------------------------------------------------"); System.out .println("-----------------------------------------------------------------"); System.out.println("user " + u + " --------- BEST MODEL " + bestModelType + ". C: " + bestC + ", eps: " + bestEps + " . 
Metric " + metric + "@" + topK + ": " + bestPerf + " n.train ex " + userTrainRatings.keySet().size() + " n.valid ex " + userValRatings.keySet().size()+" tot time: " +(stop-start)/1000); return model; } private void analyzeTrainingFeatures(Map<Integer, Float> ratings) { selectedFeatures = new TIntHashSet(); for (int id : ratings.keySet()) { if (map_item_intFeatures.containsKey(id)) { selectedFeatures.addAll(map_item_intFeatures.get(id).keySet()); } } // System.out.println(u + " train features " + selectedFeatures.size()); } private void buildDataset(int u, Map<Integer, Float> ratings, FeatureNode[][] X, double[] y, int[] userIndex, int[] itemIndex) { try { int j = 0; FeatureNode[] f; for (int id : ratings.keySet()) { if (map_item_intFeatures.containsKey(id)) { int[] fIDs = map_item_intFeatures.get(id).keys(); Arrays.sort(fIDs); int h = 0; for (int i = 0; i < fIDs.length; i++) { if (selectedFeatures.contains(fIDs[i])) { h++; } } f = new FeatureNode[h]; h = 0; for (int i = 0; i < fIDs.length; i++) { if (selectedFeatures.contains(fIDs[i])) { f[h] = new FeatureNode(fIDs[i], map_item_intFeatures.get(id).get(fIDs[i])); h++; } } X[j] = f; y[j] = ratings.get(id); userIndex[j] = u; itemIndex[j] = id; j++; } } } catch (Exception ex) { ex.printStackTrace(); } } private Set<Integer> selectNegativeItems(Set<Integer> positiveItems, Set<Integer> items, int N) { Set<Integer> set_candidate = new HashSet<Integer>(); set_candidate.addAll(items); set_candidate.removeAll(positiveItems); List<Integer> candidate = new ArrayList<Integer>(); candidate.addAll(set_candidate); return chooseRndItems(candidate, N); } private Set<Integer> chooseRndItems(List<Integer> list, int N) { Set<Integer> keys = new HashSet<Integer>(); Set<Integer> ret = new HashSet<Integer>(); Random r = new Random(); int cont = 0; if (list.size() < N) return ret; while (cont < N) { int rr = r.nextInt(list.size()); while (keys.contains(rr)) rr = r.nextInt(list.size()); keys.add(rr); cont++; } for (int k : keys) ret.add(list.get(k)); return ret; } }
13,031
26.263598
85
java
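UserModelRecommenderWorker fits one liblinear model per user, looping over the configured (solverType, C, eps) grid and keeping the configuration that performs best on the validation recommendations. A stripped-down sketch of the de.bwaldvogel.liblinear calls it builds on, with two toy examples and a single grid point; as in buildDataset above, feature indices start at 1 and are sorted within each example:

import de.bwaldvogel.liblinear.FeatureNode;
import de.bwaldvogel.liblinear.Linear;
import de.bwaldvogel.liblinear.Model;
import de.bwaldvogel.liblinear.Parameter;
import de.bwaldvogel.liblinear.Problem;
import de.bwaldvogel.liblinear.SolverType;

public class PerUserModelSketch {
    public static void main(String[] args) {
        // two toy training examples with sparse features
        FeatureNode[][] x = {
                { new FeatureNode(1, 1.0), new FeatureNode(3, 2.0) }, // positive item
                { new FeatureNode(2, 1.0) }                           // sampled "negative" item
        };
        double[] y = { 1.0, 0.0 };   // relevance targets (implicit-feedback style)

        Problem problem = new Problem();
        problem.l = x.length;        // number of training examples
        problem.n = 3;               // number of features
        problem.x = x;
        problem.y = y;

        // one point of the (solverType, C, eps) grid the worker searches over
        Parameter param = new Parameter(SolverType.L2R_LR, 1.0, 0.01);
        Model model = Linear.train(problem, param);

        // score an unseen item, as computeRecc() does for non-probabilistic scoring
        FeatureNode[] unseen = { new FeatureNode(1, 1.0) };
        System.out.println(Linear.predict(model, unseen));
    }
}

The worker additionally switches to Linear.predictProbability for probabilistic solvers under implicit feedback, and scores only items whose features survive the per-user feature selection.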
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/graphkernel/model/UserModelRecommender.java
package it.poliba.sisinflab.LODRec.graphkernel.model; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import it.poliba.sisinflab.LODRec.evaluation.Evaluator; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; public class UserModelRecommender { // private static final Float NEG_FB_VALUE = 0f; private static final Float POS_FB_VALUE = 1f; // private String workingDir; private TIntObjectHashMap<TIntFloatHashMap> map_item_intFeatures; private String outFile; private int nThreads; private boolean implicit; private int num_features; private List<Double> listC = new ArrayList<Double>(); private List<Double> listEps = new ArrayList<Double>(); private List<Integer> listSolverType = new ArrayList<Integer>(); private BufferedWriter bw; Map<Integer, Map<Integer, Float>> trainRatings; Map<Integer, Map<Integer, Float>> validationRatings; float evalRatingThresh, negRatingThresh, relUnknownItems; private String validRatingFile; private String trainRatingFile; private boolean silent = true; private int topN; private HashSet<Integer> items; private boolean addNegValidationEx; private int timesRealFb; private int nValidNegEx; private int minTrainEx; private int embeddingOption; private String entityMapFile; private String branchMapFile; private String listStrSolverType; private String listStrC; private String listStrEps; private String itemMetadataFile; private String evalMetric; private String metric; private int topK; private static Logger logger = LogManager .getLogger(UserModelRecommender.class.getName()); private void init() { if (embeddingOption == 1) itemMetadataFile = itemMetadataFile + entityMapFile; else if (embeddingOption == 2) itemMetadataFile = itemMetadataFile + branchMapFile; String[] parts = listStrC.split(","); for (int i = 0; i < parts.length; i++) { double val = Double.parseDouble(parts[i]); listC.add(val); } parts = listStrEps.split(","); for (int i = 0; i < parts.length; i++) { double val = Double.parseDouble(parts[i]); listEps.add(val); } parts = listStrSolverType.split(","); for (int i = 0; i < parts.length; i++) { int val = Integer.parseInt(parts[i]); listSolverType.add(val); } this.loadItemFeatureData(itemMetadataFile); trainRatings = this.loadRatingData(trainRatingFile); this.validationRatings = this.loadRatingData(validRatingFile); String[] str = evalMetric.split("@"); this.topK = Integer.parseInt(str[1]); this.metric = str[0]; } public UserModelRecommender(int topN, int nThreads, String reccOutputFile, String itemMetadataFile, int embeddingOption, String entityMapFile, String branchMapFile, String trainRatingFile, String validationRatingFile, boolean implicit, String listStrSolverType, String listStrC, String listStrEps, float evalRatingThresh, float relUnknownItems, float negRatingThresh, int timesRealFb, int nValidNegEx, int minTrainEx, boolean addNegValidationEx, String evalMetric) { this.topN = topN; this.nThreads = nThreads; this.outFile = reccOutputFile; this.itemMetadataFile = itemMetadataFile; this.embeddingOption = embeddingOption; this.entityMapFile = entityMapFile; this.branchMapFile = branchMapFile; 
this.trainRatingFile = trainRatingFile; this.validRatingFile = validationRatingFile; this.implicit = implicit; this.listStrSolverType = listStrSolverType; this.listStrC = listStrC; this.listStrEps = listStrEps; this.evalRatingThresh = evalRatingThresh; this.relUnknownItems = relUnknownItems; this.negRatingThresh = negRatingThresh; this.timesRealFb = timesRealFb; this.nValidNegEx = nValidNegEx; this.minTrainEx = minTrainEx; this.addNegValidationEx = addNegValidationEx; this.evalMetric = evalMetric; init(); } private void loadItemFeatureData(String file_name) { int maxfID = 0; this.items = new HashSet<Integer>(); map_item_intFeatures = new TIntObjectHashMap<TIntFloatHashMap>(); BufferedReader br; float avg = 0; try { br = new BufferedReader(new FileReader(file_name)); String line = null; while ((line = br.readLine()) != null) { try { String[] vals = line.split("\t"); int id = Integer.parseInt(vals[0]); if (!items.contains(id)) items.add(id); map_item_intFeatures.put(id, new TIntFloatHashMap()); String[] values = vals[1].trim().split(" "); for (int i = 0; i < values.length; i++) { String[] pair = values[i].split(":"); int fId = Integer.parseInt(pair[0]); float fVal = Float.parseFloat(pair[1]); map_item_intFeatures.get(id).put(fId, fVal); if (fId > maxfID) maxfID = fId; } avg += map_item_intFeatures.get(id).size(); } catch (Exception ex) { System.out.println(ex.getMessage()); System.out.println(line); } } br.close(); } catch (IOException e) { e.printStackTrace(); } num_features = maxfID; avg = avg / (float) map_item_intFeatures.keySet().size(); System.out .println("item data loading terminated. avg features (considering also collaborative features) per item: " + avg + ". n. features in the index: " + num_features); } public void exec() { try { bw = new BufferedWriter(new FileWriter(outFile)); Evaluator trainEval = new Evaluator(trainRatingFile, topK, evalRatingThresh, relUnknownItems); Evaluator validEval = new Evaluator(validRatingFile, topK, evalRatingThresh, relUnknownItems); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); int limit = -1; int count = 0; for (int u : trainRatings.keySet()) { count++; if (count == limit) break; // train ------------------ Map<Integer, Float> userTrainRatings = trainRatings.get(u); Map<Integer, Float> userValRatings = validationRatings.get(u); Runnable worker = new UserModelRecommenderWorker(u, bw, map_item_intFeatures, trainEval, validEval, silent, topN, num_features, listC, listEps, listSolverType, userTrainRatings, userValRatings, implicit, nValidNegEx, addNegValidationEx, timesRealFb, minTrainEx, items, relUnknownItems, topK, metric); // run the worker thread executor.execute(worker); } executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); bw.flush(); bw.close(); } catch (Exception e) { e.printStackTrace(); } } public Map<Integer, Map<Integer, Float>> loadRatingData(String filename) { Map<Integer, Map<Integer, Float>> ratings = new HashMap<Integer, Map<Integer, Float>>(); try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); while (line != null) { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); float rel = Float.parseFloat(str[2].trim()); if (items.contains(i)) { if (implicit) rel = POS_FB_VALUE; if (!ratings.containsKey(u)) ratings.put(u, new HashMap()); ratings.get(u).put(i, rel); } line = reader.readLine(); } } catch (IOException e) { } System.out.println(filename + " . 
loaded " + ratings.size() + " users"); return ratings; } }
7,770
26.853047
110
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/itemManager/PropertyIndexedItemTree.java
package it.poliba.sisinflab.LODRec.itemManager; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntIntHashMap; import java.io.Serializable; public class PropertyIndexedItemTree implements ItemTree, Serializable { private static final long serialVersionUID = 1L; private int item_id; private THashMap<String, TIntIntHashMap> branches; public PropertyIndexedItemTree(int item_id){ this.item_id = item_id; branches = new THashMap<String, TIntIntHashMap>(); } @Override public void setItemId(int item_id) { this.item_id = item_id; } @Override public int getItemId() { return item_id; } @Override public int size() { return branches.size(); } @Override public boolean isEmpty() { return size() == 0; } @Override public void addBranches(String key, int value){ if (!this.branches.containsKey(key)) this.branches.put(key, new TIntIntHashMap()); if(this.branches.get(key)!=null){ if(this.branches.get(key).containsKey(value)){ if(this.branches.get(key).get(value) < Integer.MAX_VALUE) this.branches.get(key).adjustValue(value, 1); } else this.branches.get(key).put(value, 1); } else{ TIntIntHashMap tmp = new TIntIntHashMap(); tmp.put(value, 1); this.branches.put(key, tmp); } } @Override public THashMap<String, TIntIntHashMap> getBranches() { return branches; } @Override public String serialize(){ StringBuffer res = new StringBuffer(); for(String s : branches.keySet()) res.append(this.item_id + "\t" + s + "\t" + branches.get(s) + "\n"); // drop the trailing newline return res.substring(0, res.length()-1); } @Override public boolean equals(Object object) { boolean isEqual= false; if (object != null && object instanceof PropertyIndexedItemTree) isEqual = (this.item_id == ((PropertyIndexedItemTree) object).item_id); return isEqual; } }
1,946
19.494737
77
java
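PropertyIndexedItemTree keys each branch by its property path and keeps a per-entity frequency counter, so adding the same (branch, entity) pair twice simply increments the count. A small usage sketch, assuming the class above and its Trove dependency are on the classpath; the branch keys and ids are arbitrary:

import it.poliba.sisinflab.LODRec.itemManager.PropertyIndexedItemTree;

public class ItemTreeSketch {
    public static void main(String[] args) {
        PropertyIndexedItemTree tree = new PropertyIndexedItemTree(12);

        // branch key "1-3" = property path (indices into props_index),
        // value 42 = metadata/entity index reached through that path
        tree.addBranches("1-3", 42);
        tree.addBranches("1-3", 42);   // same pair again -> frequency becomes 2
        tree.addBranches("1", 7);

        System.out.println(tree.size());                             // 2 distinct branches
        System.out.println(tree.getBranches().get("1-3").get(42));   // 2
        System.out.println(tree.serialize());
    }
}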
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/itemManager/ItemTree.java
package it.poliba.sisinflab.LODRec.itemManager; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntIntHashMap; public interface ItemTree{ void setItemId(int value); int getItemId(); int size(); boolean isEmpty(); void addBranches(String key, int value); THashMap<String, TIntIntHashMap> getBranches(); String serialize(); boolean equals(Object object); }
395
15.5
48
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sprank/itemPathExtractor/ItemPathExtractorWorker.java
package it.poliba.sisinflab.LODRec.sprank.itemPathExtractor; import java.util.ArrayList; import gnu.trove.iterator.TIntIntIterator; import gnu.trove.iterator.TIntIterator; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.TIntSet; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.fileManager.StringFileManager; import it.poliba.sisinflab.LODRec.fileManager.TextFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.utils.StringUtils; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; /** * This class is part of the LOD Recommender * * This class is used by PathExtractor for multi-threading paths extraction * * @author Vito Mastromarino */ public class ItemPathExtractorWorker implements Runnable { private SynchronizedCounter counter; // synchronized counter for path index private TObjectIntHashMap<String> path_index; // path index private TIntObjectHashMap<String> inverse_path_index; // key-value path index private ItemTree main_item; // main item private ArrayList<ItemTree> items; // items private TIntObjectHashMap<String> props_index; // property index private boolean inverseProps; // directed property private TextFileManager textWriter; private StringFileManager pathWriter; private boolean select_top_path; private TIntIntHashMap input_metadata_id; private boolean computeInversePaths; private int main_item_id; private TIntObjectHashMap<TIntHashSet> items_link; /** * Constuctor */ public ItemPathExtractorWorker(SynchronizedCounter counter, TObjectIntHashMap<String> path_index, TIntObjectHashMap<String> inverse_path_index, ItemTree main_item, ArrayList<ItemTree> items, TIntObjectHashMap<String> props_index, boolean inverseProps, TextFileManager textWriter, StringFileManager pathWriter, boolean select_top_path, TIntIntHashMap input_metadata_id, boolean computeInversePaths, TIntObjectHashMap<TIntHashSet> items_link){ this.counter = counter; this.path_index = path_index; this.main_item = main_item; this.items = items; this.props_index = props_index; this.inverseProps = inverseProps; this.textWriter = textWriter; this.pathWriter = pathWriter; this.select_top_path = select_top_path; this.input_metadata_id = input_metadata_id; this.computeInversePaths = computeInversePaths; this.items_link = items_link; this.inverse_path_index = inverse_path_index; } /** * run path extraction */ public void run(){ main_item_id = main_item.getItemId(); start(); } /** * start path extraction considering all the pairs main_item-items */ private void start(){ TIntIntHashMap paths = null; String item_pair_paths = ""; for(int j = 0; j < items.size(); j++){ ItemTree b = items.get(j); int b_id = b.getItemId(); paths = computePaths(main_item, b); if(paths.size() > 0){ item_pair_paths = main_item_id + "-" + b_id + "\t"; TIntIntIterator it = paths.iterator(); while(it.hasNext()){ it.advance(); item_pair_paths += it.key() + "=" + it.value() + ","; } item_pair_paths = item_pair_paths.substring(0, item_pair_paths.length()-1); // text file writing if(textWriter != null) textWriter.write(item_pair_paths); // binary file writing if(pathWriter != null) pathWriter.write(item_pair_paths); if(computeInversePaths){ item_pair_paths = b_id + "-" + main_item_id + "\t"; it = paths.iterator(); while(it.hasNext()){ it.advance(); item_pair_paths += reverse(it.key()) + "=" + it.value() + ","; } item_pair_paths = 
item_pair_paths.substring(0, item_pair_paths.length()-1); // text file writing if(textWriter != null) textWriter.write(item_pair_paths); // binary file writing if(pathWriter != null){ pathWriter.write(item_pair_paths); } } } } } private int reverse(int k){ String path; synchronized(inverse_path_index){ path = inverse_path_index.get(k); } String[] paths = path.split("#"); if(inverseProps) return extractKey(StringUtils.reverseDirected(paths[1], props_index) + "#" + StringUtils.reverseDirected(paths[0], props_index)); else return extractKey(StringUtils.reverse(paths[1]) + "#" + StringUtils.reverse(paths[0])); } /** * Extract paths from a pair of item trees * @param a first item tree * @param b second item tree * @return paths map (path index:freq) */ private TIntIntHashMap computePaths(ItemTree a, ItemTree b) { TIntIntHashMap items_path = new TIntIntHashMap(); //int a_id = a.getItemId(); int b_id = b.getItemId(); // get a branches THashMap<String, TIntIntHashMap> branches_a = ((ItemTree) a).getBranches(); // get b branches THashMap<String, TIntIntHashMap> branches_b = ((ItemTree) b).getBranches(); if(select_top_path) items_path = computeTopPaths(branches_a, branches_b, b_id); else{ for(String s : branches_a.keySet()){ // per item collegati in un solo hop if(input_metadata_id.containsKey(b_id)){ if(branches_a.get(s).containsKey(input_metadata_id.get(b_id))) items_path.put(extractKey(s), branches_a.get(s).get(input_metadata_id.get(b_id))); } for(String ss : branches_b.keySet()){ int path_id = 0; String path = ""; if(inverseProps) path = s + "#" + StringUtils.reverseDirected(ss, props_index); else path = s + "#" + StringUtils.reverse(ss); path_id = extractKey(path); TIntSet items = branches_a.get(s).keySet(); TIntSet items1 = branches_b.get(ss).keySet(); TIntSet tmp; // items intersection if(items.size() < items1.size()){ tmp = new TIntHashSet(items); tmp.retainAll(items1); } else{ tmp = new TIntHashSet(items1); tmp.retainAll(items); } if(tmp.size()>0){ TIntIterator it = tmp.iterator(); int count = 0; while(it.hasNext()){ int val = it.next(); count += (branches_a.get(s).get(val) * branches_b.get(ss).get(val)); } items_path.put(path_id, count); } } } } if(items_path.size() > 0){ synchronized(items_link){ items_link.putIfAbsent(main_item_id, new TIntHashSet()); items_link.get(main_item_id).add(b_id); items_link.putIfAbsent(b_id, new TIntHashSet()); items_link.get(b_id).add(main_item_id); } } return items_path; } private TIntIntHashMap computeTopPaths(THashMap<String, TIntIntHashMap> branches_a, THashMap<String, TIntIntHashMap> branches_b, int b_id) { TIntIntHashMap items_path = new TIntIntHashMap(); for(String s : branches_a.keySet()){ if(isPrefix(s)){ for(String ss : branches_b.keySet()){ if(isPostfix(ss)){ int path_id = 0; String path = ""; if(inverseProps) path = s + "#" + StringUtils.reverseDirected(ss, props_index); else path = s + "#" + StringUtils.reverse(ss); if(existKey(path)){ path_id = extractKey(path); TIntSet items = branches_a.get(s).keySet(); TIntSet items1 = branches_b.get(ss).keySet(); TIntSet tmp; // items intersection if(items.size() < items1.size()){ tmp = new TIntHashSet(items); tmp.retainAll(items1); } else{ tmp = new TIntHashSet(items1); tmp.retainAll(items); } if(tmp.size()>0){ TIntIterator it = tmp.iterator(); int count = 0; while(it.hasNext()){ int val = it.next(); count += (branches_a.get(s).get(val) * branches_b.get(ss).get(val)); } items_path.put(path_id, count); } } } } } } return items_path; } /** * Extract key from path index * @param s string to index * @return 
index of s */ private int extractKey(String s) { synchronized(path_index){ if(path_index.containsKey(s)) return path_index.get(s); else{ int id = counter.value(); path_index.put(s, id); if(computeInversePaths) inverse_path_index.put(id, s); return id; } } } private boolean existKey(String s){ if(path_index.containsKey(s)) return true; return false; } private boolean isPostfix(String s){ if(ItemPathExtractor.top_path_postfix.contains(s)) return true; return false; } private boolean isPrefix(String s){ if(ItemPathExtractor.top_path_prefix.contains(s)) return true; return false; } }
9,018
23.98338
118
java
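For every pair of branches, one from each item tree, computePaths above joins the two frequency maps on the entities they share and sums the products of the paired frequencies; the concatenation of the first branch with the reversed second branch becomes the path key. A plain-Java sketch of that counting step (the original reverses the second branch through StringUtils and interns the key in the shared, synchronized path index):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PathCountSketch {
    public static void main(String[] args) {
        // branch -> (entity id -> frequency) for two items
        Map<String, Map<Integer, Integer>> itemA = Map.of("1", Map.of(42, 2, 7, 1));
        Map<String, Map<Integer, Integer>> itemB = Map.of("3", Map.of(42, 3));

        Map<String, Integer> pathCounts = new HashMap<>();
        for (var a : itemA.entrySet()) {
            for (var b : itemB.entrySet()) {
                // in the original the second branch is reversed before joining
                String pathKey = a.getKey() + "#" + b.getKey();
                Set<Integer> shared = new HashSet<>(a.getValue().keySet());
                shared.retainAll(b.getValue().keySet());
                int count = 0;
                for (int e : shared)
                    count += a.getValue().get(e) * b.getValue().get(e);
                if (count > 0) pathCounts.put(pathKey, count);
            }
        }
        System.out.println(pathCounts);   // {1#3=6}
    }
}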
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sprank/itemPathExtractor/TopItemPathExtractor.java
package it.poliba.sisinflab.LODRec.sprank.itemPathExtractor; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.utils.ItemUtils; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; import it.poliba.sisinflab.LODRec.utils.TextFileUtils; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; /** * This class is part of the LOD Recommender * * This class extracts paths from item trees * * @author Vito Mastromarino */ public class TopItemPathExtractor { private String workingDir; private int nThreads; private String itemsFile; // metadata file name private boolean inverseProps; // directed property private TObjectIntHashMap<String> path_index; // path index private TIntObjectHashMap<String> props_index; // property index private SynchronizedCounter counter; // synchronized counter for path index private HashMap<Integer, Integer> path; private int numTopPaths; private int numItemTopPaths; private String propsIndexFile; private String pathIndexFile; private static Logger logger = LogManager .getLogger(TopItemPathExtractor.class.getName()); /** * Constuctor */ public TopItemPathExtractor(String workingDir, boolean inverseProps, String itemContentFile, int nThreads, int numTopPaths, int numItemTopPaths) { this.workingDir = workingDir; this.numItemTopPaths = numItemTopPaths; this.numTopPaths = numTopPaths; this.pathIndexFile = workingDir + "path_index"; this.propsIndexFile = this.workingDir + "props_index"; this.inverseProps = inverseProps; this.itemsFile = itemContentFile; this.nThreads = nThreads; loadPropsIndex(); } /** * load property index */ private void loadPropsIndex(){ props_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex(propsIndexFile, props_index); logger.debug("Properties index loaded"); } /** * start path extraction */ public void start(){ //nThreads = 4; logger.debug("Threads number: " + nThreads); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); counter = new SynchronizedCounter(); path_index = new TObjectIntHashMap<String>(); //path = new THashMap<String, TIntIntHashMap>(); path = new HashMap<Integer, Integer>(); logger.info("Top paths to select: " + numTopPaths); logger.info("Items to consider: " + numItemTopPaths); try { ItemFileManager itemReader = new ItemFileManager(itemsFile, ItemFileManager.READ); ArrayList<String> items_id = new ArrayList<String>(itemReader.getKeysIndex()); int num_items = items_id.size(); ArrayList<ItemTree> items = new ArrayList<ItemTree>(); ItemTree tmp = null; // carico dim_blocks items in verticale for(int i = 0; i < numItemTopPaths && i < num_items; i++){ tmp = itemReader.read(items_id.get(i)); if(tmp!=null) items.add(tmp); } for(ItemTree item : items){ // path extraction t-cols Runnable worker = new TopItemPathExtractorWorker(counter, path_index, item, items, props_index, inverseProps, path); // run the worker thread executor.execute(worker); } executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); itemReader.close(); } catch(Exception e){ e.printStackTrace(); } Map<Integer, Integer> sorted_paths = ItemUtils.sortByValues(path); 
TIntHashSet top_path_id = new TIntHashSet(); int i = 0; for(int p : sorted_paths.keySet()){ if(i < numTopPaths){ top_path_id.add(p); i++; } else break; } logger.info(top_path_id.size() + " of " + path.size() + " paths selected"); TObjectIntHashMap<String> top_path_index = new TObjectIntHashMap<String>(top_path_id.size()); i = 1; for(String ss : path_index.keySet()){ if(top_path_id.contains(path_index.get(ss))) top_path_index.put(ss, i++); } // write path index TextFileUtils.writeData(pathIndexFile, top_path_index); } }
4,423
26.308642
95
java
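TopItemPathExtractor accumulates a global frequency for each path observed among the first numItemTopPaths items, sorts the paths by frequency through ItemUtils.sortByValues, and keeps only the numTopPaths most frequent ones before rewriting the path index. Assuming the sort is descending by value (which the usage above implies), the selection step can be sketched with plain Java streams:

import java.util.Comparator;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class TopPathSelectionSketch {
    public static void main(String[] args) {
        Map<Integer, Integer> pathFreq = Map.of(10, 5, 11, 42, 12, 7); // path id -> frequency
        int numTopPaths = 2;

        Set<Integer> topPathIds = pathFreq.entrySet().stream()
                .sorted(Map.Entry.<Integer, Integer>comparingByValue(Comparator.reverseOrder()))
                .limit(numTopPaths)
                .map(Map.Entry::getKey)
                .collect(Collectors.toCollection(LinkedHashSet::new));

        System.out.println(topPathIds);   // [11, 12]
    }
}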
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sprank/itemPathExtractor/ItemPathExtractor.java
package it.poliba.sisinflab.LODRec.sprank.itemPathExtractor; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.hash.THashSet; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.fileManager.StringFileManager; import it.poliba.sisinflab.LODRec.fileManager.TextFileManager; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.utils.StringUtils; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; import it.poliba.sisinflab.LODRec.utils.TextFileUtils; import java.util.ArrayList; import java.util.Iterator; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; /** * This class is part of the LOD Recommender * * This class extracts paths from item trees * * @author Vito Mastromarino */ public class ItemPathExtractor { private String workingDir; private int nThreads; private String itemsFile; // metadata file private String metadataIndexFile; // metadata index file private TIntIntHashMap input_metadata_id; private String uriIdIndexFile; private int itemsInMemory; // number of items to load in memory private TObjectIntHashMap<String> path_index; // path index private String pathIndexFile; // path index file private TIntObjectHashMap<String> inverse_path_index; // key-value path index private TIntObjectHashMap<String> props_index; // properties index private String propsIndexFile; // properties file index private boolean inverseProps; // directed properties private boolean outputPathTextFormat; private boolean outputPathBinaryFormat; private String path_file; private boolean computeInversePaths; private boolean selectTopPaths; private int numTopPaths; private int numItemTopPaths; private TIntObjectHashMap<TIntHashSet> items_link; private String itemLinkFile; protected static THashSet<String> top_path_prefix; protected static THashSet<String> top_path_postfix; private SynchronizedCounter counter; // synchronized counter for path index private static Logger logger = LogManager .getLogger(ItemPathExtractor.class.getName()); /** * Constuctor */ public ItemPathExtractor(String workingDir, String itemMetadataFile, String pathFile, Boolean computeInversePaths, Boolean selectTopPaths, int numTopPaths, int numItemsTopPaths, Boolean outputPathBinaryFormat, Boolean outputPathTextFormat, Boolean inverseProps, int itemsInMemory, int nThreads) { this.workingDir = workingDir; this.itemsFile = itemMetadataFile; this.path_file = pathFile; this.computeInversePaths = computeInversePaths; this.selectTopPaths = selectTopPaths; this.outputPathBinaryFormat = outputPathBinaryFormat; this.outputPathTextFormat = outputPathTextFormat; this.inverseProps=inverseProps; this.nThreads = nThreads; this.itemsInMemory=itemsInMemory/2; this.numTopPaths = numTopPaths; this.numItemTopPaths = numItemsTopPaths; init(); } private void init() { this.propsIndexFile = this.workingDir + "props_index"; this.pathIndexFile = this.workingDir + "path_index"; this.metadataIndexFile = this.workingDir + "metadata_index"; this.uriIdIndexFile = workingDir + "input_uri_id"; this.itemLinkFile = workingDir + "items_link"; loadPropsIndex(); loadInputMetadataID(); if (computeInversePaths) logger.debug("Compute inverse paths abilited"); if (selectTopPaths) { logger.debug("Top paths selection 
abilited"); computeTopPaths(numTopPaths, numItemTopPaths); } } public void computeTopPaths(int numTopPaths, int numItemTopPaths){ TopItemPathExtractor top = new TopItemPathExtractor(workingDir, inverseProps, itemsFile, nThreads, numTopPaths, numItemTopPaths); top.start(); loadPathIndex(); computePathPrePostfix(); } private void computePathPrePostfix(){ top_path_prefix = new THashSet<String>(); top_path_postfix = new THashSet<String>(); for(String ss : path_index.keySet()){ top_path_prefix.add(ss.split("#")[0]); if(inverseProps) top_path_postfix.add(StringUtils.reverseDirected(ss.split("#")[1], props_index)); else top_path_postfix.add(StringUtils.reverse(ss.split("#")[1])); } logger.info("Top path prefixes: " + top_path_prefix.size()); logger.info("Top path postfixes: " + top_path_postfix.size()); } /** * load property index */ private void loadPropsIndex(){ props_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex(propsIndexFile, props_index); logger.debug("Properties index loading"); } /** * load path index */ private void loadPathIndex(){ path_index = new TObjectIntHashMap<String>(); inverse_path_index = new TIntObjectHashMap<String>(); TextFileUtils.loadIndex(pathIndexFile, path_index); if(computeInversePaths) TextFileUtils.loadIndex(pathIndexFile, inverse_path_index); logger.info("Path index loading: " + path_index.size() + " paths loaded"); } /** * load metadata input id */ private void loadInputMetadataID(){ input_metadata_id = new TIntIntHashMap(); TextFileUtils.loadInputMetadataID(metadataIndexFile, uriIdIndexFile, input_metadata_id); logger.debug("Metadata index loading"); } /** * start path extraction */ public void start(){ //nThreads = 4; logger.debug("Threads number: " + nThreads); logger.info("Path extraction started"); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); counter = new SynchronizedCounter(); items_link = new TIntObjectHashMap<TIntHashSet>(); if(!selectTopPaths){ path_index = new TObjectIntHashMap<String>(); if(computeInversePaths) inverse_path_index = new TIntObjectHashMap<String>(); } try { TextFileManager textWriter = null; if(outputPathTextFormat) textWriter = new TextFileManager(path_file + ".txt"); StringFileManager pathWriter = null; if(outputPathBinaryFormat) pathWriter = new StringFileManager(path_file, StringFileManager.WRITE); ItemFileManager itemReader = new ItemFileManager(itemsFile, ItemFileManager.READ); ArrayList<String> items_id = new ArrayList<String>(itemReader.getKeysIndex()); int num_items = items_id.size(); int index_v = 0; int index_o = 0; int index_item = 0; ArrayList<ItemTree> items = null; ItemTree tmp = null; if(num_items < itemsInMemory) { itemsInMemory = num_items/2; } while(index_v < num_items) { // creo lista items verticali ArrayList<ItemTree> items_v = new ArrayList<ItemTree>(); for(int i = index_v; i < (index_v + itemsInMemory) && i < num_items; i++){ tmp = itemReader.read(items_id.get(i)); if(tmp!=null) items_v.add(tmp); } index_o = index_v + itemsInMemory; while(index_o < num_items) { if(executor.isTerminated()) executor = Executors.newFixedThreadPool(nThreads); // carico lista items in orizzontali ArrayList<ItemTree> items_o = new ArrayList<ItemTree>(); for(int i = index_o; i < (index_o + itemsInMemory) && i < num_items; i++){ tmp = itemReader.read(items_id.get(i)); if(tmp!=null) items_o.add(tmp); } if(items_o.size() > 0) { for(ItemTree item : items_v){ Runnable worker = new ItemPathExtractorWorker(counter, path_index, inverse_path_index, item, items_o, props_index, inverseProps, 
textWriter, pathWriter, selectTopPaths, input_metadata_id, computeInversePaths, items_link); executor.execute(worker); } } // calcolo blocchi sulla diagonale // primo blocco diagonale solo su items verticali if(index_o == itemsInMemory) { Iterator<ItemTree> it = items_v.iterator(); ItemTree item = null; index_item = 0; while(it.hasNext()) { item = it.next(); //index_item = items_v.indexOf(item); items = new ArrayList<ItemTree>(); items.addAll(items_v.subList(index_item + 1, items_v.size())); index_item++; if(items.size() > 0) { Runnable worker = new ItemPathExtractorWorker(counter, path_index, inverse_path_index, item, items, props_index, inverseProps, textWriter, pathWriter, selectTopPaths, input_metadata_id, computeInversePaths, items_link); executor.execute(worker); } } } // altri blocchi diagonali su items orizzontali if(index_v == 0) { Iterator<ItemTree> it = items_o.iterator(); ItemTree item = null; index_item = 0; while(it.hasNext()) { item = it.next(); //index_item = items_o.indexOf(item); items = new ArrayList<ItemTree>(); items.addAll(items_o.subList(index_item + 1, items_o.size())); index_item++; if(items.size() > 0) { Runnable worker = new ItemPathExtractorWorker(counter, path_index, inverse_path_index, item, items, props_index, inverseProps, textWriter, pathWriter, selectTopPaths, input_metadata_id, computeInversePaths, items_link); executor.execute(worker); } } } executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); index_o += itemsInMemory; } if(index_v + itemsInMemory < num_items - itemsInMemory) index_v += itemsInMemory; else index_v = num_items; logger.info(index_v + " of " + num_items + " items completed"); } itemReader.close(); if(textWriter!=null) textWriter.close(); if(pathWriter!=null) pathWriter.close(); TextFileUtils.writeData(pathIndexFile, path_index); TextFileUtils.writeTIntMapTIntHashSet(itemLinkFile, items_link); } catch(Exception e){ e.printStackTrace(); } } }
10,408
27.132432
102
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sprank/itemPathExtractor/TopItemPathExtractorWorker.java
package it.poliba.sisinflab.LODRec.sprank.itemPathExtractor; import java.util.ArrayList; import java.util.HashMap; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import gnu.trove.iterator.TIntIntIterator; import gnu.trove.iterator.TIntIterator; import gnu.trove.iterator.TObjectIntIterator; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.TIntSet; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.itemManager.ItemTree; import it.poliba.sisinflab.LODRec.utils.StringUtils; import it.poliba.sisinflab.LODRec.utils.SynchronizedCounter; /** * This class is part of the LOD Recommender * * This class is used by PathExtractor for multi-threading paths extraction * * @author Vito Mastromarino */ public class TopItemPathExtractorWorker implements Runnable { private SynchronizedCounter counter; // synchronized counter for path index private TObjectIntHashMap<String> path_index; // path index private ItemTree main_item; // main item private ArrayList<ItemTree> items; // items private TIntObjectHashMap<String> props_index; // property index private boolean inverseProps; // directed property private HashMap<Integer, Integer> path; private static Logger logger = LogManager.getLogger(TopItemPathExtractorWorker.class.getName()); /** * Constuctor */ public TopItemPathExtractorWorker(SynchronizedCounter counter, TObjectIntHashMap<String> path_index, ItemTree main_item, ArrayList<ItemTree> items, TIntObjectHashMap<String> props_index, boolean inverseProps, HashMap<Integer, Integer> path){ this.counter = counter; this.path_index = path_index; this.main_item = main_item; this.items = items; this.props_index = props_index; this.inverseProps = inverseProps; this.path = path; } /** * run path extraction */ public void run(){ logger.info("item " + main_item.getItemId() + ": start paths extraction"); long start = System.currentTimeMillis(); start(); long stop = System.currentTimeMillis(); logger.info("item " + main_item.getItemId() + ": paths extraction terminated in [sec]: " + ((stop - start) / 1000)); } /** * start path extraction considering all the pairs main_item-items */ public void start(){ int count = 0; // number of computed pairs int main_item_id = main_item.getItemId(); TIntIntHashMap paths = null; // indicates the item from which to start the extraction boolean start = false; for(int j = 0; j < items.size(); j++){ ItemTree b = items.get(j); int b_id = b.getItemId(); if(start){ paths = computePaths(main_item, b); if(paths!=null){ count++; TIntIntIterator it = paths.iterator(); while(it.hasNext()){ it.advance(); int key = it.key(); int value = it.value(); synchronized(path){ if(path.containsKey(key)) value += path.get(key); path.put(key, value); } } } } if(!start){ if(main_item_id==b_id) start = true; } } logger.debug(main_item_id + ": extraction completed (computed pairs " + count + ")"); } /** * Reverse directed paths * @param in path to reverse * @return reversed path */ public String reverse(String in){ String p = StringUtils.reverse(in); String[] vals = p.split("-"); String out = ""; String prop = ""; String to_search = ""; for(String s : vals){ prop = props_index.get(Integer.parseInt(s)); to_search = ""; if(prop.startsWith("inv_")) to_search = prop.substring(4); else to_search = "inv_" + prop; for(int i : props_index.keys()){ if(props_index.get(i).equals(to_search)) out += i + "-"; } } return out.substring(0, 
out.length()-1); } /** * Get string path from index * @param index index of the string path * @return string path */ public String getPathFromIndex(int index){ synchronized(path_index){ TObjectIntIterator<String> it = path_index.iterator(); while(it.hasNext()){ it.advance(); if(it.value()==index) return (String) it.key(); } return null; } } /** * Extract paths from a pair of item trees * @param a first item tree * @param b second item tree * @return paths map (path index:freq) */ public TIntIntHashMap computePaths(ItemTree a, ItemTree b) { TIntIntHashMap items_path = new TIntIntHashMap(); // get a branches THashMap<String, TIntIntHashMap> branches_a = ((ItemTree) a).getBranches(); // get b branches THashMap<String, TIntIntHashMap> branches_b = ((ItemTree) b).getBranches(); //System.out.println(branches_a.size() + "-" + branches_b.size()); for(String s : branches_a.keySet()){ TIntSet items = branches_a.get(s).keySet(); for(String ss : branches_b.keySet()){ TIntSet items1 = branches_b.get(ss).keySet(); TIntSet tmp; // items intersection if(items.size() < items1.size()){ tmp = new TIntHashSet(items); tmp.retainAll(items1); } else{ tmp = new TIntHashSet(items1); tmp.retainAll(items); } if(tmp.size()>0){ TIntIterator it = tmp.iterator(); int count = 0; while(it.hasNext()){ int val = it.next(); count += (branches_a.get(s).get(val) * branches_b.get(ss).get(val)); } if(inverseProps) items_path.put(extractKey(s + "#" + reverse(ss)), count); else items_path.put(extractKey(s + "#" + StringUtils.reverse(ss)), count); } } } return items_path; } /** * Extract key from path index * @param s string to index * @return index of s */ private int extractKey(String s) { synchronized(path_index){ if(path_index.contains(s)){ return path_index.get(s); } else{ int id = counter.value(); path_index.put(s, id); return id; } } } }
6,151
21.869888
97
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sprank/userPathExtractor/UserPathExtractorWorker.java
package it.poliba.sisinflab.LODRec.sprank.userPathExtractor; import java.io.BufferedWriter; import java.io.IOException; import java.math.RoundingMode; import java.text.DecimalFormat; import java.util.ArrayList; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import gnu.trove.iterator.TIntIterator; import gnu.trove.list.array.TIntArrayList; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.fileManager.StringFileManager; import it.poliba.sisinflab.LODRec.utils.StringUtils; /** * This class is part of the LOD Recommender * <p> * This class is used by UserPathExtractor for multi-threading paths extraction * * @author Vito Mastromarino */ public class UserPathExtractorWorker implements Runnable { private ArrayList<String> items_id; // items private TIntArrayList user_items; private int user_id; private TIntFloatHashMap trainRatings; private TIntFloatHashMap validationRatings; private BufferedWriter train_file; private BufferedWriter validation_file; private BufferedWriter test_file; private boolean normalize; private THashMap<String, String> items_path_index; private StringFileManager pathReader; private TObjectIntHashMap<String> path_index; private THashMap<String, String> paths; private String path_file; private int user_items_sampling; private float ratesThreshold; private TIntObjectHashMap<TIntHashSet> items_link; private TIntHashSet items_to_process; private static Logger logger = LogManager.getLogger(UserPathExtractorWorker.class.getName()); /** * Constuctor */ public UserPathExtractorWorker(int user_id, TIntFloatHashMap trainRatings, TIntFloatHashMap validationRatings, ArrayList<String> items_id, BufferedWriter train_file, BufferedWriter validation_file, BufferedWriter test_file, boolean normalize, THashMap<String, String> items_path_index, String path_file, TObjectIntHashMap<String> path_index, THashMap<String, String> paths, int user_items_sampling, float ratesThreshold, TIntObjectHashMap<TIntHashSet> items_link) { this.user_id = user_id; this.items_id = items_id; this.trainRatings = trainRatings; this.validationRatings = validationRatings; this.train_file = train_file; this.validation_file = validation_file; this.test_file = test_file; this.normalize = normalize; this.items_path_index = items_path_index; this.path_index = path_index; this.paths = paths; this.path_file = path_file; this.user_items_sampling = user_items_sampling; this.ratesThreshold = ratesThreshold; this.items_link = items_link; } /** * run path extraction */ public void run() { try { start(); } catch (NumberFormatException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (ClassNotFoundException e) { // TODO Auto-generated catch block e.printStackTrace(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } /** * start path extraction considering all the pairs main_item-items * * @throws IOException */ public void start() throws NumberFormatException, ClassNotFoundException, IOException { long start = System.currentTimeMillis(); user_items = new TIntArrayList(); items_to_process = new TIntHashSet(); for (int item_rated : trainRatings.keys()) { /* controllo se l'item * 1) è presente nel file metadata, * 2) è collegato ad altri item */ if (items_id.contains(Integer.toString(item_rated)) && 
items_link.contains(item_rated)) { user_items.add(item_rated); } } int real_num_items = user_items.size(); int num_user_items = (user_items_sampling * user_items.size()) / 100; user_items = (TIntArrayList) user_items.subList(0, num_user_items); if (user_items.size() > 0) { TIntIterator it = user_items.iterator(); while (it.hasNext()) items_to_process.addAll(items_link.get(it.next())); //logger.info("user " + user_id + " start paths extraction"); pathReader = new StringFileManager(path_file, items_path_index); TIntIterator it1 = items_to_process.iterator(); while (it1.hasNext()) { int item_id = it1.next(); buildFeatureVector(item_id, computePaths(item_id)); } pathReader.close(); synchronized (train_file) { train_file.flush(); } synchronized (validation_file) { validation_file.flush(); } synchronized (test_file) { test_file.flush(); } } long stop = System.currentTimeMillis(); logger.info("user " + user_id + "(" + user_items.size() + "/" + real_num_items + " items rated): paths extraction terminated in [sec] " + ((stop - start) / 1000)); } /** * Extract paths from a user tree and an item tree * * @return paths map (path index:freq) * @throws IOException * @throws ClassNotFoundException * @throws NumberFormatException */ public TIntIntHashMap computePaths(int item_id) { TIntIntHashMap res = new TIntIntHashMap(); String item_pair_paths; boolean reverse; TIntIterator it = user_items.iterator(); while (it.hasNext()) { reverse = false; int user_item_id = it.next(); if (items_link.get(user_item_id).contains(item_id)) { if (user_item_id != item_id) { String user_item_rate = StringUtils.extractRate( trainRatings.get(user_item_id), ratesThreshold); String key = user_item_id + "-" + item_id; if (!pathReader.containsKey(key)) { reverse = true; key = item_id + "-" + user_item_id; } item_pair_paths = loadPathsFromMap(key); String[] pair_vals = item_pair_paths.split(","); if (pair_vals.length > 0) { for (String s : pair_vals) { String[] path_freq = s.split("="); int key1 = 0; if (reverse) key1 = extractKey(user_item_rate + "-inv_" + path_freq[0]); else key1 = extractKey(user_item_rate + "-" + path_freq[0]); res.adjustOrPutValue(key1, Integer.parseInt(path_freq[1]), Integer.parseInt(path_freq[1])); } } } } } return res; } private String loadPathsFromFile(String key) { return pathReader.read(key); } private String loadPathsFromMap(String key) { if (paths.containsKey(key)) return paths.get(key); else return loadPathsFromFile(key); } /** * Extract key from path index * * @param s string to index * @return index of s */ private int extractKey(String s) { return path_index.get(s); } private void buildFeatureVector(int item_id, TIntIntHashMap paths) { try { double rate = 0; double n = 1; boolean training = false, validation = false; DecimalFormat form = new DecimalFormat("#.####"); form.setRoundingMode(RoundingMode.CEILING); StringBuffer str = new StringBuffer(); if (trainRatings.containsKey(item_id)) { training = true; rate = trainRatings.get(item_id); } else if (validationRatings.containsKey(item_id)) { validation = true; rate = validationRatings.get(item_id); } if (normalize) n = norm(paths); str.append(rate + " qid:" + user_id + " 1:" + item_id + " "); for (int i = 1; i <= path_index.size(); i++) { int count = 0; if (paths.size() == 1) n = norm(paths); if (paths.containsKey(i)) { count = paths.get(i); if (normalize) str.append(i + 1 + ":" + form.format(count / n).replace(",", ".") + " "); else str.append(i + 1 + ":" + count + " "); } } if (training) { synchronized (train_file) { train_file.append(str); 
train_file.newLine(); } } else if (validation) { synchronized (validation_file) { validation_file.append(str); validation_file.newLine(); } } else { synchronized (test_file) { test_file.append(str); test_file.newLine(); } } } catch (Exception e) { e.printStackTrace(); } } private double norm(TIntIntHashMap map) { int sum = 0; for (int i : map.keys()) { sum += (Math.pow(map.get(i), 2)); } return Math.sqrt(sum); } }
10,472
30.077151
104
java
lodreclib
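For readers skimming this dump: a minimal, self-contained sketch (not part of the repository) of the SVMrank-style line that UserPathExtractorWorker.buildFeatureVector appends to the train/validation/test files. The user id, item id, rating and path counts below are made-up values; only the layout (rating, qid for the user, feature 1 for the item id, then index:value pairs shifted by one) mirrors the code above.

import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;

public class FeatureLineSketch {
    public static void main(String[] args) {
        // hypothetical example: user 7 rated item 42 with 5.0,
        // and three path features (indices into the path index) were counted
        int userId = 7, itemId = 42;
        double rating = 5.0;
        Map<Integer, Integer> pathCounts = new LinkedHashMap<Integer, Integer>();
        pathCounts.put(1, 3);
        pathCounts.put(4, 1);
        pathCounts.put(9, 2);

        // L2 norm over the raw counts, as in UserPathExtractorWorker.norm(...)
        double n = 0;
        for (int c : pathCounts.values()) n += c * c;
        n = Math.sqrt(n);

        StringBuilder line = new StringBuilder();
        line.append(rating).append(" qid:").append(userId).append(" 1:").append(itemId).append(" ");
        // path feature i is written as column i+1 because column 1 already holds the item id
        for (Map.Entry<Integer, Integer> e : pathCounts.entrySet())
            line.append(e.getKey() + 1).append(":")
                .append(String.format(Locale.ROOT, "%.4f", e.getValue() / n)).append(" ");
        System.out.println(line.toString().trim());
        // prints: 5.0 qid:7 1:42 2:0.8018 5:0.2673 10:0.5345
    }
}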
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/sprank/userPathExtractor/UserPathExtractor.java
package it.poliba.sisinflab.LODRec.sprank.userPathExtractor; import gnu.trove.iterator.TFloatIterator; import gnu.trove.iterator.TIntObjectIterator; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.hash.TFloatHashSet; import gnu.trove.set.hash.TIntHashSet; import it.poliba.sisinflab.LODRec.fileManager.FileManager; import it.poliba.sisinflab.LODRec.fileManager.ItemFileManager; import it.poliba.sisinflab.LODRec.fileManager.StringFileManager; import it.poliba.sisinflab.LODRec.utils.StringUtils; import it.poliba.sisinflab.LODRec.utils.TextFileUtils; import java.io.BufferedWriter; import java.io.FileWriter; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; /** * This class is part of the LOD Recommender * * This class extracts paths from user and item trees * * @author Vito Mastromarino */ public class UserPathExtractor { private String workingDir; private int nThreads; private TObjectIntHashMap<String> path_index; // path index private TIntObjectHashMap<TIntFloatHashMap> trainRatings; // map user-[item-rating] private TIntObjectHashMap<TIntFloatHashMap> validationRatings; // map user-[item-rating] private String trainRatingFile; // input user train ratings filename private String validationRatingFile; // input user validation ratings filename private boolean normalize; private String path_file; private String pathIndexFile; private String itemsFile; private THashMap<String, String> paths; private int paths_in_memory; private THashMap<String, String> items_path_index; private int user_items_sampling; private float ratesThreshold; private TIntObjectHashMap<TIntHashSet> items_link; private TFloatHashSet labels; private String itemLinkFile; private String trainFile; private String validationFile; private String testFile; private String userPathIndexFile; private int numUsersValidationSet; private static Logger logger = LogManager.getLogger(UserPathExtractor.class.getName()); /** * Constuctor */ public UserPathExtractor(String workingDir, String trainRatingFile, String validationRatingFile, boolean normalize, String pathFile, String itemMetadataFile, int paths_in_memory, int user_items_sampling, float ratingThreshold, int nThreads) { this.workingDir = workingDir; this.trainRatingFile = trainRatingFile; this.validationRatingFile = validationRatingFile; this.normalize = normalize; this.path_file = pathFile; this.itemsFile = itemMetadataFile; this.paths_in_memory = paths_in_memory; this.user_items_sampling = user_items_sampling; this.ratesThreshold = ratingThreshold; this.nThreads = nThreads; init(); } private void init() { this.pathIndexFile = this.workingDir + "path_index"; this.itemLinkFile = workingDir + "items_link"; this.trainFile = workingDir + "train"; this.validationFile = workingDir + "validation"; this.testFile = workingDir + "test"; this.userPathIndexFile = workingDir + "user_path_index"; loadRatings(); loadItemPaths(); buildPathIndex(); loadItemsLink(); } private void loadRatings() { labels = new TFloatHashSet(); trainRatings = new TIntObjectHashMap<TIntFloatHashMap>(); TextFileUtils.loadInputUsersRatings(trainRatingFile, trainRatings, labels); numUsersValidationSet = trainRatings.size()/nThreads + 1; 
logger.info("Users per test set: " + numUsersValidationSet + "(" + trainRatings.size() + ")"); validationRatings = new TIntObjectHashMap<TIntFloatHashMap>(); TextFileUtils.loadInputUsersRatings(validationRatingFile, validationRatings, labels); } private void loadItemsLink(){ logger.info("Items link loading"); items_link = new TIntObjectHashMap<TIntHashSet>(); TextFileUtils.loadTIntMapTIntHashSet(itemLinkFile, items_link); } private void loadItemPaths(){ StringFileManager pathReader; /* se non carico tutti i paths in memoria, * calcolo quali items, tra quelli votati dall'utente, * sono più ricorrenti, in modo da privilegiare il caricamento * in memoria dei path più interessanti. */ if(paths_in_memory < 100) { HashMap<Integer, Integer> items_count = new HashMap<Integer, Integer>(); TIntObjectIterator<TIntFloatHashMap> it = trainRatings.iterator(); while(it.hasNext()){ it.advance(); for(int i : it.value().keys()){ if(!items_count.containsKey(i)) items_count.put(i, 1); else{ int old_count = items_count.get(i); items_count.put(i, old_count + 1); } } } pathReader = new StringFileManager(path_file, items_count.keySet()); } else pathReader = new StringFileManager(path_file, FileManager.READ); logger.info("Paths file index loading"); items_path_index = new THashMap<String, String>(pathReader.getFileIndex()); long num_item_pair = (paths_in_memory * items_path_index.size()) / 100; logger.info("Loading " + num_item_pair + " of " + items_path_index.size() + " (" + paths_in_memory + "%) item pair paths in memory"); paths = pathReader.read(num_item_pair); logger.info("Paths loading completed"); pathReader.close(); } private void buildPathIndex(){ path_index = new TObjectIntHashMap<String>(); // costruisce l'index di tutti i possibili path dell'utente HashSet<String> string_labels = new HashSet<String>(); TFloatIterator it = labels.iterator(); while(it.hasNext()) string_labels.add(StringUtils.extractRate(it.next(), ratesThreshold)); TextFileUtils.computeIndex(pathIndexFile, path_index, string_labels); logger.info("New path index built: " + path_index.size() + " paths loaded"); } /** * start path extraction */ public void start(){ //nThreads = 4; logger.debug("Threads number: " + nThreads); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); try { BufferedWriter train_file = new BufferedWriter(new FileWriter( trainFile)); BufferedWriter validation_file = new BufferedWriter(new FileWriter( validationFile)); BufferedWriter test_file = new BufferedWriter(new FileWriter( testFile)); ItemFileManager itemReader = new ItemFileManager(itemsFile, ItemFileManager.READ); ArrayList<String> items_list = new ArrayList<String>(itemReader.getKeysIndex()); itemReader.close(); logger.info("Users: " + trainRatings.size()); logger.info("Items: " + items_list.size()); int[] users = trainRatings.keys(); Arrays.sort(users); int count = 0; for(int user_id : users){ if(executor.isTerminated()) { executor = Executors.newFixedThreadPool(nThreads); test_file = new BufferedWriter(new FileWriter( testFile + "_" + count)); } // path extraction worker user-items Runnable worker = new UserPathExtractorWorker(user_id, trainRatings.get(user_id), validationRatings.get(user_id), items_list, train_file, validation_file, test_file, normalize, items_path_index, path_file, path_index, paths, user_items_sampling, ratesThreshold, items_link); // run the worker thread executor.execute(worker); count++; if(count % numUsersValidationSet == 0) { executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, 
TimeUnit.NANOSECONDS); test_file.flush(); test_file.close(); } } if(!executor.isTerminated()) { // This will make the executor accept no new threads // and finish all existing threads in the queue executor.shutdown(); // Wait until all threads are finished executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); test_file.flush(); test_file.close(); } train_file.flush(); train_file.close(); validation_file.flush(); validation_file.close(); } catch(Exception e){ e.printStackTrace(); } // write path index TextFileUtils.writeData(userPathIndexFile, path_index); } }
8,432
27.683673
88
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/BST.java
package it.poliba.sisinflab.LODRec.utils; import java.util.ArrayList; import java.util.List; public class BST { private Node root; private int numNodes = 0; private int maxSize = 0; private List<Double> sortedKeys; private List<Integer> sortedValues; public BST(int max) { this.maxSize = max; } public int getNumNodes() { return this.numNodes; } private class Node { private double key; private int value; private boolean isRoot = false; private Node left, right; public Node(double k, int v) { this.key = k; this.value = v; } public Node(double k, int v, boolean isRoot) { this.key = k; this.value = v; this.isRoot = isRoot; } } public void visit() { sortedKeys = new ArrayList<Double>(); sortedValues = new ArrayList<Integer>(); if (root != null) visit(root); } private void visit(Node node) { if (node.right != null) visit(node.right); this.sortedKeys.add(node.key); this.sortedValues.add(node.value); if (node.left != null) visit(node.left); } public List<Integer> getSortedValues() { return this.sortedValues; } public List<Double> getSortedKeys() { return this.sortedKeys; } public void insert(double k, int v) { // System.out.println("inserting " + k); // this.numNodes++; if (root == null) { root = new Node(k, v, true); this.numNodes++; } else insert(root, k, v); // keep only the maxSize nodes with highest value if (this.numNodes > this.maxSize) { // System.out.println("---pruning----"); // Node min = getMin(); deleteMin(root); // delete min } } // public Node getMin() { // if (root == null) // return null; // else // return getMin(root); // } // // private Node getMin(Node node) { // if (node.left == null) // return node; // else // return getMin(node.left); // } private Node deleteMin(Node node) { // min node is root if (node.isRoot & node.left == null) { this.root = node.right; this.root.isRoot = true; this.numNodes--; return null; } // min node has no childs if (node.left == null & node.right == null) { // if true -> node is min this.numNodes--; return null; // min node has right child } else if (node.left == null & node.right != null) { this.numNodes--; return node.right; } Node ret = deleteMin(node.left); node.left = ret; return node; } public Node insert(Node curr, double k, int v) { if (k >= curr.key) { if (curr.right != null) return insert(curr.right, k, v); else { curr.right = new Node(k, v); this.numNodes++; } } else { if (curr.left != null) return insert(curr.left, k, v); else { curr.left = new Node(k, v); this.numNodes++; } } return null; } public static void main(String[] args) { BST bst = new BST(10); bst.insert(6.0, 2); bst.insert(3.0, 3); bst.insert(1.0, 5); bst.insert(5.0, 5); bst.insert(7.0, 3); bst.insert(6.0, 2); bst.insert(8.0, 3); bst.insert(3.0, 3); bst.insert(4.0, 5); bst.insert(0.0, 3); bst.insert(1.0, 2); bst.insert(10.0, 2); bst.insert(1.0, 2); bst.insert(100.0, 2); bst.insert(15.0, 2); bst.insert(110.0, 2); bst.insert(5.0, 5); bst.insert(7.0, 3); bst.insert(6.0, 2); bst.insert(8.0, 3); bst.insert(3.0, 3); bst.insert(4.0, 5); bst.insert(0.0, 3); bst.insert(1.0, 2); bst.insert(5.0, 5); bst.insert(7.0, 3); bst.insert(6.0, 2); bst.insert(8.0, 3); bst.insert(3.0, 3); bst.insert(4.0, 5); bst.insert(0.0, 3); bst.insert(1.0, 2); bst.insert(10.0, 2); bst.insert(100.0, 2); bst.insert(15.0, 2); bst.insert(110.0, 2); bst.insert(3.0, 3); bst.insert(4.0, 5); bst.insert(0.0, 3); bst.insert(1.0, 2); bst.insert(10.0, 2); bst.insert(100.0, 2); bst.insert(15.0, 2); System.out.println("final tree"); System.out.println("num nodes " + bst.numNodes); bst.visit(); System.out.println("size: " + 
bst.sortedKeys.size()); for (double i : bst.sortedKeys) { System.out.println(i); } // System.out.println(bst.getMin().key); // bst.descVisit(); // bst.deleteMin(bst.root); } }
4,124
17.415179
55
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/PropertyFileReader.java
package it.poliba.sisinflab.LODRec.utils; import java.io.FileInputStream; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Properties; public class PropertyFileReader { public static Map<String, String> loadProperties(String fileName) throws IOException{ Properties props = new Properties(); FileInputStream fis = new FileInputStream(fileName); //loading properties from properties file props.load(fis); //reading property Map<String, String> prop = new HashMap<String, String>(); // general settings if(props.containsKey("recommendationAlgorithm")) prop.put("recAlgorithm", props.getProperty("recAlgorithm")); if(props.containsKey("dataExtraction")) prop.put("dataExtraction", props.getProperty("dataExtraction")); if(props.containsKey("itemGraphEmbedding")) prop.put("itemGraphEmbedding", props.getProperty("itemGraphEmbedding")); if(props.containsKey("itemPathExtraction")) prop.put("itemPathExtraction", props.getProperty("itemPathExtraction")); if(props.containsKey("userPathExtraction")) prop.put("userPathExtraction", props.getProperty("userPathExtraction")); if(props.containsKey("computeRecommendation")) prop.put("computeRec", props.getProperty("computeRecommendation")); if(props.containsKey("evaluation")) prop.put("evaluation", props.getProperty("evaluation")); if(props.containsKey("evaluationDir")) prop.put("evaluationDir", props.getProperty("evaluationDir")); if(props.containsKey("workingDir")) prop.put("workingDir", props.getProperty("workingDir")); if(props.containsKey("nThreads")) prop.put("nThreads", props.getProperty("nThreads")); if(props.containsKey("implicit")) prop.put("implicit", props.getProperty("implicit")); if(props.containsKey("inputTrainRatingFile")) prop.put("inputTrainRatingFile", props.getProperty("inputTrainRatingFile")); if(props.containsKey("inputValidationRatingFile")) prop.put("inputValidationRatingFile", props.getProperty("inputValidationRatingFile")); if(props.containsKey("inputTestRatingFile")) prop.put("inputTestRatingFile", props.getProperty("inputTestRatingFile")); // data extraction if(props.containsKey("endpoint")) prop.put("endpoint", props.getProperty("endpoint")); if(props.containsKey("graphURI")) prop.put("graphURI", props.getProperty("graphURI")); if(props.containsKey("localDatasetFile")) prop.put("localDatasetFile", props.getProperty("localDatasetFile")); if(props.containsKey("tdbDirectory")) prop.put("tdbDirectory", props.getProperty("tdbDirectory")); if(props.containsKey("inputItemURIsFile")) prop.put("inputItemURIsFile", props.getProperty("inputItemURIsFile")); if(props.containsKey("directed")) prop.put("directed", props.getProperty("directed")); if(props.containsKey("propertiesFile")) prop.put("propertiesFile", props.getProperty("propertiesFile")); if(props.containsKey("itemsMetadataFile")) prop.put("itemsMetadataFile", props.getProperty("itemsMetadataFile")); if(props.containsKey("outputTextFormat")) prop.put("outputTextFormat", props.getProperty("outputTextFormat")); if(props.containsKey("outputBinaryFormat")) prop.put("outputBinaryFormat", props.getProperty("outputBinaryFormat")); if(props.containsKey("jenatdb")) prop.put("jenatdb", props.getProperty("jenatdb")); if(props.containsKey("caching")) prop.put("caching", props.getProperty("caching")); if(props.containsKey("append")) prop.put("append", props.getProperty("append")); // item graph embedding if(props.containsKey("embeddingOption")) prop.put("embeddingOption", props.getProperty("embeddingOption")); if(props.containsKey("maxBranchLength")) 
prop.put("maxBranchLength", props.getProperty("maxBranchLength")); if(props.containsKey("addCollabFeatures")) prop.put("addCollabFeatures", props.getProperty("addCollabFeatures")); if(props.containsKey("listAlphaVals")) prop.put("listAlphaVals", props.getProperty("listAlphaVals")); if(props.containsKey("minMaxNorm")) prop.put("minMaxNorm", props.getProperty("minMaxNorm")); if(props.containsKey("minFreq")) prop.put("minFreq", props.getProperty("minFreq")); if(props.containsKey("maxFreq")) prop.put("maxFreq", props.getProperty("maxFreq")); if(props.containsKey("idf")) prop.put("idf", props.getProperty("idf")); if(props.containsKey("lengthNorm")) prop.put("lengthNorm", props.getProperty("lengthNorm")); if(props.containsKey("onlyEntityBranches")) prop.put("onlyEntityBranches", props.getProperty("onlyEntityBranches")); if(props.containsKey("entityBasedEmbFile")) prop.put("entityBasedEmbFile", props.getProperty("entityBasedEmbFile")); if(props.containsKey("branchBasedEmbFile")) prop.put("branchBasedEmbFile", props.getProperty("branchBasedEmbFile")); // paths extraction if(props.containsKey("computeTopPaths")) prop.put("computeTopPaths", props.getProperty("computeTopPaths")); if(props.containsKey("pathsInMemory")) prop.put("pathsInMemory", props.getProperty("pathsInMemory")); if(props.containsKey("nTopPaths")) prop.put("nTopPaths", props.getProperty("nTopPaths")); if(props.containsKey("nItemTopPaths")) prop.put("nItemTopPaths", props.getProperty("nItemTopPaths")); if(props.containsKey("pathsFile")) prop.put("pathsFile", props.getProperty("pathsFile")); if(props.containsKey("outputPathsTextFormat")) prop.put("outputPathsTextFormat", props.getProperty("outputPathsTextFormat")); if(props.containsKey("outputPathsBinaryFormat")) prop.put("outputPathsBinaryFormat", props.getProperty("outputPathsBinaryFormat")); if(props.containsKey("computeInversePaths")) prop.put("computeInversePaths", props.getProperty("computeInversePaths")); // user paths extraction if(props.containsKey("itemsInMemory")) prop.put("itemsInMemory", props.getProperty("itemsInMemory")); if(props.containsKey("userItemsSampling")) prop.put("userItemsSampling", props.getProperty("userItemsSampling")); if(props.containsKey("ratesThreshold")) prop.put("ratesThreshold", props.getProperty("ratesThreshold")); if(props.containsKey("normalize")) prop.put("normalize", props.getProperty("normalize")); if(props.containsKey("splitValidationSet")) prop.put("splitValidationSet", props.getProperty("splitValidationSet")); // learning if(props.containsKey("libLinear")) prop.put("libLinear", props.getProperty("libLinear")); if(props.containsKey("rankLib")) prop.put("rankLib", props.getProperty("rankLib")); if(props.containsKey("silentLearning")) prop.put("silentLearning", props.getProperty("silentLearning")); // liblinear if(props.containsKey("listSolverType")) prop.put("listSolverType", props.getProperty("listSolverType")); if(props.containsKey("listC")) prop.put("listC", props.getProperty("listC")); if(props.containsKey("listEps")) prop.put("listEps", props.getProperty("listEps")); if(props.containsKey("listP")) prop.put("listP", props.getProperty("listP")); // ranklib if(props.containsKey("rankerType")) prop.put("rankerType", props.getProperty("rankerType")); if(props.containsKey("nIteration")) prop.put("nIteration", props.getProperty("nIteration")); if(props.containsKey("tolerance")) prop.put("tolerance", props.getProperty("tolerance")); if(props.containsKey("nThreshold")) prop.put("nThreshold", props.getProperty("nThreshold")); 
if(props.containsKey("nTrees")) prop.put("nTrees", props.getProperty("nTrees")); if(props.containsKey("nTreeLeaves")) prop.put("nTreeLeaves", props.getProperty("nTreeLeaves")); if(props.containsKey("learningRate")) prop.put("learningRate", props.getProperty("learningRate")); if(props.containsKey("minLeafSupport")) prop.put("minLeafSupport", props.getProperty("minLeafSupport")); if(props.containsKey("maxSelCount")) prop.put("maxSelCount", props.getProperty("maxSelCount")); if(props.containsKey("nMaxIteration")) prop.put("nMaxIteration", props.getProperty("nMaxIteration")); if(props.containsKey("nRestart")) prop.put("nRestart", props.getProperty("nRestart")); if(props.containsKey("regularized")) prop.put("regularized", props.getProperty("regularized")); if(props.containsKey("nRoundToStopEarly")) prop.put("nRoundToStopEarly", props.getProperty("nRoundToStopEarly")); if(props.containsKey("nBag")) prop.put("nBag", props.getProperty("nBag")); if(props.containsKey("featureSamplingRate")) prop.put("featureSamplingRate", props.getProperty("featureSamplingRate")); if(props.containsKey("subSamplingRate")) prop.put("subSamplingRate", props.getProperty("subSamplingRate")); //evaluation if(props.containsKey("evalMetric")) prop.put("evalMetric", props.getProperty("evalMetric")); if(props.containsKey("evalRatingThresh")) prop.put("evalRatingThresh", props.getProperty("evalRatingThresh")); if(props.containsKey("relUnknownItems")) prop.put("relUnknownItems", props.getProperty("relUnknownItems")); if(props.containsKey("negRatingThresh")) prop.put("negRatingThresh", props.getProperty("negRatingThresh")); if(props.containsKey("itemsMetadataEvalFile")) prop.put("itemsMetadataEvalFile", props.getProperty("itemsMetadataEvalFile")); if(props.containsKey("outputEvaluationFile")) prop.put("outputEvaluationFile", props.getProperty("outputEvaluationFile")); if(props.containsKey("recDirToEval")) prop.put("recDirToEval", props.getProperty("recDirToEval")); //recommendation if(props.containsKey("recommendationsFile")) prop.put("recommendationsFile", props.getProperty("recommendationsFile")); if(props.containsKey("topN")) prop.put("topN", props.getProperty("topN")); return prop; } }
10,829
48.907834
95
java
lodreclib
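A small usage sketch, not part of the repository, for the PropertyFileReader above: it writes a throwaway config.properties containing a few of the keys the reader recognizes and prints the resulting map. The key names are taken from the code; the values and the file path are illustrative, and the sketch assumes the class is on the classpath.

import java.io.FileWriter;
import java.util.Map;
import it.poliba.sisinflab.LODRec.utils.PropertyFileReader;

public class PropertyFileReaderSketch {
    public static void main(String[] args) throws Exception {
        String file = "config.properties";
        // keys not present in the file are simply skipped by loadProperties
        try (FileWriter w = new FileWriter(file)) {
            w.write("workingDir=./example/\n");
            w.write("nThreads=4\n");
            w.write("inputTrainRatingFile=TrainRating\n");
            w.write("userPathExtraction=true\n");
            w.write("ratesThreshold=3\n");
        }
        Map<String, String> prop = PropertyFileReader.loadProperties(file);
        System.out.println(prop); // e.g. {nThreads=4, workingDir=./example/, ...}
    }
}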
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/FileInputConversion.java
package it.poliba.sisinflab.LODRec.utils; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.io.FileWriter; public class FileInputConversion { /** * Reorders the columns of a tab-separated file: each line (c1, c2, c3) is rewritten as (c3, c1, c2). * @param args unused */ public static void main(String[] args) { try{ BufferedReader br = new BufferedReader(new FileReader("C://Users/Vito/Desktop/item_index.tsv")); BufferedWriter bw = new BufferedWriter(new FileWriter("C://Users/Vito/Desktop/lastfm-big")); String line = null; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); bw.append(vals[2] + "\t" + vals[0] + "\t" + vals[1]); bw.newLine(); } br.close(); bw.flush(); bw.close(); } catch(Exception e){ e.printStackTrace(); } } }
808
19.74359
99
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/StringUtils.java
package it.poliba.sisinflab.LODRec.utils; import gnu.trove.map.hash.TIntObjectHashMap; import java.util.Arrays; import java.util.Collections; public class StringUtils { public static String extractRate(float rate, float ratesThreshold){ if(rate > ratesThreshold) return "r1"; else return "r0"; } public static String reverse(String ss) { String[] vals = ss.split("-"); Collections.reverse(Arrays.asList(vals)); String res = ""; for(String s : vals){ res += s.trim() + "-"; } return res.substring(0, res.length()-1); } public static String reverseDirected(String ss, TIntObjectHashMap<String> props_index){ String p = reverse(ss); String[] vals = p.split("-"); String out = ""; String prop = ""; String to_search = ""; for(String s : vals){ prop = props_index.get(Integer.parseInt(s)); to_search = ""; if(prop.startsWith("inv_")) to_search = prop.substring(4); else to_search = "inv_" + prop; for(int i : props_index.keys()){ if(props_index.get(i).equals(to_search)) out += i + "-"; } } return out.substring(0, out.length()-1); } }
1,170
17.296875
88
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/ItemUtils.java
package it.poliba.sisinflab.LODRec.utils; import java.util.Collections; import java.util.Comparator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; public class ItemUtils { /* * Sorts a Map (e.g. HashMap or Hashtable) by value in descending order. * Throws NullPointerException if the Map contains null values. * It also sorts values even if they are duplicates. */ public static <K extends Comparable,V extends Comparable> Map<K,V> sortByValues(Map<K,V> map){ List<Map.Entry<K,V>> entries = new LinkedList<Map.Entry<K,V>>(map.entrySet()); Collections.sort(entries, new Comparator<Map.Entry<K,V>>() { @Override public int compare(Entry<K, V> o1, Entry<K, V> o2) { float val1 = Float.parseFloat(o1.getValue().toString()); float val2 = Float.parseFloat(o2.getValue().toString()); if(val1 > val2) return -1; else if(val1 < val2) return 1; else return 0; } }); //LinkedHashMap will keep the keys in the order they are inserted //which here is descending order of value Map<K,V> sortedMap = new LinkedHashMap<K,V>(); for(Map.Entry<K,V> entry: entries){ sortedMap.put(entry.getKey(), entry.getValue()); } return sortedMap; } }
1,545
27.62963
98
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/SynchronizedCounter.java
package it.poliba.sisinflab.LODRec.utils; public class SynchronizedCounter { private int c = 0; public SynchronizedCounter(int value) { this.c=value; } public SynchronizedCounter() { } public synchronized void increment() { c++; } public synchronized void decrement() { c--; } /* note: increments the counter and returns the new value */ public synchronized int value() { this.increment(); return c; } }
416
15.68
43
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/TrainValidationDataSplitter.java
package it.poliba.sisinflab.LODRec.utils; import gnu.trove.iterator.TIntIterator; import gnu.trove.list.array.TIntArrayList; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.set.TIntSet; import gnu.trove.set.hash.TIntHashSet; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Random; import java.util.Set; public class TrainValidationDataSplitter { private Set<Integer> items; private Map<Integer, Map<Integer, Float>> testRatings; private Map<Integer, Map<Integer, Float>> trainRatings; private Map<Integer, Map<Integer, Float>> validRatings; public void buildTrainValTestRatings(String originalFile, String trainFile, String validFile, String testFile, float valRatio, float testRatio, int minTrainRatings, int minValRatings, boolean implicit, int max_users) { TIntObjectHashMap<TIntFloatHashMap> original_train = TextFileUtils .loadInputUsersRatings(originalFile); TIntObjectHashMap<TIntFloatHashMap> train = new TIntObjectHashMap<TIntFloatHashMap>(); int[] users = original_train.keys(); for (int i = 0; i < max_users & i < original_train.keys().length; i++) train.put(users[i], original_train.get(users[i])); TIntObjectHashMap<TIntFloatHashMap> valid = new TIntObjectHashMap<TIntFloatHashMap>(); TIntObjectHashMap<TIntFloatHashMap> test = new TIntObjectHashMap<TIntFloatHashMap>(); // this.splitRatings(train, test, testRatio, minTrainRatings); this.splitRatings(train, valid, valRatio, minValRatings); try { BufferedWriter train_file = new BufferedWriter(new FileWriter( trainFile)); BufferedWriter validation_file = new BufferedWriter(new FileWriter( validFile)); BufferedWriter test_file = new BufferedWriter(new FileWriter( testFile)); for (int u : train.keys()) { for (int i : train.get(u).keys()) { if (!implicit) train_file.append(u + "\t" + i + "\t" + train.get(u).get(i) + "\n"); else train_file.append(u + "\t" + i + "\t" + 1 + "\n"); } } for (int u : valid.keys()) { for (int i : valid.get(u).keys()) { if (!implicit) validation_file.append(u + "\t" + i + "\t" + valid.get(u).get(i) + "\n"); else validation_file.append(u + "\t" + i + "\t" + 1 + "\n"); } } for (int u : test.keys()) { for (int i : test.get(u).keys()) { if (!implicit) test_file.append(u + "\t" + i + "\t" + test.get(u).get(i) + "\n"); else test_file.append(u + "\t" + i + "\t" + 1 + "\n"); } } train_file.flush(); validation_file.flush(); test_file.flush(); train_file.close(); validation_file.close(); test_file.close(); System.out.println("n.train users: " + train.keys().length + " n.validation users: " + valid.keys().length + " n.test users: " + test.keys().length); } catch (IOException ex) { ex.printStackTrace(); } } private void splitRatings(TIntObjectHashMap<TIntFloatHashMap> map1, TIntObjectHashMap<TIntFloatHashMap> map2, float ratio, int minRatings) { TIntSet users = map1.keySet(); TIntIterator it = users.iterator(); int u, i; TIntArrayList candidate; Set<Integer> usersToRemove = new HashSet(); int splits = 0; while (it.hasNext()) { u = it.next(); TIntSet tmp = map1.get(u).keySet(); candidate = new TIntArrayList(); candidate.addAll(tmp); int n = (int) (ratio * candidate.size()); if (n > minRatings) { map2.put(u, new TIntFloatHashMap()); splits++; TIntSet validItems = chooseRndItems(candidate, n); TIntIterator itt = validItems.iterator(); // System.out.println(this.trainRatings.get(u).size() +" - "+ // 
this.validationRatings.get(u).size()); while (itt.hasNext()) { i = itt.next(); map2.get(u).put(i, map1.get(u).get(i)); map1.get(u).remove(i); } } else usersToRemove.add(u); } for (int us : usersToRemove) map1.remove(us); System.out.println("n.users map1 " + map1.keys().length + " n.users map2 " + map2.keys().length + " n.splits " + splits); } private TIntSet chooseRndItems(TIntArrayList list, int N) { TIntSet keys = new TIntHashSet(); TIntSet ret = new TIntHashSet(); Random r = new Random(); int cont = 0; if (list.size() < N) return ret; while (cont < N) { int rr = r.nextInt(list.size()); while (keys.contains(rr)) rr = r.nextInt(list.size()); keys.add(rr); cont++; } TIntIterator it = keys.iterator(); while (it.hasNext()) ret.add(list.get(it.next())); return ret; } public void analyze(String train, String valid, String test, String metadata) { loadItemIDsFromItemTextualFile(metadata); this.readTrainData(train); this.readValidData(valid); this.readTestData(test); Set<Integer> users = trainRatings.keySet(); int min = 1000; int max = 0; int count = 0; float avg=0; for (int u : users) { count = 0; count += trainRatings.get(u).size(); if (testRatings.containsKey(u)) { count += testRatings.get(u).size(); } if (validRatings.containsKey(u)) { count += validRatings.get(u).size(); } if (count < min) min = count; if (count > max) max = count; avg+=count; } avg/=(float)users.size(); System.out.println("n. train users " + users.size() + " min: " + min + " max: " + max+" avg: "+avg); } public void readTestData(String filename) { try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); testRatings = new HashMap<Integer, Map<Integer, Float>>(); boolean add = false; Set<Integer> tmp = new HashSet(); while (line != null) { try { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); if (items != null) { if (items.contains(i)) { add = true; } else add = false; } else { add = true; tmp.add(i); } if (add) { float rel = Float.parseFloat(str[2].trim()); if (!testRatings.containsKey(u)) testRatings.put(u, new HashMap()); testRatings.get(u).put(i, rel); } } catch (Exception ex) { System.out.println(ex.getMessage()); } line = reader.readLine(); } if (items == null) { items = new HashSet<Integer>(); items.addAll(tmp); } } catch (IOException e) { } } public void readTrainData(String filename) { try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); trainRatings = new HashMap<Integer, Map<Integer, Float>>(); while (line != null) { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); if (items.contains(i)) { float rel = Float.parseFloat(str[2].trim()); if (!trainRatings.containsKey(u)) trainRatings.put(u, new HashMap()); trainRatings.get(u).put(i, rel); } line = reader.readLine(); } } catch (IOException e) { System.out.println(e.getMessage()); } System.out.println("size "+ trainRatings.size()); } public void readValidData(String filename) { try { BufferedReader reader = new BufferedReader(new FileReader(filename)); String line = reader.readLine(); validRatings = new HashMap<Integer, Map<Integer, Float>>(); while (line != null) { String[] str = line.split("\t"); int u = Integer.parseInt(str[0].trim()); int i = Integer.parseInt(str[1].trim()); if (items.contains(i)) { float rel = Float.parseFloat(str[2].trim()); if (!validRatings.containsKey(u)) validRatings.put(u, new HashMap()); 
validRatings.get(u).put(i, rel); } line = reader.readLine(); } } catch (IOException e) { System.out.println(e.getMessage()); } } private void loadItemIDsFromItemTextualFile(String itemsFile) { BufferedReader reader; try { items = new HashSet<Integer>(); reader = new BufferedReader(new FileReader(itemsFile)); String line = ""; int item_id; while ((line = reader.readLine()) != null) { String[] parts = line.split("\t"); item_id = Integer.parseInt(parts[0]); this.items.add(item_id); } reader.close(); } catch (IOException e) { e.printStackTrace(); } System.out.println("Loaded "+items.size()); } public static void main(String[] args) { TrainValidationDataSplitter splitter = new TrainValidationDataSplitter(); /*System.out.println("Movielens"); splitter.analyze("ML/ratings/TrainSetML_65_15_20_reduced_min50", "ML/ratings/ValidSetML_65_15_reduced_min50", "ML/ratings/TestSetML_80_20_reduced_min50", "ML/itemMetadata"); System.out.println("Library"); splitter.analyze("Lib/rating/TrainSetLibrary_65_15_20_reduced", "Lib/rating/ValidSetLibrary_15_20_reduced", "Lib/rating/TestSetLibrary_80_20_reduced", "Lib/itemMetadata"); System.out.println("LastFM"); splitter.analyze("LF/feedback/TrainSetLF_65_15_20_percentile_rank_norm_reduced_v2_noNegExam", "LF/feedback/ValidSetLF_15_20_percentile_rank_norm_reduced_v2_noNegExam", "LF/feedback/TestSetLF_80_20_percentile_rank_norm_reduced_v2", "LF/itemMetadata"); */ boolean implicit=false; int n_users=1000000; splitter.buildTrainValTestRatings("ml_sample/TrainSetML_80_20_reduced_min50", "ml_sample/TrainSetML_80_20", "ml_sample/ValidationSetML_80_20", "XXX", 0.2f, 0f, 0, 3 ,implicit, n_users); } }
9,690
25.550685
252
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/TextFileUtils.java
package it.poliba.sisinflab.LODRec.utils; import gnu.trove.iterator.TIntIterator; import gnu.trove.iterator.TIntObjectIterator; import gnu.trove.list.array.TIntArrayList; import gnu.trove.map.hash.THashMap; import gnu.trove.map.hash.TIntFloatHashMap; import gnu.trove.map.hash.TIntIntHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import gnu.trove.set.hash.TFloatHashSet; import gnu.trove.set.hash.TIntHashSet; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.util.ArrayList; import java.util.HashSet; public class TextFileUtils { public static void writeTIntMapArrayString(String file, TIntObjectHashMap<ArrayList<String>> map){ try{ BufferedWriter writer = new BufferedWriter(new FileWriter(file)); TIntObjectIterator<ArrayList<String>> it = map.iterator(); StringBuffer str; while(it.hasNext()){ str = new StringBuffer(); it.advance(); str.append(it.key() + "\t"); ArrayList<String> tmp = it.value(); for(String s : tmp) str.append(s + ","); writer.append(str.substring(0, str.length()-1)); writer.newLine(); } writer.flush(); writer.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadTIntMapArrayString(String file, TIntObjectHashMap<ArrayList<String>> map){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); String[] vals1 = vals[1].split(","); ArrayList<String> tmp = new ArrayList<String>(); for(String s : vals1) tmp.add(s); map.put(Integer.parseInt(vals[0]), tmp); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadTIntMapTIntHashSet(String file, TIntObjectHashMap<TIntHashSet> map){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); String[] vals1 = vals[1].split(","); TIntHashSet tmp = new TIntHashSet(); for(String s : vals1) tmp.add(Integer.parseInt(s)); map.put(Integer.parseInt(vals[0]), tmp); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void writeTIntMapTIntHashSet(String file, TIntObjectHashMap<TIntHashSet> map){ try{ BufferedWriter writer = new BufferedWriter(new FileWriter(file)); TIntObjectIterator<TIntHashSet> it = map.iterator(); StringBuffer str; while(it.hasNext()){ str = new StringBuffer(); it.advance(); str.append(it.key() + "\t"); TIntHashSet tmp = it.value(); TIntIterator it1 = tmp.iterator(); while(it1.hasNext()) str.append(it1.next() + ","); writer.append(str.substring(0, str.length()-1)); writer.newLine(); } writer.flush(); writer.close(); } catch(Exception e){ e.printStackTrace(); } } public static void writeData(String file, TIntObjectHashMap<TIntObjectHashMap<TIntIntHashMap>> path){ BufferedWriter writer; try { writer = new BufferedWriter(new FileWriter(file)); TIntObjectIterator<TIntObjectHashMap<TIntIntHashMap>> it = path.iterator(); while(it.hasNext()){ it.advance(); int a = it.key(); TIntObjectHashMap<TIntIntHashMap> tmp = it.value(); if(tmp!=null){ TIntObjectIterator<TIntIntHashMap> it1 = tmp.iterator(); while(it1.hasNext()){ it1.advance(); int b = it1.key(); writer.append(a + "-" + b + ": " + it1.value()); writer.newLine(); } } } writer.flush(); writer.close(); } catch (Exception e){ e.printStackTrace(); } } public static void writeData(String file, TObjectIntHashMap<String> data){ BufferedWriter writer; try { writer = new 
BufferedWriter(new FileWriter(file)); for (String s : data.keySet()) { writer.append(data.get(s) + "\t" + s); writer.newLine(); } writer.flush(); writer.close(); } catch (Exception e){ e.printStackTrace(); } } public static void loadInputURIs(String file, TObjectIntHashMap<String> uri_id, boolean append, String outFile){ try{ int index = 1; if(append){ if(new File(outFile).exists()){ BufferedReader br = new BufferedReader(new FileReader(outFile)); while(br.readLine() != null) index++; br.close(); } } BufferedReader br = new BufferedReader(new FileReader(file)); String line; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); if(vals.length==2){ uri_id.put(vals[1], Integer.parseInt(vals[0])); }else if(vals.length>1){ uri_id.put(vals[2], Integer.parseInt(vals[0])); } else{ uri_id.put(vals[0], index); index++; } } br.close(); write(uri_id, append, outFile); } catch(Exception e){ e.printStackTrace(); } } public static void write(TObjectIntHashMap<String> uri_id, boolean append, String outFile){ BufferedWriter writer; try { writer = new BufferedWriter(new FileWriter(outFile, append)); for (String s : uri_id.keySet()) { writer.append(uri_id.get(s) + "\t" + s); writer.newLine(); } writer.flush(); writer.close(); } catch (Exception e){ e.printStackTrace(); } } public static TIntObjectHashMap<TIntFloatHashMap> loadInputUsersRatings(String file) { TIntObjectHashMap<TIntFloatHashMap> user_rating =new TIntObjectHashMap<TIntFloatHashMap>(); try { BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; float rate; int user_id, item_id; while ((line = br.readLine()) != null) { String[] vals = line.split("\t"); if(vals.length==2) rate=1; else rate = Float.parseFloat(vals[2]); user_id = Integer.parseInt(vals[0]); item_id = Integer.parseInt(vals[1]); user_rating.putIfAbsent(user_id, new TIntFloatHashMap()); user_rating.get(user_id).put(item_id, rate); } br.close(); } catch (Exception e) { e.printStackTrace(); } return user_rating; } public static void loadInputUsersRatings(String file, TIntObjectHashMap<TIntFloatHashMap> user_rating, TFloatHashSet labels){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; float rate; int user_id, item_id; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); rate = Float.parseFloat(vals[2]); user_id = Integer.parseInt(vals[0]); item_id = Integer.parseInt(vals[1]); user_rating.putIfAbsent(user_id, new TIntFloatHashMap()); user_rating.get(user_id).put(item_id, rate); labels.add(rate); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadFileIndex(String file, THashMap<String, String> items_pair_value){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); items_pair_value.put(vals[0], vals[1]+":"+vals[2]); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadFileIndex(String file, TIntObjectHashMap<String> id_value){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); id_value.put(Integer.parseInt(vals[0]), vals[1]+":"+vals[2]); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadIndex(String file, TIntObjectHashMap<String> id_value){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; while((line=br.readLine()) != null){ String[] vals = 
line.split("\t"); id_value.put(Integer.parseInt(vals[0]), vals[1]); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadIndex(String file, TObjectIntHashMap<String> value_id){ if(new File(file).exists()){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; int index = 1; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); if(vals.length>1) value_id.put(vals[1], Integer.parseInt(vals[0])); else value_id.put(vals[0], index++); } br.close(); } catch(Exception e){ e.printStackTrace(); } } } public static void computeIndex(String file, TObjectIntHashMap<String> value_id, HashSet<String> labels){ if(new File(file).exists()){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; int index = 1; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); for(String s : labels){ value_id.put(s + "-" + vals[0], index++); value_id.put(s + "-inv_" + vals[0], index++); } } br.close(); } catch(Exception e){ e.printStackTrace(); } } } public static void loadInputMetadataID(String metadata_file_index, String input_uri, TIntIntHashMap input_metadata_id){ TObjectIntHashMap<String> metadata_index = new TObjectIntHashMap<String>(); loadIndex(metadata_file_index, metadata_index); try{ BufferedReader br = new BufferedReader(new FileReader(input_uri)); String line = null; while((line=br.readLine()) != null){ String[] vals = line.split("\t"); if(metadata_index.containsKey(vals[1])); input_metadata_id.put(Integer.parseInt(vals[0]), metadata_index.get(vals[1])); } br.close(); } catch(Exception e){ e.printStackTrace(); } } public static void loadRelatedURIs(String file, TIntObjectHashMap<TIntArrayList> related_uri, TObjectIntHashMap<String> uri_id){ try{ BufferedReader br = new BufferedReader(new FileReader(file)); String line = null; TIntArrayList tmp = null; while((line=br.readLine()) != null){ tmp = new TIntArrayList(); String[] vals = line.split(","); for(int i = 1; i < vals.length; i++){ if(!vals[i].contentEquals("null") && uri_id.containsKey(vals[i])) tmp.add(uri_id.get(vals[i])); } if(uri_id.containsKey(vals[0])) related_uri.put(uri_id.get(vals[0]), tmp); } br.close(); } catch(Exception e){ e.printStackTrace(); } } }
10,861
20.855131
102
java
lodreclib
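A minimal sketch, not part of the repository, of the tab-separated ratings format consumed by TextFileUtils.loadInputUsersRatings above: one user<TAB>item<TAB>rating triple per line. The ratings below are made up; the sketch assumes the Trove library and the class above are on the classpath.

import java.io.FileWriter;
import gnu.trove.map.hash.TIntFloatHashMap;
import gnu.trove.map.hash.TIntObjectHashMap;
import gnu.trove.set.hash.TFloatHashSet;
import it.poliba.sisinflab.LODRec.utils.TextFileUtils;

public class RatingsFileSketch {
    public static void main(String[] args) throws Exception {
        String file = "TrainRating";
        // made-up ratings: user 1 rated items 10 and 11, user 2 rated item 10
        try (FileWriter w = new FileWriter(file)) {
            w.write("1\t10\t5\n");
            w.write("1\t11\t2\n");
            w.write("2\t10\t4\n");
        }
        TIntObjectHashMap<TIntFloatHashMap> ratings = new TIntObjectHashMap<TIntFloatHashMap>();
        TFloatHashSet labels = new TFloatHashSet();
        TextFileUtils.loadInputUsersRatings(file, ratings, labels);
        System.out.println(ratings.get(1).get(10)); // 5.0
        System.out.println(labels.size());          // 3 distinct rating values
    }
}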
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/MemoryMonitor.java
package it.poliba.sisinflab.LODRec.utils; public class MemoryMonitor { public static void stats(){ Runtime runtime = Runtime.getRuntime(); long maxMemory = runtime.maxMemory(); long allocatedMemory = runtime.totalMemory(); long freeMemory = runtime.freeMemory(); long usedMemory = allocatedMemory - freeMemory; StringBuilder sb = new StringBuilder(); sb.append("\nused memory: " + (usedMemory / (1024 * 1024)) + " MB\n"); sb.append("free memory: " + freeMemory / (1024 * 1024) + " MB\n"); sb.append("allocated memory: " + allocatedMemory / (1024 * 1024) + " MB\n"); sb.append("max memory: " + maxMemory / (1024 * 1024) + " MB\n"); System.out.println(sb); } }
714
23.655172
60
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/utils/XMLUtils.java
package it.poliba.sisinflab.LODRec.utils; import gnu.trove.map.hash.TObjectIntHashMap; import it.poliba.sisinflab.LODRec.tree.NNode; import it.poliba.sisinflab.LODRec.tree.NTree; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.xml.sax.SAXException; public class XMLUtils { public static void parseXMLFile(String file, TObjectIntHashMap<String> props_index, NTree props_tree, boolean directed) throws ParserConfigurationException, SAXException, IOException{ DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); DocumentBuilder builder = factory.newDocumentBuilder(); Document doc = builder.parse(new File(file)); Node root = doc.getFirstChild(); HashSet<String> props = new HashSet<String>(); NNode root_node = new NNode("props"); walkTree(root, root_node, props, directed); props_tree.addRoot(root_node); int index=0; for(String s : props) props_index.put(s, index++); props.clear(); if(root.hasAttributes()){ int hop = Integer.parseInt(root.getAttributes().item(0).getNodeValue()); if(hop > 1){ ArrayList<NNode> props_to_reply = new ArrayList<NNode>(); for(NNode node : root_node.getChilds()) props_to_reply.add(new NNode(node.getValue())); constructTree(root_node.getChilds(), props_to_reply, hop-1); } } } public static void constructTree(ArrayList<NNode> nodes_to_expand, ArrayList<NNode> props_to_reply, int lev){ for(NNode n : nodes_to_expand) n.addChilds(constructBranches(props_to_reply)); lev--; if(lev>0){ ArrayList<NNode> new_nodes_to_expand = new ArrayList<NNode>(); for(NNode n : nodes_to_expand) new_nodes_to_expand.addAll(n.getChilds()); constructTree(new_nodes_to_expand, props_to_reply, lev); } } public static ArrayList<NNode> constructBranches(ArrayList<NNode> props_to_reply){ ArrayList<NNode> res = new ArrayList<NNode>(); for(NNode n : props_to_reply) res.add(new NNode(n.getValue())); return res; } public static void walkTree(Node node, NNode nnode, HashSet<String> props, boolean directed){ if(node.hasChildNodes()){ ArrayList<NNode> childs = new ArrayList<NNode>(); for(int i=0; i<node.getChildNodes().getLength();i++){ if(node.getChildNodes().item(i).getNodeType() == Node.ELEMENT_NODE){ String p = new String(node.getChildNodes().item(i).getAttributes().item(0).getNodeValue()); props.add(p); if(directed) props.add("inv_" + p); NNode n = new NNode(p); walkTree(node.getChildNodes().item(i), n, props, directed); childs.add(n); } } nnode.addChilds(childs); } } }
2,979
24.689655
102
java
lodreclib
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/main/Main.java
package it.poliba.sisinflab.LODRec.main; import java.io.IOException; import java.util.Map; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.apache.log4j.PropertyConfigurator; import it.poliba.sisinflab.LODRec.evaluation.Evaluator; import it.poliba.sisinflab.LODRec.graphkernel.graphEmbedding.ItemGraphEmbedder; import it.poliba.sisinflab.LODRec.graphkernel.heuristic.UserProfileSimilarityRecommender; import it.poliba.sisinflab.LODRec.graphkernel.model.UserModelRecommender; import it.poliba.sisinflab.LODRec.learning.LibLinearLearner; import it.poliba.sisinflab.LODRec.learning.RankLibLearner; import it.poliba.sisinflab.LODRec.recommender.Recommender; import it.poliba.sisinflab.LODRec.sparqlDataExtractor.RDFTripleExtractor; import it.poliba.sisinflab.LODRec.sprank.itemPathExtractor.ItemPathExtractor; import it.poliba.sisinflab.LODRec.sprank.userPathExtractor.UserPathExtractor; import it.poliba.sisinflab.LODRec.utils.PropertyFileReader; public class Main { private static Logger logger = LogManager .getLogger(Main.class.getName()); // default configuration file private static String configFile = "config.properties"; /* ---------- RECOMMENDATION ALGORITHM ---------- */ /* ---------------------------------------------- */ static int recAlgorithm = 1; // SPrank /* -------------- GENERAL SETTINGS -------------- */ /* ---------------------------------------------- */ static boolean dataExtraction = false; static boolean itemGraphEmbedding = false; static boolean itemPathExtraction = false; static boolean userPathExtraction = false; static boolean computeRec = false; static boolean evaluation = false; static boolean evaluationDir = false; public static String workingDir = "./"; public static int nThreads = Runtime.getRuntime().availableProcessors(); public static boolean implicit = false; // input training file private static String inputTrainRatingFile = "TrainRating"; // input validation file private static String inputValidationRatingFile = "ValidationRating"; // input test file private static String inputTestRatingFile = "TestRating"; /* -------------- DATA EXTRACTION --------------- */ /* ---------------------------------------------- */ // set jenatdb=false to query remote endpoint - jenatdb=true to query local dataset private static boolean jenatdb = false; // sparql endpoint address private static String endpoint = "http://live.dbpedia.org/sparql"; private static String graphURI = "http://dbpedia.org"; // if jenatdb=true set local dataset parameters private static String tdbDirectory = "TDB"; private static String localDatasetFile = "dump.nt"; private static String inputItemURIsFile = "input_uri"; // set inverseProps=true to consider directed property private static boolean inverseProps = true; // output file public static String itemsMetadataFile = "itemsMetadata"; private static boolean outputTextFormat = true; private static boolean outputBinaryFormat = true; private static String propertiesFile = "props.xml"; // set caching=true to enable caching (requires a lot of memory) private static boolean caching = false; // set append=true to continue a previous extraction private static boolean append = false; /* ------------- ITEM GRAPH EMBEDDING ----------- */ /* ---------------------------------------------- */ // 1 -> entity from text // 2 -> branch from text // 3 -> entity from binary // 4 -> branch from binary private static int embeddingOption = 1; private static String entityMapFile = "_entity_mapping"; private static String branchMapFile = "_branch_mapping"; 
private static boolean addCollabFeatures = true; private static int maxBranchLength = 1; private static boolean minMaxNorm = false; private static int maxFreq= 1000000; private static int minFreq = 0; private static boolean idf = false; private static boolean lengthNorm = false; private static boolean onlyEntityBranches = true; private static String listAlphaVals = ""; /* -------------- PATHS EXTRACTION -------------- */ /* ---------------------------------------------- */ // set selectTopPaths=true to consider only popular path private static boolean selectTopPaths = true; // number of top paths to consider in paths extraction private static int nTopPaths = 50; // number of items to consider in top paths computation private static int nItemTopPaths = 100; // number of items loaded in memory in paths extraction private static int itemsInMemory = 1000; // output format private static String pathsFile = workingDir + "itemPaths"; private static boolean outputPathsTextFormat = false; private static boolean outputPathsBinaryFormat = true; // if you want to consider i1-i2 and i2-i1 private static boolean computeInversePaths = false; /* ----------- USER PATHS EXTRACTION ------------ */ /* ---------------------------------------------- */ // percentage of paths loaded in memory in user paths extraction private static int pathsInMemory = 100; // percentage of rated user items to consider in user paths extraction private static int userItemsSampling = 100; // user rates threshold (>) private static float ratingThreshold = 3; private static boolean normalize = true; /* -------------- LEARNING ---------------- */ /* ---------------------------------------------- */ private static boolean rankLib = true; private static boolean libLinear = false; private static boolean silentLearning = true; // liblinear parameters // for multi-class classification // 0 -- L2-regularized logistic regression (primal) // 1 -- L2-regularized L2-loss support vector classification (dual) // 2 -- L2-regularized L2-loss support vector classification (primal) // 3 -- L2-regularized L1-loss support vector classification (dual) // 4 -- support vector classification by Crammer and Singer // 5 -- L1-regularized L2-loss support vector classification // 6 -- L1-regularized logistic regression // 7 -- L2-regularized logistic regression (dual) // for regression // 11 -- L2-regularized L2-loss support vector regression (primal) // 12 -- L2-regularized L2-loss support vector regression (dual) // 13 -- L2-regularized L1-loss support vector regression (dual) private static String listStrSolverType = "11"; private static String listStrC = "1,10,100,1000"; private static String listStrEps = "0.1"; private static String listStrP = "0.1"; private static int timesRealFb = 5; private static int nValidNegEx = 1000; private static int minTrainEx = 100; private static boolean addNegValidationEx = true; // ranklib parameters // 1->RANKER_TYPE.RANKBOOST // 2->RANKER_TYPE.ADARANK // 3->RANKER_TYPE.COOR_ASCENT // 4->RANKER_TYPE.LAMBDAMART // 5->RANKER_TYPE.RANDOM_FOREST private static int rankerType = 4; // ranker parameters private static int nIteration = -1; private static double tolerance = -1; private static int nThreshold = -1; private static int nTrees = -1; private static int nTreeLeaves = -1; private static float learningRate = (float) 0.1; private static int minLeafSupport = 1; // ADARANK parameters private static int maxSelCount = 5; // COOR_ASCENT parameters private static int nMaxIteration = 25; private static int nRestart = 5; private 
static boolean regularized = false; // LAMBDAMART parameters private static int nRoundToStopEarly = 100; // RANDOM_FOREST parameters private static int nBag = 300; private static float subSamplingRate = (float) 1; private static float featureSamplingRate = (float) 0.3; /* ---------------- EVALUATION ------------------ */ /* ---------------------------------------------- */ private static String evalMetric = "P@10"; private static float evalRatingThresh = 4; private static float relUnknownItems = 3; private static float negRatingThresh = 2; private static String itemsMetadataEvalFile = "metadata_eval"; private static String outputEvaluationFile = "evaluation"; private static String recDirToEval = ""; /* -------------- RECOMMENDATION ---------------- */ /* ---------------------------------------------- */ private static String recommendationsFile = "rec"; private static int topN = 15000; public static void loadParams() { try { PropertyConfigurator.configure("log4j.properties"); Map<String, String> prop = PropertyFileReader .loadProperties(configFile); // general settings if(prop.containsKey("recommendationAlgorithm")) recAlgorithm = Integer.parseInt(prop.get("recommendationAlgorithm")); if(prop.containsKey("dataExtraction")) dataExtraction = Boolean.parseBoolean(prop.get("dataExtraction")); if(prop.containsKey("itemGraphEmbedding")) itemGraphEmbedding = Boolean.parseBoolean(prop.get("itemGraphEmbedding")); if(prop.containsKey("itemPathExtraction")) itemPathExtraction = Boolean.parseBoolean(prop.get("itemPathExtraction")); if(prop.containsKey("userPathExtraction")) userPathExtraction = Boolean.parseBoolean(prop.get("userPathExtraction")); if(prop.containsKey("computeRec")) computeRec = Boolean.parseBoolean(prop.get("computeRec")); if(prop.containsKey("evaluation")) evaluation = Boolean.parseBoolean(prop.get("evaluation")); if(prop.containsKey("evaluationDir")) evaluationDir = Boolean.parseBoolean(prop.get("evaluationDir")); if(prop.containsKey("workingDir")) workingDir = prop.get("workingDir"); if(prop.containsKey("nThreads")) { if(Integer.parseInt(prop.get("nThreads")) > 0) nThreads = Integer.parseInt(prop.get("nThreads")); } if(prop.containsKey("implicit")) implicit = Boolean.parseBoolean(prop.get("implicit")); if(prop.containsKey("inputTrainRatingFile")) inputTrainRatingFile = prop.get("inputTrainRatingFile"); if(prop.containsKey("inputValidationRatingFile")) inputValidationRatingFile = prop.get("inputValidationRatingFile"); if(prop.containsKey("inputTestRatingFile")) inputTestRatingFile = prop.get("inputTestRatingFile"); // data extraction if(prop.containsKey("itemsMetadataFile")) itemsMetadataFile = workingDir + prop.get("itemsMetadataFile"); if(prop.containsKey("inputItemURIsFile")) inputItemURIsFile = prop.get("inputItemURIsFile"); if(prop.containsKey("endpoint")) endpoint = prop.get("endpoint"); if(prop.containsKey("graphURI")) graphURI = prop.get("graphURI"); if(prop.containsKey("tdbDirectory")) tdbDirectory = workingDir + prop.get("tdbDirectory"); if(prop.containsKey("localDatasetFile")) localDatasetFile = prop.get("localDatasetFile"); if(prop.containsKey("inverseProps")) inverseProps = Boolean.parseBoolean(prop.get("inverseProps")); if(prop.containsKey("outputTextFormat")) outputTextFormat = Boolean.parseBoolean(prop.get("outputTextFormat")); if(prop.containsKey("outputBinaryFormat")) outputBinaryFormat = Boolean.parseBoolean(prop.get("outputBinaryFormat")); if(prop.containsKey("jenatdb")) jenatdb = Boolean.parseBoolean(prop.get("jenatdb")); if(prop.containsKey("propertiesFile")) 
propertiesFile = prop.get("propertiesFile"); if(prop.containsKey("caching")) caching = Boolean.parseBoolean(prop.get("caching")); if(prop.containsKey("append")) append = Boolean.parseBoolean(prop.get("append")); // item graph embedding if(prop.containsKey("embeddingOption")) embeddingOption = Integer.parseInt(prop.get("embeddingOption")); if(prop.containsKey("entityMapFile")) entityMapFile = prop.get("entityMapFile"); if(prop.containsKey("branchMapFile")) branchMapFile = prop.get("branchMapFile"); if(prop.containsKey("addCollabFeatures")) addCollabFeatures = Boolean.parseBoolean(prop.get("addCollabFeatures")); if(prop.containsKey("maxBranchLength")) maxBranchLength = Integer.parseInt(prop.get("maxBranchLength")); if(prop.containsKey("minMaxNorm")) minMaxNorm = Boolean.parseBoolean(prop.get("minMaxNorm")); if(prop.containsKey("maxFreq")) maxFreq = Integer.parseInt(prop.get("maxFreq")); if(prop.containsKey("minFreq")) minFreq = Integer.parseInt(prop.get("minFreq")); if(prop.containsKey("idf")) idf = Boolean.parseBoolean(prop.get("idf")); if(prop.containsKey("lengthNorm")) lengthNorm = Boolean.parseBoolean(prop.get("lengthNorm")); if(prop.containsKey("onlyEntityBranches")) onlyEntityBranches = Boolean.parseBoolean(prop.get("onlyEntityBranches")); if(prop.containsKey("listAlphaVals")) listAlphaVals = prop.get("listAlphaVals"); // path extraction if(prop.containsKey("computeTopPaths")) selectTopPaths = Boolean.parseBoolean(prop.get("computeTopPaths")); if(prop.containsKey("nTopPaths")) nTopPaths = Integer.parseInt(prop.get("nTopPaths")); if(prop.containsKey("nItemTopPaths")) nItemTopPaths = Integer.parseInt(prop.get("nItemTopPaths")); if(prop.containsKey("itemsInMemory")) itemsInMemory = Integer.parseInt(prop.get("itemsInMemory")); if(prop.containsKey("pathsFile")) pathsFile = workingDir + prop.get("pathsFile"); if(prop.containsKey("outputPathsTextFormat")) outputPathsTextFormat = Boolean.parseBoolean(prop.get("outputPathsTextFormat")); if(prop.containsKey("outputPathsBinaryFormat")) outputPathsBinaryFormat = Boolean.parseBoolean(prop.get("outputPathsBinaryFormat")); if(prop.containsKey("computeInversePaths")) computeInversePaths = Boolean.parseBoolean(prop.get("computeInversePaths")); // user path extraction if(prop.containsKey("pathsInMemory")) pathsInMemory = Integer.parseInt(prop.get("pathsInMemory")); if(prop.containsKey("userItemsSampling")) userItemsSampling = Integer.parseInt(prop.get("userItemsSampling")); if(prop.containsKey("ratingThreshold")) ratingThreshold = Integer.parseInt(prop.get("ratingThreshold")); if(prop.containsKey("normalize")) normalize = Boolean.parseBoolean(prop.get("normalize")); // learning if(prop.containsKey("libLinear")) libLinear = Boolean.parseBoolean(prop.get("libLinear")); if(prop.containsKey("rankLib")) rankLib = Boolean.parseBoolean(prop.get("rankLib")); if(prop.containsKey("silentLearning")) silentLearning = Boolean.parseBoolean(prop.get("silentLearning")); // liblinear if(prop.containsKey("listSolverType")) listStrSolverType = prop.get("listSolverType"); if(prop.containsKey("listC")) listStrC = prop.get("listC"); if(prop.containsKey("listEps")) listStrEps = prop.get("listEps"); if(prop.containsKey("listP")) listStrP = prop.get("listP"); if(prop.containsKey("timesRealFb")) timesRealFb = Integer.parseInt(prop.get("timesRealFb")); if(prop.containsKey("nValidNegEx")) nValidNegEx = Integer.parseInt(prop.get("nValidNegEx")); if(prop.containsKey("minTrainEx")) minTrainEx = Integer.parseInt(prop.get("minTrainEx")); if(prop.containsKey("addNegValidationEx")) 
addNegValidationEx = Boolean.parseBoolean(prop.get("addNegValidationEx")); // ranklib if(prop.containsKey("rankerType")) rankerType = Integer.parseInt(prop.get("rankerType")); // ranker parameters if(prop.containsKey("nIteration")) nIteration = Integer.parseInt(prop.get("nIteration")); if(prop.containsKey("tolerance")) tolerance = Double.parseDouble(prop.get("tolerance")); if(prop.containsKey("nThreshold")) nThreshold = Integer.parseInt(prop.get("nThreshold")); if(prop.containsKey("nTrees")) nTrees = Integer.parseInt(prop.get("nTrees")); if(prop.containsKey("nTreeLeaves")) nTreeLeaves = Integer.parseInt(prop.get("nTreeLeaves")); if(prop.containsKey("learningRate")) learningRate = Float.parseFloat(prop.get("learningRate")); if(prop.containsKey("minLeafSupport")) minLeafSupport = Integer.parseInt(prop.get("minLeafSupport")); // ADARANK parameters if(prop.containsKey("maxSelCount")) maxSelCount = Integer.parseInt(prop.get("maxSelCount")); // COOR_ASCENT parameters if(prop.containsKey("nMaxIteration")) nMaxIteration = Integer.parseInt(prop.get("nMaxIteration")); if(prop.containsKey("nRestart")) nRestart = Integer.parseInt(prop.get("nRestart")); if(prop.containsKey("regularized")) regularized = Boolean.parseBoolean(prop.get("regularized")); // LAMBDAMART if(prop.containsKey("nRoundToStopEarly")) nRoundToStopEarly = Integer.parseInt(prop.get("nRoundToStopEarly")); // RANDOM_FOREST if(prop.containsKey("nBag")) nBag = Integer.parseInt(prop.get("nBag")); if(prop.containsKey("featureSamplingRate")) featureSamplingRate = Float.parseFloat(prop.get("featureSamplingRate")); if(prop.containsKey("subSamplingRate")) subSamplingRate = Float.parseFloat(prop.get("subSamplingRate")); // evaluation if(prop.containsKey("evalMetric")) evalMetric = prop.get("evalMetric"); if(prop.containsKey("evalRatingThresh")) evalRatingThresh = Float.parseFloat(prop.get("evalRatingThresh")); if(prop.containsKey("relUnknownItems")) relUnknownItems = Float.parseFloat(prop.get("relUnknownItems")); if(prop.containsKey("negRatingThresh")) negRatingThresh = Float.parseFloat(prop.get("negRatingThresh")); if(prop.containsKey("itemsMetadataEvalFile")) itemsMetadataEvalFile = prop.get("itemsMetadataEvalFile"); if(prop.containsKey("outputEvaluationFile")) outputEvaluationFile = prop.get("outputEvaluationFile"); if(prop.containsKey("recDirToEval")) recDirToEval = prop.get("recDirToEval"); // recommendation if(prop.containsKey("recommendationsFile")) recommendationsFile = workingDir + prop.get("recommendationsFile"); if(prop.containsKey("topN")) topN = Integer.parseInt(prop.get("topN")); } catch (IOException e) { e.printStackTrace(); } } public static void loadCommandParams(String[] args){ // load other command line params String arg = ""; String val = ""; for(int i = 0; i < args.length; i++){ arg = args[i].split("=")[0]; val = args[i].split("=")[1]; // general settings if (arg.compareTo("recommendationAlgorithm") == 0) { recAlgorithm = Integer.parseInt(val); } else if(arg.compareTo("dataExtraction") == 0) { dataExtraction = Boolean.parseBoolean(val); } else if(arg.compareTo("computeRecommendation") == 0) { computeRec = Boolean.parseBoolean(val); } else if(arg.compareTo("itemPathExtraction") == 0) { itemPathExtraction = Boolean.parseBoolean(val); } else if(arg.compareTo("userPathExtraction") == 0) { userPathExtraction = Boolean.parseBoolean(val); } else if(arg.compareTo("evaluation") == 0) { evaluation = Boolean.parseBoolean(val); } else if(arg.compareTo("evaluationDir") == 0) { evaluationDir = Boolean.parseBoolean(val); } else 
if(arg.compareTo("workingDir") == 0) { workingDir = val; } else if(arg.compareTo("nThreads") == 0) { if(Integer.parseInt(val) > 0) nThreads = Integer.parseInt(val); } else if(arg.compareTo("implicit") == 0) { implicit = Boolean.parseBoolean(val); } else if (arg.compareTo("inputTrainRatingFile") == 0) { inputTrainRatingFile = val; } else if (arg.compareTo("inputValidationRatingFile") == 0) { inputValidationRatingFile = val; } else if (arg.compareTo("inputTestRatingFile") == 0) { inputTestRatingFile = val; } // data extraction else if(arg.compareTo("itemsMetadataFile") == 0) { itemsMetadataFile = val; } else if(arg.compareTo("jenatdb") == 0) { jenatdb = Boolean.parseBoolean(val); } else if(arg.compareTo("localDatasetFile") == 0) { localDatasetFile = val; } else if(arg.compareTo("TDBdirectory") == 0) { tdbDirectory = val; } else if(arg.compareTo("endpoint") == 0) { endpoint = val; } else if(arg.compareTo("graphURI") == 0) { graphURI = val; } else if(arg.compareTo("inputURIfile") == 0) { inputItemURIsFile = val; } else if(arg.compareTo("propertiesFile") == 0) { propertiesFile = val; } else if(arg.compareTo("directed") == 0) { inverseProps = Boolean.parseBoolean(val); } else if(arg.compareTo("outputTextFormat") == 0) { outputTextFormat = Boolean.parseBoolean(val); } else if(arg.compareTo("outputBinaryFormat") == 0) { outputBinaryFormat = Boolean.parseBoolean(val); } else if (arg.compareTo("append") == 0) { append = Boolean.parseBoolean(val); } else if (arg.compareTo("caching") == 0) { caching = Boolean.parseBoolean(val); } // item graph embedding else if(arg.compareTo("embeddingOption") == 0) { embeddingOption = Integer.parseInt(val); } else if(arg.compareTo("entityMapFile") == 0) { entityMapFile = val; } else if(arg.compareTo("branchMapFile") == 0) { branchMapFile = val; } else if(arg.compareTo("addCollabFeatures") == 0) { addCollabFeatures = Boolean.parseBoolean(val); } else if(arg.compareTo("maxBranchLength") == 0) { maxBranchLength = Integer.parseInt(val); } else if(arg.compareTo("minMaxNorm") == 0) { minMaxNorm = Boolean.parseBoolean(val); } else if(arg.compareTo("maxFreq") == 0) { maxFreq = Integer.parseInt(val); } else if(arg.compareTo("minFreq") == 0) { minFreq = Integer.parseInt(val); } else if(arg.compareTo("idf") == 0) { idf = Boolean.parseBoolean(val); } else if(arg.compareTo("lengthNorm") == 0) { lengthNorm = Boolean.parseBoolean(val); } else if(arg.compareTo("onlyEntityBranches") == 0) { onlyEntityBranches = Boolean.parseBoolean(val); } else if(arg.compareTo("listAlphaVals") == 0) { listAlphaVals = val; } // paths extraction else if (arg.compareTo("itemsInMemory") == 0) { itemsInMemory = Integer.parseInt(val); } else if (arg.compareTo("outputPathsTextFormat") == 0) { outputPathsTextFormat = Boolean.parseBoolean(val); } else if (arg.compareTo("outputPathsBinaryFormat") == 0) { outputPathsBinaryFormat = Boolean.parseBoolean(val); } else if (arg.compareTo("pathsFile") == 0) { pathsFile = val; } else if (arg.compareTo("computeInversePaths") == 0) { computeInversePaths = Boolean.parseBoolean(val); } else if (arg.compareTo("computeTopPaths") == 0) { selectTopPaths = Boolean.parseBoolean(val); } else if (arg.compareTo("nTopPaths") == 0) { nTopPaths = Integer.parseInt(val); } else if (arg.compareTo("nItemTopPaths") == 0) { nItemTopPaths = Integer.parseInt(val); } // user paths extraction else if (arg.compareTo("pathsInMemory") == 0) { pathsInMemory = Integer.parseInt(val); } else if (arg.compareTo("userItemsSampling") == 0) { userItemsSampling = Integer.parseInt(val); } else if 
(arg.compareTo("ratingThreshold") == 0) { ratingThreshold = Integer.parseInt(val); } else if (arg.compareTo("normalize") == 0) { normalize = Boolean.parseBoolean(val); } // learning else if(arg.compareTo("silentLearning") == 0) { silentLearning = Boolean.parseBoolean(val); } else if(arg.compareTo("libLinear") == 0) { libLinear = Boolean.parseBoolean(val); } else if(arg.compareTo("rankLib") == 0) { rankLib = Boolean.parseBoolean(val); } else if(arg.compareTo("rankerType") == 0) { rankerType = Integer.parseInt(val); } else if(arg.compareTo("nIterations") == 0) { nIteration = Integer.parseInt(val); } else if(arg.compareTo("tolerance") == 0) { tolerance = Double.parseDouble(val); } else if(arg.compareTo("nThreshold") == 0) { nThreshold = Integer.parseInt(val); } else if(arg.compareTo("nTrees") == 0) { nTrees = Integer.parseInt(val); } else if(arg.compareTo("nTreeLeaves") == 0) { nTreeLeaves = Integer.parseInt(val); } else if(arg.compareTo("learningRate") == 0) { learningRate = Float.parseFloat(val); } else if(arg.compareTo("minLeafSupport") == 0) { minLeafSupport = Integer.parseInt(val); } else if(arg.compareTo("maxSelCount") == 0) { maxSelCount = Integer.parseInt(val); } else if(arg.compareTo("nMaxIteration") == 0) { nMaxIteration = Integer.parseInt(val); } else if(arg.compareTo("nRestart") == 0) { nRestart = Integer.parseInt(val); } else if(arg.compareTo("regularized") == 0) { regularized = Boolean.parseBoolean(val); } else if(arg.compareTo("nRoundToStopEarly") == 0) { nRoundToStopEarly = Integer.parseInt(val); } else if(arg.compareTo("nBag") == 0) { nBag = Integer.parseInt(val); } else if(arg.compareTo("featureSamplingRate") == 0) { featureSamplingRate = Float.parseFloat(val); } else if(arg.compareTo("subSamplingRate") == 0) { subSamplingRate = Float.parseFloat(val); } else if (arg.compareTo("listSolverType") == 0) { listStrSolverType = val; } else if (arg.compareTo("listC") == 0) { listStrC = val; } else if (arg.compareTo("listEps") == 0) { listStrEps = val; } else if (arg.compareTo("listP") == 0) { listStrP = val; } else if(arg.compareTo("timesRealFb") == 0) { timesRealFb = Integer.parseInt(val); } else if(arg.compareTo("nValidNegEx") == 0) { nValidNegEx = Integer.parseInt(val); } else if(arg.compareTo("minTrainEx") == 0) { minTrainEx = Integer.parseInt(val); } else if(arg.compareTo("addNegValidationEx") == 0) { addNegValidationEx = Boolean.parseBoolean(val); } // evaluation else if(arg.compareTo("evalMetric") == 0) { evalMetric = val; } else if(arg.compareTo("evalRatingThresh") == 0) { evalRatingThresh = Float.parseFloat(val); } else if(arg.compareTo("relUnknownItems") == 0) { relUnknownItems = Float.parseFloat(val); } else if(arg.compareTo("negRatingThresh") == 0) { negRatingThresh = Float.parseFloat(val); } else if(arg.compareTo("itemsMetadataEvalFile") == 0) { itemsMetadataEvalFile = val; } else if(arg.compareTo("outputEvaluationFile") == 0) { outputEvaluationFile = val; } else if(arg.compareTo("recDirToEval") == 0) { recDirToEval = val; } // recommentation else if (arg.compareTo("recommendationsFile") == 0) { recommendationsFile = val; } else if (arg.compareTo("topN") == 0) { topN = Integer.parseInt(val); } } } /** * @param args */ public static void main(String[] args) { System.out.println("start"); long start, stop; // read arguments for (int i = 0; i < args.length; i++) { if (args[i].contains("=")) { String arg = args[i].split("=")[0]; String val = args[i].split("=")[1]; if (arg.compareTo("configFile") == 0) configFile = val; } } // load parameters from config file loadParams(); 
System.out.println("parameters loaded"); // load parameters from command line loadCommandParams(args); if (dataExtraction) { RDFTripleExtractor m = new RDFTripleExtractor(workingDir, itemsMetadataFile, inputItemURIsFile, endpoint, graphURI, tdbDirectory, localDatasetFile, inverseProps, outputTextFormat, outputBinaryFormat, propertiesFile, caching, append, nThreads, jenatdb); start = System.currentTimeMillis(); m.run(); stop = System.currentTimeMillis(); logger.info("Finished all threads. Data extraction terminated in [sec]: " + ((stop - start) / 1000)); } // ItemPathExtractor if (itemPathExtraction & recAlgorithm == 1) { ItemPathExtractor pe = new ItemPathExtractor(workingDir, itemsMetadataFile, pathsFile, computeInversePaths, selectTopPaths, nTopPaths, nItemTopPaths, outputPathsBinaryFormat, outputPathsTextFormat, inverseProps, itemsInMemory, nThreads); start = System.currentTimeMillis(); pe.start(); stop = System.currentTimeMillis(); logger.info("Item paths extraction terminated in [sec]: " + ((stop - start) / 1000)); } else if (itemPathExtraction) { logger.info("the recommendation algorithm you set [" + recAlgorithm + "] is not expected to compute this operation "); } // UserPathExtractor if (userPathExtraction & recAlgorithm == 1) { UserPathExtractor upe = new UserPathExtractor(workingDir, inputTrainRatingFile, inputValidationRatingFile, normalize, pathsFile, itemsMetadataFile, pathsInMemory, userItemsSampling, ratingThreshold, nThreads); start = System.currentTimeMillis(); upe.start(); stop = System.currentTimeMillis(); logger.info("User path extraction terminated in [sec]: " + ((stop - start) / 1000)); } else if (userPathExtraction) { logger.info("the recommendation algorithm you set [" + recAlgorithm + "] is not expected to compute this operation "); } // itemGraphEmbedding if (itemGraphEmbedding & (recAlgorithm == 2 || recAlgorithm == 3)) { ItemGraphEmbedder mapper = new ItemGraphEmbedder(workingDir, itemsMetadataFile, entityMapFile, branchMapFile, embeddingOption, inputTrainRatingFile, maxBranchLength, addCollabFeatures, onlyEntityBranches, minMaxNorm, idf, maxFreq, minFreq, lengthNorm, listAlphaVals); start = System.currentTimeMillis(); mapper.computeMapping(); stop = System.currentTimeMillis(); logger.info("item graph embedding terminated in [sec]: " + ((stop - start) / 1000)); } else if (itemGraphEmbedding) { logger.info("the reccommendation algorithm you set [" + recAlgorithm + "] does not support graph embedding "); } // Compute recommendation with SPrank if(computeRec & (recAlgorithm == 1)) { // start learning start = System.currentTimeMillis(); if(libLinear) { LibLinearLearner l = new LibLinearLearner(workingDir, inputValidationRatingFile, evalRatingThresh, silentLearning, listStrSolverType, listStrC, listStrEps, listStrP, evalMetric, relUnknownItems); l.train(); } else if(rankLib) { RankLibLearner l = new RankLibLearner(workingDir, nThreads, rankerType, evalRatingThresh, evalMetric, silentLearning, nIteration, tolerance, nThreshold, nTrees, nTreeLeaves, learningRate, minLeafSupport, maxSelCount, nMaxIteration, nRestart, regularized, nRoundToStopEarly, nBag, featureSamplingRate, subSamplingRate); l.train(); } stop = System.currentTimeMillis(); logger.info("Learning terminated in [sec]: " + ((stop - start) / 1000)); // start computing recommendations start = System.currentTimeMillis(); Recommender p = new Recommender(workingDir, recommendationsFile, topN, nThreads, libLinear, rankLib); p.computeRec(); stop = System.currentTimeMillis(); logger.info("Recommendation comp. 
terminated in [sec]: " + ((stop - start) / 1000)); } if (computeRec & (recAlgorithm == 2)) { start = System.currentTimeMillis(); UserModelRecommender rec = new UserModelRecommender(topN, nThreads, recommendationsFile, itemsMetadataFile, embeddingOption, entityMapFile, branchMapFile, inputTrainRatingFile, inputValidationRatingFile, implicit, listStrSolverType, listStrC, listStrEps, evalRatingThresh, relUnknownItems, negRatingThresh, timesRealFb, nValidNegEx, minTrainEx, addNegValidationEx, evalMetric); rec.exec(); stop = System.currentTimeMillis(); logger.info("Single user recommendation comp. terminated in [sec]: " + ((stop - start) / 1000)); } if (computeRec & (recAlgorithm == 3)) { start = System.currentTimeMillis(); UserProfileSimilarityRecommender rec = new UserProfileSimilarityRecommender( topN, recommendationsFile, itemsMetadataFile, embeddingOption, entityMapFile, branchMapFile, inputTrainRatingFile, implicit, evalRatingThresh, nThreads); rec.exec(); stop = System.currentTimeMillis(); logger.info("Single user ProfileSim recommendation comp. terminated in [sec]: " + ((stop - start) / 1000)); } if (evaluation) { start = System.currentTimeMillis(); Evaluator ev = new Evaluator(workingDir, outputEvaluationFile, itemsMetadataFile, itemsMetadataEvalFile, inputTrainRatingFile, inputTestRatingFile, evalRatingThresh, negRatingThresh, relUnknownItems, topN); ev.eval(recommendationsFile, outputEvaluationFile); stop = System.currentTimeMillis(); logger.info("Evaluation terminated in [sec]: " + ((stop - start) / 1000)); } // if (evaluationDir) { start = System.currentTimeMillis(); Evaluator ev = new Evaluator(workingDir,outputEvaluationFile, itemsMetadataFile, itemsMetadataEvalFile, inputTrainRatingFile, inputTestRatingFile, evalRatingThresh, negRatingThresh, relUnknownItems, topN); ev.evalDir(recDirToEval); stop = System.currentTimeMillis(); logger.info("Evaluation terminated in [sec]: " + ((stop - start) / 1000)); } } }
33,091
33.760504
89
java
lodreclib
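// A minimal, self-contained sketch (not part of lodreclib) of the parameter-precedence pattern used by
// Main above: defaults are overridden first by config.properties (loadParams) and then by key=value
// command-line arguments (loadCommandParams), so command-line values win. The keys, defaults and file
// name below are illustrative only; the real Main handles many more parameters and types.
import java.io.FileInputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class ParamPrecedenceSketch {

	public static void main(String[] args) {
		// 0) hard-coded defaults, mirroring the static fields in Main
		Map<String, String> params = new HashMap<>();
		params.put("workingDir", "./");
		params.put("topN", "15000");

		// 1) overlay values from a properties file, if present
		Properties prop = new Properties();
		try (FileInputStream in = new FileInputStream("config.properties")) {
			prop.load(in);
			for (String key : prop.stringPropertyNames())
				params.put(key, prop.getProperty(key));
		} catch (IOException ignored) {
			// keep the defaults when no config file is available
		}

		// 2) overlay key=value command-line arguments last, so they take precedence
		for (String arg : args) {
			int eq = arg.indexOf('=');
			if (eq > 0)
				params.put(arg.substring(0, eq), arg.substring(eq + 1));
		}

		int topN = Integer.parseInt(params.get("topN"));
		System.out.println("workingDir=" + params.get("workingDir") + ", topN=" + topN);
	}
}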
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/recommender/RecommenderWorker.java
package it.poliba.sisinflab.LODRec.recommender; import it.poliba.sisinflab.LODRec.utils.BST; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileReader; import java.math.RoundingMode; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.Ranker; import ciir.umass.edu.learning.SparseDataPoint; import de.bwaldvogel.liblinear.FeatureNode; import de.bwaldvogel.liblinear.Linear; import de.bwaldvogel.liblinear.Model; public class RecommenderWorker implements Runnable { private BufferedWriter bw; private Model libLinearModel; private Ranker rankLibModel; private String testFile; private int topN; private Map<Integer, List<Integer>> recc; private Map<Integer, BST> predictions; private boolean libLinear; private boolean rankLib; public RecommenderWorker(String testFile, BufferedWriter bw, Model libLinearModel, Ranker rankLibModel, int topN, boolean libLinear, boolean rankLib) { this.testFile = testFile; this.libLinearModel = libLinearModel; this.rankLibModel = rankLibModel; this.bw = bw; this.topN = topN; this.libLinear = libLinear; this.rankLib = rankLib; } public RecommenderWorker(String testFile, Map<Integer, List<Integer>> recc, Model model, int topN) { this.testFile = testFile; this.libLinearModel = model; this.recc = recc; this.topN = topN; this.libLinear = true; this.rankLib = false; } @Override public void run() { // TODO Auto-generated method stub computePredictions(); if(bw != null) { writeOnFile(); } else { makeMap(); } } private void makeMap() { for (int user_id : predictions.keySet()) { BST bst = predictions.get(user_id); bst.visit(); List<Integer> list_items = bst.getSortedValues(); recc.put(user_id, list_items); } } private void writeOnFile() { try { List<Integer> users = new ArrayList<Integer>(); users.addAll(predictions.keySet()); Collections.sort(users); DecimalFormat form = new DecimalFormat("#.###"); form.setRoundingMode(RoundingMode.CEILING); StringBuffer str = null; for (int user_id : users) { str = new StringBuffer(); str.append(user_id + "\t"); BST bst = predictions.get(user_id); bst.visit(); List<Double> list_scores = bst.getSortedKeys(); List<Integer> list_items = bst.getSortedValues(); for (int i = 0; i < list_items.size(); i++) { str.append(list_items.get(i) + ":" + form.format(list_scores.get(i)) .replace(",", ".") + " "); } synchronized(bw){ bw.append(str); bw.newLine(); } } } catch(Exception e) { e.printStackTrace(); } } private void computePredictions() { predictions = new HashMap<Integer, BST>(); try { BufferedReader br = new BufferedReader(new FileReader(testFile)); double pred = 0; String line = null; while ((line = br.readLine()) != null) { String[] vals = line.split(" "); int user_id = Integer.parseInt(vals[1].split(":")[1]); int item_id = Integer.parseInt(vals[2].split(":")[1]); if(libLinear) { FeatureNode[] f = new FeatureNode[vals.length - 3]; for (int i = 3; i < vals.length; i++) { String[] ss = vals[i].split(":"); int key = Integer.parseInt(ss[0]); double value = Double.parseDouble(ss[1]); f[i - 3] = new FeatureNode(key, value); } pred = computePred(f); } else if(rankLib){ DataPoint p = new SparseDataPoint(line); pred = rankLibModel.predictScore(p); } predictions.putIfAbsent(user_id, new BST(topN)); predictions.get(user_id).insert(pred, item_id); } br.close(); } catch (Exception e) { e.printStackTrace(); } } private double computePred(FeatureNode[] f) 
{ double pred = 0; //double[] prob_estimates = new double[2]; /*if (libLinearModel.isProbabilityModel()) { prob_estimates = new double[2]; Linear.predictProbability(libLinearModel, f, prob_estimates); pred = prob_estimates[0]; } else pred += Linear.predict(libLinearModel, f);*/ pred = Linear.predict(libLinearModel, f); return pred; } }
4,393
21.191919
84
java
lodreclib
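// RecommenderWorker above keeps only the topN highest-scoring items per user in the project's BST
// utility (insert(score, item) on a size-bounded tree, then getSortedKeys()/getSortedValues()). Below is
// a standalone sketch of the same bounded top-N idea using only the standard library: a min-heap whose
// weakest entry is evicted once more than topN candidates are held. Names and sample scores are
// illustrative, not the project's API.
import java.util.ArrayDeque;
import java.util.Comparator;
import java.util.Deque;
import java.util.PriorityQueue;

public class TopNSketch {

	public static void main(String[] args) {
		int topN = 3;
		// min-heap on score: element[0] = score, element[1] = item id
		PriorityQueue<double[]> heap =
				new PriorityQueue<>(Comparator.comparingDouble((double[] e) -> e[0]));

		double[] scores = {0.4, 0.9, 0.1, 0.7, 0.8};
		int[] items = {11, 22, 33, 44, 55};

		for (int i = 0; i < scores.length; i++) {
			heap.offer(new double[]{scores[i], items[i]});
			if (heap.size() > topN)
				heap.poll(); // drop the weakest of the topN+1 candidates
		}

		// pop in ascending score order and push onto a stack to print best-first
		Deque<double[]> bestFirst = new ArrayDeque<>();
		while (!heap.isEmpty())
			bestFirst.push(heap.poll());
		while (!bestFirst.isEmpty()) {
			double[] e = bestFirst.pop();
			System.out.println((int) e[1] + ":" + e[0]);
		}
		// prints 22:0.9, 55:0.8, 44:0.7
	}
}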
lodreclib-master/src/main/java/it/poliba/sisinflab/LODRec/recommender/Recommender.java
package it.poliba.sisinflab.LODRec.recommender; import it.poliba.sisinflab.LODRec.main.Main; import it.poliba.sisinflab.LODRec.utils.BST; import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import ciir.umass.edu.learning.Ranker; import ciir.umass.edu.learning.RankerFactory; import de.bwaldvogel.liblinear.FeatureNode; import de.bwaldvogel.liblinear.Linear; import de.bwaldvogel.liblinear.Model; public class Recommender { private String workingDir; private int nThreads; private String recFile; private String modelFile; private int topN; private boolean libLinear; private boolean rankLib; private Ranker rankLibModel; private Model libLinearModel; private static Logger logger = LogManager.getLogger(Recommender.class.getName()); public Recommender(Model model, int topN) { this.workingDir = Main.workingDir; this.libLinearModel = model; this.topN = topN; } public Recommender(String workingDir, String recOutputFile, int topN, int nThreads, boolean libLinear, boolean rankLib) { this.workingDir = workingDir; this.recFile = recOutputFile; this.topN = topN; this.nThreads = nThreads; this.libLinear = libLinear; this.rankLib = rankLib; init(); } private void init() { this.modelFile = workingDir +"bestModel"; loadModel(); } private void loadModel() { logger.info("Loading prediction model"); try { if (libLinear) { File model_file = new File(modelFile); libLinearModel = Model.load(model_file); } else if (rankLib) { RankerFactory rFact = new RankerFactory(); rankLibModel = rFact.loadRanker(modelFile); } } catch (Exception e) { e.printStackTrace(); } } public Map<Integer, List<Integer>> computeRec(String file) { Map<Integer, List<Integer>> recc = new HashMap<Integer, List<Integer>>(); try { ExecutorService executor = Executors.newFixedThreadPool(1); Runnable worker = new RecommenderWorker(file, recc, libLinearModel, topN); executor.execute(worker); executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch(Exception e) { e.printStackTrace(); } return recc; } public void computeRec(){ logger.info("Start computing recommendations"); ExecutorService executor; executor = Executors.newFixedThreadPool(nThreads); try { BufferedWriter bw = new BufferedWriter(new FileWriter(recFile)); File directory = new File(workingDir); File[] files = directory.listFiles(); for (int i = 0; i < files.length; i++) { if(files[i].getName().contains("test")) { String testFile = workingDir + files[i].getName(); // path extraction worker user-items Runnable worker = new RecommenderWorker(testFile, bw, libLinearModel, rankLibModel, topN, libLinear, rankLib); // run the worker thread executor.execute(worker); } } executor.shutdown(); executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); bw.flush(); bw.close(); } catch(Exception e) { e.printStackTrace(); } } public Map<Integer, List<Integer>> computeRecc(FeatureNode[][] X, int[] userIndex, int[] itemIndex, boolean implicit) { Map<Integer, List<Integer>> recc = new HashMap<Integer, List<Integer>>(); // logger.info("Start computing reccomendations"); // usare un Btree invece della mappa per tener traccia solo dei topN Map<Integer, BST> predictions = new HashMap(); double[] prob_estimates; double pred; for (int i = 0; i < X.length; i++) { /*if 
(libLinearModel.isProbabilityModel() & implicit) { prob_estimates = new double[2]; Linear.predictProbability(libLinearModel, X[i], prob_estimates); pred = prob_estimates[0]; } else pred = Linear.predict(libLinearModel, X[i]);*/ pred = Linear.predict(libLinearModel, X[i]); int user_id = userIndex[i]; int item_id = itemIndex[i]; if (!predictions.containsKey(user_id)) predictions.put(user_id, new BST(topN)); predictions.get(user_id).insert(pred, item_id); } for (int user_id : predictions.keySet()) { BST bst = predictions.get(user_id); bst.visit(); List<Integer> list_items = bst.getSortedValues(); recc.put(user_id, list_items); } return recc; } }
4,623
21.891089
84
java
lodreclib
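// Recommender above loads either a liblinear Model or a RankLib Ranker from workingDir + "bestModel" and
// hands each test file to a RecommenderWorker for scoring. The following is a minimal usage sketch of the
// RankLib branch only, built from the calls that actually appear above (RankerFactory.loadRanker,
// SparseDataPoint, Ranker.predictScore); it assumes the lodreclib/RankLib classes are on the classpath,
// and the model path and feature line are purely illustrative.
import ciir.umass.edu.learning.DataPoint;
import ciir.umass.edu.learning.Ranker;
import ciir.umass.edu.learning.RankerFactory;
import ciir.umass.edu.learning.SparseDataPoint;

public class RankLibScoringSketch {

	public static void main(String[] args) {
		// load a previously trained ranker (path is illustrative)
		Ranker ranker = new RankerFactory().loadRanker("./bestModel");

		// one candidate entry in the SVMrank-like "<label> qid:<query> <fid>:<value> ..." format
		DataPoint p = new SparseDataPoint("0 qid:7 1:42 3:0.5 8:1.0");

		// predictScore is the call RecommenderWorker issues for every user-item candidate
		double score = ranker.predictScore(p);
		System.out.println("score = " + score);
	}
}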
lodreclib-master/src/main/java/ciir/umass/edu/features/FeatureManager.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.features; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.DenseDataPoint; import ciir.umass.edu.learning.SparseDataPoint; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.utilities.FileUtils; public class FeatureManager { /** * @param args */ public static void main(String[] args) { List<String> rankingFiles = new ArrayList<String>(); String outputDir = ""; boolean shuffle = false; int nFold = 0; float tvs = -1;// train-validation split in each fold if (args.length < 3) { System.out .println("Usage: java -cp bin/RankLib.jar ciir.umass.edu.features.FeatureManager <Params>"); System.out.println("Params:"); System.out.println("\t-input <file>\t\tSource data (ranked lists)"); System.out.println("\t-output <dir>\t\tThe output directory"); System.out.println(""); System.out.println(" [+] Shuffling"); System.out .println("\t-shuffle\t\tCreate a copy of the input file in which the ordering of all ranked lists (e.g. queries) is randomized."); System.out .println("\t\t\t\t(the order among objects (e.g. documents) within each ranked list is certainly unchanged)."); // System.out.println(""); System.out.println(" [+] k-fold Partitioning (sequential split)"); System.out.println("\t-k <fold>\t\tThe number of folds"); System.out .println("\t[ -tvs <x \\in [0..1]> ] Train-validation split ratio (x)(1.0-x)"); System.out.println(""); System.out .println(" NOTE: If both -shuffle and -k are specified, the input data will be shuffled and then sequentially partitioned."); System.out.println(""); return; } for (int i = 0; i < args.length; i++) { if (args[i].compareTo("-input") == 0) rankingFiles.add(args[++i]); else if (args[i].compareTo("-k") == 0) nFold = Integer.parseInt(args[++i]); else if (args[i].compareTo("-shuffle") == 0) shuffle = true; else if (args[i].compareTo("-tvs") == 0) tvs = Float.parseFloat(args[++i]); else if (args[i].compareTo("-output") == 0) outputDir = FileUtils.makePathStandard(args[++i]); } if (shuffle || nFold > 0) { List<RankList> samples = readInput(rankingFiles); if (samples.size() == 0) { System.out.println("Error: The input file is empty."); return; } String fn = FileUtils.getFileName(rankingFiles.get(0)); if (shuffle) { fn += ".shuffled"; System.out.print("Shuffling... "); Collections.shuffle(samples); System.out.println("[Done]"); System.out.print("Saving... "); FeatureManager.save(samples, outputDir + fn); System.out.println("[Done]"); } if (nFold > 0) { List<List<RankList>> trains = new ArrayList<List<RankList>>(); List<List<RankList>> tests = new ArrayList<List<RankList>>(); List<List<RankList>> valis = new ArrayList<List<RankList>>(); System.out.println("Partitioning... 
"); prepareCV(samples, nFold, tvs, trains, valis, tests); System.out.println("[Done]"); try { for (int i = 0; i < trains.size(); i++) { System.out.print("Saving fold " + (i + 1) + "/" + nFold + "... "); save(trains.get(i), outputDir + "f" + (i + 1) + ".train." + fn); save(tests.get(i), outputDir + "f" + (i + 1) + ".test." + fn); if (tvs > 0) save(valis.get(i), outputDir + "f" + (i + 1) + ".validation." + fn); System.out.println("[Done]"); } } catch (Exception ex) { System.out.println("Error: Cannot save partition data."); System.out.println("Occured in FeatureManager::main(): " + ex.toString()); } } } } /** * Read a set of rankings from a single file. * * @param inputFile * @return */ public static List<RankList> readInput(String inputFile) { return readInput(inputFile, false, false); } /** * Read a set of rankings from a single file. * * @param inputFile * @param mustHaveRelDoc * @param useSparseRepresentation * @return */ public static List<RankList> readInput(String inputFile, boolean mustHaveRelDoc, boolean useSparseRepresentation, Set<Integer> listFeatures) { List<RankList> samples = new ArrayList<RankList>(); // int countRL = 0; int countEntries = 0; System.out.println("start reading file " + inputFile); System.out.println("N. features: " + listFeatures.size()); try { String content = ""; BufferedReader in = new BufferedReader(new InputStreamReader( new FileInputStream(inputFile), "ASCII")); boolean mixedRankListLines = false; String lastID = ""; // boolean hasRel = false; List<DataPoint> rl = new ArrayList<DataPoint>(); while ((content = in.readLine()) != null) { content = content.trim(); if (content.length() == 0) continue; if (content.indexOf("#") == 0) continue; if (countEntries % 100000 == 0) System.out.println("Reading feature file [" + inputFile + "]: " + countEntries + "... "); DataPoint qp = null; if (useSparseRepresentation) qp = new SparseDataPoint(content, listFeatures); else qp = new DenseDataPoint(content); if (lastID.compareTo("") != 0 && lastID.compareTo(qp.getID()) != 0 || mixedRankListLines) { // se i campioni nelle ranklist appaiono in modo sparso // bisogna aggregarli alla ranklist appropriata int pos = getSample(samples, qp.getID()); if (pos != -1) { if (!mixedRankListLines) { samples.add(new RankList(rl)); rl = new ArrayList<DataPoint>(); } mixedRankListLines = true; samples.get(pos).addPoint(qp); }// if (!mixedRankListLines) { else { if (rl.size() > 0) samples.add(new RankList(rl)); rl = new ArrayList<DataPoint>(); mixedRankListLines = false; } } lastID = qp.getID(); if (!mixedRankListLines) rl.add(qp); countEntries++; } if (rl.size() > 0) samples.add(new RankList(rl)); in.close(); System.out.println("\rReading feature file [" + inputFile + "]... [Done.] "); System.out.println("(" + samples.size() + " ranked lists, " + countEntries + " entries read)"); countEntries = 0; for (int i = 0; i < samples.size(); i++) { countEntries += samples.get(i).size(); } System.out.println("(" + samples.size() + " ranked lists, " + countEntries + " entries read)"); } catch (Exception ex) { System.out.println("Error in FeatureManager::readInput(): " + ex.toString()); ex.printStackTrace(); System.exit(1); } return samples; } /** * Read a set of rankings from a single file. 
* * @param inputFile * @param mustHaveRelDoc * @param useSparseRepresentation * @return */ public static List<RankList> readInput(String inputFile, boolean mustHaveRelDoc, boolean useSparseRepresentation) { List<RankList> samples = new ArrayList<RankList>(); // int countRL = 0; int countEntries = 0; try { String content = ""; BufferedReader in = new BufferedReader(new InputStreamReader( new FileInputStream(inputFile), "ASCII")); boolean mixedRankListLines = false; String lastID = ""; // boolean hasRel = false; List<DataPoint> rl = new ArrayList<DataPoint>(); while ((content = in.readLine()) != null) { content = content.trim(); if (content.length() == 0) continue; if (content.indexOf("#") == 0) continue; if (countEntries % 250000 == 0) System.out.println("Reading feature file [" + inputFile + "]: " + countEntries + "... "); DataPoint qp = null; if (useSparseRepresentation) qp = new SparseDataPoint(content); else qp = new DenseDataPoint(content); if (lastID.compareTo("") != 0 && lastID.compareTo(qp.getID()) != 0 || mixedRankListLines) { // se i campioni nelle ranklist appaiono in modo sparso // bisogna aggregarli alla ranklist appropriata int pos = getSample(samples, qp.getID()); if (pos != -1) { if (!mixedRankListLines) { samples.add(new RankList(rl)); rl = new ArrayList<DataPoint>(); } mixedRankListLines = true; samples.get(pos).addPoint(qp); }// if (!mixedRankListLines) { else { if (rl.size() > 0) samples.add(new RankList(rl)); rl = new ArrayList<DataPoint>(); mixedRankListLines = false; } } lastID = qp.getID(); if (!mixedRankListLines) rl.add(qp); countEntries++; } if (rl.size() > 0) samples.add(new RankList(rl)); in.close(); System.out.println("\rReading feature file [" + inputFile + "]... [Done.] "); System.out.println("(" + samples.size() + " ranked lists, " + countEntries + " entries read)"); countEntries = 0; for (int i = 0; i < samples.size(); i++) { countEntries += samples.get(i).size(); } System.out.println("(" + samples.size() + " ranked lists, " + countEntries + " entries read)"); } catch (Exception ex) { System.out.println("Error in FeatureManager::readInput(): " + ex.toString()); ex.printStackTrace(); System.exit(1); } return samples; } private static int getSample(List<RankList> samples, String id) { for (int i = 0; i < samples.size(); i++) { RankList l = samples.get(i); if (l.size() > 0) { if (l.getID().compareTo(id) == 0) return i; } else { return -1; } } return -1; } /** * Read sets of rankings from multiple files. Then merge them altogether * into a single ranking. * * @param inputFiles * @return */ public static List<RankList> readInput(List<String> inputFiles) { List<RankList> samples = new ArrayList<RankList>(); for (int i = 0; i < inputFiles.size(); i++) { List<RankList> s = readInput(inputFiles.get(i), false, false); samples.addAll(s); } return samples; } /** * Read features specified in an input feature file. Expecting one feature * per line. 
* * @param featureDefFile * @return */ public static int[] readFeature(String featureDefFile) { int[] features = null; List<String> fids = new ArrayList<String>(); try { String content = ""; BufferedReader in = new BufferedReader(new InputStreamReader( new FileInputStream(featureDefFile))); while ((content = in.readLine()) != null) { content = content.trim(); if (content.length() == 0) continue; if (content.indexOf("#") == 0) continue; fids.add(content.split("\t")[0].trim()); } in.close(); features = new int[fids.size()]; for (int i = 0; i < fids.size(); i++) features[i] = Integer.parseInt(fids.get(i)); } catch (Exception ex) { System.out.println("Error in FeatureManager::readFeature(): " + ex.toString()); System.exit(1); } return features; } /** * Obtain all features present in a sample set. Important: If your data * (DataPoint objects) is loaded by RankLib (e.g. command-line use) or its * APIs, there is nothing to watch out for. If you create the DataPoint * objects yourself, make sure DataPoint.featureCount correctly reflects the * total number features present in your dataset. * * @param samples * @return */ public static int[] getFeatureFromSampleVector(List<RankList> samples) { if (samples.size() == 0) { System.out .println("Error in FeatureManager::getFeatureFromSampleVector(): There are no training samples."); System.exit(1); } int fc = DataPoint.getFeatureCount(); int[] features = new int[fc]; for (int i = 1; i <= fc; i++) features[i - 1] = i; return features; } /** * Split the input sample set into k chunks (folds) of roughly equal size * and create train/test data for each fold. Note that NO randomization is * done. If you want to randomly split the data, make sure that you * randomize the order in the input samples prior to calling this function. * * @param samples * @param nFold * @param trainingData * @param testData */ public static void prepareCV(List<RankList> samples, int nFold, List<List<RankList>> trainingData, List<List<RankList>> testData) { prepareCV(samples, nFold, -1, trainingData, null, testData); } /** * Split the input sample set into k chunks (folds) of roughly equal size * and create train/test data for each fold. Then it further splits the * training data in each fold into train and validation. Note that NO * randomization is done. If you want to randomly split the data, make sure * that you randomize the order in the input samples prior to calling this * function. 
* * @param samples * @param nFold * @param tvs * Train/validation split ratio * @param trainingData * @param validationData * @param testData */ public static void prepareCV(List<RankList> samples, int nFold, float tvs, List<List<RankList>> trainingData, List<List<RankList>> validationData, List<List<RankList>> testData) { /* * int[][] testQueries = new int[][]{ {67, 125, 78, 96, 174, 112, 62, * 92, 19, 181, 89, 126, 104, 111, 41, 170, 131, 59, 162, 83, 58, 70, * 161, 4, 184, 80, 109, 132, 168, 49, 197, 50, 176, 200, 53, 103, 199, * 25, 94, 66}, {13, 124, 55, 180, 65, 105, 31, 24, 163, 123, 33, 159, * 101, 36, 149, 127, 142, 22, 143, 7, 68, 69, 15, 147, 57, 196, 23, 77, * 1, 2, 106, 5, 64, 137, 14, 86, 73, 183, 114, 18}, {54, 63, 9, 17, * 182, 44, 29, 186, 128, 99, 135, 167, 139, 133, 146, 82, 34, 141, 154, * 194, 16, 140, 26, 75, 190, 173, 93, 179, 6, 11, 28, 38, 189, 193, 51, * 171, 40, 3, 90, 20}, {178, 98, 130, 37, 172, 165, 85, 122, 115, 117, * 153, 46, 30, 152, 138, 79, 81, 95, 91, 187, 100, 110, 56, 169, 175, * 157, 87, 160, 43, 47, 88, 27, 155, 195, 129, 45, 21, 145, 8, 121}, * {116, 166, 32, 60, 61, 52, 118, 177, 72, 156, 76, 108, 151, 71, 35, * 150, 113, 164, 107, 134, 48, 10, 12, 84, 188, 39, 158, 191, 102, 74, * 185, 136, 119, 42, 97, 198, 192, 148, 144, 120} }; * * List<List<Integer>> trainSamplesIdx = new ArrayList<List<Integer>>(); * for(int f=0;f<nFold;f++) trainSamplesIdx.add(new * ArrayList<Integer>()); * * for(int i=0;i<samples.size();i++) { int qid = * Integer.parseInt(samples.get(i).getID()); int f = -1; for(int * j=0;j<testQueries.length&&f==-1;j++) { for(int * k=0;k<testQueries[j].length&&f==-1;k++) if(qid == testQueries[j][k]) * f = j; } if(f==-1) { System.out.println("Error: qid=" + qid); * System.exit(1); } trainSamplesIdx.get(f).add(i); } */ List<List<Integer>> trainSamplesIdx = new ArrayList<List<Integer>>(); int size = samples.size() / nFold; int start = 0; int total = 0; for (int f = 0; f < nFold; f++) { List<Integer> t = new ArrayList<Integer>(); for (int i = 0; i < size && start + i < samples.size(); i++) t.add(start + i); trainSamplesIdx.add(t); total += t.size(); start += size; } for (; total < samples.size(); total++) trainSamplesIdx.get(trainSamplesIdx.size() - 1).add(total); for (int i = 0; i < trainSamplesIdx.size(); i++) { System.out.print("\rCreating data for fold-" + (i + 1) + "..."); List<RankList> train = new ArrayList<RankList>(); List<RankList> test = new ArrayList<RankList>(); List<RankList> vali = new ArrayList<RankList>(); // train-test split List<Integer> t = trainSamplesIdx.get(i); for (int j = 0; j < samples.size(); j++) { if (t.contains(j)) test.add(new RankList(samples.get(j))); else train.add(new RankList(samples.get(j))); } // train-validation split if specified if (tvs > 0) { int validationSize = (int) (train.size() * (1.0 - tvs)); for (int j = 0; j < validationSize; j++) { vali.add(train.get(train.size() - 1)); train.remove(train.size() - 1); } } // save them trainingData.add(train); testData.add(test); if (tvs > 0) validationData.add(vali); } System.out.println("\rCreating data for " + nFold + " folds... 
[Done] "); } /** * Split the input sample set into 2 chunks: one for training and one for * either validation or testing * * @param sampleFile * @param featureDefFile * @param percentTrain * The percentage of data used for training * @param trainingData * @param testData */ public static void prepareSplit(List<RankList> samples, double percentTrain, List<RankList> trainingData, List<RankList> testData) { int size = (int) (samples.size() * percentTrain); for (int i = 0; i < size; i++) trainingData.add(new RankList(samples.get(i))); for (int i = size; i < samples.size(); i++) testData.add(new RankList(samples.get(i))); } /** * Save a sample set to file * * @param samples * @param outputFile */ public static void save(List<RankList> samples, String outputFile) { try { BufferedWriter out = new BufferedWriter(new OutputStreamWriter( new FileOutputStream(outputFile))); for (int j = 0; j < samples.size(); j++) save(samples.get(j), out); out.close(); } catch (Exception ex) { System.out.println("Error in FeatureManager::save(): " + ex.toString()); System.exit(1); } } /** * Write a ranked list to a file object. * * @param r * @param out * @throws Exception */ private static void save(RankList r, BufferedWriter out) throws Exception { for (int j = 0; j < r.size(); j++) { out.write(r.get(j).toString()); out.newLine(); } } /** * Get feature id(s) from a description file * * @param fn * @return */ public int[] getFeatureIDFromFile(String fn) { if (fn.compareTo("") == 0) return null; List<String> l = getFeatureNameFromFile(fn); int[] fv = new int[l.size()]; for (int i = 0; i < l.size(); i++) fv[i] = Integer.parseInt(l.get(i)); return fv; } /** * Get feature names from a description file * * @param fn * @return */ public List<String> getFeatureNameFromFile(String fn) { List<String> fName = new ArrayList<String>(); try { String content = ""; BufferedReader in = new BufferedReader(new InputStreamReader( new FileInputStream(fn), "ASCII")); while ((content = in.readLine()) != null) { content = content.trim(); if (content.length() == 0) continue; if (content.indexOf("#") == 0) continue; if (content.contains(":")) { content = content.split(":")[0]; } if (content.contains("\t")) { content = content.split("\t")[0]; } fName.add(content); } in.close(); } catch (Exception ex) { System.out.println(ex.toString()); } return fName; } }
19,369
29.697306
135
java
lodreclib
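// FeatureManager.prepareCV above partitions the ranked lists sequentially into nFold chunks, uses each
// chunk in turn as the test fold (the last fold also absorbing the remainder), puts everything else into
// training, and, when tvs > 0, peels the tail of the training part off as validation. Below is a
// self-contained sketch of just that index arithmetic on a toy list of query ids (strings stand in for
// RankList objects).
import java.util.ArrayList;
import java.util.List;

public class SequentialFoldsSketch {

	public static void main(String[] args) {
		List<String> samples = new ArrayList<>();
		for (int i = 1; i <= 10; i++)
			samples.add("q" + i);
		int nFold = 3;
		float tvs = 0.8f; // 80% train / 20% validation inside each fold

		int size = samples.size() / nFold;
		for (int f = 0; f < nFold; f++) {
			int from = f * size;
			// the last fold also takes the remainder, as in prepareCV
			int to = (f == nFold - 1) ? samples.size() : from + size;

			List<String> test = new ArrayList<>(samples.subList(from, to));
			List<String> train = new ArrayList<>(samples);
			train.removeAll(test);

			// move the tail of the training chunk into validation
			int validationSize = (int) (train.size() * (1.0 - tvs));
			List<String> vali = new ArrayList<>(
					train.subList(train.size() - validationSize, train.size()));
			train.subList(train.size() - validationSize, train.size()).clear();

			System.out.println("fold " + (f + 1) + ": train=" + train
					+ " vali=" + vali + " test=" + test);
		}
	}
}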
lodreclib-master/src/main/java/ciir/umass/edu/features/LinearNormalizer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.features; import java.util.Arrays; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.RankList; /** * @author Laura Dietz, vdang */ public class LinearNormalizer extends Normalizer { @Override public void normalize(RankList rl) { if (rl.size() == 0) { System.out .println("Error in LinearNormalizor::normalize(): The input ranked list is empty"); System.exit(1); } int nFeature = DataPoint.getFeatureCount(); int[] fids = new int[nFeature]; for (int i = 1; i <= nFeature; i++) fids[i - 1] = i; normalize(rl, fids); } @Override public void normalize(RankList rl, int[] fids) { if (rl.size() == 0) { System.out .println("Error in LinearNormalizor::normalize(): The input ranked list is empty"); System.exit(1); } // remove duplicate features from the input @fids ==> avoid normalizing // the same features multiple times fids = removeDuplicateFeatures(fids); float[] min = new float[fids.length]; float[] max = new float[fids.length]; Arrays.fill(min, 0); Arrays.fill(max, 0); for (int i = 0; i < rl.size(); i++) { DataPoint dp = rl.get(i); for (int j = 0; j < fids.length; j++) { min[j] = Math.min(min[j], dp.getFeatureValue(fids[j])); max[j] = Math.max(max[j], dp.getFeatureValue(fids[j])); } } for (int i = 0; i < rl.size(); i++) { DataPoint dp = rl.get(i); for (int j = 0; j < fids.length; j++) { if (dp.getFeatureValue(fids[j]) > 0) { if (max[j] > min[j]) { float value = (dp.getFeatureValue(fids[j]) - min[j]) / (max[j] - min[j]); dp.setFeatureValue(fids[j], value); } else dp.setFeatureValue(fids[j], 0); } } } } public String name() { return "linear"; } }
2,230
27.974026
88
java
lodreclib
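// LinearNormalizer above rescales positive feature values within one ranked list via
// value' = (value - min) / (max - min), and sets a positive value to 0 when the column has no spread
// (max == min). A tiny standalone illustration of the min-max formula on one feature column; it
// deliberately skips the class's per-feature bookkeeping and edge-case handling.
public class MinMaxSketch {

	public static void main(String[] args) {
		float[] column = {2f, 5f, 11f, 8f};

		float min = Float.MAX_VALUE;
		float max = -Float.MAX_VALUE;
		for (float v : column) {
			min = Math.min(min, v);
			max = Math.max(max, v);
		}

		// value' = (v - min) / (max - min)
		for (float v : column) {
			float scaled = (max > min) ? (v - min) / (max - min) : 0f;
			System.out.printf("%.1f -> %.3f%n", v, scaled);
		}
		// 2.0 -> 0.000, 5.0 -> 0.333, 11.0 -> 1.000, 8.0 -> 0.667
	}
}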
lodreclib-master/src/main/java/ciir/umass/edu/features/ZScoreNormalizor.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.features; import java.util.Arrays; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.RankList; /** * @author vdang */ public class ZScoreNormalizor extends Normalizer { @Override public void normalize(RankList rl) { if(rl.size() == 0) { System.out.println("Error in ZScoreNormalizor::normalize(): The input ranked list is empty"); System.exit(1); } int nFeature = DataPoint.getFeatureCount(); double[] means = new double[nFeature]; Arrays.fill(means, 0); for(int i=0;i<rl.size();i++) { DataPoint dp = rl.get(i); for(int j=1;j<=nFeature;j++) means[j-1] += dp.getFeatureValue(j); } for(int j=1;j<=nFeature;j++) { means[j-1] = means[j-1] / rl.size(); double std = 0; for(int i=0;i<rl.size();i++) { DataPoint p = rl.get(i); double x = p.getFeatureValue(j) - means[j-1]; std += x*x; } std = Math.sqrt(std / (rl.size()-1)); //normalize if(std > 0) { for(int i=0;i<rl.size();i++) { DataPoint p = rl.get(i); double x = (p.getFeatureValue(j) - means[j-1])/std;//x ~ standard normal (0, 1) p.setFeatureValue(j, (float)x); } } } } @Override public void normalize(RankList rl, int[] fids) { if(rl.size() == 0) { System.out.println("Error in SumNormalizor::normalize(): The input ranked list is empty"); System.exit(1); } //remove duplicate features from the input @fids ==> avoid normalizing the same features multiple times fids = removeDuplicateFeatures(fids); double[] means = new double[fids.length]; Arrays.fill(means, 0); for(int i=0;i<rl.size();i++) { DataPoint dp = rl.get(i); for(int j=0;j<fids.length;j++) means[j] += dp.getFeatureValue(fids[j]); } for(int j=0;j<fids.length;j++) { means[j] = means[j] / rl.size(); double std = 0; for(int i=0;i<rl.size();i++) { DataPoint p = rl.get(i); double x = p.getFeatureValue(fids[j]) - means[j]; std += x*x; } std = Math.sqrt(std / (rl.size()-1)); //normalize if(std > 0.0) { for(int i=0;i<rl.size();i++) { DataPoint p = rl.get(i); double x = (p.getFeatureValue(fids[j]) - means[j])/std;//x ~ standard normal (0, 1) p.setFeatureValue(fids[j], (float)x); } } } } public String name() { return "zscore"; } }
2,810
24.788991
105
java
lodreclib
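// ZScoreNormalizor above standardizes each feature within a ranked list to z = (x - mean) / std,
// using the sample standard deviation (division by n - 1). The same computation on one toy column:
public class ZScoreSketch {

	public static void main(String[] args) {
		double[] column = {2.0, 4.0, 4.0, 6.0};

		double mean = 0;
		for (double x : column)
			mean += x;
		mean /= column.length; // mean = 4.0

		double ss = 0;
		for (double x : column)
			ss += (x - mean) * (x - mean);
		double std = Math.sqrt(ss / (column.length - 1)); // sample std, as in the class above

		for (double x : column) {
			double z = (std > 0) ? (x - mean) / std : 0;
			System.out.printf("%.1f -> %.3f%n", x, z);
		}
		// mean 4, std ~1.633: 2.0 -> -1.225, 4.0 -> 0.000, 6.0 -> 1.225
	}
}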
lodreclib-master/src/main/java/ciir/umass/edu/features/Normalizer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.features; import java.util.HashSet; import java.util.List; import ciir.umass.edu.learning.RankList; /** * @author vdang * * Abstract class for feature normalization */ public class Normalizer { public void normalize(RankList rl) { //need overriding in subclass } public void normalize(List<RankList> samples) { for(int i=0;i<samples.size();i++) normalize(samples.get(i)); } public void normalize(RankList rl, int[] fids) { //need overriding in subclass } public void normalize(List<RankList> samples, int[] fids) { for(int i=0;i<samples.size();i++) normalize(samples.get(i), fids); } public int[] removeDuplicateFeatures(int[] fids) { HashSet<Integer> uniqueSet = new HashSet<Integer>(); for(int i=0;i<fids.length;i++) if(!uniqueSet.contains(fids[i])) uniqueSet.add(fids[i]); fids = new int[uniqueSet.size()]; int fi=0; for(Integer i : uniqueSet) fids[fi++] = i.intValue(); return fids; } public String name() { //need overriding in subclass return ""; } }
1,517
24.728814
82
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/features/SumNormalizor.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.features; import java.util.Arrays; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.RankList; /** * @author vdang */ public class SumNormalizor extends Normalizer { @Override public void normalize(RankList rl) { if (rl.size() == 0) { System.out .println("Error in SumNormalizor::normalize(): The input ranked list is empty"); System.exit(1); } int nFeature = DataPoint.getFeatureCount(); double[] norm = new double[nFeature]; Arrays.fill(norm, 0); for (int i = 0; i < rl.size(); i++) { DataPoint dp = rl.get(i); for (int j = 1; j <= nFeature; j++) norm[j - 1] += Math.abs(dp.getFeatureValue(j)); } for (int i = 0; i < rl.size(); i++) { DataPoint dp = rl.get(i); for (int j = 1; j <= nFeature; j++) { if (norm[j - 1] > 0) { if (dp.getFeatureValue(j) > 0) { dp.setFeatureValue(j, (float) (dp.getFeatureValue(j) / norm[j - 1])); } } } } } @Override public void normalize(RankList rl, int[] fids) { if (rl.size() == 0) { System.out .println("Error in SumNormalizor::normalize(): The input ranked list is empty"); System.exit(1); } // remove duplicate features from the input @fids ==> avoid normalizing // the same features multiple times fids = removeDuplicateFeatures(fids); double[] norm = new double[fids.length]; Arrays.fill(norm, 0); for (int i = 0; i < rl.size(); i++) { DataPoint dp = rl.get(i); for (int j = 0; j < fids.length; j++) norm[j] += Math.abs(dp.getFeatureValue(fids[j])); } for (int i = 0; i < rl.size(); i++) { DataPoint dp = rl.get(i); for (int j = 0; j < fids.length; j++) if (norm[j] > 0) { if (dp.getFeatureValue(fids[j]) > 0) { dp.setFeatureValue(fids[j], (float) (dp.getFeatureValue(fids[j]) / norm[j])); } } } } public String name() { return "sum"; } }
2,375
27.285714
85
java
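SumNormalizor above rescales each feature by the sum of its absolute values across a ranked list. Below is a minimal standalone sketch of that idea under stated simplifications: it uses plain arrays instead of RankList/DataPoint, and it rescales every value whenever the column sum is positive, whereas the class above additionally skips non-positive values. The class and method names here are invented for illustration.

// Illustrative sketch: per-feature sum normalization over a ranked list of documents.
public class SumNormalizeDemo {
    static void sumNormalize(double[][] docs) {
        if (docs.length == 0) return;
        int nFeature = docs[0].length;
        double[] norm = new double[nFeature];
        for (double[] doc : docs)                  // column sums of absolute values
            for (int j = 0; j < nFeature; j++)
                norm[j] += Math.abs(doc[j]);
        for (double[] doc : docs)                  // divide each value by its column sum
            for (int j = 0; j < nFeature; j++)
                if (norm[j] > 0)
                    doc[j] /= norm[j];
    }
    public static void main(String[] args) {
        double[][] docs = { {2.0, 1.0}, {6.0, 3.0} };
        sumNormalize(docs);
        // feature 1 sums to 8 and feature 2 to 4, so the rows become {0.25, 0.25} and {0.75, 0.75}
        System.out.println(docs[0][0] + " " + docs[1][0]); // 0.25 0.75
    }
}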
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/DCGScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.utilities.SimpleMath; public class DCGScorer extends MetricScorer { protected static double[] discount = null;//cache protected static double[] gain = null;//cache public DCGScorer() { this.k = 10; //init cache if we haven't already done so if(discount == null) { discount = new double[5000]; for(int i=0;i<discount.length;i++) discount[i] = 1.0/SimpleMath.logBase2(i+2); gain = new double[6]; for(int i=0;i<6;i++) gain[i] = (1<<i) - 1;//2^i-1 } } public DCGScorer(int k) { this.k = k; //init cache if we haven't already done so if(discount == null) { discount = new double[5000]; for(int i=0;i<discount.length;i++) discount[i] = 1.0/SimpleMath.logBase2(i+2); gain = new double[6]; for(int i=0;i<6;i++) gain[i] = (1<<i) - 1;//2^i - 1 } } public MetricScorer clone() { return new DCGScorer(); } /** * Compute DCG at k. */ public double score(RankList rl) { if(rl.size() == 0) return 0; int size = k; if(k > rl.size() || k <= 0) size = rl.size(); int[] rel = getRelevanceLabels(rl); return getDCG(rel, size); } public double[][] swapChange(RankList rl) { int[] rel = getRelevanceLabels(rl); int size = (rl.size() > k) ? k : rl.size(); double[][] changes = new double[rl.size()][]; for(int i=0;i<rl.size();i++) changes[i] = new double[rl.size()]; //for(int i=0;i<rl.size()-1;i++)//ignore K, compute changes from the entire ranked list for(int i=0;i<size;i++) for(int j=i+1;j<rl.size();j++) changes[j][i] = changes[i][j] = (discount(i) - discount(j)) * (gain(rel[i]) - gain(rel[j])); return changes; } public String name() { return "DCG@"+k; } protected double getDCG(int[] rel, int topK) { double dcg = 0; for(int i=0;i<topK;i++) dcg += gain(rel[i]) * discount(i); return dcg; } //lazy caching protected double discount(int index) { if(index < discount.length) return discount[index]; //we need to expand our cache int cacheSize = discount.length + 1000; while(cacheSize <= index) cacheSize += 1000; double[] tmp = new double[cacheSize]; System.arraycopy(discount, 0, tmp, 0, discount.length); for(int i=discount.length;i<tmp.length;i++) tmp[i] = 1.0/SimpleMath.logBase2(i+2); discount = tmp; return discount[index]; } protected double gain(int rel) { if(rel < gain.length) return gain[rel]; //we need to expand our cache int cacheSize = gain.length + 10; while(cacheSize <= rel) cacheSize += 10; double[] tmp = new double[cacheSize]; System.arraycopy(gain, 0, tmp, 0, gain.length); for(int i=gain.length;i<tmp.length;i++) tmp[i] = (1<<i) - 1;//2^i - 1 gain = tmp; return gain[rel]; } }
3,276
24.403101
96
java
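DCGScorer computes DCG@k with gain 2^rel - 1 and discount 1/log2(rank + 2), caching both tables. The self-contained sketch below reproduces that formula directly on a plain label array; the class and method names are invented for the example.

// Illustrative sketch of DCG@k with the same gain and discount as DCGScorer.
public class DCGDemo {
    static double dcgAtK(int[] rel, int k) {
        int size = Math.min(k, rel.length);
        double dcg = 0.0;
        for (int i = 0; i < size; i++)
            dcg += ((1 << rel[i]) - 1) / (Math.log(i + 2) / Math.log(2)); // (2^rel - 1) / log2(rank + 2)
        return dcg;
    }
    public static void main(String[] args) {
        int[] rel = {3, 2, 0, 1};
        // gains 7, 3, 0, 1; discounts 1, ~0.631, 0.5, ~0.431; DCG ~ 7 + 1.893 + 0 + 0.431 = 9.324
        System.out.println(dcgAtK(rel, 4));
    }
}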
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/ReciprocalRankScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.util.Arrays; import ciir.umass.edu.learning.RankList; /** * @author vdang */ public class ReciprocalRankScorer extends MetricScorer { public ReciprocalRankScorer() { this.k = 0;//consider the whole list } public double score(RankList rl) { int size = (rl.size() > k) ? k : rl.size(); int firstRank = -1; for(int i=0;i<size && (firstRank==-1);i++) { if(rl.get(i).getLabel() > relThresh)//relevant firstRank = i+1; } return (firstRank==-1)?0:(1.0f/firstRank); } public MetricScorer clone() { return new ReciprocalRankScorer(); } public String name() { return "RR@"+k; } public double[][] swapChange(RankList rl) { int firstRank = -1; int secondRank = -1; int size = (rl.size() > k) ? k : rl.size(); for(int i=0;i<size;i++) { if(rl.get(i).getLabel() > relThresh)//relevant { if(firstRank==-1) firstRank = i; else if(secondRank == -1) secondRank = i; } } //compute the change in RR by swapping each pair double[][] changes = new double[rl.size()][]; for(int i=0;i<rl.size();i++) { changes[i] = new double[rl.size()]; Arrays.fill(changes[i], 0); } double rr = 0.0; //consider swapping the first rank doc with everything else further down the list if(firstRank != -1) { rr = 1.0 / (firstRank+1); for(int j=firstRank+1;j<size;j++) { if(((int)(rl.get(j).getLabel())) == 0)//non-relevant { if(secondRank==-1 || j < secondRank)//after the swap, j is now the position of our used-to-be firstRank relevant doc changes[firstRank][j] = changes[j][firstRank] = 1.0 / (j+1) - rr; else changes[firstRank][j] = changes[j][firstRank] = 1.0 / (secondRank+1) - rr; } } for(int j=size;j<rl.size();j++) if(((int)(rl.get(j).getLabel())) == 0)//non-relevant { if(secondRank == -1) changes[firstRank][j] = changes[j][firstRank] = - rr; else changes[firstRank][j] = changes[j][firstRank] = 1.0 / (secondRank+1) - rr; } } else firstRank = size; //now it's time to consider swapping docs at earlier ranks than the first rank with those below it (and *it* too) for(int i=0;i<firstRank;i++) { for(int j=firstRank;j<rl.size();j++) { if(rl.get(j).getLabel() > 0) changes[i][j] = changes[j][i] = 1.0/(i+1) - rr; } } return changes; } }
2,857
25.71028
121
java
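ReciprocalRankScorer scores a list by the reciprocal of the 1-based rank of the first document whose label exceeds relThresh (the threshold defaults to 1 in MetricScorer), and 0 when no such document appears within the cut-off. A tiny array-based sketch with invented names:

// Illustrative sketch of the reciprocal-rank computation.
public class ReciprocalRankDemo {
    static double reciprocalRank(float[] labels, float relThresh) {
        for (int i = 0; i < labels.length; i++)
            if (labels[i] > relThresh)          // first relevant document
                return 1.0 / (i + 1);
        return 0.0;                             // no relevant document found
    }
    public static void main(String[] args) {
        float[] labels = {0f, 0f, 2f, 1f};      // first label above the threshold sits at rank 3
        System.out.println(reciprocalRank(labels, 1f)); // 1/3 ~ 0.333
    }
}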
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/ERRScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import ciir.umass.edu.learning.RankList; /** * * @author Van Dang * Expected Reciprocal Rank */ public class ERRScorer extends MetricScorer { public static double MAX = 16;//by default, we assume the relevance scale of {0, 1, 2, 3, 4} => g_max = 4 => 2^g_max = 16 public ERRScorer() { this.k = 10; } public ERRScorer(int k) { this.k = k; } public ERRScorer clone() { return new ERRScorer(); } /** * Compute ERR at k. NDCG(k) = DCG(k) / DCG_{perfect}(k). Note that the "perfect ranking" must be computed based on the whole list, * not just top-k portion of the list. */ public double score(RankList rl) { int size = k; if(k > rl.size() || k <= 0) size = rl.size(); List<Integer> rel = new ArrayList<Integer>(); for(int i=0;i<rl.size();i++) rel.add((int)rl.get(i).getLabel()); double s = 0.0; double p = 1.0; for(int i=1;i<=size;i++) { double R = R(rel.get(i-1)); s += p*R/i; p *= (1.0 - R); } return s; } public String name() { return "ERR@" + k; } private double R(int rel) { return (double)((1<<rel)-1) / MAX;// (2^rel - 1)/MAX; } public double[][] swapChange(RankList rl) { int size = (rl.size() > k) ? k : rl.size(); int[] labels = new int[rl.size()]; double[] R = new double[rl.size()]; double[] np = new double[rl.size()];//p[i] = (1 - p[0])(1 - p[1])...(1-p[i-1]) double p = 1.0; //for(int i=0;i<rl.size();i++)//ignore K, compute changes from the entire ranked list for(int i=0;i<size;i++) { labels[i] = (int)rl.get(i).getLabel(); R[i] = R(labels[i]); np[i] = p * (1.0 - R[i]); p *= np[i]; } double[][] changes = new double[rl.size()][]; for(int i=0;i<rl.size();i++) { changes[i] = new double[rl.size()]; Arrays.fill(changes[i], 0); } //for(int i=0;i<rl.size()-1;i++)//ignore K, compute changes from the entire ranked list for(int i=0;i<size;i++) { double v1 = 1.0/(i+1) * (i==0?1:np[i-1]); double change = 0; for(int j=i+1;j<rl.size();j++) { if(labels[i] == labels[j]) change = 0; else { change = v1 * (R[j] - R[i]); p = (i==0?1:np[i-1]) * (R[i] - R[j]); for(int k=i+1;k<j;k++) { change += p * R[k]/(1+k); p *= 1.0 - R[k]; } change += (np[j-1]*(1.0 - R[j]) * R[i]/(1.0 - R[i]) - np[j-1] * R[j]) / (j+1); } changes[j][i] = changes[i][j] = change; } } return changes; } }
2,987
24.109244
132
java
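ERRScorer implements Expected Reciprocal Rank with stopping probability R(rel) = (2^rel - 1)/16, i.e. a 0-4 relevance scale with MAX = 16. The sketch below walks the same accumulation (score += p*R/i, then p *= 1 - R) over a plain label array; names are invented for illustration.

// Illustrative sketch of ERR@k with the same R(rel) as ERRScorer.
public class ERRDemo {
    static double err(int[] rel, int k) {
        double max = 16.0;                        // 2^4, assuming relevance grades 0..4
        double score = 0.0, p = 1.0;
        int size = Math.min(k, rel.length);
        for (int i = 1; i <= size; i++) {
            double r = ((1 << rel[i - 1]) - 1) / max;  // stopping probability at rank i
            score += p * r / i;
            p *= (1.0 - r);                            // probability the user continues past rank i
        }
        return score;
    }
    public static void main(String[] args) {
        int[] rel = {4, 0, 2};
        // R = {15/16, 0, 3/16}; ERR = 0.9375 + 0 + 0.0625 * 0.1875 / 3 ~ 0.9414
        System.out.println(err(rel, 10));
    }
}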
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/PrecisionScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.util.Arrays; import ciir.umass.edu.learning.RankList; /** * @author vdang */ public class PrecisionScorer extends MetricScorer { public PrecisionScorer() { this.k = 10; } public PrecisionScorer(int k) { this.k = k; } public double score(RankList rl) { int count = 0; int size = k; if(k > rl.size() || k <= 0) size = rl.size(); for(int i=0;i<size;i++) { if(rl.get(i).getLabel() >= relThresh)//relevant count++; } return ((double)count)/size; } public MetricScorer clone() { return new PrecisionScorer(); } public String name() { return "P@"+k; } public double[][] swapChange(RankList rl) { int size = (rl.size() > k) ? k : rl.size(); /*int relCount = 0; for(int i=0;i<size;i++) if(rl.get(i).getLabel() > 0.0)//relevant relCount++;*/ double[][] changes = new double[rl.size()][]; for(int i=0;i<rl.size();i++) { changes[i] = new double[rl.size()]; Arrays.fill(changes[i], 0); } for(int i=0;i<size;i++) { for(int j=size;j<rl.size();j++) { int c = getBinaryRelevance(rl.get(j).getLabel()) - getBinaryRelevance(rl.get(i).getLabel()); changes[i][j] = changes[j][i] = ((float)c)/size; } } return changes; } private int getBinaryRelevance(float label) { if(label > 0.0) return 1; return 0; } }
1,833
20.833333
97
java
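PrecisionScorer reports the fraction of the top-k documents whose label is at least relThresh, falling back to the whole list when k is 0 or larger than the list. An equivalent standalone sketch (names invented for the example):

// Illustrative sketch of precision at k.
public class PrecisionDemo {
    static double precisionAtK(float[] labels, int k, float relThresh) {
        int size = (k > labels.length || k <= 0) ? labels.length : k;
        int count = 0;
        for (int i = 0; i < size; i++)
            if (labels[i] >= relThresh)          // relevant document in the top-k window
                count++;
        return ((double) count) / size;
    }
    public static void main(String[] args) {
        float[] labels = {2f, 0f, 1f, 0f, 1f};
        System.out.println(precisionAtK(labels, 5, 1f)); // 3 relevant out of 5 => 0.6
    }
}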
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/NDCGScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.utilities.Sorter; /** * @author vdang */ public class NDCGScorer extends DCGScorer { protected HashMap<String, Double> idealGains = null; public NDCGScorer() { super(); idealGains = new HashMap<String, Double>(); } public NDCGScorer(int k) { super(k); idealGains = new HashMap<String, Double>(); } public MetricScorer clone() { return new NDCGScorer(); } public void loadExternalRelevanceJudgment(String qrelFile) { // Queries with external relevance judgment will have their cached ideal // gain value overridden try { String content = ""; BufferedReader in = new BufferedReader(new InputStreamReader( new FileInputStream(qrelFile))); String lastQID = ""; List<Integer> rel = new ArrayList<Integer>(); int nQueries = 0; while ((content = in.readLine()) != null) { content = content.trim(); if (content.length() == 0) continue; String[] s = content.split(" "); String qid = s[0].trim(); // String docid = s[2].trim(); int label = Integer.parseInt(s[3].trim()); if (lastQID.compareTo("") != 0 && lastQID.compareTo(qid) != 0) { int size = (rel.size() > k) ? k : rel.size(); int[] r = new int[rel.size()]; for (int i = 0; i < rel.size(); i++) r[i] = rel.get(i); double ideal = getIdealDCG(r, size); idealGains.put(lastQID, ideal); rel.clear(); nQueries++; } lastQID = qid; rel.add(label); } if (rel.size() > 0) { int size = (rel.size() > k) ? k : rel.size(); int[] r = new int[rel.size()]; for (int i = 0; i < rel.size(); i++) r[i] = rel.get(i); double ideal = getIdealDCG(r, size); idealGains.put(lastQID, ideal); rel.clear(); nQueries++; } in.close(); System.out.println("Relevance judgment file loaded. [#q=" + nQueries + "]"); } catch (Exception ex) { System.out .println("Error in NDCGScorer::loadExternalRelevanceJudgment(): " + ex.toString()); } } /** * Compute NDCG at k. NDCG(k) = DCG(k) / DCG_{perfect}(k). Note that the * "perfect ranking" must be computed based on the whole list, not just * top-k portion of the list. */ public double score(RankList rl) { if (rl.size() == 0) return 0; int size = k; if (k > rl.size() || k <= 0) size = rl.size(); int[] rel = getRelevanceLabels(rl); double ideal = 0; Double d = idealGains.get(rl.getID()); if (d != null) ideal = d.doubleValue(); else { ideal = getIdealDCG(rel, size); idealGains.put(rl.getID(), ideal); } if (ideal <= 0.0)// I mean precisely "=" return 0.0; // double dcg = getDCG(rel, size); // // double ndcg = dcg / ideal; // System.out.println("--- ndcg: " + ndcg + ". dcg:" + dcg + ". idcg:" // + ideal); return (getDCG(rel, size) / ideal); } public double[][] swapChange(RankList rl) { int size = (rl.size() > k) ? 
k : rl.size(); // compute the ideal ndcg int[] rel = getRelevanceLabels(rl); double ideal = 0; Double d = idealGains.get(rl.getID()); if (d != null) ideal = d.doubleValue(); else { ideal = getIdealDCG(rel, size); // idealGains.put(rl.getID(), ideal);//DO *NOT* do caching here. // It's not thread-safe. } double[][] changes = new double[rl.size()][]; for (int i = 0; i < rl.size(); i++) { changes[i] = new double[rl.size()]; Arrays.fill(changes[i], 0); } for (int i = 0; i < size; i++) for (int j = i + 1; j < rl.size(); j++) if (ideal > 0) changes[j][i] = changes[i][j] = (discount(i) - discount(j)) * (gain(rel[i]) - gain(rel[j])) / ideal; return changes; } public String name() { return "NDCG@" + k; } private double getIdealDCG(int[] rel, int topK) { int[] idx = Sorter.sort(rel, false); double dcg = 0; for (int i = 0; i < topK; i++) dcg += gain(rel[idx[i]]) * discount(i); return dcg; } }
4,607
26.105882
82
java
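NDCGScorer divides DCG@k by the ideal DCG obtained from the labels sorted in descending order, and the ideal ordering is computed over the whole list rather than only its top-k portion. A compact self-contained sketch with invented class and method names:

import java.util.Arrays;

// Illustrative sketch of NDCG@k using the same gain/discount as the scorers above.
public class NDCGDemo {
    static double gain(int rel)      { return (1 << rel) - 1; }                      // 2^rel - 1
    static double discount(int rank) { return 1.0 / (Math.log(rank + 2) / Math.log(2)); }
    static double dcg(int[] rel, int k) {
        double d = 0.0;
        for (int i = 0; i < Math.min(k, rel.length); i++)
            d += gain(rel[i]) * discount(i);
        return d;
    }
    static double idealDcg(int[] rel, int k) {
        int[] sorted = rel.clone();
        Arrays.sort(sorted);                                 // ascending
        double d = 0.0;
        for (int i = 0; i < Math.min(k, rel.length); i++)
            d += gain(sorted[sorted.length - 1 - i]) * discount(i);  // walk down from the largest label
        return d;
    }
    static double ndcg(int[] rel, int k) {
        double idcg = idealDcg(rel, k);
        return idcg > 0 ? dcg(rel, k) / idcg : 0.0;
    }
    public static void main(String[] args) {
        int[] rel = {1, 3, 0, 2};
        // DCG ~ 6.709, ideal DCG (for order 3,2,1,0) ~ 9.393, so NDCG ~ 0.714
        System.out.println(ndcg(rel, 4));
    }
}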
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/MetricScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.util.List; import ciir.umass.edu.learning.RankList; /** * @author vdang * A generic retrieval measure computation interface. */ public class MetricScorer { protected int k = 10; protected float relThresh=1; public MetricScorer() { } public void setK(int k) { this.k = k; } public void setThresh(float k) { this.relThresh = k; } public int getK() { return k; } public void loadExternalRelevanceJudgment(String qrelFile) { } public double score(List<RankList> rl) { double score = 0.0; for(int i=0;i<rl.size();i++) score += score(rl.get(i)); return score/rl.size(); } protected int[] getRelevanceLabels(RankList rl) { int[] rel = new int[rl.size()]; for(int i=0;i<rl.size();i++) rel[i] = (int)rl.get(i).getLabel(); return rel; } /** * MUST BE OVER-RIDDEN * @param rl * @return */ public double score(RankList rl) { return 0.0; } public MetricScorer clone() { return null; } public String name() { return ""; } public double[][] swapChange(RankList rl) { return null; } }
1,581
18.060241
82
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/METRIC.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; public enum METRIC { MAP, NDCG, DCG, Precision, Reciprocal, Best, ERR }
576
37.466667
82
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/APScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.Arrays; import java.util.HashMap; import ciir.umass.edu.learning.RankList; /** * @author vdang * This class implements MAP (Mean Average Precision) */ public class APScorer extends MetricScorer { //This class computes MAP from the *WHOLE* ranked list. "K" will be completely ignored. //The reason is, if you want MAP@10, you really should be using NDCG@10 or ERR@10 instead. public HashMap<String, Integer> relDocCount = null; public APScorer() { this.k = 0;//consider the whole list } public MetricScorer clone() { return new APScorer(); } public void loadExternalRelevanceJudgment(String qrelFile) { relDocCount = new HashMap<String, Integer>(); try { String content = ""; BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(qrelFile))); String lastQID = ""; int rdCount = 0;//relevant document count (per query) while((content = in.readLine()) != null) { content = content.trim(); if(content.length() == 0) continue; String[] s = content.split(" "); String qid = s[0].trim(); //String docid = s[2].trim(); int label = Integer.parseInt(s[3].trim()); if(lastQID.compareTo("")!=0 && lastQID.compareTo(qid)!=0) { relDocCount.put(lastQID, rdCount); rdCount = 0; } lastQID = qid; if(label > 0) rdCount++; } relDocCount.put(lastQID, rdCount); in.close(); System.out.println("Relevance judgment file loaded. [#q=" + relDocCount.keySet().size() + "]"); } catch(Exception ex) { System.out.println("Error in APScorer::loadExternalRelevanceJudgment(): " + ex.toString()); } } /** * Compute Average Precision (AP) of the list. AP of a list is the average of precision evaluated at ranks where a relevant document * is observed. * @return AP of the list. 
*/ public double score(RankList rl) { double ap = 0.0; int count = 0; for(int i=0;i<rl.size();i++) { if(rl.get(i).getLabel() > relThresh)//relevant { count++; ap += ((double)count)/(i+1); } } int rdCount = 0; if(relDocCount != null) { Integer it = relDocCount.get(rl.getID()); if(it != null) rdCount = it.intValue(); } else //no qrel-file specified, we can only use the #relevant-docs in the training file rdCount = count; if(rdCount==0) return 0.0; return ap / rdCount; } public String name() { return "MAP"; } public double[][] swapChange(RankList rl) { //NOTE: Compute swap-change *IGNORING* K (consider the entire ranked list) int[] relCount = new int[rl.size()]; int[] labels = new int[rl.size()]; int count = 0; for(int i=0;i<rl.size();i++) { if(rl.get(i).getLabel() > 0)//relevant { labels[i] = 1; count++; } else labels[i] = 0; relCount[i] = count; } int rdCount = 0;//total number of relevant documents if(relDocCount != null)//if an external qrels file is specified { Integer it = relDocCount.get(rl.getID()); if(it != null) rdCount = it.intValue(); } else rdCount = count; double[][] changes = new double[rl.size()][]; for(int i=0;i<rl.size();i++) { changes[i] = new double[rl.size()]; Arrays.fill(changes[i], 0); } if(rdCount == 0 || count == 0) return changes;//all "0" for(int i=0;i<rl.size()-1;i++) { for(int j=i+1;j<rl.size();j++) { double change = 0; if(labels[i] != labels[j]) { int diff = labels[j]-labels[i]; change += ((double)((relCount[i]+diff)*labels[j] - relCount[i]*labels[i])) / (i+1); for(int k=i+1;k<=j-1;k++) if(labels[k] > 0) change += ((double)diff) / (k+1); change += ((double)(-relCount[j]*diff)) / (j+1); //It is equivalent to: change += ((double)(relCount[j]*labels[i] - relCount[j]*labels[j])) / (j+1); } changes[j][i] = changes[i][j] = change/rdCount; } } return changes; } }
4,496
25.928144
134
java
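APScorer averages precision at every rank that holds a relevant document and divides by the total number of relevant documents, taken from the list itself when no external qrel file is loaded. A standalone sketch of that computation (names invented for the example):

// Illustrative sketch of Average Precision over a single ranked list.
public class AveragePrecisionDemo {
    static double averagePrecision(float[] labels, float relThresh) {
        double ap = 0.0;
        int seen = 0;
        for (int i = 0; i < labels.length; i++) {
            if (labels[i] > relThresh) {          // relevant document at rank i+1
                seen++;
                ap += ((double) seen) / (i + 1);  // precision at this rank
            }
        }
        return seen == 0 ? 0.0 : ap / seen;
    }
    public static void main(String[] args) {
        float[] labels = {2f, 0f, 3f, 0f};
        // relevant at ranks 1 and 3 => (1/1 + 2/3) / 2 ~ 0.833
        System.out.println(averagePrecision(labels, 1f));
    }
}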
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/MetricScorerFactory.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.util.HashMap; /** * @author vdang */ public class MetricScorerFactory { private static MetricScorer[] mFactory = new MetricScorer[]{new APScorer(), new NDCGScorer(), new DCGScorer(), new PrecisionScorer(), new ReciprocalRankScorer(), new BestAtKScorer(), new ERRScorer()}; private static HashMap<String, MetricScorer> map = new HashMap<String, MetricScorer>(); public MetricScorerFactory() { map.put("MAP", new APScorer()); map.put("NDCG", new NDCGScorer()); map.put("DCG", new DCGScorer()); map.put("P", new PrecisionScorer()); map.put("RR", new ReciprocalRankScorer()); map.put("BEST", new BestAtKScorer()); map.put("ERR", new ERRScorer()); } public MetricScorer createScorer(METRIC metric) { return mFactory[metric.ordinal() - METRIC.MAP.ordinal()].clone(); } public MetricScorer createScorer(METRIC metric, int k) { MetricScorer s = mFactory[metric.ordinal() - METRIC.MAP.ordinal()].clone(); s.setK(k); return s; } public MetricScorer createScorer(String metric)//e.g.: metric = "NDCG@5" { int k = -1; String m = ""; MetricScorer s = null; if(metric.indexOf("@") != -1) { m = metric.substring(0, metric.indexOf("@")); k = Integer.parseInt(metric.substring(metric.indexOf("@")+1)); s = map.get(m.toUpperCase()).clone(); s.setK(k); } else s = map.get(metric.toUpperCase()).clone(); return s; } public MetricScorer createScorer(String metric, float relTresh) { int k = -1; String m = ""; MetricScorer s = null; if(metric.indexOf("@") != -1) { m = metric.substring(0, metric.indexOf("@")); k = Integer.parseInt(metric.substring(metric.indexOf("@")+1)); s = map.get(m.toUpperCase()).clone(); s.setK(k); s.setThresh(relTresh); } else s = map.get(metric.toUpperCase()).clone(); return s; } }
2,313
29.853333
201
java
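A brief usage sketch of MetricScorerFactory: metric strings containing an '@' are split into a scorer name and a cut-off k, while plain names use the scorer's default. Only the factory and scorer calls below come from the code above; the surrounding class name is hypothetical.

import ciir.umass.edu.metric.MetricScorer;
import ciir.umass.edu.metric.MetricScorerFactory;

// Hypothetical usage of the metric factory defined above.
public class ScorerFactoryUsage {
    public static void main(String[] args) {
        MetricScorerFactory factory = new MetricScorerFactory();
        MetricScorer ndcg5 = factory.createScorer("NDCG@5");  // "NDCG" picks NDCGScorer, 5 becomes k
        System.out.println(ndcg5.name());                     // NDCG@5
        MetricScorer map = factory.createScorer("MAP");       // no '@': default settings (MAP ignores k)
        System.out.println(map.name());                       // MAP
    }
}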
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/metric/BestAtKScorer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.metric; import java.util.Arrays; import ciir.umass.edu.learning.RankList; /** * @author vdang */ public class BestAtKScorer extends MetricScorer { public BestAtKScorer() { this.k = 10; } public BestAtKScorer(int k) { this.k = k; } public double score(RankList rl) { return rl.get(maxToK(rl, k-1)).getLabel(); } public MetricScorer clone() { return new BestAtKScorer(); } /** * Return the position of the best object (e.g. docs with highest degree of relevance) among objects in the range [0..k] * NOTE: If you want best-at-k (i.e. best among top-k), you need maxToK(rl, k-1) * @param l The rank list. * @param k The last position of the range. * @return The index of the best object in the specified range. */ public int maxToK(RankList rl, int k) { int size = k; if(size < 0 || size > rl.size()-1) size = rl.size()-1; double max = -1.0; int max_i = 0; for(int i=0;i<=size;i++) { if(max < rl.get(i).getLabel()) { max = rl.get(i).getLabel(); max_i = i; } } return max_i; } public String name() { return "Best@"+k; } public double[][] swapChange(RankList rl) { //FIXME: not sure if this implementation is correct! int[] labels = new int[rl.size()]; int[] best = new int[rl.size()]; int max = -1; int maxVal = -1; int secondMaxVal = -1;//within top-K int maxCount = 0;//within top-K for(int i=0;i<rl.size();i++) { int v = (int)rl.get(i).getLabel(); labels[i] = v; if(maxVal < v) { if(i < k) { secondMaxVal = maxVal; maxCount = 0; } maxVal = v; max = i; } else if(maxVal == v && i < k) maxCount++; best[i] = max; } if(secondMaxVal == -1) secondMaxVal = 0; double[][] changes = new double[rl.size()][]; for(int i=0;i<rl.size();i++) { changes[i] = new double[rl.size()]; Arrays.fill(changes[i], 0); } //FIXME: THIS IS VERY *INEFFICIENT* for(int i=0;i<rl.size()-1;i++) { for(int j=i+1;j<rl.size();j++) { double change = 0; if(j < k || i >= k) change = 0; else if(labels[i] == labels[j] || labels[j] == labels[best[k-1]]) change = 0; else if(labels[j] > labels[best[k-1]]) change = labels[j] - labels[best[i]]; else if(labels[i] < labels[best[k-1]] || maxCount > 1) change = 0; else change = maxVal - Math.max(secondMaxVal, labels[j]); changes[i][j] = changes[j][i] = change; } } return changes; } }
2,952
22.624
121
java
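BestAtKScorer simply reports the highest relevance label found among the top-k results (maxToK scans the prefix for the maximum). A tiny sketch of that lookup on a plain array, with invented names:

// Illustrative sketch of the Best@k lookup.
public class BestAtKDemo {
    static float bestAtK(float[] labels, int k) {
        int size = Math.min(k, labels.length);
        float best = labels[0];
        for (int i = 1; i < size; i++)
            if (labels[i] > best)
                best = labels[i];
        return best;
    }
    public static void main(String[] args) {
        float[] labels = {1f, 0f, 3f, 2f};
        System.out.println(bestAtK(labels, 3)); // 3.0, the best label within the top 3
    }
}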
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/LinearRegRank.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.Arrays; import java.util.List; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.utilities.KeyValuePair; import ciir.umass.edu.utilities.SimpleMath; public class LinearRegRank extends Ranker { public static double lambda = 1E-10;//L2-norm regularization parameter //Local variables protected double[] weight = null; public LinearRegRank() { } public LinearRegRank(List<RankList> samples, int[] features, MetricScorer scorer) { super(samples, features, scorer); } public void init() { PRINTLN("Initializing... [Done]"); } public void learn() { PRINTLN("--------------------------------"); PRINTLN("Training starts..."); PRINTLN("--------------------------------"); PRINT("Learning the least square model... "); //closed form solution: beta = ((xTx - lambda*I)^(-1)) * (xTy) //where x is an n-by-f matrix (n=#data-points, f=#features), y is an n-element vector of relevance labels /*int nSample = 0; for(int i=0;i<samples.size();i++) nSample += samples.get(i).size();*/ int nVar = DataPoint.getFeatureCount(); double[][] xTx = new double[nVar][]; for(int i=0;i<nVar;i++) { xTx[i] = new double[nVar]; Arrays.fill(xTx[i], 0.0); } double[] xTy = new double[nVar]; Arrays.fill(xTy, 0.0); for(int s=0;s<samples.size();s++) { RankList rl = samples.get(s); for(int i=0;i<rl.size();i++) { xTy[nVar-1] += rl.get(i).getLabel(); for(int j=0;j<nVar-1;j++) { xTy[j] += rl.get(i).getFeatureValue(j+1) * rl.get(i).getLabel(); for(int k=0;k<nVar;k++) { double t = (k < nVar-1) ? 
rl.get(i).getFeatureValue(k+1) : 1f; xTx[j][k] += rl.get(i).getFeatureValue(j+1) * t; } } for(int k=0;k<nVar-1;k++) xTx[nVar-1][k] += rl.get(i).getFeatureValue(k+1); xTx[nVar-1][nVar-1] += 1f; } } if(lambda != 0.0)//regularized { for(int i=0;i<xTx.length;i++) xTx[i][i] += lambda; } weight = solve(xTx, xTy); PRINTLN("[Done]"); scoreOnTrainingData = SimpleMath.round(scorer.score(rank(samples)), 4); PRINTLN("---------------------------------"); PRINTLN("Finished sucessfully."); PRINTLN(scorer.name() + " on training data: " + scoreOnTrainingData); if(validationSamples != null) { bestScoreOnValidationData = scorer.score(rank(validationSamples)); PRINTLN(scorer.name() + " on validation data: " + SimpleMath.round(bestScoreOnValidationData, 4)); } PRINTLN("---------------------------------"); } public double eval(DataPoint p) { double score = weight[weight.length-1]; for(int i=0;i<features.length;i++) score += weight[i] * p.getFeatureValue(features[i]); return score; } public Ranker clone() { return new LinearRegRank(); } public String toString() { String output = "0:" + weight[0] + " "; for(int i=0;i<features.length;i++) output += features[i] + ":" + weight[i] + ((i==weight.length-1)?"":" "); return output; } public String model() { String output = "## " + name() + "\n"; output += "## Lambda = " + lambda + "\n"; output += toString(); return output; } public void load(String fn) { try { String content = ""; BufferedReader in = new BufferedReader( new InputStreamReader( new FileInputStream(fn), "ASCII")); KeyValuePair kvp = null; while((content = in.readLine()) != null) { content = content.trim(); if(content.length() == 0) continue; if(content.indexOf("##")==0) continue; kvp = new KeyValuePair(content); break; } in.close(); List<String> keys = kvp.keys(); List<String> values = kvp.values(); weight = new double[keys.size()]; features = new int[keys.size()-1];//weight = <weight for each feature, constant> int idx = 0; for(int i=0;i<keys.size();i++) { int fid = Integer.parseInt(keys.get(i)); if(fid > 0) { features[idx] = fid; weight[idx] = Double.parseDouble(values.get(i)); idx++; } else weight[weight.length-1] = Double.parseDouble(values.get(i)); } } catch(Exception ex) { System.out.println("Error in CoorAscent::load(): " + ex.toString()); } } public void printParameters() { PRINTLN("L2-norm regularization: lambda = " + lambda); } public String name() { return "Linear Regression"; } /** * Solve a system of linear equations Ax=B, in which A has to be a square matrix with the same length as B * @param A * @param B * @return x */ protected double[] solve(double[][] A, double[] B) { if(A.length == 0 || B.length == 0) { System.out.println("Error: some of the input arrays is empty."); System.exit(1); } if(A[0].length == 0) { System.out.println("Error: some of the input arrays is empty."); System.exit(1); } if(A.length != B.length) { System.out.println("Error: Solving Ax=B: A and B have different dimension."); System.exit(1); } //init double[][] a = new double[A.length][]; double[] b = new double[B.length]; System.arraycopy(B, 0, b, 0, B.length); for(int i=0;i<a.length;i++) { a[i] = new double[A[i].length]; if(i > 0) { if(a[i].length != a[i-1].length) { System.out.println("Error: Solving Ax=B: A is NOT a square matrix."); System.exit(1); } } System.arraycopy(A[i], 0, a[i], 0, A[i].length); } //apply the gaussian elimination process to convert the matrix A to upper triangular form double pivot = 0.0; double multiplier = 0.0; for(int j=0;j<b.length-1;j++)//loop through all columns of the matrix A { pivot = 
a[j][j]; for(int i=j+1;i<b.length;i++)//loop through all remaining rows { multiplier = a[i][j] / pivot; //i-th row = i-th row - (multiplier * j-th row) for(int k=j+1;k<b.length;k++)//loop through all remaining elements of the current row, starting at (j+1) a[i][k] -= a[j][k] * multiplier; b[i] -= b[j] * multiplier; } } //a*x=b //a is now an upper triangular matrix, now the solution x can be obtained with elementary linear algebra double[] x = new double[b.length]; int n = b.length; x[n-1] = b[n-1] / a[n-1][n-1]; for(int i=n-2;i>=0;i--)//walk back up to the first row -- we only need to care about the right to the diagonal { double val = b[i]; for(int j=i+1;j<n;j++) val -= a[i][j] * x[j]; x[i] = val / a[i][i]; } return x; } }
6,969
26.65873
112
java
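LinearRegRank fits a regularized least-squares model and solves the resulting normal equations with its solve() routine: Gaussian elimination to upper-triangular form followed by back substitution, with no row pivoting. The standalone sketch below runs the same elimination on a small 2x2 system; the class name is invented, and like the original it would fail on a zero pivot.

// Illustrative sketch of the elimination + back-substitution used by LinearRegRank.solve().
public class GaussianEliminationDemo {
    static double[] solve(double[][] a, double[] b) {
        int n = b.length;
        for (int j = 0; j < n - 1; j++) {            // eliminate column j below the diagonal
            for (int i = j + 1; i < n; i++) {
                double m = a[i][j] / a[j][j];        // multiplier for row i
                for (int k = j + 1; k < n; k++)
                    a[i][k] -= m * a[j][k];
                b[i] -= m * b[j];
            }
        }
        double[] x = new double[n];
        for (int i = n - 1; i >= 0; i--) {           // back substitution
            double val = b[i];
            for (int j = i + 1; j < n; j++)
                val -= a[i][j] * x[j];
            x[i] = val / a[i][i];
        }
        return x;
    }
    public static void main(String[] args) {
        double[][] a = { {2, 1}, {1, 3} };           // 2x + y = 5, x + 3y = 10
        double[] b = { 5, 10 };
        double[] x = solve(a, b);
        System.out.println(x[0] + " " + x[1]);       // 1.0 3.0
    }
}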
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/CoorAscent.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.Collections; import java.util.List; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.utilities.KeyValuePair; import ciir.umass.edu.utilities.MergeSorter; import ciir.umass.edu.utilities.SimpleMath; /** * @author vdang * * This class implements the linear ranking model known as Coordinate Ascent. It was proposed in this paper: * D. Metzler and W.B. Croft. Linear feature-based models for information retrieval. Information Retrieval, 10(3): 257-274, 2007. */ public class CoorAscent extends Ranker { //Parameters public static int nRestart = 5; public static int nMaxIteration = 25; public static double stepBase = 0.05; public static double stepScale = 2.0; public static double tolerance = 0.001; public static boolean regularized = true; public static double slack = 0.001;//regularized parameter //Local variables protected double[] weight = null; protected int current_feature = -1;//used only during learning protected double weight_change = -1.0;//used only during learning public CoorAscent() { } public CoorAscent(List<RankList> samples, int[] features, MetricScorer scorer) { super(samples, features, scorer); } public void init() { PRINT("Initializing... "); weight = new double[features.length]; for(int i=0;i<weight.length;i++) weight[i] = 1.0f/features.length; PRINTLN("[Done]"); } public void learn() { double[] regVector = new double[weight.length]; copy(weight, regVector);//uniform weight distribution //this holds the final best model/score double[] bestModel = null; double bestModelScore = 0.0; int[] sign = new int[]{1, -1}; PRINTLN("---------------------------"); PRINTLN("Training starts..."); PRINTLN("---------------------------"); for(int r=0;r<nRestart;r++) { PRINTLN("[+] Random restart #" + (r+1) + "/" + nRestart + "..."); int consecutive_fails = 0; //initialize weight vector for(int i=0;i<weight.length;i++) weight[i] = 1.0f/features.length; current_feature = -1; double startScore = scorer.score(rank(samples));//compute all the scores (in whatever metric specified) and store them as cache //local best (within the current restart cycle) double bestScore = startScore; double[] bestWeight = new double[weight.length]; copy(weight, bestWeight); //There must be at least one feature increasing whose weight helps while((weight.length>1&&consecutive_fails < weight.length - 1) || (weight.length==1&&consecutive_fails==0)) { PRINTLN("Shuffling features' order... [Done.]"); PRINTLN("Optimizing weight vector... 
"); PRINTLN("------------------------------"); PRINTLN(new int[]{7, 8, 7}, new String[]{"Feature", "weight", scorer.name()}); PRINTLN("------------------------------"); int[] fids = getShuffledFeatures();//contain index of elements in the variable @features //Try maximizing each feature individually for(int i=0;i<fids.length;i++) { current_feature = fids[i];//this will trigger the "else" branch in the procedure rank() double origWeight = weight[fids[i]]; double totalStep = 0; double bestTotalStep = 0; boolean succeeds = false;//whether or not we succeed in finding a better weight value for the current feature for(int s=0;s<sign.length;s++)//search by both increasing and decreasing { double step = 0.001 * sign[s]; if(origWeight != 0.0 && Math.abs(step) > 0.5 * Math.abs(origWeight)) step = stepBase * Math.abs(origWeight); totalStep = step; for(int j=0;j<nMaxIteration;j++) { double w = origWeight + totalStep; weight_change = step;//weight_change is used in the "else" branch in the procedure rank() weight[fids[i]] = w; double score = scorer.score(rank(samples)); if(regularized) { double penalty = slack * getDistance(weight, regVector); score -= penalty; //PRINTLN("Penalty: " + penalty); } if(score > bestScore)//better than the local best, replace the local best with this model { bestScore = score; bestTotalStep = totalStep; succeeds = true; String bw = ((weight[fids[i]]>0)?"+":"") + SimpleMath.round(weight[fids[i]], 4); PRINTLN(new int[]{7, 8, 7}, new String[]{features[fids[i]]+"", bw+"", SimpleMath.round(bestScore, 4)+""}); } if(j < nMaxIteration-1) { step *= stepScale; totalStep += step; } } if(succeeds) break;//no need to search the other direction (e.g. sign = '-') else if(s < sign.length-1) { weight_change = -totalStep; updateCached();//restore the cached to reflect the orig. weight for the current feature //so that we can start searching in the other direction (since the optimization in the first direction failed) weight[fids[i]] = origWeight;//restore the weight to its initial value } } if(succeeds) { weight_change = bestTotalStep - totalStep; updateCached();//restore the cached to reflect the best weight for the current feature weight[fids[i]] = origWeight + bestTotalStep; consecutive_fails = 0;//since we found a better weight value double sum = normalize(weight); scaleCached(sum); copy(weight, bestWeight); } else { consecutive_fails++; weight_change = -totalStep; updateCached();//restore the cached to reflect the orig. weight for the current feature since the optimization failed //Restore the orig. 
weight value weight[fids[i]] = origWeight; } } PRINTLN("------------------------------"); //if we haven't made much progress then quit if(bestScore - startScore < tolerance) break; } //update the (global) best model with the best model found in this round if(validationSamples != null) { current_feature = -1; bestScore = scorer.score(rank(validationSamples)); } if(bestModel == null || bestScore > bestModelScore) { bestModelScore = bestScore; bestModel = bestWeight; } } copy(bestModel, weight); current_feature = -1;//turn off the cache mode scoreOnTrainingData = SimpleMath.round(scorer.score(rank(samples)), 4); PRINTLN("---------------------------------"); PRINTLN("Finished sucessfully."); PRINTLN(scorer.name() + " on training data: " + scoreOnTrainingData); if(validationSamples != null) { bestScoreOnValidationData = scorer.score(rank(validationSamples)); PRINTLN(scorer.name() + " on validation data: " + SimpleMath.round(bestScoreOnValidationData, 4)); } PRINTLN("---------------------------------"); } public RankList rank(RankList rl) { double[] score = new double[rl.size()]; if(current_feature == -1) { for(int i=0;i<rl.size();i++) { for(int j=0;j<features.length;j++) score[i] += weight[j] * rl.get(i).getFeatureValue(features[j]); rl.get(i).setCached(score[i]);//use cache of a data point to store its score given the model at this state } } else//This branch is only active during the training process. Here we trade the "clean" codes for efficiency { for(int i=0;i<rl.size();i++) { //cached score = a_1*x_1 + a_2*x_2 + ... + a_n*x_n //a_2 ==> a'_2 //new score = cached score + (a'_2 - a_2)*x_2 ====> NO NEED TO RE-COMPUTE THE WHOLE THING score[i] = rl.get(i).getCached() + weight_change * rl.get(i).getFeatureValue(features[current_feature]); rl.get(i).setCached(score[i]); } } int[] idx = MergeSorter.sort(score, false); return new RankList(rl, idx); } public double eval(DataPoint p) { double score = 0.0; for(int i=0;i<features.length;i++) score += weight[i] * p.getFeatureValue(features[i]); return score; } public Ranker clone() { return new CoorAscent(); } public String toString() { String output = ""; for(int i=0;i<weight.length;i++) output += features[i] + ":" + weight[i] + ((i==weight.length-1)?"":" "); return output; } public String model() { String output = "## " + name() + "\n"; output += "## Restart = " + nRestart + "\n"; output += "## MaxIteration = " + nMaxIteration + "\n"; output += "## StepBase = " + stepBase + "\n"; output += "## StepScale = " + stepScale + "\n"; output += "## Tolerance = " + tolerance + "\n"; output += "## Regularized = " + regularized + "\n"; output += "## Slack = " + slack + "\n"; output += toString(); return output; } public void load(String fn) { try { String content = ""; BufferedReader in = new BufferedReader( new InputStreamReader( new FileInputStream(fn), "ASCII")); KeyValuePair kvp = null; while((content = in.readLine()) != null) { content = content.trim(); if(content.length() == 0) continue; if(content.indexOf("##")==0) continue; kvp = new KeyValuePair(content); break; } in.close(); List<String> keys = kvp.keys(); List<String> values = kvp.values(); weight = new double[keys.size()]; features = new int[keys.size()]; for(int i=0;i<keys.size();i++) { features[i] = Integer.parseInt(keys.get(i)); weight[i] = Double.parseDouble(values.get(i)); } } catch(Exception ex) { System.out.println("Error in CoorAscent::load(): " + ex.toString()); } } public void printParameters() { PRINTLN("No. of random restarts: " + nRestart); PRINTLN("No. 
of iterations to search in each direction: " + nMaxIteration); PRINTLN("Tolerance: " + tolerance); if(regularized) PRINTLN("Reg. param: " + slack); else PRINTLN("Regularization: No"); } public String name() { return "Coordinate Ascent"; } private void updateCached() { for(int j=0;j<samples.size();j++) { RankList rl = samples.get(j); for(int i=0;i<rl.size();i++) { //cached score = a_1*x_1 + a_2*x_2 + ... + a_n*x_n //a_2 ==> a'_2 //new score = cached score + (a'_2 - a_2)*x_2 ====> NO NEED TO RE-COMPUTE THE WHOLE THING double score = rl.get(i).getCached() + weight_change * rl.get(i).getFeatureValue(features[current_feature]); rl.get(i).setCached(score); } } } private void scaleCached(double sum) { for(int j=0;j<samples.size();j++) { RankList rl = samples.get(j); for(int i=0;i<rl.size();i++) rl.get(i).setCached(rl.get(i).getCached()/sum); } } private int[] getShuffledFeatures() { int[] fids = new int[features.length]; List<Integer> l = new ArrayList<Integer>(); for(int i=0;i<features.length;i++) l.add(i); Collections.shuffle(l); for(int i=0;i<l.size();i++) fids[i] = l.get(i); return fids; } private double getDistance(double[] w1, double[] w2) { //ASSERT w1.length = w2.length double s1 = 0.0; double s2 = 0.0; for(int i=0;i<w1.length;i++) { s1 += Math.abs(w1[i]); s2 += Math.abs(w2[i]); } double dist = 0.0; for(int i=0;i<w1.length;i++) { double t = w1[i]/s1 - w2[i]/s2; dist += t*t; } return (double)Math.sqrt(dist); } private double normalize(double[] weights) { double sum = 0.0; for(int j=0;j<weights.length;j++) sum += Math.abs(weights[j]); if(sum > 0) { for(int j=0;j<weights.length;j++) weights[j] /= sum; } else { sum = 1; for(int j=0;j<weights.length;j++) weights[j] = 1.0/weights.length; } return sum; } public void copyModel(CoorAscent ranker) { weight = new double[features.length]; if(ranker.weight.length != weight.length) { System.out.println("These two models use different feature set!!"); System.exit(1); } copy(ranker.weight, weight); PRINTLN("Model loaded."); } public double distance(CoorAscent ca) { return getDistance(weight, ca.weight); } }
12,474
29.802469
130
java
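While searching along one coordinate, CoorAscent avoids recomputing full dot products: every document caches its current model score, and after a weight change only that feature's contribution is added (new score = cached score + weight_change * feature value). A tiny numeric sketch of that update, with invented names:

// Illustrative sketch of the cached-score update used during coordinate search.
public class IncrementalScoreDemo {
    public static void main(String[] args) {
        double[] w = {0.5, 0.5};
        double[] x = {2.0, 4.0};
        double cached = w[0] * x[0] + w[1] * x[1];        // full dot product once: 3.0
        double weightChange = 0.2;                        // move w[1] from 0.5 to 0.7
        double incremental = cached + weightChange * x[1]; // cheap update
        w[1] += weightChange;
        double full = w[0] * x[0] + w[1] * x[1];          // recompute from scratch for comparison
        System.out.println(incremental + " == " + full);  // 3.8 == 3.8
    }
}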
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/SparseDataPoint.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.util.Arrays; import java.util.Set; /** * Implements a sparse data point using a compressed sparse row data structure * * @author Siddhartha Bagaria */ public class SparseDataPoint extends DataPoint { // Access pattern of the feature values private enum accessPattern { SEQUENTIAL, RANDOM }; private static accessPattern searchPattern = accessPattern.RANDOM; // Profiling variables // private static int numCalls = 0; // private static float avgTime = 0; // The feature ids for known values int fIds[]; // The feature values for corresponding Ids // float fVals[]; //moved to the parent class // Internal search optimizers. Currently unused. int lastMinId = -1; int lastMinPos = -1; public SparseDataPoint(String text) { super(text); } public SparseDataPoint(String text, Set<Integer> listFeatures) { super(text, listFeatures); } public SparseDataPoint(SparseDataPoint dp) { label = dp.label; this.user_id = dp.user_id; description = dp.description; cached = dp.cached; fIds = new int[dp.fIds.length]; fVals = new float[dp.fVals.length]; System.arraycopy(dp.fIds, 0, fIds, 0, dp.fIds.length); System.arraycopy(dp.fVals, 0, fVals, 0, dp.fVals.length); } private int locate(int fid) { if (searchPattern == accessPattern.SEQUENTIAL) { if (lastMinId > fid) { lastMinId = -1; lastMinPos = -1; } while (lastMinPos < knownFeatures && lastMinId < fid) lastMinId = fIds[++lastMinPos]; if (lastMinId == fid) return lastMinPos; } else if (searchPattern == accessPattern.RANDOM) { int pos = Arrays.binarySearch(fIds, fid); if (pos >= 0) return pos; } else System.err .println("Invalid search pattern specified for sparse data points."); return -1; } @Override public float getFeatureValue(int fid) { // long time = System.nanoTime(); if (fid <= 0 || fid > getFeatureCount()) { return 0; // System.out.println("Error in SparseDataPoint::getFeatureValue(): requesting invalid feature, fid=" // + fid); // System.exit(1); } int pos = locate(fid); // long completedIn = System.nanoTime() - time; // avgTime = (avgTime*numCalls + completedIn)/(++numCalls); // System.out.println("getFeatureValue average time: "+avgTime); if (pos >= 0) return fVals[pos]; return 0; // Should ideally be returning unknown? 
} @Override public void setFeatureValue(int fid, float fval) { if (fid <= 0 || fid > getFeatureCount()) { System.out .println("Error in SparseDataPoint::setFeatureValue(): feature (id=" + fid + ") out of range."); System.exit(1); } int pos = locate(fid); if (pos >= 0) fVals[pos] = fval; else { System.err .println("Error in SparseDataPoint::setFeatureValue(): feature (id=" + fid + ") not found."); System.exit(1); } } @Override public void setFeatureVector(float[] dfVals) { fIds = new int[knownFeatures]; fVals = new float[knownFeatures]; int pos = 0; for (int i = 1; i < dfVals.length; i++) { if (!isUnknown(dfVals[i])) { fIds[pos] = i; fVals[pos] = dfVals[i]; pos++; } } assert (pos == knownFeatures); } @Override public float[] getFeatureVector() { System.out.println("call to get feature vector method"); if (knownFeatures > 0) { float[] dfVals = new float[fIds[knownFeatures -1]]; //knownFeatures-1 Arrays.fill(dfVals, UNKNOWN); for (int i = 0; i < knownFeatures; i++) dfVals[fIds[i]] = fVals[i]; return dfVals; } else return new float[0]; } }
4,028
26.040268
104
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/DataPoint.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.util.Arrays; import java.util.Set; /** * @author vdang * * This class implements objects to be ranked. In the context of * Information retrieval, each instance is a query-url pair represented * by a n-dimentional feature vector. It should be general enough for * other ranking applications as well (not limited to just IR I hope). */ public abstract class DataPoint { int MAX_FEATURE = 100; public static int FEATURE_INCREASE = 20; protected static int featureCount = 0; protected static float UNKNOWN = Float.NaN; // attributes protected float label = 0.0f;// [ground truth] the real label of the data // point (e.g. its degree of relevance // according to the relevance judgment) protected String user_id = "";// id of this data point (e.g. query-id) protected String item_id = "";// id of this data point (e.g. query-id) protected String description = ""; protected float[] fVals = null; // fVals[0] is un-used. Feature id MUST // start from 1 // helper attributes protected int knownFeatures; // number of known feature values // internal to learning procedures protected double cached = -1.0;// the latest evaluation score of the learned // model on this data point protected static boolean isUnknown(float fVal) { return Float.isNaN(fVal); } protected static String getKey(String pair) { return pair.substring(0, pair.indexOf(":")); } protected static String getValue(String pair) { return pair.substring(pair.lastIndexOf(":") + 1); } /** * Parse the given line of text to construct a dense array of feature values * and reset metadata. * * @param text * @return Dense array of feature values */ protected float[] parse(String text, Set<Integer> listFeatures) { this.MAX_FEATURE = listFeatures.size() + 1; float[] fVals = new float[MAX_FEATURE];// Arrays.fill(fVals, UNKNOWN); int lastFeature = -1; try { int idx = text.indexOf("# "); if (idx != -1) { // int uid, iid; description = text.substring(idx + 1); String[] vals = description.split("\t"); // #user_id item_id // uid = Integer.parseInt(vals[0].trim()); // extract the // item_id // // if (uid == 307) // System.out.println(text); this.item_id = vals[1].trim(); // extract the item_id // iid = Integer.parseInt(this.item_id); text = text.substring(0, idx).trim();// remove the comment part // at the end of the // line } else { System.out.println("No text description! No itemId! " + text); System.exit(1); } String[] fs = text.split(" "); label = Float.parseFloat(fs[0]); if (label < 0) { System.out .println("Relevance label cannot be negative. 
System will now exit."); System.exit(1); } user_id = getValue(fs[1]); String key = ""; String val = ""; for (int i = 2; i < fs.length; i++) { key = getKey(fs[i]); val = getValue(fs[i]); int f = Integer.parseInt(key); if (listFeatures.contains(f)) { knownFeatures++; if (f >= MAX_FEATURE) { while (f >= MAX_FEATURE) MAX_FEATURE += FEATURE_INCREASE; float[] tmp = new float[MAX_FEATURE]; System.arraycopy(fVals, 0, tmp, 0, fVals.length); Arrays.fill(tmp, fVals.length, MAX_FEATURE, UNKNOWN); fVals = tmp; } fVals[f] = Float.parseFloat(val); if (f > featureCount)// #feature will be the max_id observed featureCount = f; if (f > lastFeature)// note that lastFeature is the max_id // observed for this current data point, // whereas featureCount is the max_id // observed on the entire dataset lastFeature = f; } } // shrink fVals float[] tmp = new float[lastFeature + 1]; System.arraycopy(fVals, 0, tmp, 0, lastFeature + 1); fVals = tmp; } catch (Exception ex) { System.out.println("Error in DataPoint::parse(): " + ex.toString()); System.out.println(text); System.exit(1); } return fVals; } /** * Parse the given line of text to construct a dense array of feature values * and reset metadata. * * @param text * @return Dense array of feature values */ protected float[] parse(String text) { float[] fVals = new float[MAX_FEATURE]; Arrays.fill(fVals, UNKNOWN); int lastFeature = -1; try { // int idx = text.indexOf("# "); // if (idx != -1) { // // int uid, iid; // description = text.substring(idx + 1); // String[] vals = description.split("\t"); // #user_id item_id // // // uid = Integer.parseInt(vals[0].trim()); // extract the // // item_id // // // // if (uid == 307) // // System.out.println(text); // // this.item_id = vals[1].trim(); // extract the item_id // // // iid = Integer.parseInt(this.item_id); // // text = text.substring(0, idx).trim();// remove the comment part // // at the end of the // // line // // } else { // // System.out.println("No text description! No itemId! " + text); // System.exit(1); // } String[] fs = text.split(" "); label = Float.parseFloat(fs[0]); if (label < 0) { System.out .println("Relevance label cannot be negative. 
System will now exit."); System.exit(1); } user_id = getValue(fs[1]); // item_id = fs[2].trim(); // extract the item_id String key = ""; String val = ""; key = getKey(fs[2]); val = getValue(fs[2]); if (key.compareTo("1") == 0) item_id = val; for (int i = 3; i < fs.length; i++) { knownFeatures++; key = getKey(fs[i]) ; val = getValue(fs[i]); int f = Integer.parseInt(key) -1 ; //because of the itemID if (f >= MAX_FEATURE) { while (f >= MAX_FEATURE) MAX_FEATURE += FEATURE_INCREASE; float[] tmp = new float[MAX_FEATURE]; System.arraycopy(fVals, 0, tmp, 0, fVals.length); Arrays.fill(tmp, fVals.length, MAX_FEATURE, UNKNOWN); fVals = tmp; } fVals[f] = Float.parseFloat(val); if (f > featureCount)// #feature will be the max_id observed featureCount = f; if (f > lastFeature)// note that lastFeature is the max_id // observed for this current data point, // whereas featureCount is the max_id // observed on the entire dataset lastFeature = f; } // shrink fVals float[] tmp = new float[lastFeature + 1]; System.arraycopy(fVals, 0, tmp, 0, lastFeature + 1); fVals = tmp; } catch (Exception ex) { System.out.println("Error in DataPoint::parse(): " + ex.toString()); System.out.println(text); System.exit(1); } return fVals; } /** * Get the value of the feature with the given feature ID * * @param fid * @return */ public abstract float getFeatureValue(int fid); /** * Set the value of the feature with the given feature ID return user_id; } * * @param fid * @param fval */ public abstract void setFeatureValue(int fid, float fval); /** * Sets the value of all features with the provided dense array of feature * values */ public abstract void setFeatureVector(float[] dfVals); /** * Gets the value of all features as a dense array of feature values. */ public abstract float[] getFeatureVector(); /** * Default constructor. No-op. */ protected DataPoint() { }; /** * The input must have the form: * * @param text */ protected DataPoint(String text) { float[] fVals = parse(text); setFeatureVector(fVals); } protected DataPoint(String text, Set<Integer> listFeatures) { float[] fVals = parse(text, listFeatures); setFeatureVector(fVals); } public String getID() { return user_id; } public void setID(String id) { this.user_id = id; } public String getItemID() { return item_id; } // public void setItemID(String id) { // this.item_id = id; // } public float getLabel() { return label; } public void setLabel(float label) { this.label = label; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public void setCached(double c) { cached = c; } public double getCached() { return cached; } public void resetCached() { cached = -100000000.0f; ; } public String toString() { float[] fVals = getFeatureVector(); String output = ((int) label) + " " + "qid:" + user_id + " "; for (int i = 1; i < fVals.length; i++) if (!isUnknown(fVals[i])) output += i + ":" + fVals[i] + ((i == fVals.length - 1) ? "" : " "); output += " " + description; return output; } public static int getFeatureCount() { return featureCount; } }
9,196
24.907042
82
java
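DataPoint.parse(String) above expects LETOR-style lines: a numeric label, a qid:<id> token, then id:value pairs, where the pair with key 1 is taken as the item id and the remaining feature ids are shifted down by one internally. The sketch below feeds one such line to DenseDataPoint (the concrete subclass appearing later in this file set); the literal label, qid, item id and feature values are invented purely for illustration.

import ciir.umass.edu.learning.DenseDataPoint;

// Hypothetical input line illustrating the format consumed by DataPoint.parse(String).
public class DataPointParseDemo {
    public static void main(String[] args) {
        String line = "2 qid:42 1:item_17 2:0.7 3:0.25";   // label, query id, item id, two features
        DenseDataPoint dp = new DenseDataPoint(line);
        System.out.println(dp.getLabel() + " " + dp.getID() + " " + dp.getItemID()); // 2.0 42 item_17
        // ids 2 and 3 become internal features 1 and 2 because id 1 was consumed as the item id
        System.out.println(dp.getFeatureValue(1) + " " + dp.getFeatureValue(2));     // 0.7 0.25
    }
}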
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/Ranker.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.List; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.utilities.FileUtils; import ciir.umass.edu.utilities.MergeSorter; /** * @author vdang * * This class implements the generic Ranker interface. Each ranking * algorithm implemented has to extend this class. */ public class Ranker { public static boolean verbose = true; protected List<RankList> samples = new ArrayList<RankList>();// training // samples protected int[] features = null; protected MetricScorer scorer = null; protected double scoreOnTrainingData = 0.0; protected double bestScoreOnValidationData = 0.0; protected List<RankList> validationSamples = null; protected Ranker() { } protected Ranker(List<RankList> samples, int[] features, MetricScorer scorer) { this.samples = samples; this.features = features; this.scorer = scorer; } // Utility functions public void setTrainingSet(List<RankList> samples) { this.samples = samples; } public void setFeatures(int[] features) { this.features = features; } public void setValidationSet(List<RankList> samples) { this.validationSamples = samples; } public void setMetricScorer(MetricScorer scorer) { this.scorer = scorer; } public double getScoreOnTrainingData() { return scoreOnTrainingData; } public double getScoreOnValidationData() { return bestScoreOnValidationData; } public int[] getFeatures() { return features; } public double predictScore(DataPoint p) { return eval(p); } public RankList rank(RankList rl) { double[] scores = new double[rl.size()]; for (int i = 0; i < rl.size(); i++) scores[i] = eval(rl.get(i)); int[] idx = MergeSorter.sort(scores, false); return new RankList(rl, idx); } public List<RankList> rank(List<RankList> l) { List<RankList> ll = new ArrayList<RankList>(); for (int i = 0; i < l.size(); i++) ll.add(rank(l.get(i))); return ll; } public void save(String modelFile) { FileUtils.write(modelFile, "ASCII", model()); } protected void PRINT(String msg) { if (verbose) System.out.print(msg); } protected void PRINTLN(String msg) { if (verbose) System.out.println(msg); } protected void PRINT(int[] len, String[] msgs) { if (verbose) { for (int i = 0; i < msgs.length; i++) { String msg = msgs[i]; if (msg.length() > len[i]) msg = msg.substring(0, len[i]); else while (msg.length() < len[i]) msg += " "; System.out.print(msg + " | "); } } } protected void PRINTLN(int[] len, String[] msgs) { PRINT(len, msgs); PRINTLN(""); } protected void PRINTTIME() { DateFormat dateFormat = new SimpleDateFormat("MM/dd HH:mm:ss"); Date date = new Date(); System.out.println(dateFormat.format(date)); } protected void PRINT_MEMORY_USAGE() { System.out.println("***** " + Runtime.getRuntime().freeMemory() + " / " + Runtime.getRuntime().maxMemory()); } protected void copy(double[] source, double[] target) { for (int j = 0; j < source.length; j++) target[j] = source[j]; } /** * HAVE TO BE OVER-RIDDEN IN SUB-CLASSES */ public void init() { } 
public void learn() { } public double eval(DataPoint p) { return -1.0; } public Ranker clone() { return null; } public String toString() { return ""; } public String model() { return "[Not yet implemented]"; } public void load(String fn) { } public void printParameters() { } public String name() { return ""; } }
4,153
21.333333
82
java
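To illustrate how the Ranker base class above is meant to be extended, here is a minimal, hypothetical subclass (not part of the library). The class name SumRanker and its scoring rule, an unweighted sum of the selected features, are assumptions made purely for demonstration; everything else uses only the protected fields and methods defined in Ranker.

package ciir.umass.edu.learning;

import java.util.List;

import ciir.umass.edu.metric.MetricScorer;

public class SumRanker extends Ranker {
	public SumRanker() { }
	public SumRanker(List<RankList> samples, int[] features, MetricScorer scorer) {
		super(samples, features, scorer);
	}
	public void init()  { PRINTLN("Initializing SumRanker... [Done]"); }
	public void learn() {
		// Nothing to fit; just record the training score the same way the real rankers do.
		scoreOnTrainingData = scorer.score(rank(samples));
	}
	public double eval(DataPoint p) {
		double s = 0;
		for (int i = 0; i < features.length; i++)
			s += p.getFeatureValue(features[i]); // placeholder scoring rule: unweighted feature sum
		return s;
	}
	public Ranker clone()  { return new SumRanker(); }
	public String name()   { return "SumRanker"; }
	public String model()  { return "## " + name() + "\n"; }
}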
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/DenseDataPoint.java
package ciir.umass.edu.learning;

public class DenseDataPoint extends DataPoint {

	public DenseDataPoint(String text) {
		super(text);
	}

	public DenseDataPoint(DenseDataPoint dp) {
		label = dp.label;
		this.user_id = dp.user_id;
		description = dp.description;
		cached = dp.cached;
		fVals = new float[dp.fVals.length];
		System.arraycopy(dp.fVals, 0, fVals, 0, dp.fVals.length);
	}

	@Override
	public float getFeatureValue(int fid) {
		if(fid <= 0 || fid >= fVals.length)
		{
			System.out.println("Error in DataPoint::getFeatureValue(): requesting unspecified feature, fid=" + fid);
			System.out.println("System will now exit.");
			System.exit(1);
		}
		if(isUnknown(fVals[fid]))//value for unspecified feature is 0
			return 0;
		return fVals[fid];
	}

	@Override
	public void setFeatureValue(int fid, float fval) {
		if(fid <= 0 || fid >= fVals.length)
		{
			System.out.println("Error in DataPoint::setFeatureValue(): feature (id=" + fid + ") not found.");
			System.exit(1);
		}
		fVals[fid] = fval;
	}

	@Override
	public void setFeatureVector(float[] dfVals) {
		//fVals = new float[dfVals.length];
		//System.arraycopy(dfVals, 0, fVals, 0, dfVals.length);
		fVals = dfVals;
	}

	@Override
	public float[] getFeatureVector() {
		return fVals;
	}
}
1,270
21.696429
107
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/RankerTrainer.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.util.List; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.utilities.SimpleMath; /** * @author vdang * * This class is for users who want to use this library * programmatically. It provides trained rankers of different types with * respect to user-specified parameters. */ public class RankerTrainer { protected RankerFactory rf = new RankerFactory(); protected double trainingTime = 0; public Ranker train(RANKER_TYPE type, List<RankList> train, int[] features, MetricScorer scorer) { Ranker ranker = rf.createRanker(type, train, features, scorer); //ranker.verbose = false; long start = System.nanoTime(); ranker.init(); ranker.learn(); trainingTime = System.nanoTime() - start; // printTrainingTime(); return ranker; } public Ranker train(RANKER_TYPE type, List<RankList> train, List<RankList> validation, int[] features, MetricScorer scorer, boolean silent) { Ranker ranker = rf.createRanker(type, train, features, scorer); ranker.verbose = !silent; ranker.setValidationSet(validation); long start = System.nanoTime(); ranker.init(); ranker.learn(); trainingTime = System.nanoTime() - start; // printTrainingTime(); return ranker; } public double getTrainingTime() { return trainingTime; } public void printTrainingTime() { System.out.println("Training time: " + SimpleMath.round((trainingTime) / 1e9, 2) + " seconds"); } }
1,961
28.727273
84
java
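A hedged usage sketch for the trainer above. How the training lists, feature ids and metric scorer are produced is outside this snippet: trainData, featureIds and scorer are assumed to be prepared by the caller, and the output file name is an arbitrary placeholder.

import java.util.List;

import ciir.umass.edu.learning.*;
import ciir.umass.edu.metric.MetricScorer;

public class TrainingExample {
	public static Ranker trainLambdaMART(List<RankList> trainData, int[] featureIds, MetricScorer scorer) {
		RankerTrainer trainer = new RankerTrainer();
		Ranker ranker = trainer.train(RANKER_TYPE.LAMBDAMART, trainData, featureIds, scorer);
		trainer.printTrainingTime();          // wall-clock training time in seconds
		ranker.save("lambdamart.model");      // persists the model text produced by Ranker.model()
		return ranker;
	}
}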
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/RankerFactory.java
/*===============================================================================
 * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved.
 *
 * Use of the RankLib package is subject to the terms of the software license set
 * forth in the LICENSE file included with this software, and also available at
 * http://people.cs.umass.edu/~vdang/ranklib_license.html
 *===============================================================================
 */
package ciir.umass.edu.learning;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.List;

import ciir.umass.edu.learning.boosting.AdaRank;
import ciir.umass.edu.learning.boosting.RankBoost;
import ciir.umass.edu.learning.tree.MART;
import ciir.umass.edu.learning.tree.LambdaMART;
import ciir.umass.edu.learning.tree.RFRanker;
import ciir.umass.edu.metric.MetricScorer;

/**
 * @author vdang
 *
 * This class implements the Ranker factory. All ranking algorithms implemented have to be recognized in this class.
 */
public class RankerFactory {

	protected Ranker[] rFactory = new Ranker[]{new MART(), new RankBoost(), new AdaRank(), new CoorAscent(), new LambdaMART(), new RFRanker(), new LinearRegRank()};
	protected static HashMap<String, RANKER_TYPE> map = new HashMap<String, RANKER_TYPE>();

	public RankerFactory()
	{
		map.put(createRanker(RANKER_TYPE.MART).name().toUpperCase(), RANKER_TYPE.MART);
		map.put(createRanker(RANKER_TYPE.RANKBOOST).name().toUpperCase(), RANKER_TYPE.RANKBOOST);
		map.put(createRanker(RANKER_TYPE.ADARANK).name().toUpperCase(), RANKER_TYPE.ADARANK);
		map.put(createRanker(RANKER_TYPE.COOR_ASCENT).name().toUpperCase(), RANKER_TYPE.COOR_ASCENT);
		map.put(createRanker(RANKER_TYPE.LAMBDAMART).name().toUpperCase(), RANKER_TYPE.LAMBDAMART);
		map.put(createRanker(RANKER_TYPE.RANDOM_FOREST).name().toUpperCase(), RANKER_TYPE.RANDOM_FOREST);
		map.put(createRanker(RANKER_TYPE.LINEAR_REGRESSION).name().toUpperCase(), RANKER_TYPE.LINEAR_REGRESSION);
	}

	public Ranker createRanker(RANKER_TYPE type)
	{
		Ranker r = rFactory[type.ordinal() - RANKER_TYPE.MART.ordinal()].clone();
		return r;
	}

	public Ranker createRanker(RANKER_TYPE type, List<RankList> samples, int[] features, MetricScorer scorer)
	{
		Ranker r = createRanker(type);
		r.setTrainingSet(samples);
		r.setFeatures(features);
		r.setMetricScorer(scorer);
		return r;
	}

	@SuppressWarnings("unchecked")
	public Ranker createRanker(String className)
	{
		Ranker r = null;
		try {
			Class c = Class.forName(className);
			r = (Ranker) c.newInstance();
		} catch (ClassNotFoundException e) {
			System.out.println("Could not find the class \"" + className + "\" you specified. Make sure the jar library is in your classpath.");
			e.printStackTrace();
			System.exit(1);
		} catch (InstantiationException e) {
			System.out.println("Cannot create objects from the class \"" + className + "\" you specified.");
			e.printStackTrace();
			System.exit(1);
		} catch (IllegalAccessException e) {
			System.out.println("The class \"" + className + "\" does not implement the Ranker interface.");
			e.printStackTrace();
			System.exit(1);
		}
		return r;
	}

	public Ranker createRanker(String className, List<RankList> samples, int[] features, MetricScorer scorer)
	{
		Ranker r = createRanker(className);
		r.setTrainingSet(samples);
		r.setFeatures(features);
		r.setMetricScorer(scorer);
		return r;
	}

	public Ranker loadRanker(String modelFile)
	{
		Ranker r = null;
		try {
			BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(modelFile), "ASCII"));
			String content = in.readLine();//read the first line to get the name of the ranking algorithm
			in.close();
			content = content.replace("## ", "").trim();
			System.out.println("Model:\t\t" + content);
			r = createRanker(map.get(content.toUpperCase()));
			r.load(modelFile);
		}
		catch(Exception ex)
		{
			System.out.println("Error in RankerFactory.load(): " + ex.toString());
			System.exit(1);
		}
		return r;
	}
}
4,073
35.702703
161
java
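The factory above is also the entry point for loading a persisted model. A small sketch, assuming a model file previously written by Ranker.save(); the file name is a placeholder.

import ciir.umass.edu.learning.RankList;
import ciir.umass.edu.learning.Ranker;
import ciir.umass.edu.learning.RankerFactory;

public class LoadExample {
	public static RankList rerank(RankList candidates) {
		RankerFactory rf = new RankerFactory();
		// loadRanker() reads the leading "## <name>" header line of the model file to pick the right Ranker subclass.
		Ranker ranker = rf.loadRanker("lambdamart.model");
		return ranker.rank(candidates);
	}
}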
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/Combiner.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.io.BufferedWriter; import java.io.FileOutputStream; import java.io.OutputStreamWriter; import ciir.umass.edu.learning.tree.Ensemble; import ciir.umass.edu.learning.tree.RFRanker; import ciir.umass.edu.utilities.FileUtils; public class Combiner { public static void main(String[] args) { Combiner c = new Combiner(); c.combine(args[0], args[1]); } public void combine(String directory, String outputFile) { RankerFactory rf = new RankerFactory(); String[] fns = FileUtils.getAllFiles(directory); BufferedWriter out = null; try{ out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputFile), "ASCII")); out.write("## " + (new RFRanker()).name() + "\n"); for(int i=0;i<fns.length;i++) { if(fns[i].indexOf(".progress") != -1) continue; String fn = directory + fns[i]; RFRanker r = (RFRanker)rf.loadRanker(fn); Ensemble en = r.getEnsembles()[0]; out.write(en.toString()); } out.close(); } catch(Exception e) { System.out.println("Error in Combiner::combine(): " + e.toString()); } } }
1,596
30.313725
95
java
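A brief sketch of invoking the combiner above programmatically; the directory and output file names are placeholders. Note that combine() concatenates the directory string and each file name directly, so the directory argument should end with a path separator.

import ciir.umass.edu.learning.Combiner;

public class CombineExample {
	public static void main(String[] args) {
		// Merge the per-bag RFRanker models found in "bags/" into a single model file.
		new Combiner().combine("bags/", "combined-forest.model");
	}
}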
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/RANKER_TYPE.java
/*===============================================================================
 * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved.
 *
 * Use of the RankLib package is subject to the terms of the software license set
 * forth in the LICENSE file included with this software, and also available at
 * http://people.cs.umass.edu/~vdang/ranklib_license.html
 *===============================================================================
 */
package ciir.umass.edu.learning;

public enum RANKER_TYPE {
	MART, RANKBOOST, ADARANK, COOR_ASCENT, LAMBDAMART, RANDOM_FOREST, LINEAR_REGRESSION
}
620
40.4
86
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/RankList.java
/*===============================================================================
 * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved.
 *
 * Use of the RankLib package is subject to the terms of the software license set
 * forth in the LICENSE file included with this software, and also available at
 * http://people.cs.umass.edu/~vdang/ranklib_license.html
 *===============================================================================
 */
package ciir.umass.edu.learning;

import java.util.List;

import ciir.umass.edu.utilities.Sorter;

/**
 * @author vdang
 *
 * This class implements the list of objects (each of which is a DataPoint) to be ranked.
 */
public class RankList {

	protected DataPoint[] rl = null;

	public RankList(List<DataPoint> rl)
	{
		this.rl = new DataPoint[rl.size()];
		for(int i=0;i<rl.size();i++)
			this.rl[i] = rl.get(i);
	}

	public RankList(RankList rl)
	{
		this.rl = new DataPoint[rl.size()];
		for(int i=0;i<rl.size();i++)
			this.rl[i] = rl.get(i);
	}

	public RankList(RankList rl, int[] idx)
	{
		this.rl = new DataPoint[rl.size()];
		for(int i=0;i<idx.length;i++)
			this.rl[i] = rl.get(idx[i]);
	}

	public RankList(RankList rl, int[] idx, int offset)
	{
		this.rl = new DataPoint[rl.size()];
		for(int i=0;i<idx.length;i++)
			this.rl[i] = rl.get(idx[i]-offset);
	}

	public String getID()
	{
		return get(0).getID();
	}

	public int size()
	{
		return rl.length;
	}

	public DataPoint get(int k)
	{
		return rl[k];
	}

	public void set(int k, DataPoint p)
	{
		rl[k] = p;
	}

	public void addPoint(DataPoint p)
	{
		DataPoint[] tmp = new DataPoint[this.rl.length+1];
		for(int i=0;i<tmp.length-1;i++)
			tmp[i] = rl[i];
		tmp[tmp.length-1] = p;
		this.rl = tmp;
	}

	public RankList getCorrectRanking()
	{
		double[] score = new double[rl.length];
		for(int i=0;i<rl.length;i++)
			score[i] = rl[i].getLabel();
		int[] idx = Sorter.sort(score, false);
		return new RankList(this, idx);
	}

	public RankList getRanking(short fid)
	{
		double[] score = new double[rl.length];
		for(int i=0;i<rl.length;i++)
			score[i] = rl[i].getFeatureValue(fid);
		int[] idx = Sorter.sort(score, false);
		return new RankList(this, idx);
	}
}
2,214
21.602041
89
java
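A small sketch of working with the RankList container above; the docs list (the scored candidates of one query or user) is assumed to be built elsewhere.

import java.util.List;

import ciir.umass.edu.learning.DataPoint;
import ciir.umass.edu.learning.RankList;

public class RankListExample {
	public static void inspect(List<DataPoint> docs) {
		RankList rl = new RankList(docs);
		RankList ideal = rl.getCorrectRanking();   // re-orders by ground-truth label, best first
		System.out.println("list " + rl.getID() + " has " + rl.size()
				+ " points; top label after ideal ranking = " + ideal.get(0).getLabel());
	}
}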
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/Sampler.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; public class Sampler { protected List<RankList> samples = null;//bag data protected List<RankList> remains = null;//out-of-bag data public List<RankList> doSampling(List<RankList> samplingPool, float samplingRate, boolean withReplacement) { Random r = new Random(); samples = new ArrayList<RankList>(); int size = (int)(samplingRate * samplingPool.size()); if(withReplacement) { int[] used = new int[samplingPool.size()]; Arrays.fill(used, 0); for(int i=0;i<size;i++) { int selected = r.nextInt(samplingPool.size()); samples.add(samplingPool.get(selected)); used[selected] = 1; } remains = new ArrayList<RankList>(); for(int i=0;i<samplingPool.size();i++) if(used[i] == 0) remains.add(samplingPool.get(i)); } else { List<Integer> l = new ArrayList<Integer>(); for(int i=0;i<samplingPool.size();i++) l.add(i); for(int i=0;i<size;i++) { int selected = r.nextInt(l.size()); samples.add(samplingPool.get(l.get(selected))); l.remove(selected); } remains = new ArrayList<RankList>(); for(int i=0;i<l.size();i++) remains.add(samplingPool.get(l.get(i))); } return samples; } public List<RankList> getSamples() { return samples; } public List<RankList> getRemains() { return remains; } }
1,907
27.909091
107
java
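A sketch showing how the sampler above produces a bag and its out-of-bag complement for bagging; trainData is assumed to be the full list of training rank lists.

import java.util.List;

import ciir.umass.edu.learning.RankList;
import ciir.umass.edu.learning.Sampler;

public class BaggingExample {
	public static void makeBag(List<RankList> trainData) {
		Sampler sp = new Sampler();
		List<RankList> bag = sp.doSampling(trainData, 0.5f, true); // ~50% of the lists, drawn with replacement
		List<RankList> oob = sp.getRemains();                      // lists never drawn into the bag
		System.out.println("bag=" + bag.size() + ", out-of-bag=" + oob.size());
	}
}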
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/Ensemble.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.tree; import java.io.ByteArrayInputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import ciir.umass.edu.learning.DataPoint; /** * @author vdang */ public class Ensemble { protected List<RegressionTree> trees = null; protected List<Float> weights = null; protected int[] features = null; public Ensemble() { trees = new ArrayList<RegressionTree>(); weights = new ArrayList<Float>(); } public Ensemble(Ensemble e) { trees = new ArrayList<RegressionTree>(); weights = new ArrayList<Float>(); trees.addAll(e.trees); weights.addAll(e.weights); } public Ensemble(String xmlRep) { try { trees = new ArrayList<RegressionTree>(); weights = new ArrayList<Float>(); DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder dBuilder = dbFactory.newDocumentBuilder(); byte[] xmlDATA = xmlRep.getBytes(); ByteArrayInputStream in = new ByteArrayInputStream(xmlDATA); Document doc = dBuilder.parse(in); NodeList nl = doc.getElementsByTagName("tree"); HashMap<Integer, Integer> fids = new HashMap<Integer, Integer>(); for(int i=0;i<nl.getLength();i++) { Node n = nl.item(i);//each node corresponds to a "tree" (tag) //create a regression tree from this node Split root = create(n.getFirstChild(), fids); //get the weight for this tree float weight = Float.parseFloat(n.getAttributes().getNamedItem("weight").getNodeValue().toString()); //add it to the ensemble trees.add(new RegressionTree(root)); weights.add(weight); } features = new int[fids.keySet().size()]; int i = 0; for(Integer fid : fids.keySet()) features[i++] = fid; } catch(Exception ex) { System.out.println("Error in Emsemble(xmlRepresentation): " + ex.toString()); System.exit(1); } } public void add(RegressionTree tree, float weight) { trees.add(tree); weights.add(weight); } public RegressionTree getTree(int k) { return trees.get(k); } public float getWeight(int k) { return weights.get(k); } public double variance() { double var = 0; for(int i=0;i<trees.size();i++) var += trees.get(i).variance(); return var; } public void remove(int k) { trees.remove(k); weights.remove(k); } public int treeCount() { return trees.size(); } public int leafCount() { int count = 0; for(int i=0;i<trees.size();i++) count += trees.get(i).leaves().size(); return count; } public float eval(DataPoint dp) { float s = 0; for(int i=0;i<trees.size();i++) s += trees.get(i).eval(dp) * weights.get(i); return s; } public String toString() { String strRep = "<ensemble>" + "\n"; for(int i=0;i<trees.size();i++) { strRep += "\t<tree id=\"" + (i+1) + "\" weight=\"" + weights.get(i) + "\">" + "\n"; strRep += trees.get(i).toString("\t\t"); strRep += "\t</tree>" + "\n"; } strRep += "</ensemble>" + "\n"; return strRep; } public int[] getFeatures() { return features; } /** * Each input node @n corersponds to a <split> tag in the model file. 
* @param n * @return */ private Split create(Node n, HashMap<Integer, Integer> fids) { Split s = null; if(n.getFirstChild().getNodeName().compareToIgnoreCase("feature") == 0)//this is a split { NodeList nl = n.getChildNodes(); int fid = Integer.parseInt(nl.item(0).getFirstChild().getNodeValue().toString().trim());//<feature> fids.put(fid, 0); float threshold = Float.parseFloat(nl.item(1).getFirstChild().getNodeValue().toString().trim());//<threshold> s = new Split(fid, threshold, 0); s.setLeft(create(nl.item(2), fids)); s.setRight(create(nl.item(3), fids)); } else//this is a stump { float output = Float.parseFloat(n.getFirstChild().getFirstChild().getNodeValue().toString().trim()); s = new Split(); s.setOutput(output); } return s; } }
4,546
26.065476
112
java
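Since the ensemble above serializes itself to an XML string and can be rebuilt from one, here is a round-trip sketch; the original ensemble is assumed to come from a trained tree-based ranker.

import ciir.umass.edu.learning.tree.Ensemble;

public class EnsembleRoundTrip {
	public static Ensemble copyOf(Ensemble original) {
		// toString() emits the <ensemble>...</ensemble> XML that the parsing constructor accepts.
		Ensemble copy = new Ensemble(original.toString());
		System.out.println(copy.treeCount() + " trees, " + copy.leafCount() + " leaves");
		return copy;
	}
}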
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/MART.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.tree; import java.util.List; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.learning.Ranker; import ciir.umass.edu.metric.MetricScorer; /** * @author vdang * * This class implements MART for (point-wise) ranking: * J.H. Friedman. Greedy function approximation: A gradient boosting machine. * Technical Report, IMS Reitz Lecture, Stanford, 1999; see also Annals of Statistics, 2001. */ public class MART extends LambdaMART { //Parameters //Inherits *ALL* parameters from LambdaMART public MART() { } public MART(List<RankList> samples, int[] features, MetricScorer scorer) { super(samples, features, scorer); } public Ranker clone() { return new MART(); } public String name() { return "MART"; } protected void computePseudoResponses() { for(int i=0;i<martSamples.length;i++) pseudoResponses[i] = martSamples[i].getLabel() - modelScores[i]; } protected void updateTreeOutput(RegressionTree rt) { List<Split> leaves = rt.leaves(); for(int i=0;i<leaves.size();i++) { float s1 = 0.0F; Split s = leaves.get(i); int[] idx = s.getSamples(); for(int j=0;j<idx.length;j++) { int k = idx[j]; s1 += pseudoResponses[k]; } s.setOutput(s1/idx.length); } } }
1,749
25.515152
93
java
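The pseudo-response computed by MART above is the plain residual, label minus current model score, which is the negative gradient of squared error. A tiny numeric sketch with made-up labels and scores:

public class MartResidualSketch {
	public static void main(String[] args) {
		double[] labels      = {2.0, 0.0, 1.0};   // assumed relevance labels
		double[] modelScores = {0.5, 0.2, 0.9};   // assumed current model outputs
		double[] residuals   = new double[labels.length];
		for (int i = 0; i < labels.length; i++)
			residuals[i] = labels[i] - modelScores[i];   // {1.5, -0.2, 0.1} become the next tree's targets
		System.out.println(java.util.Arrays.toString(residuals));
	}
}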
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/LambdaMART.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.tree; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.learning.Ranker; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.utilities.MyThreadPool; import ciir.umass.edu.utilities.SimpleMath; import ciir.umass.edu.utilities.MergeSorter; /** * @author vdang * * This class implements LambdaMART. * Q. Wu, C.J.C. Burges, K. Svore and J. Gao. Adapting Boosting for Information Retrieval Measures. * Journal of Information Retrieval, 2007. */ public class LambdaMART extends Ranker { //Parameters public static int nTrees = 50;//1000 the number of trees public static float learningRate = 0.1F;//0.1or shrinkage public static int nThreshold = 256;//256; public static int nRoundToStopEarly = 50;//100If no performance gain on the *VALIDATION* data is observed in #rounds, stop the training process right away. public static int nTreeLeaves = 10; //10 public static int minLeafSupport = 1; //for debugging public static int gcCycle = 100; //Local variables protected float[][] thresholds = null; protected Ensemble ensemble = null; protected double[] modelScores = null;//on training data protected double[][] modelScoresOnValidation = null; protected int bestModelOnValidation = Integer.MAX_VALUE-2; //Training instances prepared for MART protected DataPoint[] martSamples = null;//Need initializing only once protected int[][] sortedIdx = null;//sorted list of samples in @martSamples by each feature -- Need initializing only once protected FeatureHistogram hist = null; protected double[] pseudoResponses = null;//different for each iteration protected double[] weights = null;//different for each iteration public LambdaMART() { } public LambdaMART(List<RankList> samples, int[] features, MetricScorer scorer) { super(samples, features, scorer); } public void init() { PRINT("Initializing... "); //initialize samples for MART int dpCount = 0; for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); dpCount += rl.size(); } int current = 0; martSamples = new DataPoint[dpCount]; modelScores = new double[dpCount]; pseudoResponses = new double[dpCount]; weights = new double[dpCount]; for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); for(int j=0;j<rl.size();j++) { martSamples[current+j] = rl.get(j); modelScores[current+j] = 0.0F; pseudoResponses[current+j] = 0.0F; weights[current+j] = 0; } current += rl.size(); } //sort (MART) samples by each feature so that we can quickly retrieve a sorted list of samples by any feature later on. 
sortedIdx = new int[features.length][]; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1)//single-thread sortSamplesByFeature(0, features.length-1); else//multi-thread { int[] partition = p.partition(features.length); for(int i=0;i<partition.length-1;i++) p.execute(new SortWorker(this, partition[i], partition[i+1]-1)); p.await(); } //Create a table of candidate thresholds (for each feature). Later on, we will select the best tree split from these candidates thresholds = new float[features.length][]; for(int f=0;f<features.length;f++) { //For this feature, keep track of the list of unique values and the max/min List<Float> values = new ArrayList<Float>(); float fmax = Float.NEGATIVE_INFINITY; float fmin = Float.MAX_VALUE; for(int i=0;i<martSamples.length;i++) { int k = sortedIdx[f][i];//get samples sorted with respect to this feature float fv = martSamples[k].getFeatureValue(features[f]); values.add(fv); if(fmax < fv) fmax = fv; if(fmin > fv) fmin = fv; //skip all samples with the same feature value int j=i+1; while(j < martSamples.length) { if(martSamples[sortedIdx[f][j]].getFeatureValue(features[f]) > fv) break; j++; } i = j-1;//[i, j] gives the range of samples with the same feature value } if(values.size() <= nThreshold || nThreshold == -1) { thresholds[f] = new float[values.size()+1]; for(int i=0;i<values.size();i++) thresholds[f][i] = values.get(i); thresholds[f][values.size()] = Float.MAX_VALUE; } else { float step = (Math.abs(fmax - fmin))/nThreshold; thresholds[f] = new float[nThreshold+1]; thresholds[f][0] = fmin; for(int j=1;j<nThreshold;j++) thresholds[f][j] = thresholds[f][j-1] + step; thresholds[f][nThreshold] = Float.MAX_VALUE; } } if(validationSamples != null) { modelScoresOnValidation = new double[validationSamples.size()][]; for(int i=0;i<validationSamples.size();i++) { modelScoresOnValidation[i] = new double[validationSamples.get(i).size()]; Arrays.fill(modelScoresOnValidation[i], 0); } } //compute the feature histogram (this is used to speed up the procedure of finding the best tree split later on) hist = new FeatureHistogram(); hist.construct(martSamples, pseudoResponses, sortedIdx, features, thresholds); //we no longer need the sorted indexes of samples sortedIdx = null; System.gc(); PRINTLN("[Done]"); } public void learn() { ensemble = new Ensemble(); PRINTLN("---------------------------------"); PRINTLN("Training starts..."); PRINTLN("---------------------------------"); PRINTLN(new int[]{7, 9, 9}, new String[]{"#iter", scorer.name()+"-T", scorer.name()+"-V"}); PRINTLN("---------------------------------"); //Start the gradient boosting process for(int m=0; m<nTrees; m++) { PRINT(new int[]{7}, new String[]{(m+1)+""}); //Compute lambdas (which act as the "pseudo responses") //Create training instances for MART: // - Each document is a training sample // - The lambda for this document serves as its training label computePseudoResponses(); //update the histogram with these training labels (the feature histogram will be used to find the best tree split) hist.update(pseudoResponses); //Fit a regression tree RegressionTree rt = new RegressionTree(nTreeLeaves, martSamples, pseudoResponses, hist, minLeafSupport); rt.fit(); //Add this tree to the ensemble (our model) ensemble.add(rt, learningRate); //update the outputs of the tree (with gamma computed using the Newton-Raphson method) updateTreeOutput(rt); //Update the model's outputs on all training samples List<Split> leaves = rt.leaves(); for(int i=0;i<leaves.size();i++) { Split s = leaves.get(i); int[] idx = 
s.getSamples(); for(int j=0;j<idx.length;j++) modelScores[idx[j]] += learningRate * s.getOutput(); } //clear references to data that is no longer used rt.clearSamples(); //beg the garbage collector to work... if(m % gcCycle == 0) System.gc();//this call is expensive. We shouldn't do it too often. //Evaluate the current model scoreOnTrainingData = computeModelScoreOnTraining(); //**** NOTE **** //The above function to evaluate the current model on the training data is equivalent to a single call: // // scoreOnTrainingData = scorer.score(rank(samples); // //However, this function is more efficient since it uses the cached outputs of the model (as opposed to re-evaluating the model //on the entire training set). PRINT(new int[]{9}, new String[]{SimpleMath.round(scoreOnTrainingData, 4) + ""}); //Evaluate the current model on the validation data (if available) if(validationSamples != null) { //Update the model's scores on all validation samples for(int i=0;i<modelScoresOnValidation.length;i++) for(int j=0;j<modelScoresOnValidation[i].length;j++) modelScoresOnValidation[i][j] += learningRate * rt.eval(validationSamples.get(i).get(j)); //again, equivalent to scoreOnValidation=scorer.score(rank(validationSamples)), but more efficient since we use the cached models' outputs double score = computeModelScoreOnValidation(); PRINT(new int[]{9}, new String[]{SimpleMath.round(score, 4) + ""}); if(score > bestScoreOnValidationData) { bestScoreOnValidationData = score; bestModelOnValidation = ensemble.treeCount()-1; } } PRINTLN(""); //Should we stop early? if(m - bestModelOnValidation > nRoundToStopEarly) break; } //Rollback to the best model observed on the validation data while(ensemble.treeCount() > bestModelOnValidation+1) ensemble.remove(ensemble.treeCount()-1); //Finishing up scoreOnTrainingData = scorer.score(rank(samples)); PRINTLN("---------------------------------"); PRINTLN("Finished sucessfully."); PRINTLN(scorer.name() + " on training data: " + SimpleMath.round(scoreOnTrainingData, 4)); if(validationSamples != null) { bestScoreOnValidationData = scorer.score(rank(validationSamples)); PRINTLN(scorer.name() + " on validation data: " + SimpleMath.round(bestScoreOnValidationData, 4)); } PRINTLN("---------------------------------"); } public double eval(DataPoint dp) { return ensemble.eval(dp); } public Ranker clone() { return new LambdaMART(); } public String toString() { return ensemble.toString(); } public String model() { String output = "## " + name() + "\n"; output += "## No. of trees = " + nTrees + "\n"; output += "## No. of leaves = " + nTreeLeaves + "\n"; output += "## No. of threshold candidates = " + nThreshold + "\n"; output += "## Learning rate = " + learningRate + "\n"; output += "## Stop early = " + nRoundToStopEarly + "\n"; output += "\n"; output += toString(); return output; } public void load(String fn) { try { String content = ""; String model = ""; BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(fn), "ASCII")); while((content = in.readLine()) != null) { content = content.trim(); if(content.length() == 0) continue; if(content.indexOf("##")==0) continue; //actual model component model += content; } in.close(); //load the ensemble ensemble = new Ensemble(model); features = ensemble.getFeatures(); } catch(Exception ex) { System.out.println("Error in LambdaMART::load(): " + ex.toString()); } } public void printParameters() { PRINTLN("No. of trees: " + nTrees); PRINTLN("No. of leaves: " + nTreeLeaves); PRINTLN("No. 
of threshold candidates: " + nThreshold); PRINTLN("Min leaf support: " + minLeafSupport); PRINTLN("Learning rate: " + learningRate); PRINTLN("Stop early: " + nRoundToStopEarly + " rounds without performance gain on validation data"); } public String name() { return "LambdaMART"; } public Ensemble getEnsemble() { return ensemble; } protected void computePseudoResponses() { Arrays.fill(pseudoResponses, 0F); Arrays.fill(weights, 0); MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1)//single-thread computePseudoResponses(0, samples.size()-1, 0); else //multi-threading { List<LambdaComputationWorker> workers = new ArrayList<LambdaMART.LambdaComputationWorker>(); //divide the entire dataset into chunks of equal size for each worker thread int[] partition = p.partition(samples.size()); int current = 0; for(int i=0;i<partition.length-1;i++) { //execute the worker LambdaComputationWorker wk = new LambdaComputationWorker(this, partition[i], partition[i+1]-1, current); workers.add(wk);//keep it so we can get back results from it later on p.execute(wk); if(i < partition.length-2) for(int j=partition[i]; j<=partition[i+1]-1;j++) current += samples.get(j).size(); } //wait for all workers to complete before we move on to the next stage p.await(); } } protected void computePseudoResponses(int start, int end, int current) { int cutoff = scorer.getK(); //compute the lambda for each document (a.k.a "pseudo response") for(int i=start;i<=end;i++) { RankList orig = samples.get(i); int[] idx = MergeSorter.sort(modelScores, current, current+orig.size()-1, false); RankList rl = new RankList(orig, idx, current); double[][] changes = scorer.swapChange(rl); //NOTE: j, k are indices in the sorted (by modelScore) list, not the original // ==> need to map back with idx[j] and idx[k] for(int j=0;j<rl.size();j++) { DataPoint p1 = rl.get(j); int mj = idx[j]; for(int k=0;k<rl.size();k++) { if(j > cutoff && k > cutoff)//swaping these pair won't result in any change in target measures since they're below the cut-off point break; DataPoint p2 = rl.get(k); int mk = idx[k]; if(p1.getLabel() > p2.getLabel()) { double deltaNDCG = Math.abs(changes[j][k]); if(deltaNDCG > 0) { double rho = 1.0 / (1 + Math.exp(modelScores[mj] - modelScores[mk])); double lambda = rho * deltaNDCG; pseudoResponses[mj] += lambda; pseudoResponses[mk] -= lambda; double delta = rho * (1.0 - rho) * deltaNDCG; weights[mj] += delta; weights[mk] += delta; } } } } current += orig.size(); } } protected void updateTreeOutput(RegressionTree rt) { List<Split> leaves = rt.leaves(); for(int i=0;i<leaves.size();i++) { float s1 = 0F; float s2 = 0F; Split s = leaves.get(i); int[] idx = s.getSamples(); for(int j=0;j<idx.length;j++) { int k = idx[j]; s1 += pseudoResponses[k]; s2 += weights[k]; } if(s2 == 0) s.setOutput(0); else s.setOutput(s1/s2); } } protected int[] sortSamplesByFeature(DataPoint[] samples, int fid) { double[] score = new double[samples.length]; for(int i=0;i<samples.length;i++) score[i] = samples[i].getFeatureValue(fid); int[] idx = MergeSorter.sort(score, true); return idx; } /** * This function is equivalent to the inherited function rank(...), but it uses the cached model's outputs instead of computing them from scratch. 
* @param rankListIndex * @param current * @return */ protected RankList rank(int rankListIndex, int current) { RankList orig = samples.get(rankListIndex); double[] scores = new double[orig.size()]; for(int i=0;i<scores.length;i++) scores[i] = modelScores[current+i]; int[] idx = MergeSorter.sort(scores, false); return new RankList(orig, idx); } protected float computeModelScoreOnTraining() { /*float s = 0; int current = 0; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1)//single-thread s = computeModelScoreOnTraining(0, samples.size()-1, current); else { List<Worker> workers = new ArrayList<Worker>(); //divide the entire dataset into chunks of equal size for each worker thread int[] partition = p.partition(samples.size()); for(int i=0;i<partition.length-1;i++) { //execute the worker Worker wk = new Worker(this, partition[i], partition[i+1]-1, current); workers.add(wk);//keep it so we can get back results from it later on p.execute(wk); if(i < partition.length-2) for(int j=partition[i]; j<=partition[i+1]-1;j++) current += samples.get(j).size(); } //wait for all workers to complete before we move on to the next stage p.await(); for(int i=0;i<workers.size();i++) s += workers.get(i).score; }*/ float s = computeModelScoreOnTraining(0, samples.size()-1, 0); s = s / samples.size(); return s; } protected float computeModelScoreOnTraining(int start, int end, int current) { float s = 0; int c = current; for(int i=start;i<=end;i++) { s += scorer.score(rank(i, c)); c += samples.get(i).size(); } return s; } protected float computeModelScoreOnValidation() { /*float score = 0; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1)//single-thread score = computeModelScoreOnValidation(0, validationSamples.size()-1); else { List<Worker> workers = new ArrayList<Worker>(); //divide the entire dataset into chunks of equal size for each worker thread int[] partition = p.partition(validationSamples.size()); for(int i=0;i<partition.length-1;i++) { //execute the worker Worker wk = new Worker(this, partition[i], partition[i+1]-1); workers.add(wk);//keep it so we can get back results from it later on p.execute(wk); } //wait for all workers to complete before we move on to the next stage p.await(); for(int i=0;i<workers.size();i++) score += workers.get(i).score; }*/ float score = computeModelScoreOnValidation(0, validationSamples.size()-1); return score/validationSamples.size(); } protected float computeModelScoreOnValidation(int start, int end) { float score = 0; for(int i=start;i<=end;i++) { int[] idx = MergeSorter.sort(modelScoresOnValidation[i], false); score += scorer.score(new RankList(validationSamples.get(i), idx)); } return score; } protected void sortSamplesByFeature(int fStart, int fEnd) { for(int i=fStart;i<=fEnd; i++) sortedIdx[i] = sortSamplesByFeature(martSamples, features[i]); } //For multi-threading processing class SortWorker implements Runnable { LambdaMART ranker = null; int start = -1; int end = -1; SortWorker(LambdaMART ranker, int start, int end) { this.ranker = ranker; this.start = start; this.end = end; } public void run() { ranker.sortSamplesByFeature(start, end); } } class LambdaComputationWorker implements Runnable { LambdaMART ranker = null; int rlStart = -1; int rlEnd = -1; int martStart = -1; LambdaComputationWorker(LambdaMART ranker, int rlStart, int rlEnd, int martStart) { this.ranker = ranker; this.rlStart = rlStart; this.rlEnd = rlEnd; this.martStart = martStart; } public void run() { ranker.computePseudoResponses(rlStart, rlEnd, martStart); } } class Worker 
implements Runnable { LambdaMART ranker = null; int rlStart = -1; int rlEnd = -1; int martStart = -1; int type = -1; //compute score on validation float score = 0; Worker(LambdaMART ranker, int rlStart, int rlEnd) { type = 3; this.ranker = ranker; this.rlStart = rlStart; this.rlEnd = rlEnd; } Worker(LambdaMART ranker, int rlStart, int rlEnd, int martStart) { type = 4; this.ranker = ranker; this.rlStart = rlStart; this.rlEnd = rlEnd; this.martStart = martStart; } public void run() { if(type == 4) score = ranker.computeModelScoreOnTraining(rlStart, rlEnd, martStart); else if(type == 3) score = ranker.computeModelScoreOnValidation(rlStart, rlEnd); } } }
19,317
30.36039
157
java
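The core of computePseudoResponses() above is the per-pair lambda update. A numeric sketch for one document pair where the first document carries the higher label; the scores and |deltaNDCG| below are made-up values.

public class LambdaSketch {
	public static void main(String[] args) {
		double si = 1.2, sj = 0.4;    // current model scores of the better and the worse document
		double deltaNDCG = 0.05;      // |metric change if the two documents were swapped|
		double rho = 1.0 / (1.0 + Math.exp(si - sj));
		double lambda = rho * deltaNDCG;               // added to the better doc's pseudo-response, subtracted from the worse
		double weight = rho * (1.0 - rho) * deltaNDCG; // second-order term used when setting leaf outputs
		System.out.println("lambda=" + lambda + ", weight=" + weight);
	}
}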
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/Split.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.tree; import java.util.ArrayList; import java.util.List; import ciir.umass.edu.learning.DataPoint; /** * * @author vdang * */ public class Split { //Key attributes of a split (tree node) private int featureID = -1; private float threshold = 0F; private double avgLabel = 0.0F; //Intermediate variables (ONLY used during learning) //*DO NOT* attempt to access them once the training is done private boolean isRoot = false; private double sumLabel = 0.0; private double sqSumLabel = 0.0; private Split left = null; private Split right = null; private double deviance = 0F;//mean squared error "S" private int[][] sortedSampleIDs = null; public int[] samples = null; public FeatureHistogram hist = null; public Split() { } public Split(int featureID, float threshold, double deviance) { this.featureID = featureID; this.threshold = threshold; this.deviance = deviance; } public Split(int[][] sortedSampleIDs, double deviance, double sumLabel, double sqSumLabel) { this.sortedSampleIDs = sortedSampleIDs; this.deviance = deviance; this.sumLabel = sumLabel; this.sqSumLabel = sqSumLabel; avgLabel = sumLabel/sortedSampleIDs[0].length; } public Split(int[] samples, FeatureHistogram hist, double deviance, double sumLabel) { this.samples = samples; this.hist = hist; this.deviance = deviance; this.sumLabel = sumLabel; avgLabel = sumLabel/samples.length; } public void set(int featureID, float threshold, double deviance) { this.featureID = featureID; this.threshold = threshold; this.deviance = deviance; } public void setLeft(Split s) { left = s; } public void setRight(Split s) { right = s; } public void setOutput(float output) { avgLabel = output; } public Split getLeft() { return left; } public Split getRight() { return right; } public double getDeviance() { return deviance; } public double getOutput() { return avgLabel; } public List<Split> leaves() { List<Split> list = new ArrayList<Split>(); leaves(list); return list; } private void leaves(List<Split> leaves) { if(featureID == -1) leaves.add(this); else { left.leaves(leaves); right.leaves(leaves); } } public double eval(DataPoint dp) { Split n = this; while(n.featureID != -1) { if(dp.getFeatureValue(n.featureID) <= n.threshold) n = n.left; else n = n.right; } return n.avgLabel; } public String toString() { return toString(""); } public String toString(String indent) { String strOutput = indent + "<split>" + "\n"; strOutput += getString(indent + "\t"); strOutput += indent + "</split>" + "\n"; return strOutput; } public String getString(String indent) { String strOutput = ""; if(featureID == -1) { strOutput += indent + "<output> " + avgLabel + " </output>" + "\n"; } else { strOutput += indent + "<feature> " + featureID + " </feature>" + "\n"; strOutput += indent + "<threshold> " + threshold + " </threshold>" + "\n"; strOutput += indent + "<split pos=\"left\">" + "\n"; strOutput += left.getString(indent + "\t"); strOutput += indent + "</split>" + "\n"; strOutput += indent + "<split pos=\"right\">" + "\n"; strOutput += right.getString(indent + "\t"); strOutput += indent + "</split>" + 
"\n"; } return strOutput; } //Internal functions(ONLY used during learning) //*DO NOT* attempt to call them once the training is done public boolean split(double[] trainingLabels, int minLeafSupport) { return hist.findBestSplit(this, trainingLabels, minLeafSupport); } public int[] getSamples() { if(sortedSampleIDs != null) return sortedSampleIDs[0]; return samples; } public int[][] getSampleSortedIndex() { return sortedSampleIDs; } public double getSumLabel() { return sumLabel; } public double getSqSumLabel() { return sqSumLabel; } public void clearSamples() { sortedSampleIDs = null; samples = null; hist = null; } public void setRoot(boolean isRoot) { this.isRoot = isRoot; } public boolean isRoot() { return isRoot; } }
4,587
21.600985
91
java
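To make the Split structure above concrete, a hand-built one-split tree and its evaluation; the feature id, threshold and leaf outputs are arbitrary illustration values.

import ciir.umass.edu.learning.DataPoint;
import ciir.umass.edu.learning.tree.Split;

public class TinyTreeExample {
	public static double score(DataPoint dp) {
		Split low = new Split();
		low.setOutput(-1.0f);
		Split high = new Split();
		high.setOutput(2.5f);
		Split root = new Split(3, 10.0f, 0.0);  // internal node: split on feature 3 at threshold 10
		root.setLeft(low);
		root.setRight(high);
		return root.eval(dp);                   // -1.0 if dp's feature 3 <= 10, otherwise 2.5
	}
}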
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/RegressionTree.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.tree; import java.util.ArrayList; import java.util.List; import ciir.umass.edu.learning.DataPoint; /** * @author vdang */ public class RegressionTree { //Parameters protected int nodes = -1;//10 -1 for unlimited number of nodes (the size of the tree will then be controlled *ONLY* by minLeafSupport) protected int minLeafSupport = 1; //Member variables and functions protected Split root = null; protected List<Split> leaves = null; protected DataPoint[] trainingSamples = null; protected double[] trainingLabels = null; protected int[] features = null; protected float[][] thresholds = null; protected int[] index = null; protected FeatureHistogram hist = null; public RegressionTree(Split root) { this.root = root; leaves = root.leaves(); } public RegressionTree(int nLeaves, DataPoint[] trainingSamples, double[] labels, FeatureHistogram hist, int minLeafSupport) { this.nodes = nLeaves; this.trainingSamples = trainingSamples; this.trainingLabels = labels; this.hist = hist; this.minLeafSupport = minLeafSupport; index = new int[trainingSamples.length]; for(int i=0;i<trainingSamples.length;i++) index[i] = i; } /** * Fit the tree from the specified training data */ public void fit() { List<Split> queue = new ArrayList<Split>(); root = new Split(index, hist, Float.MAX_VALUE, 0); root.setRoot(true); root.split(trainingLabels, minLeafSupport); insert(queue, root.getLeft()); insert(queue, root.getRight()); int taken = 0; while( (nodes == -1 || taken + queue.size() < nodes) && queue.size() > 0) { Split leaf = queue.get(0); queue.remove(0); if(leaf.getSamples().length < 2 * minLeafSupport) { taken++; continue; } if(!leaf.split(trainingLabels, minLeafSupport))//unsplitable (i.e. variance(s)==0; or after-split variance is higher than before) taken++; else { insert(queue, leaf.getLeft()); insert(queue, leaf.getRight()); } } leaves = root.leaves(); } /** * Get the tree output for the input sample * @param dp * @return */ public double eval(DataPoint dp) { return root.eval(dp); } /** * Retrieve all leave nodes in the tree * @return */ public List<Split> leaves() { return leaves; } /** * Clear samples associated with each leaves (when they are no longer necessary) in order to save memory */ public void clearSamples() { trainingSamples = null; trainingLabels = null; features = null; thresholds = null; index = null; hist = null; for(int i=0;i<leaves.size();i++) leaves.get(i).clearSamples(); } /** * Generate the string representation of the tree */ public String toString() { if(root != null) return root.toString(); return ""; } public String toString(String indent) { if(root != null) return root.toString(indent); return ""; } public double variance() { double var = 0; for(int i=0;i<leaves.size();i++) var += leaves.get(i).getDeviance(); return var; } protected void insert(List<Split> ls, Split s) { int i=0; while(i < ls.size()) { if(ls.get(i).getDeviance() > s.getDeviance()) i++; else break; } ls.add(i, s); } }
3,706
22.314465
135
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/FeatureHistogram.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.tree; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.utilities.MyThreadPool; import ciir.umass.edu.utilities.WorkerThread; /** * @author vdang */ public class FeatureHistogram { class Config { int featureIdx = -1; int thresholdIdx = -1; double S = -1; } //Parameter public static float samplingRate = 1; //Variables public int[] features = null; public float[][] thresholds = null; public double[][] sum = null; public double sumResponse = 0; public double sqSumResponse = 0; public int[][] count = null; public int[][] sampleToThresholdMap = null; //whether to re-use its parents @sum and @count instead of cleaning up the parent and re-allocate for the children. //@sum and @count of any intermediate tree node (except for root) can be re-used. private boolean reuseParent = false; public FeatureHistogram() { } public void construct(DataPoint[] samples, double[] labels, int[][] sampleSortedIdx, int[] features, float[][] thresholds) { this.features = features; this.thresholds = thresholds; sumResponse = 0; sqSumResponse = 0; sum = new double[features.length][]; count = new int[features.length][]; sampleToThresholdMap = new int[features.length][]; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1) construct(samples, labels, sampleSortedIdx, thresholds, 0, features.length-1); else p.execute(new Worker(this, samples, labels, sampleSortedIdx, thresholds), features.length); } protected void construct(DataPoint[] samples, double[] labels, int[][] sampleSortedIdx, float[][] thresholds, int start, int end) { for(int i=start;i<=end;i++) { int fid = features[i]; //get the list of samples associated with this node (sorted in ascending order with respect to the current feature) int[] idx = sampleSortedIdx[i]; double sumLeft = 0; float[] threshold = thresholds[i]; double[] sumLabel = new double[threshold.length]; int[] c = new int[threshold.length]; int[] stMap = new int[samples.length]; int last = -1; for(int t=0;t<threshold.length;t++) { int j=last+1; //find the first sample that exceeds the current threshold for(;j<idx.length;j++) { int k = idx[j]; if(samples[k].getFeatureValue(fid) > threshold[t]) break; sumLeft += labels[k]; if(i == 0) { sumResponse += labels[k]; sqSumResponse += labels[k] * labels[k]; } stMap[k] = t; } last = j-1; sumLabel[t] = sumLeft; c[t] = last+1; } sampleToThresholdMap[i] = stMap; sum[i] = sumLabel; count[i] = c; } } protected void update(double[] labels) { sumResponse = 0; sqSumResponse = 0; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1) update(labels, 0, features.length-1); else p.execute(new Worker(this, labels), features.length); } protected void update(double[] labels, int start, int end) { for(int f=start;f<=end;f++) Arrays.fill(sum[f], 0); for(int k=0;k<labels.length;k++) { for(int f=start;f<=end;f++) { int t = sampleToThresholdMap[f][k]; sum[f][t] += labels[k]; if(f == 0) { sumResponse += labels[k]; sqSumResponse += labels[k]*labels[k]; } //count 
doesn't change, so no need to re-compute } } for(int f=start;f<=end;f++) { for(int t=1;t<thresholds[f].length;t++) sum[f][t] += sum[f][t-1]; } } public void construct(FeatureHistogram parent, int[] soi, double[] labels) { this.features = parent.features; this.thresholds = parent.thresholds; sumResponse = 0; sqSumResponse = 0; sum = new double[features.length][]; count = new int[features.length][]; sampleToThresholdMap = parent.sampleToThresholdMap; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1) construct(parent, soi, labels, 0, features.length-1); else p.execute(new Worker(this, parent, soi, labels), features.length); } protected void construct(FeatureHistogram parent, int[] soi, double[] labels, int start, int end) { //init for(int i=start;i<=end;i++) { float[] threshold = thresholds[i]; sum[i] = new double[threshold.length]; count[i] = new int[threshold.length]; Arrays.fill(sum[i], 0); Arrays.fill(count[i], 0); } //update for(int i=0;i<soi.length;i++) { int k = soi[i]; for(int f=start;f<=end;f++) { int t = sampleToThresholdMap[f][k]; sum[f][t] += labels[k]; count[f][t] ++; if(f == 0) { sumResponse += labels[k]; sqSumResponse += labels[k]*labels[k]; } } } for(int f=start;f<=end;f++) { for(int t=1;t<thresholds[f].length;t++) { sum[f][t] += sum[f][t-1]; count[f][t] += count[f][t-1]; } } } public void construct(FeatureHistogram parent, FeatureHistogram leftSibling, boolean reuseParent) { this.reuseParent = reuseParent; this.features = parent.features; this.thresholds = parent.thresholds; sumResponse = parent.sumResponse - leftSibling.sumResponse; sqSumResponse = parent.sqSumResponse - leftSibling.sqSumResponse; if(reuseParent) { sum = parent.sum; count = parent.count; } else { sum = new double[features.length][]; count = new int[features.length][]; } sampleToThresholdMap = parent.sampleToThresholdMap; MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1) construct(parent, leftSibling, 0, features.length-1); else p.execute(new Worker(this, parent, leftSibling), features.length); } protected void construct(FeatureHistogram parent, FeatureHistogram leftSibling, int start, int end) { for(int f=start;f<=end;f++) { float[] threshold = thresholds[f]; if(!reuseParent) { sum[f] = new double[threshold.length]; count[f] = new int[threshold.length]; } for(int t=0;t<threshold.length;t++) { sum[f][t] = parent.sum[f][t] - leftSibling.sum[f][t]; count[f][t] = parent.count[f][t] - leftSibling.count[f][t]; } } } protected Config findBestSplit(int[] usedFeatures, int minLeafSupport, int start, int end) { Config cfg = new Config(); int totalCount = count[start][count[start].length-1]; for(int f=start;f<=end;f++) { int i = usedFeatures[f]; float[] threshold = thresholds[i]; for(int t=0;t<threshold.length;t++) { int countLeft = count[i][t]; int countRight = totalCount - countLeft; if(countLeft < minLeafSupport || countRight < minLeafSupport) continue; double sumLeft = sum[i][t]; double sumRight = sumResponse - sumLeft; double S = sumLeft * sumLeft / countLeft + sumRight * sumRight / countRight; if(cfg.S < S) { cfg.S = S; cfg.featureIdx = i; cfg.thresholdIdx = t; } } } return cfg; } public boolean findBestSplit(Split sp, double[] labels, int minLeafSupport) { if(sp.getDeviance() >= 0.0 && sp.getDeviance() <= 0.0)//equals 0 return false;//no need to split int[] usedFeatures = null;//index of the features to be used for tree splitting if(samplingRate < 1)//need to do sub sampling (feature sampling) { int size = (int)(samplingRate * features.length); usedFeatures = new int[size]; //put all 
features into a pool List<Integer> fpool = new ArrayList<Integer>(); for(int i=0;i<features.length;i++) fpool.add(i); //do sampling, without replacement Random r = new Random(); for(int i=0;i<size;i++) { int sel = r.nextInt(fpool.size()); usedFeatures[i] = fpool.get(sel); fpool.remove(sel); } } else//no sub-sampling, all features will be used { usedFeatures = new int[features.length]; for(int i=0;i<features.length;i++) usedFeatures[i] = i; } //find the best split Config best = new Config(); MyThreadPool p = MyThreadPool.getInstance(); if(p.size() == 1) best = findBestSplit(usedFeatures, minLeafSupport, 0, usedFeatures.length-1); else { WorkerThread[] workers = p.execute(new Worker(this, usedFeatures, minLeafSupport), usedFeatures.length); for(int i=0;i<workers.length;i++) { Worker wk = (Worker)workers[i]; if(best.S < wk.cfg.S) best = wk.cfg; } } if(best.S == -1)//unsplitable, for some reason... return false; //if(minS >= sp.getDeviance()) //return null; double[] sumLabel = sum[best.featureIdx]; int[] sampleCount = count[best.featureIdx]; double s = sumLabel[sumLabel.length-1]; int c = sampleCount[sumLabel.length-1]; double sumLeft = sumLabel[best.thresholdIdx]; int countLeft = sampleCount[best.thresholdIdx]; double sumRight = s - sumLeft; int countRight = c - countLeft; int[] left = new int[countLeft]; int[] right = new int[countRight]; int l = 0; int r = 0; int k = 0; int[] idx = sp.getSamples(); for(int j=0;j<idx.length;j++) { k = idx[j]; if(sampleToThresholdMap[best.featureIdx][k] <= best.thresholdIdx)//go to the left left[l++] = k; else//go to the right right[r++] = k; } FeatureHistogram lh = new FeatureHistogram(); lh.construct(sp.hist, left, labels); FeatureHistogram rh = new FeatureHistogram(); rh.construct(sp.hist, lh, !sp.isRoot()); double var = sqSumResponse - sumResponse * sumResponse / idx.length; double varLeft = lh.sqSumResponse - lh.sumResponse * lh.sumResponse / left.length; double varRight = rh.sqSumResponse - rh.sumResponse * rh.sumResponse / right.length; sp.set(features[best.featureIdx], thresholds[best.featureIdx][best.thresholdIdx], var); sp.setLeft(new Split(left, lh, varLeft, sumLeft)); sp.setRight(new Split(right, rh, varRight, sumRight)); sp.clearSamples(); return true; } class Worker extends WorkerThread { FeatureHistogram fh = null; int type = -1; //find best split (type == 0) int[] usedFeatures = null; int minLeafSup = -1; Config cfg = null; //update (type = 1) double[] labels = null; //construct (type = 2) FeatureHistogram parent = null; int[] soi = null; //construct (type = 3) FeatureHistogram leftSibling = null; //construct (type = 4) DataPoint[] samples; int[][] sampleSortedIdx; float[][] thresholds; public Worker() { } public Worker(FeatureHistogram fh, int[] usedFeatures, int minLeafSup) { type = 0; this.fh = fh; this.usedFeatures = usedFeatures; this.minLeafSup = minLeafSup; } public Worker(FeatureHistogram fh, double[] labels) { type = 1; this.fh = fh; this.labels = labels; } public Worker(FeatureHistogram fh, FeatureHistogram parent, int[] soi, double[] labels) { type = 2; this.fh = fh; this.parent = parent; this.soi = soi; this.labels = labels; } public Worker(FeatureHistogram fh, FeatureHistogram parent, FeatureHistogram leftSibling) { type = 3; this.fh = fh; this.parent = parent; this.leftSibling = leftSibling; } public Worker(FeatureHistogram fh, DataPoint[] samples, double[] labels, int[][] sampleSortedIdx, float[][] thresholds) { type = 4; this.fh = fh; this.samples = samples; this.labels = labels; this.sampleSortedIdx = sampleSortedIdx; 
this.thresholds = thresholds; } public void run() { if(type == 0) cfg = fh.findBestSplit(usedFeatures, minLeafSup, start, end); else if(type == 1) fh.update(labels, start, end); else if(type == 2) fh.construct(parent, soi, labels, start, end); else if(type == 3) fh.construct(parent, leftSibling, start, end); else if(type == 4) fh.construct(samples, labels, sampleSortedIdx, thresholds, start, end); } public WorkerThread clone() { Worker wk = new Worker(); wk.fh = fh; wk.type = type; //find best split (type == 0) wk.usedFeatures = usedFeatures; wk.minLeafSup = minLeafSup; //wk.cfg = cfg; //update (type = 1) wk.labels = labels; //construct (type = 2) wk.parent = parent; wk.soi = soi; //construct (type = 3) wk.leftSibling = leftSibling; //construct (type = 1) wk.samples = samples; wk.sampleSortedIdx = sampleSortedIdx; wk.thresholds = thresholds; return wk; } } }
12,766
25.54262
130
java
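The findBestSplit fragment above sub-samples features *without* replacement by repeatedly removing a random index from a shrinking candidate pool. Below is a minimal, self-contained sketch of that sampling idea only; the class and method names (FeatureSubsampleSketch, sampleWithoutReplacement) are hypothetical and not part of RankLib.

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// Sketch of pool-based sampling without replacement, as used when samplingRate < 1.
public class FeatureSubsampleSketch {
	public static int[] sampleWithoutReplacement(int nFeatures, float samplingRate, Random rnd) {
		int size = (int) (nFeatures * samplingRate);
		List<Integer> pool = new ArrayList<Integer>();
		for (int i = 0; i < nFeatures; i++)
			pool.add(i);                        // every feature index starts in the pool
		int[] used = new int[size];
		for (int i = 0; i < size; i++) {
			int sel = rnd.nextInt(pool.size()); // pick one of the remaining indices uniformly
			used[i] = pool.get(sel);
			pool.remove(sel);                   // removing it guarantees no repeats
		}
		return used;
	}

	public static void main(String[] args) {
		int[] chosen = sampleWithoutReplacement(10, 0.8f, new Random(42));
		System.out.println(java.util.Arrays.toString(chosen)); // 8 distinct feature indices
	}
}

Removing the chosen element from the pool is what makes the draw "without replacement"; the trade-off is the O(n) cost of List.remove on an ArrayList, which is negligible for the feature counts involved here.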
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/tree/RFRanker.java
/*===============================================================================
 * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved.
 *
 * Use of the RankLib package is subject to the terms of the software license set
 * forth in the LICENSE file included with this software, and also available at
 * http://people.cs.umass.edu/~vdang/ranklib_license.html
 *===============================================================================
 */
package ciir.umass.edu.learning.tree;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

import ciir.umass.edu.learning.DataPoint;
import ciir.umass.edu.learning.RANKER_TYPE;
import ciir.umass.edu.learning.RankList;
import ciir.umass.edu.learning.Ranker;
import ciir.umass.edu.learning.RankerFactory;
import ciir.umass.edu.learning.Sampler;
import ciir.umass.edu.metric.MetricScorer;
import ciir.umass.edu.utilities.SimpleMath;

public class RFRanker extends Ranker {
	//Parameters
	//[a] general bagging parameters
	public static int nBag = 300;
	public static float subSamplingRate = 0.5f;//sampling of samples (*WITH* replacement)
	public static float featureSamplingRate = 0.8f;//sampling of features (*WITHOUT* replacement)
	//[b] what to do in each bag
	public static RANKER_TYPE rType = RANKER_TYPE.MART;//which algorithm to bag
	public static int nTrees = 1;//how many trees in each bag. If nTrees > 1 ==> each bag will contain an ensemble of gradient boosted trees.
	public static int nTreeLeaves = 100;
	public static float learningRate = 0.1F;//or shrinkage. *ONLY* matters if nTrees > 1.
	public static int nThreshold = 256;
	public static int minLeafSupport = 1;

	//Variables
	protected Ensemble[] ensembles = null;//bag of ensembles, each can be a single tree or an ensemble of gradient boosted trees

	public RFRanker()
	{
	}
	public RFRanker(List<RankList> samples, int[] features, MetricScorer scorer)
	{
		super(samples, features, scorer);
	}

	public void init()
	{
		PRINT("Initializing... ");
		ensembles = new Ensemble[nBag];
		//initialize parameters for the tree(s) built in each bag
		LambdaMART.nTrees = nTrees;
		LambdaMART.nTreeLeaves = nTreeLeaves;
		LambdaMART.learningRate = learningRate;
		LambdaMART.nThreshold = nThreshold;
		LambdaMART.minLeafSupport = minLeafSupport;
		LambdaMART.nRoundToStopEarly = -1;//no early-stopping since we're doing bagging
		//turn on feature sampling
		FeatureHistogram.samplingRate = featureSamplingRate;
		PRINTLN("[Done]");
	}
	public void learn()
	{
		RankerFactory rf = new RankerFactory();
		PRINTLN("------------------------------------");
		PRINTLN("Training starts...");
		PRINTLN("------------------------------------");
		PRINTLN(new int[]{9, 9, 11}, new String[]{"bag", scorer.name()+"-B", scorer.name()+"-OOB"});
		PRINTLN("------------------------------------");
		//start the bagging process
		for(int i=0;i<nBag;i++)
		{
			if(i % LambdaMART.gcCycle == 0)
				System.gc();
			Sampler sp = new Sampler();
			//create a "bag" of samples by random sampling from the training set
			List<RankList> bag = sp.doSampling(samples, subSamplingRate, true);
			//"out-of-bag" samples
			//List<RankList> outOfBag = sp.getRemains();
			LambdaMART r = (LambdaMART)rf.createRanker(rType, bag, features, scorer);
			//r.setValidationSet(outOfBag);
			boolean tmp = Ranker.verbose;
			Ranker.verbose = false;//turn off the progress messages from training this ranker
			r.init();
			r.learn();
			Ranker.verbose = tmp;
			//PRINTLN(new int[]{9, 9, 11}, new String[]{"b["+(i+1)+"]", SimpleMath.round(r.getScoreOnTrainingData(), 4)+"", SimpleMath.round(r.getScoreOnValidationData(), 4)+""});
			PRINTLN(new int[]{9, 9}, new String[]{"b["+(i+1)+"]", SimpleMath.round(r.getScoreOnTrainingData(), 4)+""});
			ensembles[i] = r.getEnsemble();
		}
		//Finishing up
		scoreOnTrainingData = scorer.score(rank(samples));
		PRINTLN("------------------------------------");
		PRINTLN("Finished successfully.");
		PRINTLN(scorer.name() + " on training data: " + SimpleMath.round(scoreOnTrainingData, 4));
		if(validationSamples != null)
		{
			bestScoreOnValidationData = scorer.score(rank(validationSamples));
			PRINTLN(scorer.name() + " on validation data: " + SimpleMath.round(bestScoreOnValidationData, 4));
		}
		PRINTLN("------------------------------------");
	}
	public double eval(DataPoint dp)
	{
		double s = 0;
		for(int i=0;i<ensembles.length;i++)
			s += ensembles[i].eval(dp);
		return s/ensembles.length;
	}
	public Ranker clone()
	{
		return new RFRanker();
	}
	public String toString()
	{
		String str = "";
		for(int i=0;i<nBag;i++)
			str += ensembles[i].toString() + "\n";
		return str;
	}
	public String model()
	{
		String output = "## " + name() + "\n";
		output += "## No. of bags = " + nBag + "\n";
		output += "## Sub-sampling = " + subSamplingRate + "\n";
		output += "## Feature-sampling = " + featureSamplingRate + "\n";
		output += "## No. of trees = " + nTrees + "\n";
		output += "## No. of leaves = " + nTreeLeaves + "\n";
		output += "## No. of threshold candidates = " + nThreshold + "\n";
		output += "## Learning rate = " + learningRate + "\n";
		output += "\n";
		output += toString();
		return output;
	}
	public void load(String fn)
	{
		try {
			String content = "";
			String model = "";
			BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(fn), "ASCII"));
			List<Ensemble> ens = new ArrayList<Ensemble>();
			while((content = in.readLine()) != null)
			{
				content = content.trim();
				if(content.length() == 0)
					continue;
				if(content.indexOf("##")==0)
					continue;
				//actual model component
				model += content;
				if(content.indexOf("</ensemble>") != -1)
				{
					//load the ensemble
					ens.add(new Ensemble(model));
					model = "";
				}
			}
			in.close();
			HashSet<Integer> uniqueFeatures = new HashSet<Integer>();
			ensembles = new Ensemble[ens.size()];
			for(int i=0;i<ens.size();i++)
			{
				ensembles[i] = ens.get(i);
				//obtain used features
				int[] fids = ens.get(i).getFeatures();
				for(int f=0;f<fids.length;f++)
					if(!uniqueFeatures.contains(fids[f]))
						uniqueFeatures.add(fids[f]);
			}
			int fi = 0;
			features = new int[uniqueFeatures.size()];
			for(Integer f : uniqueFeatures)
				features[fi++] = f.intValue();
		}
		catch(Exception ex)
		{
			System.out.println("Error in RFRanker::load(): " + ex.toString());
		}
	}
	public void printParameters()
	{
		PRINTLN("No. of bags: " + nBag);
		PRINTLN("Sub-sampling: " + subSamplingRate);
		PRINTLN("Feature-sampling: " + featureSamplingRate);
		PRINTLN("No. of trees: " + nTrees);
		PRINTLN("No. of leaves: " + nTreeLeaves);
		PRINTLN("No. of threshold candidates: " + nThreshold);
		PRINTLN("Learning rate: " + learningRate);
	}
	public String name()
	{
		return "Random Forests";
	}
	public Ensemble[] getEnsembles()
	{
		return ensembles;
	}
}
7,014
32.564593
170
java
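RFRanker's eval() scores a data point with every bagged ensemble and returns the mean, which is the standard random-forest aggregation rule. The sketch below illustrates only that averaging step in isolation; BaggedScorerSketch and its Scorer interface are hypothetical stand-ins, not RankLib types.

// Hypothetical sketch of the bagged prediction rule used in RFRanker.eval():
// each bag contributes one score and the final score is their mean.
public class BaggedScorerSketch {
	interface Scorer { double score(double[] features); }   // stand-in for one bag's Ensemble

	private final Scorer[] bags;
	public BaggedScorerSketch(Scorer[] bags) { this.bags = bags; }

	public double eval(double[] features) {
		double s = 0;
		for (Scorer bag : bags)
			s += bag.score(features);       // sum the per-bag scores
		return s / bags.length;             // average over all bags
	}

	public static void main(String[] args) {
		Scorer[] bags = {
			f -> 2.0 * f[0],                // two toy "ensembles"
			f -> f[0] + 1.0
		};
		System.out.println(new BaggedScorerSketch(bags).eval(new double[]{3.0})); // (6.0 + 4.0) / 2 = 5.0
	}
}

Averaging rather than summing keeps the output scale independent of nBag, so the same learned model gives comparable scores regardless of how many bags were trained.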
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/boosting/WeakRanker.java
/*===============================================================================
 * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved.
 *
 * Use of the RankLib package is subject to the terms of the software license set
 * forth in the LICENSE file included with this software, and also available at
 * http://people.cs.umass.edu/~vdang/ranklib_license.html
 *===============================================================================
 */
package ciir.umass.edu.learning.boosting;

import java.util.ArrayList;
import java.util.List;

import ciir.umass.edu.learning.RankList;
import ciir.umass.edu.utilities.Sorter;

/**
 * @author vdang
 *
 * Weak rankers for AdaRank.
 */
public class WeakRanker {
	private int fid = -1;

	public WeakRanker(int fid)
	{
		this.fid = fid;
	}
	public int getFID()
	{
		return fid;
	}

	public RankList rank(RankList l)
	{
		double[] score = new double[l.size()];
		for(int i=0;i<l.size();i++)
			score[i] = l.get(i).getFeatureValue(fid);
		int[] idx = Sorter.sort(score, false);
		return new RankList(l, idx);
	}
	public List<RankList> rank(List<RankList> l)
	{
		List<RankList> ll = new ArrayList<RankList>();
		for(int i=0;i<l.size();i++)
			ll.add(rank(l.get(i)));
		return ll;
	}
}
1,254
23.607843
82
java
lodreclib
lodreclib-master/src/main/java/ciir/umass/edu/learning/boosting/RankBoost.java
/*=============================================================================== * Copyright (c) 2010-2012 University of Massachusetts. All Rights Reserved. * * Use of the RankLib package is subject to the terms of the software license set * forth in the LICENSE file included with this software, and also available at * http://people.cs.umass.edu/~vdang/ranklib_license.html *=============================================================================== */ package ciir.umass.edu.learning.boosting; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.InputStreamReader; import java.util.ArrayList; import java.util.List; import ciir.umass.edu.learning.DataPoint; import ciir.umass.edu.learning.RankList; import ciir.umass.edu.learning.Ranker; import ciir.umass.edu.metric.MetricScorer; import ciir.umass.edu.utilities.MergeSorter; import ciir.umass.edu.utilities.SimpleMath; /** * @author vdang * * This class implements RankBoost. * Y. Freund, R. Iyer, R. Schapire, and Y. Singer. An efficient boosting algorithm for combining preferences. * The Journal of Machine Learning Research, 4: 933-969, 2003. */ public class RankBoost extends Ranker { public static int nIteration = 300;//number of rounds public static int nThreshold = 10; protected double[][][] sweight = null;//sample weight D(x_0, x_1) -- the weight of x_1 ranked above x_2 protected double[][] potential = null;//pi(x) protected List<List<int[]>> sortedSamples = new ArrayList<List<int[]>>(); protected double[][] thresholds = null;//candidate values for weak rankers' threshold, selected from feature values protected int[][] tSortedIdx = null;//sorted (descend) index for @thresholds protected List<RBWeakRanker> wRankers = null;//best weak rankers at each round protected List<Double> rWeight = null;//alpha (weak rankers' weight) //to store the best model on validation data (if specified) protected List<RBWeakRanker> bestModelRankers = new ArrayList<RBWeakRanker>(); protected List<Double> bestModelWeights = new ArrayList<Double>(); private double R_t = 0.0; private double Z_t = 1.0; private int totalCorrectPairs = 0;//crucial pairs public RankBoost() { } public RankBoost(List<RankList> samples, int[] features, MetricScorer scorer) { super(samples, features, scorer); } private int[] reorder(RankList rl, int fid) { double[] score = new double[rl.size()]; for(int i=0;i<rl.size();i++) score[i] = rl.get(i).getFeatureValue(fid); int[] idx = MergeSorter.sort(score, false); return idx; } /** * compute the potential (pi) based on the current sample (pair) weight distribution D_t. */ private void updatePotential() { for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); for(int j=0;j<rl.size();j++) { double p = 0.0; for(int k=j+1;k<rl.size();k++) p += sweight[i][j][k]; for(int k=0;k<j;k++) p -= sweight[i][k][j]; potential[i][j] = p; } } } /** * Find the <feature, threshold> that maximize r (which will approximately minimize the exponential error on the training data). * Create the weak ranker h_t from this pair <feature, threshold>. h_t(p) > threshold => h_t(p)=1; h_t(p)=0 otherwise. * @return The learned weak ranker. The value of <i>current_r</i> is also updated to be the best r observed. 
*/ private RBWeakRanker learnWeakRanker() { int bestFid = -1; double maxR = -10; double bestThreshold = -1.0; for(int i=0;i<features.length;i++) { List<int[]> sSortedIndex = sortedSamples.get(i);//samples sorted (descending) by the current feature int[] idx = tSortedIdx[i];//candidate thresholds for the current features int[] last = new int[samples.size()];//the last "touched" (and taken) position in each sample rank list for(int j=0;j<samples.size();j++) last[j] = -1; double r = 0.0; for(int j=0;j<idx.length;j++) { double t = thresholds[i][idx[j]]; //we want something t < threshold <= tp for(int k=0;k<samples.size();k++) { RankList rl = samples.get(k); int[] sk = sSortedIndex.get(k); for(int l=last[k]+1;l<rl.size();l++) { DataPoint p = rl.get(sk[l]); if(p.getFeatureValue(features[i]) > t)//take it { r += potential[k][sk[l]]; last[k] = l; } else break; } } //finish computing r if(r > maxR) { maxR = r; bestThreshold = t; bestFid = features[i]; } } } if(bestFid == -1) return null; R_t = Z_t * maxR;//save it so we won't have to re-compute when we need it return new RBWeakRanker(bestFid, bestThreshold); } public void init() { PRINT("Initializing... "); wRankers = new ArrayList<RBWeakRanker>(); rWeight = new ArrayList<Double>(); //for each (true) ranked list, we only care about correctly ranked pair (e.g. L={1,2,3} => <1,2>, <1,3>, <2,3>) // count the number of correctly ranked pairs from sample ranked list totalCorrectPairs = 0; for(int i=0;i<samples.size();i++) { samples.set(i, samples.get(i).getCorrectRanking());//make sure the training samples are in correct ranking RankList rl = samples.get(i); for(int j=0;j<rl.size()-1;j++) for(int k=rl.size()-1;k>=j+1 && rl.get(j).getLabel() > rl.get(k).getLabel();k--)//faster than the for-if below //for(int k=j+1;k<rl.size();k++) //if(rl.get(j).getLabel() > rl.get(k).getLabel()) totalCorrectPairs++; } //compute weight for all correctly ranked pairs sweight = new double[samples.size()][][]; for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); sweight[i] = new double[rl.size()][]; for(int j=0;j<rl.size()-1;j++) { sweight[i][j] = new double[rl.size()]; for(int k=j+1;k<rl.size();k++) if(rl.get(j).getLabel() > rl.get(k).getLabel())//strictly "greater than" ==> crucial pairs sweight[i][j][k] = 1.0 / totalCorrectPairs; else sweight[i][j][k] = 0.0;//not crucial pairs } } //init potential matrix potential = new double[samples.size()][]; for(int i=0;i<samples.size();i++) potential[i] = new double[samples.get(i).size()]; if(nThreshold <= 0) { //create a table of candidate thresholds (for each feature) for weak rankers (they are just all possible feature values) int count = 0; for(int i=0;i<samples.size();i++) count += samples.get(i).size(); thresholds = new double[features.length][]; for(int i=0;i<features.length;i++) thresholds[i] = new double[count]; int c = 0; for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); for(int j=0;j<rl.size();j++) { for(int k=0;k<features.length;k++) thresholds[k][c] = rl.get(j).getFeatureValue(features[k]); c++; } } } else { double[] fmax = new double[features.length]; double[] fmin = new double[features.length]; for(int i=0;i<features.length;i++) { fmax[i] = -1E6; fmin[i] = 1E6; } for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); for(int j=0;j<rl.size();j++) { for(int k=0;k<features.length;k++) { double f = rl.get(j).getFeatureValue(features[k]); if (f > fmax[k]) fmax[k] = f; if (f < fmin[k]) fmin[k] = f; } } } thresholds = new double[features.length][]; for(int i=0;i<features.length;i++) { 
double step = (Math.abs(fmax[i] - fmin[i]))/nThreshold; thresholds[i] = new double[nThreshold+1]; thresholds[i][0] = fmax[i]; for(int j=1;j<nThreshold;j++) thresholds[i][j] = thresholds[i][j-1] - step; thresholds[i][nThreshold] = fmin[i] - 1.0E8; } } //sort this table with respect to each feature (each row of the matrix @thresholds) tSortedIdx = new int[features.length][]; for(int i=0;i<features.length;i++) tSortedIdx[i] = MergeSorter.sort(thresholds[i], false); //now create a sorted lists of every samples ranked list with respect to each feature //e.g. Feature f_i <==> all sample ranked list is now ranked with respect to f_i for(int i=0;i<features.length;i++) { List<int[]> idx = new ArrayList<int[]>(); for(int j=0;j<samples.size();j++) idx.add(reorder(samples.get(j), features[i])); sortedSamples.add(idx); } PRINTLN("[Done]"); } public void learn() { PRINTLN("------------------------------------------"); PRINTLN("Training starts..."); PRINTLN("--------------------------------------------------------------------"); PRINTLN(new int[]{7, 8, 9, 9, 9, 9}, new String[]{"#iter", "Sel. F.", "Threshold", "Error", scorer.name()+"-T", scorer.name()+"-V"}); PRINTLN("--------------------------------------------------------------------"); for(int t=1; t<=nIteration; t++) { updatePotential(); //learn the weak ranker RBWeakRanker wr = learnWeakRanker(); if(wr == null)//no more features to select break; //compute weak ranker weight double alpha_t = (double) (0.5 * SimpleMath.ln((Z_t+R_t)/(Z_t-R_t)));//@current_r is computed in learnWeakRanker() wRankers.add(wr); rWeight.add(alpha_t); //update sample pairs' weight distribution Z_t = 0.0;//normalization factor for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); double[][] D_t = new double[rl.size()][]; for(int j=0;j<rl.size()-1;j++) { D_t[j] = new double[rl.size()]; for(int k=j+1;k<rl.size();k++) { //we should rank x_j higher than x_k //so if our h_t does so, decrease the weight of this pair //otherwise, increase its weight D_t[j][k] = (double) (sweight[i][j][k] * Math.exp(alpha_t * (wr.score(rl.get(k)) - wr.score(rl.get(j))))); Z_t += D_t[j][k]; } } sweight[i] = D_t; } PRINT(new int[]{7, 8, 9, 9}, new String[]{t+"", wr.getFid()+"", SimpleMath.round(wr.getThreshold(), 4)+"", SimpleMath.round(R_t, 4)+""}); if(t % 1 == 0) { PRINT(new int[]{9}, new String[]{SimpleMath.round(scorer.score(rank(samples)), 4)+""}); if(validationSamples != null) { double score = scorer.score(rank(validationSamples)); if(score > bestScoreOnValidationData) { bestScoreOnValidationData = score; bestModelRankers.clear(); bestModelRankers.addAll(wRankers); bestModelWeights.clear(); bestModelWeights.addAll(rWeight); } PRINT(new int[]{9}, new String[]{SimpleMath.round(score, 4)+""}); } } PRINTLN(""); //System.out.println("Z_t = " + Z + "\tr = " + current_r + "\t" + Math.sqrt(1.0 - current_r*current_r)); //normalize sweight to make sure it is a valid distribution for(int i=0;i<samples.size();i++) { RankList rl = samples.get(i); for(int j=0;j<rl.size()-1;j++) for(int k=j+1;k<rl.size();k++) sweight[i][j][k] /= Z_t; } System.gc(); } //if validation data is specified ==> best model on this data has been saved //we now restore the current model to that best model if(validationSamples != null && bestModelRankers.size()>0) { wRankers.clear(); rWeight.clear(); wRankers.addAll(bestModelRankers); rWeight.addAll(bestModelWeights); } scoreOnTrainingData = SimpleMath.round(scorer.score(rank(samples)), 4); PRINTLN("--------------------------------------------------------------------"); 
PRINTLN("Finished sucessfully."); PRINTLN(scorer.name() + " on training data: " + scoreOnTrainingData); if(validationSamples != null) { bestScoreOnValidationData = scorer.score(rank(validationSamples)); PRINTLN(scorer.name() + " on validation data: " + SimpleMath.round(bestScoreOnValidationData, 4)); } PRINTLN("---------------------------------"); } public double eval(DataPoint p) { double score = 0.0; for(int j=0;j<wRankers.size();j++) score += rWeight.get(j) * wRankers.get(j).score(p); return score; } public Ranker clone() { return new RankBoost(); } public String toString() { String output = ""; for(int i=0;i<wRankers.size();i++) output += wRankers.get(i).toString() + ":" + rWeight.get(i) + ((i==rWeight.size()-1)?"":" "); return output; } public String model() { String output = "## " + name() + "\n"; output += "## Iteration = " + nIteration + "\n"; output += "## No. of threshold candidates = " + nThreshold + "\n"; output += toString(); return output; } public void load(String fn) { try { String content = ""; BufferedReader in = new BufferedReader( new InputStreamReader( new FileInputStream(fn), "ASCII")); while((content = in.readLine()) != null) { content = content.trim(); if(content.length() == 0) continue; if(content.indexOf("##")==0) continue; break; } in.close(); rWeight = new ArrayList<Double>(); wRankers = new ArrayList<RBWeakRanker>(); int idx = content.lastIndexOf("#"); if(idx != -1)//remove description at the end of the line (if any) content = content.substring(0, idx).trim();//remove the comment part at the end of the line String[] fs = content.split(" "); for(int i=0;i<fs.length;i++) { fs[i] = fs[i].trim(); if(fs[i].compareTo("")==0) continue; String[] strs = fs[i].split(":"); int fid = Integer.parseInt(strs[0]); double threshold = Double.parseDouble(strs[1]); double weight = Double.parseDouble(strs[2]); rWeight.add(weight); wRankers.add(new RBWeakRanker(fid, threshold)); } features = new int[rWeight.size()]; for(int i=0;i<rWeight.size();i++) features[i] = wRankers.get(i).getFid(); } catch(Exception ex) { System.out.println("Error in RankBoost::load(): " + ex.toString()); } } public void printParameters() { PRINTLN("No. of rounds: " + nIteration); PRINTLN("No. of threshold candidates: " + nThreshold); } public String name() { return "RankBoost"; } }
13,906
30.042411
140
java
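Two small formulas drive each RankBoost round in learn() above: the weak ranker's weight alpha_t = 0.5 * ln((Z_t + R_t) / (Z_t - R_t)), and the crucial-pair reweighting D_{t+1}(x_j, x_k) proportional to D_t(x_j, x_k) * exp(alpha_t * (h(x_k) - h(x_j))). The sketch below works through those two steps for a single pair; RankBoostUpdateSketch and its method names are hypothetical, and h is simplified to a 0/1 output as in the RBWeakRanker threshold rule.

// Hypothetical single-pair sketch of the RankBoost round shown in learn().
public class RankBoostUpdateSketch {
	// weak ranker weight: 0.5 * ln((Z + r) / (Z - r))
	public static double alpha(double z, double r) {
		return 0.5 * Math.log((z + r) / (z - r));
	}

	// weight of the pair "x_j should rank above x_k" after one round, before renormalisation
	public static double updatedPairWeight(double weight, double alphaT, int hXj, int hXk) {
		return weight * Math.exp(alphaT * (hXk - hXj)); // shrinks when h orders the pair correctly
	}

	public static void main(String[] args) {
		double alphaT = alpha(1.0, 0.4); // about 0.4236
		// h ranks x_j above x_k (h(x_j)=1, h(x_k)=0): the pair weight shrinks
		System.out.println(updatedPairWeight(0.1, alphaT, 1, 0)); // ~0.0655
		// h gets the pair wrong (h(x_j)=0, h(x_k)=1): the pair weight grows
		System.out.println(updatedPairWeight(0.1, alphaT, 0, 1)); // ~0.1527
	}
}

Dividing every updated pair weight by the normalisation factor Z_t (the sum of all updated weights) then restores a valid distribution, which is exactly the final loop of each iteration in the code above.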