target: stringlengths 20 to 113k
src_fm: stringlengths 11 to 86.3k
src_fm_fc: stringlengths 21 to 86.4k
src_fm_fc_co: stringlengths 30 to 86.4k
src_fm_fc_ms: stringlengths 42 to 86.8k
src_fm_fc_ms_ff: stringlengths 43 to 86.8k
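The six column names above, read together with the rows that follow (each successive column wraps the previous one in more of its class), suggest the usual test-to-code corpus layout: target holds a JUnit test method, src_fm the focal method it exercises, and the fc, co, ms and ff suffixes add the focal class declaration, its constructor signatures, its other method signatures, and its field declarations. A minimal Java sketch of a holder for one row, assuming that reading (CorpusRow is an illustrative name, not part of the dataset):

// One row of the corpus; field names mirror the column names above.
// The meaning in each comment is an assumption based on the naming convention.
public record CorpusRow(
    String target,           // test method (e.g. a JUnit @Test)
    String src_fm,           // focal method body
    String src_fm_fc,        // focal method inside its class declaration
    String src_fm_fc_co,     // ... plus constructor signatures
    String src_fm_fc_ms,     // ... plus other method signatures
    String src_fm_fc_ms_ff)  // ... plus field declarations
{ }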
@Test public void copyOfValuesTest() { IntValueMap<String> set = new IntValueMap<>(); for (int i = 0; i < 1000; i++) { set.put(String.valueOf(i), i + 1); } int[] values = set.copyOfValues(); Assert.assertEquals(1000, values.length); Arrays.sort(values); for (int i = 0; i < 1000; i++) { Assert.assertEquals(i + 1, values[i]); } set.remove("768"); set.remove("0"); set.remove("999"); values = set.copyOfValues(); Assert.assertEquals(997, values.length); Arrays.sort(values); Assert.assertTrue(Arrays.binarySearch(values, 769) < 0); Assert.assertTrue(Arrays.binarySearch(values, 1) < 0); Assert.assertTrue(Arrays.binarySearch(values, 1000) < 0); }
public int[] copyOfValues() { int[] result = new int[keyCount]; int k = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[k] = values[i]; k++; } } return result; }
IntValueMap extends HashBase<T> implements Iterable<T> { public int[] copyOfValues() { int[] result = new int[keyCount]; int k = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[k] = values[i]; k++; } } return result; } }
IntValueMap extends HashBase<T> implements Iterable<T> { public int[] copyOfValues() { int[] result = new int[keyCount]; int k = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[k] = values[i]; k++; } } return result; } IntValueMap(); IntValueMap(int size); }
IntValueMap extends HashBase<T> implements Iterable<T> { public int[] copyOfValues() { int[] result = new int[keyCount]; int k = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[k] = values[i]; k++; } } return result; } IntValueMap(); IntValueMap(int size); int addOrIncrement(T key); void addOrIncrementAll(Iterable<T> keys); int get(T key); int decrement(T key); int incrementByAmount(T key, int amount); void put(T key, int value); int[] copyOfValues(); List<Entry<T>> getAsEntryList(); Iterator<Entry<T>> entryIterator(); Iterable<Entry<T>> iterableEntries(); }
IntValueMap extends HashBase<T> implements Iterable<T> { public int[] copyOfValues() { int[] result = new int[keyCount]; int k = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[k] = values[i]; k++; } } return result; } IntValueMap(); IntValueMap(int size); int addOrIncrement(T key); void addOrIncrementAll(Iterable<T> keys); int get(T key); int decrement(T key); int incrementByAmount(T key, int amount); void put(T key, int value); int[] copyOfValues(); List<Entry<T>> getAsEntryList(); Iterator<Entry<T>> entryIterator(); Iterable<Entry<T>> iterableEntries(); }
@Test @Ignore("Not a unit test") public void perfStrings() { for (int i = 0; i < 5; i++) { Set<String> strings = uniqueStrings(1000000, 7); Stopwatch sw = Stopwatch.createStarted(); Set<String> newSet = new HashSet<>(strings); System.out.println("Java Set : " + sw.elapsed(TimeUnit.MILLISECONDS)); System.out.println("Size = " + newSet.size()); sw.reset().start(); IntValueMap<String> cs = new IntValueMap<>(strings.size() * 2); cs.addOrIncrementAll(strings); System.out.println("Count Add : " + sw.elapsed(TimeUnit.MILLISECONDS)); } }
public void addOrIncrementAll(Iterable<T> keys) { for (T t : keys) { incrementByAmount(t, 1); } }
IntValueMap extends HashBase<T> implements Iterable<T> { public void addOrIncrementAll(Iterable<T> keys) { for (T t : keys) { incrementByAmount(t, 1); } } }
IntValueMap extends HashBase<T> implements Iterable<T> { public void addOrIncrementAll(Iterable<T> keys) { for (T t : keys) { incrementByAmount(t, 1); } } IntValueMap(); IntValueMap(int size); }
IntValueMap extends HashBase<T> implements Iterable<T> { public void addOrIncrementAll(Iterable<T> keys) { for (T t : keys) { incrementByAmount(t, 1); } } IntValueMap(); IntValueMap(int size); int addOrIncrement(T key); void addOrIncrementAll(Iterable<T> keys); int get(T key); int decrement(T key); int incrementByAmount(T key, int amount); void put(T key, int value); int[] copyOfValues(); List<Entry<T>> getAsEntryList(); Iterator<Entry<T>> entryIterator(); Iterable<Entry<T>> iterableEntries(); }
IntValueMap extends HashBase<T> implements Iterable<T> { public void addOrIncrementAll(Iterable<T> keys) { for (T t : keys) { incrementByAmount(t, 1); } } IntValueMap(); IntValueMap(int size); int addOrIncrement(T key); void addOrIncrementAll(Iterable<T> keys); int get(T key); int decrement(T key); int incrementByAmount(T key, int amount); void put(T key, int value); int[] copyOfValues(); List<Entry<T>> getAsEntryList(); Iterator<Entry<T>> entryIterator(); Iterable<Entry<T>> iterableEntries(); }
@Test(expected = IllegalArgumentException.class) public void safeGet() { FixedBitVector vector = new FixedBitVector(10); vector.safeGet(10); }
public boolean safeGet(int n) { check(n); return (words[n >> 5] & setMasks[n & 31]) != 0; }
FixedBitVector { public boolean safeGet(int n) { check(n); return (words[n >> 5] & setMasks[n & 31]) != 0; } }
FixedBitVector { public boolean safeGet(int n) { check(n); return (words[n >> 5] & setMasks[n & 31]) != 0; } FixedBitVector(int length); }
FixedBitVector { public boolean safeGet(int n) { check(n); return (words[n >> 5] & setMasks[n & 31]) != 0; } FixedBitVector(int length); boolean get(int n); boolean safeGet(int n); void set(int n); void safeSet(int n); void clear(int n); void safeClear(int n); int numberOfOnes(); int numberOfNewOneBitCount(FixedBitVector other); int differentBitCount(FixedBitVector other); int numberOfZeroes(); int[] zeroIndexes(); }
FixedBitVector { public boolean safeGet(int n) { check(n); return (words[n >> 5] & setMasks[n & 31]) != 0; } FixedBitVector(int length); boolean get(int n); boolean safeGet(int n); void set(int n); void safeSet(int n); void clear(int n); void safeClear(int n); int numberOfOnes(); int numberOfNewOneBitCount(FixedBitVector other); int differentBitCount(FixedBitVector other); int numberOfZeroes(); int[] zeroIndexes(); final int length; }
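In the FixedBitVector rows above and below, bit n is addressed by indexing 32-bit word n >> 5 and applying mask n & 31 from the setMasks or resetMasks tables. Those tables are not included in these rows; a minimal sketch of how such mask tables are conventionally precomputed, offered as an assumption rather than the library's actual code:

// Assumed construction of the mask tables used by get/set/clear above.
int[] setMasks = new int[32];
int[] resetMasks = new int[32];
for (int i = 0; i < 32; i++) {
  setMasks[i] = 1 << i;        // only bit i is set; used to test or set bit i
  resetMasks[i] = ~(1 << i);   // every bit except i is set; used to clear bit i
}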
@Test(expected = IllegalArgumentException.class) public void safeSet() { FixedBitVector vector = new FixedBitVector(10); vector.safeSet(10); }
public void safeSet(int n) { check(n); words[n >> 5] |= setMasks[n & 31]; }
FixedBitVector { public void safeSet(int n) { check(n); words[n >> 5] |= setMasks[n & 31]; } }
FixedBitVector { public void safeSet(int n) { check(n); words[n >> 5] |= setMasks[n & 31]; } FixedBitVector(int length); }
FixedBitVector { public void safeSet(int n) { check(n); words[n >> 5] |= setMasks[n & 31]; } FixedBitVector(int length); boolean get(int n); boolean safeGet(int n); void set(int n); void safeSet(int n); void clear(int n); void safeClear(int n); int numberOfOnes(); int numberOfNewOneBitCount(FixedBitVector other); int differentBitCount(FixedBitVector other); int numberOfZeroes(); int[] zeroIndexes(); }
FixedBitVector { public void safeSet(int n) { check(n); words[n >> 5] |= setMasks[n & 31]; } FixedBitVector(int length); boolean get(int n); boolean safeGet(int n); void set(int n); void safeSet(int n); void clear(int n); void safeClear(int n); int numberOfOnes(); int numberOfNewOneBitCount(FixedBitVector other); int differentBitCount(FixedBitVector other); int numberOfZeroes(); int[] zeroIndexes(); final int length; }
@Test public void nounVoicingTest() { String[] voicing = {"kabak", "kabak [A:Voicing]", "psikolog", "havuç", "turp [A:Voicing]", "galip", "nohut", "cenk", "kükürt"}; for (String s : voicing) { DictionaryItem item = TurkishDictionaryLoader.loadFromString(s); Assert.assertEquals(Noun, item.primaryPos); Assert.assertTrue("error in:" + s, item.hasAttribute(RootAttribute.Voicing)); } String[] novoicing = {"kek", "link [A:NoVoicing]", "top", "kulp", "takat [A:NoVoicing]"}; for (String s : novoicing) { DictionaryItem item = TurkishDictionaryLoader.loadFromString(s); Assert.assertEquals(Noun, item.primaryPos); Assert.assertTrue("error in:" + s, item.hasAttribute(NoVoicing)); } }
public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
@Test(expected = IllegalArgumentException.class) public void safeClear() { FixedBitVector vector = new FixedBitVector(10); vector.safeClear(10); }
public void safeClear(int n) { check(n); words[n >> 5] &= resetMasks[n & 31]; }
FixedBitVector { public void safeClear(int n) { check(n); words[n >> 5] &= resetMasks[n & 31]; } }
FixedBitVector { public void safeClear(int n) { check(n); words[n >> 5] &= resetMasks[n & 31]; } FixedBitVector(int length); }
FixedBitVector { public void safeClear(int n) { check(n); words[n >> 5] &= resetMasks[n & 31]; } FixedBitVector(int length); boolean get(int n); boolean safeGet(int n); void set(int n); void safeSet(int n); void clear(int n); void safeClear(int n); int numberOfOnes(); int numberOfNewOneBitCount(FixedBitVector other); int differentBitCount(FixedBitVector other); int numberOfZeroes(); int[] zeroIndexes(); }
FixedBitVector { public void safeClear(int n) { check(n); words[n >> 5] &= resetMasks[n & 31]; } FixedBitVector(int length); boolean get(int n); boolean safeGet(int n); void set(int n); void safeSet(int n); void clear(int n); void safeClear(int n); int numberOfOnes(); int numberOfNewOneBitCount(FixedBitVector other); int differentBitCount(FixedBitVector other); int numberOfZeroes(); int[] zeroIndexes(); final int length; }
@Test public void testValues() { FloatValueMap<String> set = new FloatValueMap<>(); set.set("a", 7); set.set("b", 2); set.set("c", 3); set.set("d", 4); set.set("d", 5); Assert.assertEquals(4, set.size()); float[] values = set.values(); Arrays.sort(values); Assert.assertTrue(Arrays.equals(new float[]{2f, 3f, 5f, 7f}, values)); }
public float[] values() { float[] result = new float[size()]; int j = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[j++] = values[i]; } } return result; }
FloatValueMap extends HashBase<T> implements Iterable<T> { public float[] values() { float[] result = new float[size()]; int j = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[j++] = values[i]; } } return result; } }
FloatValueMap extends HashBase<T> implements Iterable<T> { public float[] values() { float[] result = new float[size()]; int j = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[j++] = values[i]; } } return result; } FloatValueMap(); FloatValueMap(int size); private FloatValueMap(FloatValueMap<T> other, T[] keys, float[] values); }
FloatValueMap extends HashBase<T> implements Iterable<T> { public float[] values() { float[] result = new float[size()]; int j = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[j++] = values[i]; } } return result; } FloatValueMap(); FloatValueMap(int size); private FloatValueMap(FloatValueMap<T> other, T[] keys, float[] values); float get(T key); float incrementByAmount(T key, float amount); void set(T key, float value); float[] values(); List<Entry<T>> getAsEntryList(); FloatValueMap<T> copy(); Iterator<Entry<T>> entryIterator(); Iterable<Entry<T>> iterableEntries(); }
FloatValueMap extends HashBase<T> implements Iterable<T> { public float[] values() { float[] result = new float[size()]; int j = 0; for (int i = 0; i < keys.length; i++) { if (hasValidKey(i)) { result[j++] = values[i]; } } return result; } FloatValueMap(); FloatValueMap(int size); private FloatValueMap(FloatValueMap<T> other, T[] keys, float[] values); float get(T key); float incrementByAmount(T key, float amount); void set(T key, float value); float[] values(); List<Entry<T>> getAsEntryList(); FloatValueMap<T> copy(); Iterator<Entry<T>> entryIterator(); Iterable<Entry<T>> iterableEntries(); }
@Test public void addTest() { Foo f1 = new Foo("abc", 1); Foo f2 = new Foo("abc", 2); LookupSet<Foo> fooSet = new LookupSet<>(); Assert.assertTrue(fooSet.add(f1)); Assert.assertFalse(fooSet.add(f2)); }
public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } LookupSet(); LookupSet(int size); }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
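In the add row above, locate(key) returns a non-negative slot when the key is already present and a negative value when it is absent; the negative value encodes the free slot as -(slot) - 1, the same convention java.util.Arrays.binarySearch uses for insertion points. A minimal sketch of decoding it (decodeSlot is an illustrative helper name, not part of LookupSet):

// loc >= 0: key found at index loc.
// loc < 0 : key absent; (-loc - 1) is the empty slot where it would be inserted.
static int decodeSlot(int loc) {
  return loc >= 0 ? loc : -loc - 1;
}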
@Test public void lookupTest() { Foo f1 = new Foo("abc", 1); Foo f2 = new Foo("abc", 2); LookupSet<Foo> fooSet = new LookupSet<>(); Assert.assertNull(fooSet.lookup(f1)); Assert.assertNull(fooSet.lookup(f2)); fooSet.add(f1); Assert.assertEquals(1, fooSet.lookup(f1).b); Assert.assertEquals(1, fooSet.lookup(f2).b); fooSet.add(f2); Assert.assertEquals(1, fooSet.lookup(f1).b); Assert.assertEquals(1, fooSet.lookup(f2).b); }
public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } LookupSet(); LookupSet(int size); }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
LookupSet extends HashBase<T> implements Iterable<T> { public boolean add(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return false; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return true; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
@Test public void getOrAddTest() { Foo f1 = new Foo("abc", 1); Foo f2 = new Foo("abc", 2); LookupSet<Foo> fooSet = new LookupSet<>(); Assert.assertEquals(1, fooSet.getOrAdd(f1).b); Assert.assertEquals(1, fooSet.getOrAdd(f2).b); }
public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } LookupSet(); LookupSet(int size); }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
@Test public void removeTest() { Foo f1 = new Foo("abc", 1); Foo f2 = new Foo("abc", 2); LookupSet<Foo> fooSet = new LookupSet<>(); Assert.assertEquals(1, fooSet.getOrAdd(f1).b); Assert.assertEquals(1, fooSet.getOrAdd(f2).b); Assert.assertEquals(1, fooSet.remove(f2).b); Assert.assertEquals(2, fooSet.getOrAdd(f2).b); }
public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } LookupSet(); LookupSet(int size); }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
LookupSet extends HashBase<T> implements Iterable<T> { public T getOrAdd(T key) { if (key == null) { throw new IllegalArgumentException("Key cannot be null."); } if (keyCount + removeCount == threshold) { expand(); } int loc = locate(key); if (loc >= 0) { return keys[loc]; } else { loc = -loc - 1; keys[loc] = key; keyCount++; return key; } } LookupSet(); LookupSet(int size); T set(T key); @SafeVarargs final void addAll(T... t); void addAll(Iterable<T> it); boolean add(T key); T getOrAdd(T key); }
@Test public void removeSpansWorksCorrectly2() { IntFloatMap im = createMap(); int limit = 9999; insertSpan(im, 0, limit); int[] r = TestUtils.createRandomUintArray(1000, limit); for (int i : r) { im.remove(i); } for (int i : r) { assertEqualsF(im.get(i), IntIntMap.NO_RESULT); } insertSpan(im, 0, limit); checkSpan(im, 0, limit); removeSpan(im, 0, limit); assertEqualsF(im.size(), 0); insertSpan(im, -limit, limit); checkSpan(im, -limit, limit); }
public float get(int key) { checkKey(key); int slot = firstProbe(key); while (true) { final long entry = entries[slot]; final int t = (int) (entry & 0xFFFF_FFFFL); if (t == key) { return Float.intBitsToFloat((int) (entry >>> 32)); } if (t == EMPTY) { return NO_RESULT; } slot = probe(slot); } }
IntFloatMap extends CompactIntMapBase { public float get(int key) { checkKey(key); int slot = firstProbe(key); while (true) { final long entry = entries[slot]; final int t = (int) (entry & 0xFFFF_FFFFL); if (t == key) { return Float.intBitsToFloat((int) (entry >>> 32)); } if (t == EMPTY) { return NO_RESULT; } slot = probe(slot); } } }
IntFloatMap extends CompactIntMapBase { public float get(int key) { checkKey(key); int slot = firstProbe(key); while (true) { final long entry = entries[slot]; final int t = (int) (entry & 0xFFFF_FFFFL); if (t == key) { return Float.intBitsToFloat((int) (entry >>> 32)); } if (t == EMPTY) { return NO_RESULT; } slot = probe(slot); } } IntFloatMap(); IntFloatMap(int capacity); }
IntFloatMap extends CompactIntMapBase { public float get(int key) { checkKey(key); int slot = firstProbe(key); while (true) { final long entry = entries[slot]; final int t = (int) (entry & 0xFFFF_FFFFL); if (t == key) { return Float.intBitsToFloat((int) (entry >>> 32)); } if (t == EMPTY) { return NO_RESULT; } slot = probe(slot); } } IntFloatMap(); IntFloatMap(int capacity); void put(int key, float value); void increment(int key, float value); float get(int key); float[] getValues(); }
IntFloatMap extends CompactIntMapBase { public float get(int key) { checkKey(key); int slot = firstProbe(key); while (true) { final long entry = entries[slot]; final int t = (int) (entry & 0xFFFF_FFFFL); if (t == key) { return Float.intBitsToFloat((int) (entry >>> 32)); } if (t == EMPTY) { return NO_RESULT; } slot = probe(slot); } } IntFloatMap(); IntFloatMap(int capacity); void put(int key, float value); void increment(int key, float value); float get(int key); float[] getValues(); }
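The get row above treats each entries[] element as a packed long: the key occupies the low 32 bits and the raw bits of the float value occupy the high 32 bits. A minimal sketch of matching pack and unpack helpers, assuming that layout (the helper names are illustrative, not part of IntFloatMap):

// Pack a 32-bit key and a float value into one long, mirroring how get() unpacks them.
static long pack(int key, float value) {
  return (key & 0xFFFF_FFFFL) | (((long) Float.floatToIntBits(value)) << 32);
}
static int unpackKey(long entry) {
  return (int) (entry & 0xFFFF_FFFFL);
}
static float unpackValue(long entry) {
  return Float.intBitsToFloat((int) (entry >>> 32));
}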
@Test public void removeTest2() { IntFloatMap map = createMap(); for (int i = 0; i < 10000; i++) { map.put(i, i + 1); } for (int i = 0; i < 10000; i += 3) { map.remove(i); } for (int i = 0; i < 10000; i += 3) { Assert.assertTrue(!map.containsKey(i)); } for (int i = 0; i < 10000; i++) { map.put(i, i + 1); } for (int i = 0; i < 10000; i += 3) { Assert.assertTrue(map.containsKey(i)); } }
public void put(int key, float value) { checkKey(key); expandIfNecessary(); int loc = locate(key); if (loc >= 0) { setValue(loc, value); } else { setKeyValue(-loc - 1, key, value); keyCount++; } }
IntFloatMap extends CompactIntMapBase { public void put(int key, float value) { checkKey(key); expandIfNecessary(); int loc = locate(key); if (loc >= 0) { setValue(loc, value); } else { setKeyValue(-loc - 1, key, value); keyCount++; } } }
IntFloatMap extends CompactIntMapBase { public void put(int key, float value) { checkKey(key); expandIfNecessary(); int loc = locate(key); if (loc >= 0) { setValue(loc, value); } else { setKeyValue(-loc - 1, key, value); keyCount++; } } IntFloatMap(); IntFloatMap(int capacity); }
IntFloatMap extends CompactIntMapBase { public void put(int key, float value) { checkKey(key); expandIfNecessary(); int loc = locate(key); if (loc >= 0) { setValue(loc, value); } else { setKeyValue(-loc - 1, key, value); keyCount++; } } IntFloatMap(); IntFloatMap(int capacity); void put(int key, float value); void increment(int key, float value); float get(int key); float[] getValues(); }
IntFloatMap extends CompactIntMapBase { public void put(int key, float value) { checkKey(key); expandIfNecessary(); int loc = locate(key); if (loc >= 0) { setValue(loc, value); } else { setKeyValue(-loc - 1, key, value); keyCount++; } } IntFloatMap(); IntFloatMap(int capacity); void put(int key, float value); void increment(int key, float value); float get(int key); float[] getValues(); }
@Test public void getAllTest() { List<Item> items = createitems("elma", "el", "arm", "armut", "a", "elmas"); additems(items); List<Item> all = lt.getAll(); Assert.assertEquals(6, all.size()); }
public List<T> getAll() { List<T> items = new ArrayList<>(size); List<Node<T>> toWalk = Lists.newArrayList(root); while (toWalk.size() > 0) { List<Node<T>> n = new ArrayList<>(); for (Node<T> tNode : toWalk) { if (tNode.hasItem()) { items.addAll(tNode.items); } if (tNode.children != null && tNode.children.size() > 0) { n.addAll(tNode.children.getValues()); } } toWalk = n; } return items; }
Trie { public List<T> getAll() { List<T> items = new ArrayList<>(size); List<Node<T>> toWalk = Lists.newArrayList(root); while (toWalk.size() > 0) { List<Node<T>> n = new ArrayList<>(); for (Node<T> tNode : toWalk) { if (tNode.hasItem()) { items.addAll(tNode.items); } if (tNode.children != null && tNode.children.size() > 0) { n.addAll(tNode.children.getValues()); } } toWalk = n; } return items; } }
Trie { public List<T> getAll() { List<T> items = new ArrayList<>(size); List<Node<T>> toWalk = Lists.newArrayList(root); while (toWalk.size() > 0) { List<Node<T>> n = new ArrayList<>(); for (Node<T> tNode : toWalk) { if (tNode.hasItem()) { items.addAll(tNode.items); } if (tNode.children != null && tNode.children.size() > 0) { n.addAll(tNode.children.getValues()); } } toWalk = n; } return items; } }
Trie { public List<T> getAll() { List<T> items = new ArrayList<>(size); List<Node<T>> toWalk = Lists.newArrayList(root); while (toWalk.size() > 0) { List<Node<T>> n = new ArrayList<>(); for (Node<T> tNode : toWalk) { if (tNode.hasItem()) { items.addAll(tNode.items); } if (tNode.children != null && tNode.children.size() > 0) { n.addAll(tNode.children.getValues()); } } toWalk = n; } return items; } void add(String s, T item); void remove(String s, T item); int size(); boolean containsItem(String s, T item); List<T> getItems(String s); List<T> getAll(); List<T> getPrefixMatchingItems(String input); String toString(); }
Trie { public List<T> getAll() { List<T> items = new ArrayList<>(size); List<Node<T>> toWalk = Lists.newArrayList(root); while (toWalk.size() > 0) { List<Node<T>> n = new ArrayList<>(); for (Node<T> tNode : toWalk) { if (tNode.hasItem()) { items.addAll(tNode.items); } if (tNode.children != null && tNode.children.size() > 0) { n.addAll(tNode.children.getValues()); } } toWalk = n; } return items; } void add(String s, T item); void remove(String s, T item); int size(); boolean containsItem(String s, T item); List<T> getItems(String s); List<T> getAll(); List<T> getPrefixMatchingItems(String input); String toString(); }
@Test public void removeStems() { List<Item> items = createitems("el", "elmas", "elma", "ela"); additems(items); checkitemsExist(items); checkitemsMatches("el", createitems("el")); checkitemsMatches("el", createitems()); lt.remove(items.get(1).surfaceForm, items.get(1)); checkitemsMatches("elmas", createitems()); checkitemsMatches("e", createitems()); checkitemsMatches("ela", createitems("ela")); checkitemsMatches("elastik", createitems("ela")); checkitemsMatches("elmas", createitems("el", "elma")); checkitemsMatches("elmaslar", createitems("el", "elma")); }
public void remove(String s, T item) { Node node = walkToNode(s); if (node != null && node.hasItem()) { node.items.remove(item); size--; } }
Trie { public void remove(String s, T item) { Node node = walkToNode(s); if (node != null && node.hasItem()) { node.items.remove(item); size--; } } }
Trie { public void remove(String s, T item) { Node node = walkToNode(s); if (node != null && node.hasItem()) { node.items.remove(item); size--; } } }
Trie { public void remove(String s, T item) { Node node = walkToNode(s); if (node != null && node.hasItem()) { node.items.remove(item); size--; } } void add(String s, T item); void remove(String s, T item); int size(); boolean containsItem(String s, T item); List<T> getItems(String s); List<T> getAll(); List<T> getPrefixMatchingItems(String input); String toString(); }
Trie { public void remove(String s, T item) { Node node = walkToNode(s); if (node != null && node.hasItem()) { node.items.remove(item); size--; } } void add(String s, T item); void remove(String s, T item); int size(); boolean containsItem(String s, T item); List<T> getItems(String s); List<T> getAll(); List<T> getPrefixMatchingItems(String input); String toString(); }
@Test public void referenceTest1() { String[] ref = {"ad", "ad [A:Doubling,InverseHarmony]", "soy", "soyadı [A:CompoundP3sg; Roots:soy-ad]"}; RootLexicon lexicon = TurkishDictionaryLoader.load(ref); DictionaryItem item = lexicon.getItemById("soyadı_Noun"); Assert.assertNotNull(item); Assert.assertFalse(item.attributes.contains(RootAttribute.Doubling)); }
public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
@Test public void formatNumbersTest() throws IOException { TurkishMorphology morphology = TurkishMorphology.builder() .disableCache() .setLexicon("bir [P:Num]", "dört [P:Num;A:Voicing]", "üç [P:Num]", "beş [P:Num]") .build(); TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology); String[] inputs = { "1'e", "4'ten", "123'ü", "12,5'ten", "1'E", "4'TEN", "123'Ü", "12,5'TEN", "%1", "%1'i", "%1,3'ü", }; for (String input : inputs) { Assert.assertTrue("Fail at " + input, spellChecker.check(input)); } }
public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
@Test public void isVowelTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String vowels = "aeiuüıoöâîû"; for (char c : vowels.toCharArray()) { Assert.assertTrue(alphabet.isVowel(c)); } String nonvowels = "bcçdfgğjklmnprştvxwzq."; for (char c : nonvowels.toCharArray()) { Assert.assertFalse(alphabet.isVowel(c)); } }
public boolean isVowel(char c) { return lookup(vowelLookup, c); }
TurkishAlphabet { public boolean isVowel(char c) { return lookup(vowelLookup, c); } }
TurkishAlphabet { public boolean isVowel(char c) { return lookup(vowelLookup, c); } private TurkishAlphabet(); }
TurkishAlphabet { public boolean isVowel(char c) { return lookup(vowelLookup, c); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean isVowel(char c) { return lookup(vowelLookup, c); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
@Test public void vowelCountTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] entries = {"a", "aa", "", "bb", "bebaba"}; int[] expCounts = {1, 2, 0, 0, 3}; int i = 0; for (String entry : entries) { Assert.assertEquals(expCounts[i++], alphabet.vowelCount(entry)); } }
public int vowelCount(String s) { int result = 0; for (int i = 0; i < s.length(); i++) { if (isVowel(s.charAt(i))) { result++; } } return result; }
TurkishAlphabet { public int vowelCount(String s) { int result = 0; for (int i = 0; i < s.length(); i++) { if (isVowel(s.charAt(i))) { result++; } } return result; } }
TurkishAlphabet { public int vowelCount(String s) { int result = 0; for (int i = 0; i < s.length(); i++) { if (isVowel(s.charAt(i))) { result++; } } return result; } private TurkishAlphabet(); }
TurkishAlphabet { public int vowelCount(String s) { int result = 0; for (int i = 0; i < s.length(); i++) { if (isVowel(s.charAt(i))) { result++; } } return result; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public int vowelCount(String s) { int result = 0; for (int i = 0; i < s.length(); i++) { if (isVowel(s.charAt(i))) { result++; } } return result; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
@Test public void voiceTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String iStr = "çÇgGkKpPtTaAbB"; String oStr = "cCğĞğĞbBdDaAbB"; for (int i = 0; i < iStr.length(); i++) { char in = iStr.charAt(i); char outExpected = oStr.charAt(i); Assert.assertEquals("", String.valueOf(outExpected), String.valueOf(alphabet.voice(in))); } }
public char voice(char c) { int res = voicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; }
TurkishAlphabet { public char voice(char c) { int res = voicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } }
TurkishAlphabet { public char voice(char c) { int res = voicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); }
TurkishAlphabet { public char voice(char c) { int res = voicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public char voice(char c) { int res = voicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
@Test public void devoiceTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String iStr = "bBcCdDgGğĞaAkK"; String oStr = "pPçÇtTkKkKaAkK"; for (int i = 0; i < iStr.length(); i++) { char in = iStr.charAt(i); char outExpected = oStr.charAt(i); Assert.assertEquals("", String.valueOf(outExpected), String.valueOf(alphabet.devoice(in))); } }
public char devoice(char c) { int res = devoicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; }
TurkishAlphabet { public char devoice(char c) { int res = devoicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } }
TurkishAlphabet { public char devoice(char c) { int res = devoicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); }
TurkishAlphabet { public char devoice(char c) { int res = devoicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public char devoice(char c) { int res = devoicingMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
@Test public void circumflexTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String iStr = "abcâîûÂÎÛ fg12"; String oStr = "abcaiuAİU fg12"; Assert.assertEquals(oStr, alphabet.normalizeCircumflex(iStr)); }
public char normalizeCircumflex(char c) { int res = circumflexMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; }
TurkishAlphabet { public char normalizeCircumflex(char c) { int res = circumflexMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } }
TurkishAlphabet { public char normalizeCircumflex(char c) { int res = circumflexMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); }
TurkishAlphabet { public char normalizeCircumflex(char c) { int res = circumflexMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public char normalizeCircumflex(char c) { int res = circumflexMap.get(c); return res == IntIntMap.NO_RESULT ? c : (char) res; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
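A small sketch for the char overload of normalizeCircumflex; the mapped pairs are taken from the circumflexTest expectations above, the unmapped case follows from the NO_RESULT fall-through, and the zemberek import path is an assumption.

import org.junit.Assert;
import org.junit.Test;
import zemberek.core.turkish.TurkishAlphabet; // assumed package path

public class CircumflexCharSketchTest {

  @Test
  public void charOverloadFallsBackToInput() {
    TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE;
    // Mappings implied by circumflexTest: â -> a, û -> u.
    Assert.assertEquals('a', alphabet.normalizeCircumflex('â'));
    Assert.assertEquals('u', alphabet.normalizeCircumflex('û'));
    // Unmapped characters come back unchanged (NO_RESULT branch).
    Assert.assertEquals('x', alphabet.normalizeCircumflex('x'));
  }
}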
@Test public void toAsciiTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String iStr = "abcçğıiİIoöüşâîûÂÎÛz"; String oStr = "abccgiiIIoousaiuAIUz"; Assert.assertEquals(oStr, alphabet.toAscii(iStr)); }
public String toAscii(String in) { StringBuilder sb = new StringBuilder(in.length()); for (int i = 0; i < in.length(); i++) { char c = in.charAt(i); int res = turkishToAsciiMap.get(c); char map = res == IntIntMap.NO_RESULT ? c : (char) res; sb.append(map); } return sb.toString(); }
TurkishAlphabet { public String toAscii(String in) { StringBuilder sb = new StringBuilder(in.length()); for (int i = 0; i < in.length(); i++) { char c = in.charAt(i); int res = turkishToAsciiMap.get(c); char map = res == IntIntMap.NO_RESULT ? c : (char) res; sb.append(map); } return sb.toString(); } }
TurkishAlphabet { public String toAscii(String in) { StringBuilder sb = new StringBuilder(in.length()); for (int i = 0; i < in.length(); i++) { char c = in.charAt(i); int res = turkishToAsciiMap.get(c); char map = res == IntIntMap.NO_RESULT ? c : (char) res; sb.append(map); } return sb.toString(); } private TurkishAlphabet(); }
TurkishAlphabet { public String toAscii(String in) { StringBuilder sb = new StringBuilder(in.length()); for (int i = 0; i < in.length(); i++) { char c = in.charAt(i); int res = turkishToAsciiMap.get(c); char map = res == IntIntMap.NO_RESULT ? c : (char) res; sb.append(map); } return sb.toString(); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public String toAscii(String in) { StringBuilder sb = new StringBuilder(in.length()); for (int i = 0; i < in.length(); i++) { char c = in.charAt(i); int res = turkishToAsciiMap.get(c); char map = res == IntIntMap.NO_RESULT ? c : (char) res; sb.append(map); } return sb.toString(); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
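One typical way to use toAscii is as a diacritics-insensitive lookup key; a hedged sketch under that assumption (the index contents and the zemberek package path are illustrative, not from the source).

import java.util.HashMap;
import java.util.Map;
import zemberek.core.turkish.TurkishAlphabet; // assumed package path

public class AsciiKeySketch {

  public static void main(String[] args) {
    TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE;
    // Index Turkish words under their ASCII-folded form.
    Map<String, String> index = new HashMap<>();
    for (String word : new String[]{"şıracı", "ağaç"}) {
      index.put(alphabet.toAscii(word), word);
    }
    // A query typed without diacritics still resolves to the original form.
    System.out.println(index.get(alphabet.toAscii("siraci"))); // expected: şıracı
  }
}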
@Test public void equalsIgnoreDiacritics() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] a = {"siraci", "ağac", "ağaç"}; String[] b = {"şıracı", "ağaç", "agac"}; for (int i = 0; i < a.length; i++) { Assert.assertTrue(alphabet.equalsIgnoreDiacritics(a[i], b[i])); } }
public boolean equalsIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() != s2.length()) { return false; } for (int i = 0; i < s1.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; }
TurkishAlphabet { public boolean equalsIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() != s2.length()) { return false; } for (int i = 0; i < s1.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } }
TurkishAlphabet { public boolean equalsIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() != s2.length()) { return false; } for (int i = 0; i < s1.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } private TurkishAlphabet(); }
TurkishAlphabet { public boolean equalsIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() != s2.length()) { return false; } for (int i = 0; i < s1.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean equalsIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() != s2.length()) { return false; } for (int i = 0; i < s1.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
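The method body above makes the null and length-mismatch branches explicit; a short sketch exercising them (zemberek import path assumed).

import org.junit.Assert;
import org.junit.Test;
import zemberek.core.turkish.TurkishAlphabet; // assumed package path

public class DiacriticsEqualitySketchTest {

  @Test
  public void nullsAndLengthMismatchReturnFalse() {
    TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE;
    // Different lengths are rejected before any character comparison.
    Assert.assertFalse(alphabet.equalsIgnoreDiacritics("ağaç", "ağaçlar"));
    // Either argument being null also returns false rather than throwing.
    Assert.assertFalse(alphabet.equalsIgnoreDiacritics(null, "ağaç"));
    Assert.assertFalse(alphabet.equalsIgnoreDiacritics("ağaç", null));
  }
}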
@Test public void vowelHarmonyA() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] a = {"elma", "kedi", "turp"}; String[] b = {"lar", "cik", "un"}; for (int i = 0; i < a.length; i++) { Assert.assertTrue(alphabet.checkVowelHarmonyA(a[i], b[i])); } }
public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
@Test public void vowelHarmonyA2() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] a = {"elma", "kedi", "turp"}; String[] b = {"ler", "cık", "in"}; for (int i = 0; i < a.length; i++) { Assert.assertFalse(alphabet.checkVowelHarmonyA(a[i], b[i])); } }
public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean checkVowelHarmonyA(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyA(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
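A sketch of how checkVowelHarmonyA can drive suffix selection, consistent with the positive and negative vowelHarmonyA test pairs above; the plural() helper and the zemberek import path are illustrative assumptions.

import zemberek.core.turkish.TurkishAlphabet; // assumed package path

public class PluralSuffixSketch {

  // Picks between the A-type plural allomorphs "lar" and "ler".
  static String plural(String stem) {
    TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE;
    return stem + (alphabet.checkVowelHarmonyA(stem, "lar") ? "lar" : "ler");
  }

  public static void main(String[] args) {
    System.out.println(plural("elma")); // elmalar ("elma" + "lar" is harmonic per vowelHarmonyA)
    System.out.println(plural("kedi")); // kediler ("kedi" harmonizes with "cik", so "lar" is rejected)
  }
}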
@Test public void vowelHarmonyI1() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] a = {"elma", "kedi", "turp"}; String[] b = {"yı", "yi", "u"}; for (int i = 0; i < a.length; i++) { Assert.assertTrue(alphabet.checkVowelHarmonyI(a[i], b[i])); } }
public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
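The CharSequence overload above simply delegates to getLastVowel plus the TurkicLetter overload; a sketch taking that letter-level route directly (zemberek import paths assumed, expected result taken from vowelHarmonyI1).

import zemberek.core.turkish.TurkicLetter;    // assumed package path
import zemberek.core.turkish.TurkishAlphabet; // assumed package path

public class HarmonyLetterSketch {

  public static void main(String[] args) {
    TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE;
    // Same inputs as the "elma" / "yı" pair in vowelHarmonyI1, resolved to letters first.
    TurkicLetter stemVowel = alphabet.getLastVowel("elma");
    TurkicLetter suffixVowel = alphabet.getLastVowel("yı");
    System.out.println(alphabet.checkVowelHarmonyI(stemVowel, suffixVowel)); // expected: true
  }
}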
@Test public void referenceTest2() { String[] ref = {"ad", "ad [A:Doubling,InverseHarmony;Index:1]", "soy", "soyadı [A:CompoundP3sg; Roots:soy-ad]"}; RootLexicon lexicon = TurkishDictionaryLoader.load(ref); DictionaryItem item = lexicon.getItemById("soyadı_Noun"); Assert.assertNotNull(item); Assert.assertFalse(item.attributes.contains(RootAttribute.Doubling)); }
public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
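A hedged sketch of building a lexicon from in-memory lines with load(String...) and looking an item up by id, mirroring referenceTest2; the package paths and the "elma_Noun" id format are assumptions (the test above only shows "soyadı_Noun").

import zemberek.morphology.lexicon.DictionaryItem;             // assumed package paths
import zemberek.morphology.lexicon.RootLexicon;
import zemberek.morphology.lexicon.tr.TurkishDictionaryLoader;

public class LexiconSketch {

  public static void main(String[] args) {
    // Lines without POS metadata appear to default to Noun entries, as "soyadı_Noun" suggests.
    RootLexicon lexicon = TurkishDictionaryLoader.load("elma", "armut");
    DictionaryItem item = lexicon.getItemById("elma_Noun"); // id format assumed from "soyadı_Noun"
    System.out.println(item != null);
  }
}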
@Test public void vowelHarmonyI2() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] a = {"elma", "kedi", "turp"}; String[] b = {"yu", "yü", "ı"}; for (int i = 0; i < a.length; i++) { Assert.assertFalse(alphabet.checkVowelHarmonyI(a[i], b[i])); } }
public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean checkVowelHarmonyI(CharSequence source, CharSequence target) { TurkicLetter sourceLastVowel = getLastVowel(source); TurkicLetter targetFirstVowel = getLastVowel(target); return checkVowelHarmonyI(sourceLastVowel, targetFirstVowel); } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
@Test public void startsWithDiacriticsIgnoredTest() { TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE; String[] a = {"siraci", "çağlayan"}; String[] b = {"şıracı", "cag"}; for (int i = 0; i < a.length; i++) { Assert.assertTrue(alphabet.startsWithIgnoreDiacritics(a[i], b[i])); } }
public boolean startsWithIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() < s2.length()) { return false; } for (int i = 0; i < s2.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; }
TurkishAlphabet { public boolean startsWithIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() < s2.length()) { return false; } for (int i = 0; i < s2.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } }
TurkishAlphabet { public boolean startsWithIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() < s2.length()) { return false; } for (int i = 0; i < s2.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } private TurkishAlphabet(); }
TurkishAlphabet { public boolean startsWithIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() < s2.length()) { return false; } for (int i = 0; i < s2.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); }
TurkishAlphabet { public boolean startsWithIgnoreDiacritics(String s1, String s2) { if (s1 == null || s2 == null) { return false; } if (s1.length() < s2.length()) { return false; } for (int i = 0; i < s2.length(); i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (!isAsciiEqual(c1, c2)) { return false; } } return true; } private TurkishAlphabet(); String toAscii(String in); String foreignDiacriticsToTurkish(String in); boolean containsAsciiRelated(String s); IntIntMap getTurkishToAsciiMap(); char getAsciiEqual(char c); boolean isAsciiEqual(char c1, char c2); boolean allCapital(String input); String normalize(String input); char normalizeCircumflex(char c); boolean containsCircumflex(String s); boolean isTurkishSpecific(char c); boolean containsApostrophe(String s); boolean containsForeignDiacritics(String s); String getAllLetters(); String getLowercaseLetters(); String getUppercaseLetters(); String normalizeCircumflex(String s); String normalizeApostrophe(String s); char voice(char c); char devoice(char c); TurkicLetter getLetter(char c); TurkicLetter getLastLetter(CharSequence s); char lastChar(CharSequence s); TurkicLetter getFirstLetter(CharSequence s); boolean isVowel(char c); boolean isDictionaryLetter(char c); boolean isStopConsonant(char c); boolean isVoicelessConsonant(char c); TurkicLetter getLastVowel(CharSequence s); TurkicLetter getFirstVowel(CharSequence s); boolean checkVowelHarmonyA(CharSequence source, CharSequence target); boolean checkVowelHarmonyI(CharSequence source, CharSequence target); boolean checkVowelHarmonyA(TurkicLetter source, TurkicLetter target); boolean checkVowelHarmonyI(TurkicLetter source, TurkicLetter target); boolean containsVowel(CharSequence s); int vowelCount(String s); boolean containsDigit(String s); boolean equalsIgnoreDiacritics(String s1, String s2); boolean startsWithIgnoreDiacritics(String s1, String s2); static final Locale TR; static TurkishAlphabet INSTANCE; }
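startsWithIgnoreDiacritics compares only the first s2.length() characters, so it works naturally as a prefix filter; a sketch under that reading, reusing the "çağlayan" / "cag" pair from the test above (zemberek import path assumed).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import zemberek.core.turkish.TurkishAlphabet; // assumed package path

public class PrefixFilterSketch {

  // Keeps the words whose ASCII-folded prefix matches the query.
  static List<String> filterByAsciiPrefix(List<String> words, String prefix) {
    TurkishAlphabet alphabet = TurkishAlphabet.INSTANCE;
    List<String> out = new ArrayList<>();
    for (String word : words) {
      if (alphabet.startsWithIgnoreDiacritics(word, prefix)) {
        out.add(word);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(filterByAsciiPrefix(Arrays.asList("çağlayan", "elma"), "cag")); // [çağlayan]
  }
}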
@Test public void specialWordsTest() throws IOException { LmVocabulary vocabulary = new LmVocabulary("<S>", "Hello", "</S>"); Assert.assertTrue(vocabulary.containsAll("<S>", "Hello", "</S>", "<unk>")); }
public boolean containsAll(int... indexes) { for (int index : indexes) { if (!contains(index)) { return false; } } return true; }
LmVocabulary { public boolean containsAll(int... indexes) { for (int index : indexes) { if (!contains(index)) { return false; } } return true; } }
LmVocabulary { public boolean containsAll(int... indexes) { for (int index : indexes) { if (!contains(index)) { return false; } } return true; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public boolean containsAll(int... indexes) { for (int index : indexes) { if (!contains(index)) { return false; } } return true; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public boolean containsAll(int... indexes) { for (int index : indexes) { if (!contains(index)) { return false; } } return true; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
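A small index round trip on the same three-word vocabulary used by specialWordsTest, touching only methods listed in the outline above (indexOf, getWord, contains, containsAll); the zemberek import path is an assumption.

import java.io.IOException;
import zemberek.lm.LmVocabulary; // assumed package path

public class VocabularyLookupSketch {

  public static void main(String[] args) throws IOException {
    LmVocabulary vocabulary = new LmVocabulary("<S>", "Hello", "</S>");
    int id = vocabulary.indexOf("Hello");
    System.out.println(vocabulary.getWord(id));                 // Hello
    System.out.println(vocabulary.contains("Hello"));           // true
    System.out.println(vocabulary.containsAll("<S>", "</S>"));  // true
  }
}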
@Test public void binaryFileGenerationTest() throws IOException { File tmp = getBinaryVocFile(); LmVocabulary vocabulary = LmVocabulary.loadFromBinary(tmp); simpleCheck(vocabulary); }
public static LmVocabulary loadFromBinary(File binaryVocabularyFile) throws IOException { try (DataInputStream dis = new DataInputStream( new BufferedInputStream(new FileInputStream(binaryVocabularyFile)))) { return new LmVocabulary(dis); } }
LmVocabulary { public static LmVocabulary loadFromBinary(File binaryVocabularyFile) throws IOException { try (DataInputStream dis = new DataInputStream( new BufferedInputStream(new FileInputStream(binaryVocabularyFile)))) { return new LmVocabulary(dis); } } }
LmVocabulary { public static LmVocabulary loadFromBinary(File binaryVocabularyFile) throws IOException { try (DataInputStream dis = new DataInputStream( new BufferedInputStream(new FileInputStream(binaryVocabularyFile)))) { return new LmVocabulary(dis); } } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public static LmVocabulary loadFromBinary(File binaryVocabularyFile) throws IOException { try (DataInputStream dis = new DataInputStream( new BufferedInputStream(new FileInputStream(binaryVocabularyFile)))) { return new LmVocabulary(dis); } } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public static LmVocabulary loadFromBinary(File binaryVocabularyFile) throws IOException { try (DataInputStream dis = new DataInputStream( new BufferedInputStream(new FileInputStream(binaryVocabularyFile)))) { return new LmVocabulary(dis); } } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
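A save-and-reload sketch pairing saveBinary(File) from the outline with the loadFromBinary(File) loader shown above; the expectation that the round trip preserves size() is an assumption, and the zemberek import path is assumed.

import java.io.File;
import java.io.IOException;
import zemberek.lm.LmVocabulary; // assumed package path

public class BinaryRoundTripSketch {

  public static void main(String[] args) throws IOException {
    LmVocabulary original = new LmVocabulary("<S>", "Hello", "</S>");
    File tmp = File.createTempFile("vocab", ".bin");
    tmp.deleteOnExit();
    // Write the vocabulary, then read it back through the loader under test above.
    original.saveBinary(tmp);
    LmVocabulary loaded = LmVocabulary.loadFromBinary(tmp);
    System.out.println(loaded.size() == original.size()); // expected: true
  }
}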
@Test public void utf8FileGenerationTest() throws IOException { File tmp = getUtf8VocFile(); LmVocabulary vocabulary = LmVocabulary.loadFromUtf8File(tmp); simpleCheck(vocabulary); }
public static LmVocabulary loadFromUtf8File(File utfVocabularyFile) throws IOException { return new LmVocabulary(SimpleTextReader.trimmingUTF8Reader(utfVocabularyFile).asStringList()); }
LmVocabulary { public static LmVocabulary loadFromUtf8File(File utfVocabularyFile) throws IOException { return new LmVocabulary(SimpleTextReader.trimmingUTF8Reader(utfVocabularyFile).asStringList()); } }
LmVocabulary { public static LmVocabulary loadFromUtf8File(File utfVocabularyFile) throws IOException { return new LmVocabulary(SimpleTextReader.trimmingUTF8Reader(utfVocabularyFile).asStringList()); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public static LmVocabulary loadFromUtf8File(File utfVocabularyFile) throws IOException { return new LmVocabulary(SimpleTextReader.trimmingUTF8Reader(utfVocabularyFile).asStringList()); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public static LmVocabulary loadFromUtf8File(File utfVocabularyFile) throws IOException { return new LmVocabulary(SimpleTextReader.trimmingUTF8Reader(utfVocabularyFile).asStringList()); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
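loadFromUtf8File reads the file as trimmed lines and feeds them to the List constructor (see the body above), so a one-word-per-line UTF-8 file is enough; a sketch under that reading, with the zemberek import path assumed.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import zemberek.lm.LmVocabulary; // assumed package path

public class Utf8VocabularySketch {

  public static void main(String[] args) throws IOException {
    File tmp = File.createTempFile("vocab", ".txt");
    tmp.deleteOnExit();
    // One vocabulary entry per line, UTF-8 encoded.
    Files.write(tmp.toPath(), "merhaba\ndünya\n".getBytes(StandardCharsets.UTF_8));
    LmVocabulary vocabulary = LmVocabulary.loadFromUtf8File(tmp);
    System.out.println(vocabulary.contains("merhaba")); // expected: true
  }
}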
@Test public void streamGenerationTest() throws IOException { File tmp = getBinaryVocFile(); try (DataInputStream dis = new DataInputStream(new FileInputStream(tmp))) { LmVocabulary vocabulary = LmVocabulary.loadFromDataInputStream(dis); simpleCheck(vocabulary); } }
public static LmVocabulary loadFromDataInputStream(DataInputStream dis) throws IOException { return new LmVocabulary(dis); }
LmVocabulary { public static LmVocabulary loadFromDataInputStream(DataInputStream dis) throws IOException { return new LmVocabulary(dis); } }
LmVocabulary { public static LmVocabulary loadFromDataInputStream(DataInputStream dis) throws IOException { return new LmVocabulary(dis); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public static LmVocabulary loadFromDataInputStream(DataInputStream dis) throws IOException { return new LmVocabulary(dis); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public static LmVocabulary loadFromDataInputStream(DataInputStream dis) throws IOException { return new LmVocabulary(dis); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
@Test public void randomAccessGenerationTest() throws IOException { File tmp = getBinaryVocFile(); try (RandomAccessFile raf = new RandomAccessFile(tmp, "r")) { LmVocabulary vocabulary = LmVocabulary.loadFromRandomAccessFile(raf); simpleCheck(vocabulary); } }
public static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf) throws IOException { return new LmVocabulary(raf); }
LmVocabulary { public static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf) throws IOException { return new LmVocabulary(raf); } }
LmVocabulary { public static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf) throws IOException { return new LmVocabulary(raf); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf) throws IOException { return new LmVocabulary(raf); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf) throws IOException { return new LmVocabulary(raf); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
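A round-trip sketch built from the save/load pair in the listing above (saveBinary plus loadFromBinary). The zemberek.lm package location, the sketch class name and the temporary file name are assumptions; the API calls themselves appear in the method list.

import java.io.File;
import java.io.IOException;
// Package location assumed from zemberek-nlp's lm module.
import zemberek.lm.LmVocabulary;

public class VocabularyBinaryRoundTrip {
  public static void main(String[] args) throws IOException {
    LmVocabulary vocabulary = new LmVocabulary("elma", "armut", "kiraz");
    File tmp = File.createTempFile("vocab", ".bin");
    tmp.deleteOnExit();
    vocabulary.saveBinary(tmp);
    LmVocabulary loaded = LmVocabulary.loadFromBinary(tmp);
    // The reloaded vocabulary should resolve the same words to the same indexes.
    System.out.println(loaded.size() == vocabulary.size());                      // expected: true
    System.out.println(loaded.indexOf("armut") == vocabulary.indexOf("armut"));  // expected: true
  }
}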
@Test public void contains() throws IOException { LmVocabulary vocabulary = new LmVocabulary("Hello", "World"); int helloIndex = vocabulary.indexOf("Hello"); int worldIndex = vocabulary.indexOf("World"); Assert.assertTrue(vocabulary.contains(helloIndex)); Assert.assertTrue(vocabulary.contains(worldIndex)); int unkIndex = vocabulary.indexOf("Foo"); Assert.assertEquals(vocabulary.getUnknownWordIndex(), unkIndex); Assert.assertTrue(vocabulary.containsAll(helloIndex, worldIndex)); Assert.assertFalse(vocabulary.containsAll(-1, 2)); Assert.assertTrue(vocabulary.contains("Hello")); Assert.assertTrue(vocabulary.contains("World")); Assert.assertFalse(vocabulary.contains("Foo")); Assert.assertFalse(vocabulary.containsAll("Hello", "Foo")); Assert.assertTrue(vocabulary.containsAll("Hello", "World")); }
public boolean contains(int index) { return index >= 0 && index < vocabulary.size(); }
LmVocabulary { public boolean contains(int index) { return index >= 0 && index < vocabulary.size(); } }
LmVocabulary { public boolean contains(int index) { return index >= 0 && index < vocabulary.size(); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public boolean contains(int index) { return index >= 0 && index < vocabulary.size(); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public boolean contains(int index) { return index >= 0 && index < vocabulary.size(); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
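A minimal usage sketch of the bounds-based contains(int) check above, assuming LmVocabulary lives in the zemberek.lm package; the word list and the sketch class name are illustrative only.

import zemberek.lm.LmVocabulary;  // package assumed

public class VocabularyContainsSketch {
  public static void main(String[] args) {
    LmVocabulary vocabulary = new LmVocabulary("Hello", "World");
    // contains(int) is a plain range check over all entries, including the special markers.
    System.out.println(vocabulary.contains(0));                  // true
    System.out.println(vocabulary.contains(-1));                 // false
    System.out.println(vocabulary.contains(vocabulary.size()));  // false, one past the end
    // Unknown words do not map to -1; indexOf returns the unknown-word index instead.
    System.out.println(vocabulary.indexOf("Foo") == vocabulary.getUnknownWordIndex()); // true
  }
}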
@Test public void encodedTrigramTest() throws IOException { LmVocabulary vocabulary = new LmVocabulary("a", "b", "c", "d", "e"); long k = ((1L << 21 | 2L) << 21) | 3L; Assert.assertEquals(k, vocabulary.encodeTrigram(3, 2, 1)); Assert.assertEquals(k, vocabulary.encodeTrigram(new int[]{3, 2, 1})); }
public long encodeTrigram(int g0, int g1, int g2) { long encoded = g2; encoded = (encoded << 21) | g1; return (encoded << 21) | g0; }
LmVocabulary { public long encodeTrigram(int g0, int g1, int g2) { long encoded = g2; encoded = (encoded << 21) | g1; return (encoded << 21) | g0; } }
LmVocabulary { public long encodeTrigram(int g0, int g1, int g2) { long encoded = g2; encoded = (encoded << 21) | g1; return (encoded << 21) | g0; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public long encodeTrigram(int g0, int g1, int g2) { long encoded = g2; encoded = (encoded << 21) | g1; return (encoded << 21) | g0; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public long encodeTrigram(int g0, int g1, int g2) { long encoded = g2; encoded = (encoded << 21) | g1; return (encoded << 21) | g0; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
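The trigram encoding above packs three word indexes into one long, 21 bits per index. The sketch below reproduces that layout and adds a decode helper that is not part of LmVocabulary; it is included only to make the bit arithmetic explicit.

import java.util.Arrays;

public class TrigramPackingSketch {
  // Each index must fit in 21 bits, i.e. be smaller than 2^21 = 2_097_152.
  static final long MASK_21 = (1L << 21) - 1;

  // Same layout as encodeTrigram(g0, g1, g2): g0 ends up in the lowest 21 bits, g2 in the highest.
  static long encode(int g0, int g1, int g2) {
    long encoded = g2;
    encoded = (encoded << 21) | g1;
    return (encoded << 21) | g0;
  }

  static int[] decode(long encoded) {
    int g0 = (int) (encoded & MASK_21);
    int g1 = (int) ((encoded >>> 21) & MASK_21);
    int g2 = (int) ((encoded >>> 42) & MASK_21);
    return new int[]{g0, g1, g2};
  }

  public static void main(String[] args) {
    long k = encode(3, 2, 1);
    // Matches the expected value used in the test: ((1L << 21 | 2L) << 21) | 3L
    System.out.println(k == ((((1L << 21) | 2L) << 21) | 3L)); // true
    System.out.println(Arrays.toString(decode(k)));            // [3, 2, 1]
  }
}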
@Test public void toWordsTest() throws IOException { LmVocabulary vocabulary = new LmVocabulary("a", "b", "c", "d", "e"); int[] indexes = vocabulary.toIndexes("a", "e", "b"); Assert.assertEquals("a e b", Joiner.on(" ").join(vocabulary.toWords(indexes))); indexes = vocabulary.toIndexes("a", "e", "foo"); Assert.assertEquals("a e <unk>", Joiner.on(" ").join(vocabulary.toWords(indexes))); }
public String[] toWords(int... indexes) { String[] words = new String[indexes.length]; int k = 0; for (int index : indexes) { if (contains(index)) { words[k++] = vocabulary.get(index); } else { Log.warn("Out of bounds word index is used:" + index); words[k++] = unknownWord; } } return words; }
LmVocabulary { public String[] toWords(int... indexes) { String[] words = new String[indexes.length]; int k = 0; for (int index : indexes) { if (contains(index)) { words[k++] = vocabulary.get(index); } else { Log.warn("Out of bounds word index is used:" + index); words[k++] = unknownWord; } } return words; } }
LmVocabulary { public String[] toWords(int... indexes) { String[] words = new String[indexes.length]; int k = 0; for (int index : indexes) { if (contains(index)) { words[k++] = vocabulary.get(index); } else { Log.warn("Out of bounds word index is used:" + index); words[k++] = unknownWord; } } return words; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public String[] toWords(int... indexes) { String[] words = new String[indexes.length]; int k = 0; for (int index : indexes) { if (contains(index)) { words[k++] = vocabulary.get(index); } else { Log.warn("Out of bounds word index is used:" + index); words[k++] = unknownWord; } } return words; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public String[] toWords(int... indexes) { String[] words = new String[indexes.length]; int k = 0; for (int index : indexes) { if (contains(index)) { words[k++] = vocabulary.get(index); } else { Log.warn("Out of bounds word index is used:" + index); words[k++] = unknownWord; } } return words; } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
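A short sketch of the index/word round trip that toWords performs, again assuming the zemberek.lm package; out-of-vocabulary words come back as the unknown-word marker instead of causing a failure.

import java.util.Arrays;
import zemberek.lm.LmVocabulary;  // package assumed

public class RoundTripSketch {
  public static void main(String[] args) {
    LmVocabulary vocabulary = new LmVocabulary("a", "b", "c", "d", "e");
    // "foo" is not in the vocabulary, so it maps to the unknown-word index.
    int[] indexes = vocabulary.toIndexes("a", "e", "foo");
    String[] words = vocabulary.toWords(indexes);
    System.out.println(Arrays.toString(words)); // [a, e, <unk>]
  }
}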
@Test public void pronunciation1() { String[] ref = { "VST [P:Noun, Abbrv; Pr:viesti]", "VST [P:Noun, Abbrv; Pr:vesete; Ref:VST_Noun_Abbrv; Index:2]"}; RootLexicon lexicon = TurkishDictionaryLoader.load(ref); DictionaryItem item = lexicon.getItemById("VST_Noun_Abbrv"); Assert.assertNotNull(item); DictionaryItem item2 = lexicon.getItemById("VST_Noun_Abbrv_2"); Assert.assertNotNull(item2); Assert.assertEquals(item, item2.getReferenceItem()); }
public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
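An illustrative sketch of loading dictionary lines as in the test above. The import locations are assumptions about zemberek-nlp's package layout and may differ by version; the item-id convention (Lemma_PrimaryPos_SecondaryPos, with an index suffix for duplicates) is taken from the test itself.

// Package locations below are assumptions; adjust them to the zemberek version in use.
import zemberek.morphology.lexicon.DictionaryItem;
import zemberek.morphology.lexicon.RootLexicon;
import zemberek.morphology.lexicon.tr.TurkishDictionaryLoader;

public class DictionaryLoaderSketch {
  public static void main(String[] args) {
    RootLexicon lexicon = TurkishDictionaryLoader.load(
        "VST [P:Noun, Abbrv; Pr:viesti]",
        "VST [P:Noun, Abbrv; Pr:vesete; Ref:VST_Noun_Abbrv; Index:2]");
    // Ids follow the Lemma_PrimaryPos_SecondaryPos pattern; "Index:2" yields the "_2" suffix.
    DictionaryItem first = lexicon.getItemById("VST_Noun_Abbrv");
    DictionaryItem second = lexicon.getItemById("VST_Noun_Abbrv_2");
    // Ref links the second entry back to the first one.
    System.out.println(second.getReferenceItem().equals(first)); // true
  }
}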
@Test public void builderTest() throws IOException { LmVocabulary.Builder builder = LmVocabulary.builder(); String[] words = {"elma", "çilek", "karpuz", "armut", "elma", "armut"}; for (String word : words) { builder.add(word); } Assert.assertEquals(4, builder.size()); Assert.assertEquals(0, builder.indexOf("elma")); Assert.assertEquals(1, builder.indexOf("çilek")); Assert.assertEquals(2, builder.indexOf("karpuz")); Assert.assertEquals(-1, builder.indexOf("mango")); List<Integer> list = Lists.newArrayList(builder.alphabeticallySortedWordsIds()); Assert.assertEquals(Lists.newArrayList(3, 0, 2, 1), list); list = Lists.newArrayList(builder.alphabeticallySortedWordsIds(new Locale("tr"))); Assert.assertEquals(Lists.newArrayList(3, 1, 0, 2), list); LmVocabulary vocab = builder.generate(); Assert.assertEquals(7, vocab.size()); }
public static Builder builder() { return new Builder(); }
LmVocabulary { public static Builder builder() { return new Builder(); } }
LmVocabulary { public static Builder builder() { return new Builder(); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); }
LmVocabulary { public static Builder builder() { return new Builder(); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); }
LmVocabulary { public static Builder builder() { return new Builder(); } LmVocabulary(String... vocabulary); LmVocabulary(List<String> vocabulary); private LmVocabulary(RandomAccessFile raf); private LmVocabulary(DataInputStream dis); static LmVocabulary loadFromBinary(File binaryVocabularyFile); static LmVocabulary loadFromBinary(Path binaryVocabularyFilePath); static LmVocabulary loadFromRandomAccessFile(RandomAccessFile raf); static LmVocabulary loadFromDataInputStream(DataInputStream dis); static LmVocabulary loadFromUtf8File(File utfVocabularyFile); static Builder builder(); static LmVocabulary intersect(LmVocabulary v1, LmVocabulary v2); void saveBinary(File file); void saveBinary(DataOutputStream dos); int size(); boolean containsUnknown(int... gramIds); boolean containsSuffix(); boolean containsPrefix(); String getWord(int index); int indexOf(String word); int getSentenceStartIndex(); int getSentenceEndIndex(); int getUnknownWordIndex(); String getUnknownWord(); String getSentenceStart(); String getSentenceEnd(); Iterable<Integer> alphabeticallySortedWordsIds(Locale locale); Iterable<Integer> alphabeticallySortedWordsIds(); Iterable<String> words(); Iterable<String> wordsSorted(); Iterable<String> wordsSorted(Locale locale); String getWordsString(int... indexes); boolean containsAll(int... indexes); boolean contains(int index); boolean containsAll(String... words); boolean contains(String word); long encodeTrigram(int g0, int g1, int g2); long encodeTrigram(int... triGram); int[] toIndexes(String... words); int[] toIndexes(String[] history, String word); int[] toIndexes(int[] history, String word); String[] toWords(int... indexes); static final String DEFAULT_SENTENCE_BEGIN_MARKER; static final String DEFAULT_SENTENCE_END_MARKER; static final String DEFAULT_UNKNOWN_WORD; }
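A usage sketch of the Builder shown above: duplicate additions are ignored, insertion order defines the indexes, and generate() appends the special markers, which is why the test above sees a size of 7 for 4 distinct words. The package import is an assumption.

import zemberek.lm.LmVocabulary;  // package assumed

public class VocabularyBuilderSketch {
  public static void main(String[] args) {
    LmVocabulary.Builder builder = LmVocabulary.builder();
    for (String word : new String[]{"elma", "çilek", "karpuz", "armut", "elma"}) {
      builder.add(word);
    }
    System.out.println(builder.size());           // 4, the duplicate "elma" is not re-added
    System.out.println(builder.indexOf("elma"));  // 0, insertion order is preserved
    System.out.println(builder.indexOf("mango")); // -1, the builder has no unknown-word mapping
    LmVocabulary vocabulary = builder.generate();
    System.out.println(vocabulary.size());        // 7 = 4 words + <unk>, <s>, </s>
  }
}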
@Test public void testGeneration() throws IOException { SmoothLm lm = getTinyLm(); Assert.assertEquals(3, lm.getOrder()); }
@Override public int getOrder() { return order; }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public int getOrder() { return order; } }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public int getOrder() { return order; } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public int getOrder() { return order; } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); static Builder builder(InputStream is); static Builder builder(File modelFile); static Builder builder(Path modelFile); String info(); double getStupidBackoffLogAlpha(); int getVersion(); @Override float getUnigramProbability(int id); @Override int getOrder(); @Override LmVocabulary getVocabulary(); LookupCache getCache(); LookupCache getCache(int bits); int getGramCount(int n); @Override boolean ngramExists(int... wordIndexes); double getProbabilityValue(int... wordIndexes); float getBigramProbabilityValue(int w0, int w1); double getBackoffValue(int... wordIndexes); float getBigramBackoffValue(int w0, int w1); double getProbability(String... words); void countFalsePositives(int... wordIndexes); boolean ngramIdsAvailable(); int getFalsePositiveCount(); float getProbability(int... wordIndexes); float getBigramProbability(int w0, int w1); float getTriGramProbability(int w0, int w1, int w2); float getTriGramProbability(int w0, int w1, int w2, int fingerPrint); float getTriGramProbability(int... w); int getBackoffCount(String... tokens); int getBackoffCount(int... wordIndexes); String explain(int... wordIndexes); double getLogBase(); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public int getOrder() { return order; } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); static Builder builder(InputStream is); static Builder builder(File modelFile); static Builder builder(Path modelFile); String info(); double getStupidBackoffLogAlpha(); int getVersion(); @Override float getUnigramProbability(int id); @Override int getOrder(); @Override LmVocabulary getVocabulary(); LookupCache getCache(); LookupCache getCache(int bits); int getGramCount(int n); @Override boolean ngramExists(int... wordIndexes); double getProbabilityValue(int... wordIndexes); float getBigramProbabilityValue(int w0, int w1); double getBackoffValue(int... wordIndexes); float getBigramBackoffValue(int w0, int w1); double getProbability(String... words); void countFalsePositives(int... wordIndexes); boolean ngramIdsAvailable(); int getFalsePositiveCount(); float getProbability(int... wordIndexes); float getBigramProbability(int w0, int w1); float getTriGramProbability(int w0, int w1, int w2); float getTriGramProbability(int w0, int w1, int w2, int fingerPrint); float getTriGramProbability(int... w); int getBackoffCount(String... tokens); int getBackoffCount(int... wordIndexes); String explain(int... wordIndexes); double getLogBase(); static final float DEFAULT_LOG_BASE; static final float DEFAULT_UNIGRAM_WEIGHT; static final float DEFAULT_UNKNOWN_BACKOFF_PENALTY; static final float DEFAULT_STUPID_BACKOFF_ALPHA; static final int DEFAULT_UNKNOWN_TOKEN_PROBABILITY; }
@Test public void testVocabulary() throws IOException { SmoothLm lm = getTinyLm(); LmVocabulary vocab = lm.getVocabulary(); Assert.assertTrue(vocab.contains("Ahmet")); int i1 = vocab.indexOf("Ahmet"); Assert.assertTrue(vocab.contains("elma")); int i2 = vocab.indexOf("elma"); Assert.assertTrue(i1 != i2); Assert.assertEquals("Ahmet", vocab.getWord(i1)); Assert.assertEquals("elma", vocab.getWord(i2)); }
@Override public LmVocabulary getVocabulary() { return vocabulary; }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public LmVocabulary getVocabulary() { return vocabulary; } }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public LmVocabulary getVocabulary() { return vocabulary; } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public LmVocabulary getVocabulary() { return vocabulary; } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); static Builder builder(InputStream is); static Builder builder(File modelFile); static Builder builder(Path modelFile); String info(); double getStupidBackoffLogAlpha(); int getVersion(); @Override float getUnigramProbability(int id); @Override int getOrder(); @Override LmVocabulary getVocabulary(); LookupCache getCache(); LookupCache getCache(int bits); int getGramCount(int n); @Override boolean ngramExists(int... wordIndexes); double getProbabilityValue(int... wordIndexes); float getBigramProbabilityValue(int w0, int w1); double getBackoffValue(int... wordIndexes); float getBigramBackoffValue(int w0, int w1); double getProbability(String... words); void countFalsePositives(int... wordIndexes); boolean ngramIdsAvailable(); int getFalsePositiveCount(); float getProbability(int... wordIndexes); float getBigramProbability(int w0, int w1); float getTriGramProbability(int w0, int w1, int w2); float getTriGramProbability(int w0, int w1, int w2, int fingerPrint); float getTriGramProbability(int... w); int getBackoffCount(String... tokens); int getBackoffCount(int... wordIndexes); String explain(int... wordIndexes); double getLogBase(); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { @Override public LmVocabulary getVocabulary() { return vocabulary; } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); static Builder builder(InputStream is); static Builder builder(File modelFile); static Builder builder(Path modelFile); String info(); double getStupidBackoffLogAlpha(); int getVersion(); @Override float getUnigramProbability(int id); @Override int getOrder(); @Override LmVocabulary getVocabulary(); LookupCache getCache(); LookupCache getCache(int bits); int getGramCount(int n); @Override boolean ngramExists(int... wordIndexes); double getProbabilityValue(int... wordIndexes); float getBigramProbabilityValue(int w0, int w1); double getBackoffValue(int... wordIndexes); float getBigramBackoffValue(int w0, int w1); double getProbability(String... words); void countFalsePositives(int... wordIndexes); boolean ngramIdsAvailable(); int getFalsePositiveCount(); float getProbability(int... wordIndexes); float getBigramProbability(int w0, int w1); float getTriGramProbability(int w0, int w1, int w2); float getTriGramProbability(int w0, int w1, int w2, int fingerPrint); float getTriGramProbability(int... w); int getBackoffCount(String... tokens); int getBackoffCount(int... wordIndexes); String explain(int... wordIndexes); double getLogBase(); static final float DEFAULT_LOG_BASE; static final float DEFAULT_UNIGRAM_WEIGHT; static final float DEFAULT_UNKNOWN_BACKOFF_PENALTY; static final float DEFAULT_STUPID_BACKOFF_ALPHA; static final int DEFAULT_UNKNOWN_TOKEN_PROBABILITY; }
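A sketch of typical SmoothLm usage on top of the accessors above. It assumes a previously generated binary model file (the path here is a placeholder) and that the Builder returned by SmoothLm.builder(File) exposes a build() method, as in zemberek-nlp's lm module; both are assumptions, while the remaining calls appear in the method list.

import java.io.File;
import java.io.IOException;
import zemberek.lm.LmVocabulary;          // package assumed
import zemberek.lm.compression.SmoothLm;  // package assumed

public class SmoothLmSketch {
  public static void main(String[] args) throws IOException {
    File modelFile = new File("lm.smooth");             // placeholder path to a compressed model
    SmoothLm lm = SmoothLm.builder(modelFile).build();  // build() is assumed here
    LmVocabulary vocabulary = lm.getVocabulary();
    int[] trigram = vocabulary.toIndexes("Ahmet", "dondurma", "yedi");
    System.out.println(lm.getOrder());               // model order, e.g. 3 for a trigram model
    System.out.println(lm.getProbability(trigram));  // log probability in the model's log base
    System.out.println(lm.explain(trigram));         // human-readable back-off explanation
  }
}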
@Test public void testExplain() throws IOException { SmoothLm lm = getTinyLm(); LmVocabulary vocabulary = lm.getVocabulary(); int[] is = {vocabulary.indexOf("<s>")}; System.out.println(lm.explain(is)); int[] is2 = vocabulary.toIndexes("<s>", "kedi"); System.out.println(lm.explain(is2)); int[] is3 = vocabulary.toIndexes("Ahmet", "dondurma", "yedi"); System.out.println(lm.explain(is3)); int[] is4 = vocabulary.toIndexes("Ahmet", "yemez"); System.out.println(lm.explain(is4)); int[] is5 = vocabulary.toIndexes("Ahmet", "yemez", "kırmızı"); System.out.println(lm.explain(is5)); }
public String explain(int... wordIndexes) { return explain(new Explanation(), wordIndexes).sb.toString(); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { public String explain(int... wordIndexes) { return explain(new Explanation(), wordIndexes).sb.toString(); } }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { public String explain(int... wordIndexes) { return explain(new Explanation(), wordIndexes).sb.toString(); } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { public String explain(int... wordIndexes) { return explain(new Explanation(), wordIndexes).sb.toString(); } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); static Builder builder(InputStream is); static Builder builder(File modelFile); static Builder builder(Path modelFile); String info(); double getStupidBackoffLogAlpha(); int getVersion(); @Override float getUnigramProbability(int id); @Override int getOrder(); @Override LmVocabulary getVocabulary(); LookupCache getCache(); LookupCache getCache(int bits); int getGramCount(int n); @Override boolean ngramExists(int... wordIndexes); double getProbabilityValue(int... wordIndexes); float getBigramProbabilityValue(int w0, int w1); double getBackoffValue(int... wordIndexes); float getBigramBackoffValue(int w0, int w1); double getProbability(String... words); void countFalsePositives(int... wordIndexes); boolean ngramIdsAvailable(); int getFalsePositiveCount(); float getProbability(int... wordIndexes); float getBigramProbability(int w0, int w1); float getTriGramProbability(int w0, int w1, int w2); float getTriGramProbability(int w0, int w1, int w2, int fingerPrint); float getTriGramProbability(int... w); int getBackoffCount(String... tokens); int getBackoffCount(int... wordIndexes); String explain(int... wordIndexes); double getLogBase(); }
SmoothLm extends BaseLanguageModel implements NgramLanguageModel { public String explain(int... wordIndexes) { return explain(new Explanation(), wordIndexes).sb.toString(); } private SmoothLm( DataInputStream dis, float logBase, float unigramWeight, float unknownBackoffPenalty, boolean useStupidBackoff, float stupidBackoffAlpha, File ngramKeyFileDir); private SmoothLm(DataInputStream dis); static Builder builder(InputStream is); static Builder builder(File modelFile); static Builder builder(Path modelFile); String info(); double getStupidBackoffLogAlpha(); int getVersion(); @Override float getUnigramProbability(int id); @Override int getOrder(); @Override LmVocabulary getVocabulary(); LookupCache getCache(); LookupCache getCache(int bits); int getGramCount(int n); @Override boolean ngramExists(int... wordIndexes); double getProbabilityValue(int... wordIndexes); float getBigramProbabilityValue(int w0, int w1); double getBackoffValue(int... wordIndexes); float getBigramBackoffValue(int w0, int w1); double getProbability(String... words); void countFalsePositives(int... wordIndexes); boolean ngramIdsAvailable(); int getFalsePositiveCount(); float getProbability(int... wordIndexes); float getBigramProbability(int w0, int w1); float getTriGramProbability(int w0, int w1, int w2); float getTriGramProbability(int w0, int w1, int w2, int fingerPrint); float getTriGramProbability(int... w); int getBackoffCount(String... tokens); int getBackoffCount(int... wordIndexes); String explain(int... wordIndexes); double getLogBase(); static final float DEFAULT_LOG_BASE; static final float DEFAULT_UNIGRAM_WEIGHT; static final float DEFAULT_UNKNOWN_BACKOFF_PENALTY; static final float DEFAULT_STUPID_BACKOFF_ALPHA; static final int DEFAULT_UNKNOWN_TOKEN_PROBABILITY; }
@Test public void testOpenNlpStyle() throws IOException { Path p = TestUtil.tempFileWithData( "<Start:ABC> Foo Bar <End> ivir zivir <Start:DEF> haha <End> . "); NerDataSet set = NerDataSet.load(p, AnnotationStyle.OPEN_NLP); System.out.println("types= " + set.types); Assert.assertTrue(TestUtil.containsAll(set.types, "ABC", "DEF", "OUT")); }
public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } NerDataSet(List<NerSentence> sentences); }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } NerDataSet(List<NerSentence> sentences); List<NerSentence> getSentences(); void shuffle(); static NerDataSet load(Path path, AnnotationStyle style); static String normalizeForNer(String input); void addSet(NerDataSet set); NerDataSet getSubSet(int from, int to); String info(); }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } NerDataSet(List<NerSentence> sentences); List<NerSentence> getSentences(); void shuffle(); static NerDataSet load(Path path, AnnotationStyle style); static String normalizeForNer(String input); void addSet(NerDataSet set); NerDataSet getSubSet(int from, int to); String info(); static final String OUT_TOKEN_TYPE; }
@Test public void testBracketStyle() throws IOException { Path p = TestUtil.tempFileWithData( "[ABC Foo Bar] ivir zivir [DEF haha] . "); NerDataSet set = NerDataSet.load(p, AnnotationStyle.BRACKET); System.out.println("types= " + set.types); Assert.assertTrue(TestUtil.containsAll(set.types, "ABC", "DEF", "OUT")); }
public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } NerDataSet(List<NerSentence> sentences); }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } NerDataSet(List<NerSentence> sentences); List<NerSentence> getSentences(); void shuffle(); static NerDataSet load(Path path, AnnotationStyle style); static String normalizeForNer(String input); void addSet(NerDataSet set); NerDataSet getSubSet(int from, int to); String info(); }
NerDataSet { public static NerDataSet load(Path path, AnnotationStyle style) throws IOException { switch (style) { case BRACKET: return loadBracketStyle(path); case ENAMEX: return loadEnamexStyle(path); case OPEN_NLP: return loadOpenNlpStyle(path); } throw new IOException(String.format("Cannot load data from %s with style %s", path, style)); } NerDataSet(List<NerSentence> sentences); List<NerSentence> getSentences(); void shuffle(); static NerDataSet load(Path path, AnnotationStyle style); static String normalizeForNer(String input); void addSet(NerDataSet set); NerDataSet getSubSet(int from, int to); String info(); static final String OUT_TOKEN_TYPE; }
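A small sketch of loading bracket-style NER data as in the test above. The package and the nested location of the AnnotationStyle enum are assumptions; the temporary file stands in for the TestUtil helper and the annotated sentence is made up.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import zemberek.ner.NerDataSet;                  // package assumed
import zemberek.ner.NerDataSet.AnnotationStyle;  // nested-enum location assumed

public class NerDataSetSketch {
  public static void main(String[] args) throws IOException {
    Path p = Files.createTempFile("ner", ".txt");
    // Bracket style: "[TYPE token token]" marks a named-entity span, everything else is OUT.
    Files.write(p, Collections.singletonList("[PER Ahmet] dün [LOC Ankara] şehrine gitti ."));
    NerDataSet set = NerDataSet.load(p, AnnotationStyle.BRACKET);
    System.out.println(set.info());                 // type and sentence statistics
    System.out.println(set.getSentences().size());  // 1
  }
}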
@Test public void testInstances() { TurkishTokenizer t = TurkishTokenizer.DEFAULT; matchToken(t, "a b \t c \n \r", "a", "b", "c"); t = TurkishTokenizer.ALL; matchToken(t, " a b\t\n\rc", " ", "a", " ", "b", "\t", "\n", "\r", "c"); t = TurkishTokenizer.builder().ignoreAll().acceptTypes(Type.Number).build(); matchToken(t, "www.foo.bar 12,4'ü [email protected] ; ^% 2 adf 12 \r \n ", "12,4'ü", "2", "12"); }
public static Builder builder() { return new Builder(); }
TurkishTokenizer { public static Builder builder() { return new Builder(); } }
TurkishTokenizer { public static Builder builder() { return new Builder(); } private TurkishTokenizer(long acceptedTypeBits); }
TurkishTokenizer { public static Builder builder() { return new Builder(); } private TurkishTokenizer(long acceptedTypeBits); static Builder builder(); boolean isTypeAccepted(Token.Type i); boolean isTypeIgnored(Token.Type i); List<Token> tokenize(File file); List<Token> tokenize(String input); List<Token> tokenize(Reader reader); List<String> tokenizeToStrings(String input); Iterator<Token> getTokenIterator(String input); Iterator<Token> getTokenIterator(File file); Iterator<Token> getTokenIterator(Reader reader); static Token convert(org.antlr.v4.runtime.Token token); static Token convert(org.antlr.v4.runtime.Token token, Token.Type type); static Token.Type convertType(org.antlr.v4.runtime.Token token); }
TurkishTokenizer { public static Builder builder() { return new Builder(); } private TurkishTokenizer(long acceptedTypeBits); static Builder builder(); boolean isTypeAccepted(Token.Type i); boolean isTypeIgnored(Token.Type i); List<Token> tokenize(File file); List<Token> tokenize(String input); List<Token> tokenize(Reader reader); List<String> tokenizeToStrings(String input); Iterator<Token> getTokenIterator(String input); Iterator<Token> getTokenIterator(File file); Iterator<Token> getTokenIterator(Reader reader); static Token convert(org.antlr.v4.runtime.Token token); static Token convert(org.antlr.v4.runtime.Token token, Token.Type type); static Token.Type convertType(org.antlr.v4.runtime.Token token); static final TurkishTokenizer ALL; static final TurkishTokenizer DEFAULT; }
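A usage sketch for the tokenizer builder above. The package imports are assumptions; the builder chain mirrors the one in the test, and the start/end offsets printed here are the same accessors the boundary test below relies on.

import java.util.List;
import zemberek.tokenization.Token;             // package assumed
import zemberek.tokenization.TurkishTokenizer;  // package assumed

public class TokenizerSketch {
  public static void main(String[] args) {
    // DEFAULT drops whitespace-like tokens; ALL keeps every span of the input.
    TurkishTokenizer tokenizer = TurkishTokenizer.DEFAULT;
    System.out.println(tokenizer.tokenizeToStrings("Saat 12:00'de geldi."));

    // A tokenizer that only reports numbers, mirroring the builder call in the test above.
    TurkishTokenizer numbersOnly =
        TurkishTokenizer.builder().ignoreAll().acceptTypes(Token.Type.Number).build();
    List<Token> tokens = numbersOnly.tokenize("12,4'ü aldı, 2 kaldı");
    for (Token t : tokens) {
      System.out.println(t.getText() + " [" + t.getStart() + "-" + t.getEnd() + "]");
    }
  }
}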
@Test public void testTokenBoundaries() { TurkishTokenizer t = TurkishTokenizer.ALL; List<Token> tokens = t.tokenize("bir av. geldi."); Token t0 = tokens.get(0); Assert.assertEquals("bir", t0.getText()); Assert.assertEquals(0, t0.getStart()); Assert.assertEquals(2, t0.getEnd()); Token t1 = tokens.get(1); Assert.assertEquals(" ", t1.getText()); Assert.assertEquals(3, t1.getStart()); Assert.assertEquals(3, t1.getEnd()); Token t2 = tokens.get(2); Assert.assertEquals("av.", t2.getText()); Assert.assertEquals(4, t2.getStart()); Assert.assertEquals(6, t2.getEnd()); Token t3 = tokens.get(3); Assert.assertEquals(" ", t3.getText()); Assert.assertEquals(7, t3.getStart()); Assert.assertEquals(7, t3.getEnd()); Token t4 = tokens.get(4); Assert.assertEquals("geldi", t4.getText()); Assert.assertEquals(8, t4.getStart()); Assert.assertEquals(12, t4.getEnd()); Token t5 = tokens.get(5); Assert.assertEquals(".", t5.getText()); Assert.assertEquals(13, t5.getStart()); Assert.assertEquals(13, t5.getEnd()); }
public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } private TurkishTokenizer(long acceptedTypeBits); }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } private TurkishTokenizer(long acceptedTypeBits); static Builder builder(); boolean isTypeAccepted(Token.Type i); boolean isTypeIgnored(Token.Type i); List<Token> tokenize(File file); List<Token> tokenize(String input); List<Token> tokenize(Reader reader); List<String> tokenizeToStrings(String input); Iterator<Token> getTokenIterator(String input); Iterator<Token> getTokenIterator(File file); Iterator<Token> getTokenIterator(Reader reader); static Token convert(org.antlr.v4.runtime.Token token); static Token convert(org.antlr.v4.runtime.Token token, Token.Type type); static Token.Type convertType(org.antlr.v4.runtime.Token token); }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } private TurkishTokenizer(long acceptedTypeBits); static Builder builder(); boolean isTypeAccepted(Token.Type i); boolean isTypeIgnored(Token.Type i); List<Token> tokenize(File file); List<Token> tokenize(String input); List<Token> tokenize(Reader reader); List<String> tokenizeToStrings(String input); Iterator<Token> getTokenIterator(String input); Iterator<Token> getTokenIterator(File file); Iterator<Token> getTokenIterator(Reader reader); static Token convert(org.antlr.v4.runtime.Token token); static Token convert(org.antlr.v4.runtime.Token token, Token.Type type); static Token.Type convertType(org.antlr.v4.runtime.Token token); static final TurkishTokenizer ALL; static final TurkishTokenizer DEFAULT; }
@Test @Ignore("Not an actual test. Requires external data.") public void performance() throws IOException { TurkishTokenizer tokenizer = TurkishTokenizer.DEFAULT; for (int it = 0; it < 5; it++) { List<String> lines = Files.readAllLines( Paths.get("/media/aaa/Data/aaa/corpora/dunya.100k")); Stopwatch clock = Stopwatch.createStarted(); long tokenCount = 0; for (String line : lines) { List<Token> tokens = tokenizer.tokenize(line); tokenCount += tokens.size(); } long elapsed = clock.elapsed(TimeUnit.MILLISECONDS); Log.info("Token count = %d ", tokenCount); Log.info("Speed (tps) = %.1f", tokenCount * 1000d / elapsed); } }
public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } private TurkishTokenizer(long acceptedTypeBits); }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } private TurkishTokenizer(long acceptedTypeBits); static Builder builder(); boolean isTypeAccepted(Token.Type i); boolean isTypeIgnored(Token.Type i); List<Token> tokenize(File file); List<Token> tokenize(String input); List<Token> tokenize(Reader reader); List<String> tokenizeToStrings(String input); Iterator<Token> getTokenIterator(String input); Iterator<Token> getTokenIterator(File file); Iterator<Token> getTokenIterator(Reader reader); static Token convert(org.antlr.v4.runtime.Token token); static Token convert(org.antlr.v4.runtime.Token token, Token.Type type); static Token.Type convertType(org.antlr.v4.runtime.Token token); }
TurkishTokenizer { public List<Token> tokenize(File file) throws IOException { return getAllTokens(lexerInstance(CharStreams.fromPath(file.toPath()))); } private TurkishTokenizer(long acceptedTypeBits); static Builder builder(); boolean isTypeAccepted(Token.Type i); boolean isTypeIgnored(Token.Type i); List<Token> tokenize(File file); List<Token> tokenize(String input); List<Token> tokenize(Reader reader); List<String> tokenizeToStrings(String input); Iterator<Token> getTokenIterator(String input); Iterator<Token> getTokenIterator(File file); Iterator<Token> getTokenIterator(Reader reader); static Token convert(org.antlr.v4.runtime.Token token); static Token convert(org.antlr.v4.runtime.Token token, Token.Type type); static Token.Type convertType(org.antlr.v4.runtime.Token token); static final TurkishTokenizer ALL; static final TurkishTokenizer DEFAULT; }
@Test public void substringTest() { Assert.assertEquals("", new Span(0, 0).getSubstring("hello")); Assert.assertEquals("h", new Span(0, 1).getSubstring("hello")); Assert.assertEquals("ello", new Span(1, 5).getSubstring("hello")); }
public String getSubstring(String input) { return input.substring(start, end); }
Span { public String getSubstring(String input) { return input.substring(start, end); } }
Span { public String getSubstring(String input) { return input.substring(start, end); } Span(int start, int end); }
Span { public String getSubstring(String input) { return input.substring(start, end); } Span(int start, int end); int length(); int middleValue(); Span copy(int offset); String getSubstring(String input); boolean inSpan(int i); }
Span { public String getSubstring(String input) { return input.substring(start, end); } Span(int start, int end); int length(); int middleValue(); Span copy(int offset); String getSubstring(String input); boolean inSpan(int i); final int start; final int end; }
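A tiny sketch of the Span behavior exercised above: the end index is exclusive, so getSubstring matches String.substring and length() equals end - start. The import location is an assumption.

import zemberek.core.text.Span;  // package assumed

public class SpanSketch {
  public static void main(String[] args) {
    String input = "hello";
    Span span = new Span(1, 5);
    System.out.println(span.getSubstring(input)); // "ello", the end index is exclusive
    System.out.println(span.length());            // 4 = end - start
    Span empty = new Span(0, 0);
    System.out.println(empty.getSubstring(input).isEmpty()); // true, zero-length span
  }
}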
@Test public void implicitP3sgTest() { String[] lines = { "üzeri [A:CompoundP3sg;Roots:üzer]"}; RootLexicon lexicon = TurkishDictionaryLoader.load(lines); Assert.assertEquals(2, lexicon.size()); }
public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
@Test public void lengthTest() { Assert.assertEquals(0, new Span(0, 0).length()); Assert.assertEquals(1, new Span(0, 1).length()); Assert.assertEquals(4, new Span(1, 5).length()); }
public int length() { return end - start; }
Span { public int length() { return end - start; } }
Span { public int length() { return end - start; } Span(int start, int end); }
Span { public int length() { return end - start; } Span(int start, int end); int length(); int middleValue(); Span copy(int offset); String getSubstring(String input); boolean inSpan(int i); }
Span { public int length() { return end - start; } Span(int start, int end); int length(); int middleValue(); Span copy(int offset); String getSubstring(String input); boolean inSpan(int i); final int start; final int end; }
@Test public void singletonAccessShouldNotThrowException() throws IOException { TurkishSentenceExtractor.DEFAULT.fromParagraph("hello"); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
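As a quick illustration of the fromParagraph call exercised by the tests below, a minimal sketch using the DEFAULT singleton from the row above (expected output mirrors shouldExtractSentences1; imports omitted):

    List<String> sentences = TurkishSentenceExtractor.DEFAULT
        .fromParagraph("Merhaba Dünya. Nasılsın?");
    // Expected per the tests in this section: ["Merhaba Dünya.", "Nasılsın?"]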
@Test public void shouldExtractSentences1() throws IOException { String test = "Merhaba Dünya.| Nasılsın?"; List<String> expected = getSentences(test); Assert.assertEquals(expected, TurkishSentenceExtractor.DEFAULT.fromParagraph(test.replace("|", ""))); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void shouldExtractSingleSentences() throws IOException { String test = "Merhaba Dünya."; List<String> expected = getSentences(test); Assert.assertEquals(expected, TurkishSentenceExtractor.DEFAULT.fromParagraph(test.replace("|", ""))); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void shouldExtractSentencesSecondDoesNotEndWithDot() throws IOException { String test = "Merhaba Dünya.| Nasılsın"; List<String> expected = getSentences(test); Assert.assertEquals(expected, TurkishSentenceExtractor.DEFAULT.fromParagraph(test.replace("|", ""))); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void shouldReturnDotForDot() throws IOException { List<String> expected = getSentences("."); Assert.assertEquals(expected, TurkishSentenceExtractor.DEFAULT.fromParagraph(".")); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void shouldReturn0ForEmpty() throws IOException { Assert.assertEquals(0, TurkishSentenceExtractor.DEFAULT.fromParagraph("").size()); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void shouldReturn0ForEmptyParagraph() { List<String> sentences = TurkishSentenceExtractor.DEFAULT.fromParagraph(""); Assert.assertEquals(0, sentences.size()); }
public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public List<String> fromParagraph(String paragraph) { List<Span> spans = extractToSpans(paragraph); List<String> sentences = new ArrayList<>(spans.size()); for (Span span : spans) { String sentence = span.getSubstring(paragraph).trim(); if (sentence.length() > 0) { sentences.add(sentence); } } return sentences; } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void testDoubleQuotes() throws IOException { TurkishSentenceExtractor e = TurkishSentenceExtractor .builder() .doNotSplitInDoubleQuotes() .useDefaultModel().build(); Assert.assertEquals( "\"Merhaba! Bugün hava çok güzel. Ne dersin?\" dedi tavşan.|Havucu kemirirken.", markBoundariesParagraph( e, "\"Merhaba! Bugün hava çok güzel. Ne dersin?\" dedi tavşan. Havucu kemirirken.")); Assert.assertEquals( "\"Buna hakkı yok!\" diye öfkeyle konuşmaya başladı Baba Kurt.", markBoundariesParagraph( e, "\"Buna hakkı yok!\" diye öfkeyle konuşmaya başladı Baba Kurt.")); }
public static Builder builder() { return new Builder(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public static Builder builder() { return new Builder(); } }
TurkishSentenceExtractor extends PerceptronSegmenter { public static Builder builder() { return new Builder(); } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); }
TurkishSentenceExtractor extends PerceptronSegmenter { public static Builder builder() { return new Builder(); } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); }
TurkishSentenceExtractor extends PerceptronSegmenter { public static Builder builder() { return new Builder(); } private TurkishSentenceExtractor(FloatValueMap<String> weights); private TurkishSentenceExtractor(FloatValueMap<String> weights, boolean doNotSplitInDoubleQuotes); static Builder builder(); List<String> fromParagraphs(Collection<String> paragraphs); List<String> fromParagraph(String paragraph); List<String> fromDocument(String document); char[] getBoundaryCharacters(); static final TurkishSentenceExtractor DEFAULT; }
@Test public void nounAttributesTest() { List<ItemAttrPair> testList = Lists.newArrayList( testPair("takat [A:NoVoicing, InverseHarmony]", NoVoicing, InverseHarmony), testPair("nakit [A: LastVowelDrop]", Voicing, LastVowelDrop), testPair("ret [A:Voicing, Doubling]", Voicing, Doubling) ); for (ItemAttrPair pair : testList) { DictionaryItem item = TurkishDictionaryLoader.loadFromString(pair.str); Assert.assertEquals(Noun, item.primaryPos); Assert.assertEquals("error in:" + pair.str, pair.attrs, item.attributes); } }
public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static DictionaryItem loadFromString(String dictionaryLine) { String lemma = dictionaryLine; if (dictionaryLine.contains(" ")) { lemma = dictionaryLine.substring(0, dictionaryLine.indexOf(" ")); } return load(dictionaryLine).getMatchingItems(lemma).get(0); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
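A minimal sketch of single-line dictionary loading, using one of the lines that appears in nounAttributesTest above (imports omitted):

    DictionaryItem item = TurkishDictionaryLoader.loadFromString("takat [A:NoVoicing, InverseHarmony]");
    // item.primaryPos is expected to be Noun and item.attributes to contain NoVoicing and InverseHarmony,
    // mirroring the assertions in the test above.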
@Test @Ignore("Not a unit test") public void shouldPrintItemsInDevlDictionary() throws IOException { RootLexicon items = TurkishDictionaryLoader .load(new File(Resources.getResource("dev-lexicon.txt").getFile())); for (DictionaryItem item : items) { System.out.println(item); } }
public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static RootLexicon load(File input) throws IOException { return Files.asCharSource(input, Charsets.UTF_8).readLines(new TextLexiconProcessor()); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
@Test @Ignore("Not a unit test") public void saveFullAttributes() throws IOException { RootLexicon items = TurkishDictionaryLoader.loadDefaultDictionaries(); PrintWriter p = new PrintWriter(new File("dictionary-all-attributes.txt"), "utf-8"); for (DictionaryItem item : items) { p.println(item.toString()); } p.close(); }
public static RootLexicon loadDefaultDictionaries() throws IOException { return loadFromResources(DEFAULT_DICTIONARY_RESOURCES); }
TurkishDictionaryLoader { public static RootLexicon loadDefaultDictionaries() throws IOException { return loadFromResources(DEFAULT_DICTIONARY_RESOURCES); } }
TurkishDictionaryLoader { public static RootLexicon loadDefaultDictionaries() throws IOException { return loadFromResources(DEFAULT_DICTIONARY_RESOURCES); } }
TurkishDictionaryLoader { public static RootLexicon loadDefaultDictionaries() throws IOException { return loadFromResources(DEFAULT_DICTIONARY_RESOURCES); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); }
TurkishDictionaryLoader { public static RootLexicon loadDefaultDictionaries() throws IOException { return loadFromResources(DEFAULT_DICTIONARY_RESOURCES); } static RootLexicon loadDefaultDictionaries(); static RootLexicon loadFromResources(String... resourcePaths); static RootLexicon loadFromResources(Collection<String> resourcePaths); static RootLexicon load(File input); static RootLexicon loadInto(RootLexicon lexicon, File input); static DictionaryItem loadFromString(String dictionaryLine); static RootLexicon load(String... dictionaryLines); static RootLexicon load(Iterable<String> dictionaryLines); static final List<String> DEFAULT_DICTIONARY_RESOURCES; }
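A hedged sketch of loading the bundled default dictionaries and iterating the resulting RootLexicon, following the pattern of the ignored tests above (imports omitted):

    RootLexicon lexicon = TurkishDictionaryLoader.loadDefaultDictionaries();  // declared to throw IOException
    for (DictionaryItem item : lexicon) {
        System.out.println(item);
    }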
@Test public void cardinalTest() { Assert.assertEquals("sıfır", TurkishNumbers.convertToString(0)); Assert.assertEquals("bin", TurkishNumbers.convertToString(1000)); Assert.assertEquals("bir", TurkishNumbers.convertToString(1)); Assert.assertEquals("on bir", TurkishNumbers.convertToString(11)); Assert.assertEquals("yüz on bir", TurkishNumbers.convertToString(111)); Assert.assertEquals("yüz on bir bin", TurkishNumbers.convertToString(111000)); Assert.assertEquals( "bir milyon iki yüz otuz dört bin beş yüz altmış yedi", TurkishNumbers.convertToString(1_234_567)); Assert.assertEquals( "bir milyar iki yüz otuz dört milyon beş yüz altmış yedi bin sekiz yüz doksan", TurkishNumbers.convertToString(1_234_567_890)); }
public static String convertToString(long input) { if (input == 0) { return "sıfır"; } if (input < MIN_NUMBER || input > MAX_NUMBER) { throw new IllegalArgumentException("number is out of bounds:" + input); } String result = ""; long girisPos = Math.abs(input); int sayac = 0; while (girisPos > 0) { int uclu = (int) (girisPos % 1000); if (uclu != 0) { if (uclu == 1 && sayac == 1) { result = thousands[sayac] + " " + result; } else { result = convertThreeDigit(uclu) + " " + thousands[sayac] + " " + result; } } sayac++; girisPos /= 1000; } if (input < 0) { return "eksi " + result.trim(); } else { return result.trim(); } }
TurkishNumbers { public static String convertToString(long input) { if (input == 0) { return "sıfır"; } if (input < MIN_NUMBER || input > MAX_NUMBER) { throw new IllegalArgumentException("number is out of bounds:" + input); } String result = ""; long girisPos = Math.abs(input); int sayac = 0; while (girisPos > 0) { int uclu = (int) (girisPos % 1000); if (uclu != 0) { if (uclu == 1 && sayac == 1) { result = thousands[sayac] + " " + result; } else { result = convertThreeDigit(uclu) + " " + thousands[sayac] + " " + result; } } sayac++; girisPos /= 1000; } if (input < 0) { return "eksi " + result.trim(); } else { return result.trim(); } } }
TurkishNumbers { public static String convertToString(long input) { if (input == 0) { return "sıfır"; } if (input < MIN_NUMBER || input > MAX_NUMBER) { throw new IllegalArgumentException("number is out of bounds:" + input); } String result = ""; long girisPos = Math.abs(input); int sayac = 0; while (girisPos > 0) { int uclu = (int) (girisPos % 1000); if (uclu != 0) { if (uclu == 1 && sayac == 1) { result = thousands[sayac] + " " + result; } else { result = convertThreeDigit(uclu) + " " + thousands[sayac] + " " + result; } } sayac++; girisPos /= 1000; } if (input < 0) { return "eksi " + result.trim(); } else { return result.trim(); } } }
TurkishNumbers { public static String convertToString(long input) { if (input == 0) { return "sıfır"; } if (input < MIN_NUMBER || input > MAX_NUMBER) { throw new IllegalArgumentException("number is out of bounds:" + input); } String result = ""; long girisPos = Math.abs(input); int sayac = 0; while (girisPos > 0) { int uclu = (int) (girisPos % 1000); if (uclu != 0) { if (uclu == 1 && sayac == 1) { result = thousands[sayac] + " " + result; } else { result = convertThreeDigit(uclu) + " " + thousands[sayac] + " " + result; } } sayac++; girisPos /= 1000; } if (input < 0) { return "eksi " + result.trim(); } else { return result.trim(); } } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static String convertToString(long input) { if (input == 0) { return "sıfır"; } if (input < MIN_NUMBER || input > MAX_NUMBER) { throw new IllegalArgumentException("number is out of bounds:" + input); } String result = ""; long girisPos = Math.abs(input); int sayac = 0; while (girisPos > 0) { int uclu = (int) (girisPos % 1000); if (uclu != 0) { if (uclu == 1 && sayac == 1) { result = thousands[sayac] + " " + result; } else { result = convertThreeDigit(uclu) + " " + thousands[sayac] + " " + result; } } sayac++; girisPos /= 1000; } if (input < 0) { return "eksi " + result.trim(); } else { return result.trim(); } } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
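A short usage sketch for convertToString; the expected strings are taken directly from cardinalTest above, and the negative case follows the "eksi" branch in the method body:

    TurkishNumbers.convertToString(0);          // "sıfır"
    TurkishNumbers.convertToString(11);         // "on bir"
    TurkishNumbers.convertToString(1_234_567);  // "bir milyon iki yüz otuz dört bin beş yüz altmış yedi"
    TurkishNumbers.convertToString(-11);        // presumably "eksi on bir"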
@Test public void cardinalTest2() { Assert.assertEquals("sıfır", TurkishNumbers.convertNumberToString("0")); Assert.assertEquals("sıfır sıfır", TurkishNumbers.convertNumberToString("00")); Assert.assertEquals("sıfır sıfır sıfır", TurkishNumbers.convertNumberToString("000")); Assert.assertEquals("sıfır sıfır sıfır bir", TurkishNumbers.convertNumberToString("0001")); Assert.assertEquals("bin", TurkishNumbers.convertNumberToString("1000")); Assert.assertEquals("bir", TurkishNumbers.convertNumberToString("1")); Assert.assertEquals("on bir", TurkishNumbers.convertNumberToString("11")); Assert.assertEquals("yüz on bir", TurkishNumbers.convertNumberToString("111")); Assert.assertEquals("yüz on bir bin", TurkishNumbers.convertNumberToString("111000")); Assert.assertEquals("sıfır yüz on bir bin", TurkishNumbers.convertNumberToString("0111000")); Assert.assertEquals("sıfır sıfır yüz on bir bin", TurkishNumbers.convertNumberToString("00111000")); }
public static String convertNumberToString(String input) { if (input.startsWith("+")) { input = input.substring(1); } List<String> sb = new ArrayList<>(); int i; for (i = 0; i < input.length(); i++) { if (input.charAt(i) == '0') { sb.add("sıfır"); } else { break; } } String rest = input.substring(i); if (rest.length() > 0) { sb.add(convertToString(Long.parseLong(rest))); } return String.join(" ", sb); }
TurkishNumbers { public static String convertNumberToString(String input) { if (input.startsWith("+")) { input = input.substring(1); } List<String> sb = new ArrayList<>(); int i; for (i = 0; i < input.length(); i++) { if (input.charAt(i) == '0') { sb.add("sıfır"); } else { break; } } String rest = input.substring(i); if (rest.length() > 0) { sb.add(convertToString(Long.parseLong(rest))); } return String.join(" ", sb); } }
TurkishNumbers { public static String convertNumberToString(String input) { if (input.startsWith("+")) { input = input.substring(1); } List<String> sb = new ArrayList<>(); int i; for (i = 0; i < input.length(); i++) { if (input.charAt(i) == '0') { sb.add("sıfır"); } else { break; } } String rest = input.substring(i); if (rest.length() > 0) { sb.add(convertToString(Long.parseLong(rest))); } return String.join(" ", sb); } }
TurkishNumbers { public static String convertNumberToString(String input) { if (input.startsWith("+")) { input = input.substring(1); } List<String> sb = new ArrayList<>(); int i; for (i = 0; i < input.length(); i++) { if (input.charAt(i) == '0') { sb.add("sıfır"); } else { break; } } String rest = input.substring(i); if (rest.length() > 0) { sb.add(convertToString(Long.parseLong(rest))); } return String.join(" ", sb); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static String convertNumberToString(String input) { if (input.startsWith("+")) { input = input.substring(1); } List<String> sb = new ArrayList<>(); int i; for (i = 0; i < input.length(); i++) { if (input.charAt(i) == '0') { sb.add("sıfır"); } else { break; } } String rest = input.substring(i); if (rest.length() > 0) { sb.add(convertToString(Long.parseLong(rest))); } return String.join(" ", sb); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
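The string variant differs from convertToString mainly in that it preserves leading zeros as separate "sıfır" words and strips a leading '+', as cardinalTest2 and the method body show; a minimal sketch:

    TurkishNumbers.convertNumberToString("0001");    // "sıfır sıfır sıfır bir"
    TurkishNumbers.convertNumberToString("+111000"); // leading '+' is stripped -> "yüz on bir bin"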
@Test public void ordinalTest() { Assert.assertEquals("sıfırıncı", TurkishNumbers.convertOrdinalNumberString("0.")); }
public static String convertOrdinalNumberString(String input) { String numberPart = input; if (input.endsWith(".")) { numberPart = Strings.subStringUntilFirst(input, "."); } long number = Long.parseLong(numberPart); String text = convertToString(number); String[] words = text.trim().split("[ ]+"); String lastNumber = words[words.length - 1]; if (ordinalMap.containsKey(lastNumber)) { lastNumber = ordinalMap.get(lastNumber); } else { throw new RuntimeException("Cannot find ordinal reading for:" + lastNumber); } StringBuilder sb = new StringBuilder(); for (int i = 0; i < words.length - 1; i++) { sb.append(words[i]).append(" "); } sb.append(lastNumber); return sb.toString(); }
TurkishNumbers { public static String convertOrdinalNumberString(String input) { String numberPart = input; if (input.endsWith(".")) { numberPart = Strings.subStringUntilFirst(input, "."); } long number = Long.parseLong(numberPart); String text = convertToString(number); String[] words = text.trim().split("[ ]+"); String lastNumber = words[words.length - 1]; if (ordinalMap.containsKey(lastNumber)) { lastNumber = ordinalMap.get(lastNumber); } else { throw new RuntimeException("Cannot find ordinal reading for:" + lastNumber); } StringBuilder sb = new StringBuilder(); for (int i = 0; i < words.length - 1; i++) { sb.append(words[i]).append(" "); } sb.append(lastNumber); return sb.toString(); } }
TurkishNumbers { public static String convertOrdinalNumberString(String input) { String numberPart = input; if (input.endsWith(".")) { numberPart = Strings.subStringUntilFirst(input, "."); } long number = Long.parseLong(numberPart); String text = convertToString(number); String[] words = text.trim().split("[ ]+"); String lastNumber = words[words.length - 1]; if (ordinalMap.containsKey(lastNumber)) { lastNumber = ordinalMap.get(lastNumber); } else { throw new RuntimeException("Cannot find ordinal reading for:" + lastNumber); } StringBuilder sb = new StringBuilder(); for (int i = 0; i < words.length - 1; i++) { sb.append(words[i]).append(" "); } sb.append(lastNumber); return sb.toString(); } }
TurkishNumbers { public static String convertOrdinalNumberString(String input) { String numberPart = input; if (input.endsWith(".")) { numberPart = Strings.subStringUntilFirst(input, "."); } long number = Long.parseLong(numberPart); String text = convertToString(number); String[] words = text.trim().split("[ ]+"); String lastNumber = words[words.length - 1]; if (ordinalMap.containsKey(lastNumber)) { lastNumber = ordinalMap.get(lastNumber); } else { throw new RuntimeException("Cannot find ordinal reading for:" + lastNumber); } StringBuilder sb = new StringBuilder(); for (int i = 0; i < words.length - 1; i++) { sb.append(words[i]).append(" "); } sb.append(lastNumber); return sb.toString(); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static String convertOrdinalNumberString(String input) { String numberPart = input; if (input.endsWith(".")) { numberPart = Strings.subStringUntilFirst(input, "."); } long number = Long.parseLong(numberPart); String text = convertToString(number); String[] words = text.trim().split("[ ]+"); String lastNumber = words[words.length - 1]; if (ordinalMap.containsKey(lastNumber)) { lastNumber = ordinalMap.get(lastNumber); } else { throw new RuntimeException("Cannot find ordinal reading for:" + lastNumber); } StringBuilder sb = new StringBuilder(); for (int i = 0; i < words.length - 1; i++) { sb.append(words[i]).append(" "); } sb.append(lastNumber); return sb.toString(); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
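A brief sketch of ordinal conversion; the first value comes from ordinalTest, the second follows from the ordinal-map lookup on the last word and is illustrative rather than asserted in the source:

    TurkishNumbers.convertOrdinalNumberString("0.");  // "sıfırıncı"
    TurkishNumbers.convertOrdinalNumberString("11");  // presumably "on birinci" (the trailing '.' is optional)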
@Test public void separateNumbersTest() { Assert.assertEquals(Lists.newArrayList("H", "12", "A", "5") , TurkishNumbers.separateNumbers("H12A5")); Assert.assertEquals(Lists.newArrayList("F", "16", "'ya") , TurkishNumbers.separateNumbers("F16'ya")); }
public static List<String> separateNumbers(String s) { return Regexps.allMatches(NUMBER_SEPARATION, s); }
TurkishNumbers { public static List<String> separateNumbers(String s) { return Regexps.allMatches(NUMBER_SEPARATION, s); } }
TurkishNumbers { public static List<String> separateNumbers(String s) { return Regexps.allMatches(NUMBER_SEPARATION, s); } }
TurkishNumbers { public static List<String> separateNumbers(String s) { return Regexps.allMatches(NUMBER_SEPARATION, s); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static List<String> separateNumbers(String s) { return Regexps.allMatches(NUMBER_SEPARATION, s); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
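A one-line sketch of splitting digits from letters, with results copied from separateNumbersTest above:

    TurkishNumbers.separateNumbers("F16'ya");  // ["F", "16", "'ya"]
    TurkishNumbers.separateNumbers("H12A5");   // ["H", "12", "A", "5"]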
@Test @Ignore("Slow. Uses actual data.") public void suggestWord1() throws Exception { TurkishMorphology morphology = TurkishMorphology.builder() .setLexicon("Türkiye", "Bayram").build(); List<String> endings = Lists.newArrayList("ında", "de"); StemEndingGraph graph = new StemEndingGraph(morphology, endings); TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology, graph.stemGraph); NgramLanguageModel lm = getLm("lm-unigram.slm"); check(spellChecker, lm, "Türkiye'de", "Türkiye'de"); }
public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
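A hedged sketch of the simplest check path, using the single-argument constructor listed above; the lexicon entries and the checked word come from suggestWord1, checked exceptions and model loading are glossed over, and the result is what the method is expected to return when some analysis formats back to the exact input:

    TurkishMorphology morphology = TurkishMorphology.builder()
        .setLexicon("Türkiye", "Bayram").build();
    TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology);
    boolean ok = spellChecker.check("Türkiye'de");  // expected true for a correctly spelled form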
@Test public void separateConnectedNumbersTest() { Assert.assertEquals(Lists.newArrayList("on") , TurkishNumbers.seperateConnectedNumbers("on")); Assert.assertEquals(Lists.newArrayList("on", "iki", "bin", "altı", "yüz") , TurkishNumbers.seperateConnectedNumbers("onikibinaltıyüz")); Assert.assertEquals(Lists.newArrayList("bir", "iki", "üç") , TurkishNumbers.seperateConnectedNumbers("birikiüç")); }
public static List<String> seperateConnectedNumbers(List<String> inputSequence) { List<String> output = new ArrayList<>(inputSequence.size()); for (String s : inputSequence) { if (stringToNumber.containsKey(s)) { output.add(valueOf(stringToNumber.get(s))); continue; } output.addAll(seperateConnectedNumbers(s)); } return output; }
TurkishNumbers { public static List<String> seperateConnectedNumbers(List<String> inputSequence) { List<String> output = new ArrayList<>(inputSequence.size()); for (String s : inputSequence) { if (stringToNumber.containsKey(s)) { output.add(valueOf(stringToNumber.get(s))); continue; } output.addAll(seperateConnectedNumbers(s)); } return output; } }
TurkishNumbers { public static List<String> seperateConnectedNumbers(List<String> inputSequence) { List<String> output = new ArrayList<>(inputSequence.size()); for (String s : inputSequence) { if (stringToNumber.containsKey(s)) { output.add(valueOf(stringToNumber.get(s))); continue; } output.addAll(seperateConnectedNumbers(s)); } return output; } }
TurkishNumbers { public static List<String> seperateConnectedNumbers(List<String> inputSequence) { List<String> output = new ArrayList<>(inputSequence.size()); for (String s : inputSequence) { if (stringToNumber.containsKey(s)) { output.add(valueOf(stringToNumber.get(s))); continue; } output.addAll(seperateConnectedNumbers(s)); } return output; } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static List<String> seperateConnectedNumbers(List<String> inputSequence) { List<String> output = new ArrayList<>(inputSequence.size()); for (String s : inputSequence) { if (stringToNumber.containsKey(s)) { output.add(valueOf(stringToNumber.get(s))); continue; } output.addAll(seperateConnectedNumbers(s)); } return output; } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
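A compact sketch of splitting concatenated number words, mirroring separateConnectedNumbersTest (the method name keeps the source's "seperate" spelling):

    TurkishNumbers.seperateConnectedNumbers("onikibinaltıyüz");  // ["on", "iki", "bin", "altı", "yüz"]
    TurkishNumbers.seperateConnectedNumbers("on");               // ["on"]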
@Test public void testTextToNumber1() { Assert.assertEquals(11, TurkishNumbers.convertToNumber("on bir")); Assert.assertEquals(111, TurkishNumbers.convertToNumber("yüz on bir")); Assert.assertEquals(101, TurkishNumbers.convertToNumber("yüz bir")); Assert.assertEquals(1000_000, TurkishNumbers.convertToNumber("bir milyon")); Assert.assertEquals(-1, TurkishNumbers.convertToNumber("bir bin")); }
public static long convertToNumber(String... words) { return textToNumber.convert(words); }
TurkishNumbers { public static long convertToNumber(String... words) { return textToNumber.convert(words); } }
TurkishNumbers { public static long convertToNumber(String... words) { return textToNumber.convert(words); } }
TurkishNumbers { public static long convertToNumber(String... words) { return textToNumber.convert(words); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static long convertToNumber(String... words) { return textToNumber.convert(words); } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
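A brief sketch of text-to-number conversion; the values are those asserted in testTextToNumber1 above:

    TurkishNumbers.convertToNumber("yüz on bir");  // 111
    TurkishNumbers.convertToNumber("bir milyon");  // 1_000_000
    TurkishNumbers.convertToNumber("bir bin");     // -1, returned for an invalid word sequence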
@Test public void romanNumberTest() { Assert.assertEquals(-1, TurkishNumbers.romanToDecimal("foo")); Assert.assertEquals(-1, TurkishNumbers.romanToDecimal("IIIIIII")); Assert.assertEquals(1987, TurkishNumbers.romanToDecimal("MCMLXXXVII")); }
public static int romanToDecimal(String s) { if (s == null || s.isEmpty() || !romanNumeralPattern.matcher(s).matches()) { return -1; } final Matcher matcher = Pattern.compile("M|CM|D|CD|C|XC|L|XL|X|IX|V|IV|I").matcher(s); final int[] decimalValues = {1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}; final String[] romanNumerals = {"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}; int result = 0; while (matcher.find()) { for (int i = 0; i < romanNumerals.length; i++) { if (romanNumerals[i].equals(matcher.group(0))) { result += decimalValues[i]; } } } return result; }
TurkishNumbers { public static int romanToDecimal(String s) { if (s == null || s.isEmpty() || !romanNumeralPattern.matcher(s).matches()) { return -1; } final Matcher matcher = Pattern.compile("M|CM|D|CD|C|XC|L|XL|X|IX|V|IV|I").matcher(s); final int[] decimalValues = {1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}; final String[] romanNumerals = {"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}; int result = 0; while (matcher.find()) { for (int i = 0; i < romanNumerals.length; i++) { if (romanNumerals[i].equals(matcher.group(0))) { result += decimalValues[i]; } } } return result; } }
TurkishNumbers { public static int romanToDecimal(String s) { if (s == null || s.isEmpty() || !romanNumeralPattern.matcher(s).matches()) { return -1; } final Matcher matcher = Pattern.compile("M|CM|D|CD|C|XC|L|XL|X|IX|V|IV|I").matcher(s); final int[] decimalValues = {1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}; final String[] romanNumerals = {"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}; int result = 0; while (matcher.find()) { for (int i = 0; i < romanNumerals.length; i++) { if (romanNumerals[i].equals(matcher.group(0))) { result += decimalValues[i]; } } } return result; } }
TurkishNumbers { public static int romanToDecimal(String s) { if (s == null || s.isEmpty() || !romanNumeralPattern.matcher(s).matches()) { return -1; } final Matcher matcher = Pattern.compile("M|CM|D|CD|C|XC|L|XL|X|IX|V|IV|I").matcher(s); final int[] decimalValues = {1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}; final String[] romanNumerals = {"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}; int result = 0; while (matcher.find()) { for (int i = 0; i < romanNumerals.length; i++) { if (romanNumerals[i].equals(matcher.group(0))) { result += decimalValues[i]; } } } return result; } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); }
TurkishNumbers { public static int romanToDecimal(String s) { if (s == null || s.isEmpty() || !romanNumeralPattern.matcher(s).matches()) { return -1; } final Matcher matcher = Pattern.compile("M|CM|D|CD|C|XC|L|XL|X|IX|V|IV|I").matcher(s); final int[] decimalValues = {1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}; final String[] romanNumerals = {"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}; int result = 0; while (matcher.find()) { for (int i = 0; i < romanNumerals.length; i++) { if (romanNumerals[i].equals(matcher.group(0))) { result += decimalValues[i]; } } } return result; } static Map<String, String> getOrdinalMap(); static String convertToString(long input); static String convertNumberToString(String input); static long singleWordNumberValue(String word); static List<String> replaceNumberStrings(List<String> inputSequence); static List<String> seperateConnectedNumbers(List<String> inputSequence); static List<String> seperateConnectedNumbers(String input); static long convertToNumber(String... words); static long convertToNumber(String text); static String convertOrdinalNumberString(String input); static List<String> separateNumbers(String s); static String getOrdinal(String input); static boolean hasNumber(String s); static boolean hasOnlyNumber(String s); static int romanToDecimal(String s); static final long MAX_NUMBER; static final long MIN_NUMBER; }
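The Roman-numeral conversion above reports failure through a -1 sentinel rather than an exception, so callers are expected to check the return value. A minimal sketch, again with imports omitted as an assumption about the library layout:

// Minimal sketch, assuming TurkishNumbers is available on the classpath.
public class RomanNumeralDemo {
  public static void main(String[] args) {
    int value = TurkishNumbers.romanToDecimal("MCMLXXXVII");
    if (value < 0) {
      System.out.println("Not a valid Roman numeral.");
    } else {
      System.out.println("MCMLXXXVII = " + value); // 1987
    }
    // null, empty and malformed inputs (e.g. "IIIIIII") all map to -1.
    System.out.println(TurkishNumbers.romanToDecimal("IIIIIII")); // -1
  }
}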
@Test public void getPosTest() { RuleBasedAnalyzer analyzer = getAnalyzer("görmek"); List<SingleAnalysis> analyses = analyzer.analyze("görmek"); Assert.assertEquals(1, analyses.size()); SingleAnalysis analysis = analyses.get(0); Assert.assertEquals(analysis.getDictionaryItem(), analyzer.getLexicon().getItemById("görmek_Verb")); Assert.assertEquals(PrimaryPos.Noun, analysis.getPos()); }
public PrimaryPos getPos() { return getGroup(groupCount() - 1).getPos(); }
SingleAnalysis { public PrimaryPos getPos() { return getGroup(groupCount() - 1).getPos(); } }
SingleAnalysis { public PrimaryPos getPos() { return getGroup(groupCount() - 1).getPos(); } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); }
SingleAnalysis { public PrimaryPos getPos() { return getGroup(groupCount() - 1).getPos(); } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
SingleAnalysis { public PrimaryPos getPos() { return getGroup(groupCount() - 1).getPos(); } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
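The test above may look surprising at first: the dictionary item is görmek_Verb, yet getPos() returns Noun, because getPos reads the primary POS of the last morpheme group and the infinitive form derives a noun. A hedged sketch that makes this visible through the public TurkishMorphology entry point used later in this section; the default-lexicon setup and the omitted imports are assumptions.

// Minimal sketch, assuming TurkishMorphology and SingleAnalysis from Zemberek are available.
public class PosDemo {
  public static void main(String[] args) throws Exception {
    TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
    for (SingleAnalysis analysis : morphology.analyze("görmek")) {
      // The dictionary item is a Verb, but the analysis as a whole is nominal (infinitive).
      System.out.println(analysis.getDictionaryItem().primaryPos + " -> " + analysis.getPos());
      System.out.println(analysis.formatLong());
    }
  }
}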
@Test public void getStemsTest() { RuleBasedAnalyzer analyzer = getAnalyzer("kitap"); SingleAnalysis analysis = analyzer.analyze("kitap").get(0); Assert.assertEquals(toList("kitap"), analysis.getStems()); analysis = analyzer.analyze("kitaplı").get(0); Assert.assertEquals(toList("kitap", "kitaplı"), analysis.getStems()); analysis = analyzer.analyze("kitaplarda").get(0); Assert.assertEquals(toList("kitap"), analysis.getStems()); analysis = analyzer.analyze("kitabımmış").get(0); Assert.assertEquals(toList("kitab", "kitabım"), analysis.getStems()); analysis = analyzer.analyze("kitapçığa").get(0); Assert.assertEquals(toList("kitap", "kitapçığ"), analysis.getStems()); analyzer = getAnalyzer("okumak"); analysis = analyzer.analyze("okut").get(0); Assert.assertEquals(toList("oku", "okut"), analysis.getStems()); analysis = analyzer.analyze("okuttur").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur"), analysis.getStems()); analysis = analyzer.analyze("okutturuluyor").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur", "okutturul"), analysis.getStems()); analysis = analyzer.analyze("okutturamıyor").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur"), analysis.getStems()); analysis = analyzer.analyze("okutturabiliyor").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur", "okutturabil"), analysis.getStems()); }
public List<String> getStems() { List<String> stems = Lists.newArrayListWithCapacity(2); stems.add(getStem()); String previousStem = getGroup(0).surfaceForm(); if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (!stems.contains(stem)) { stems.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return stems; }
SingleAnalysis { public List<String> getStems() { List<String> stems = Lists.newArrayListWithCapacity(2); stems.add(getStem()); String previousStem = getGroup(0).surfaceForm(); if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (!stems.contains(stem)) { stems.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return stems; } }
SingleAnalysis { public List<String> getStems() { List<String> stems = Lists.newArrayListWithCapacity(2); stems.add(getStem()); String previousStem = getGroup(0).surfaceForm(); if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (!stems.contains(stem)) { stems.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return stems; } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); }
SingleAnalysis { public List<String> getStems() { List<String> stems = Lists.newArrayListWithCapacity(2); stems.add(getStem()); String previousStem = getGroup(0).surfaceForm(); if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (!stems.contains(stem)) { stems.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return stems; } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
SingleAnalysis { public List<String> getStems() { List<String> stems = Lists.newArrayListWithCapacity(2); stems.add(getStem()); String previousStem = getGroup(0).surfaceForm(); if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (!stems.contains(stem)) { stems.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return stems; } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
@Test public void getLemmasTest() { RuleBasedAnalyzer analyzer = getAnalyzer("kitap"); SingleAnalysis analysis = analyzer.analyze("kitap").get(0); Assert.assertEquals(toList("kitap"), analysis.getLemmas()); analysis = analyzer.analyze("kitaplı").get(0); Assert.assertEquals(toList("kitap", "kitaplı"), analysis.getLemmas()); analysis = analyzer.analyze("kitaplarda").get(0); Assert.assertEquals(toList("kitap"), analysis.getLemmas()); analysis = analyzer.analyze("kitabımmış").get(0); Assert.assertEquals(toList("kitap", "kitabım"), analysis.getLemmas()); analysis = analyzer.analyze("kitapçığa").get(0); Assert.assertEquals(toList("kitap", "kitapçık"), analysis.getLemmas()); analyzer = getAnalyzer("okumak"); analysis = analyzer.analyze("okut").get(0); Assert.assertEquals(toList("oku", "okut"), analysis.getLemmas()); analysis = analyzer.analyze("okuttur").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur"), analysis.getLemmas()); analysis = analyzer.analyze("okutturuluyor").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur", "okutturul"), analysis.getLemmas()); analysis = analyzer.analyze("okutturamıyor").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur"), analysis.getLemmas()); analysis = analyzer.analyze("okutturabiliyor").get(0); Assert.assertEquals(toList("oku", "okut", "okuttur", "okutturabil"), analysis.getLemmas()); }
public List<String> getLemmas() { List<String> lemmas = Lists.newArrayListWithCapacity(2); lemmas.add(item.root); String previousStem = getGroup(0).surfaceForm(); if (!previousStem.equals(item.root)) { if (previousStem.endsWith("ğ")) { previousStem = previousStem.substring(0, previousStem.length() - 1) + "k"; } } if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (stem.endsWith("ğ")) { stem = stem.substring(0, stem.length() - 1) + "k"; } if (!lemmas.contains(stem)) { lemmas.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return lemmas; }
SingleAnalysis { public List<String> getLemmas() { List<String> lemmas = Lists.newArrayListWithCapacity(2); lemmas.add(item.root); String previousStem = getGroup(0).surfaceForm(); if (!previousStem.equals(item.root)) { if (previousStem.endsWith("ğ")) { previousStem = previousStem.substring(0, previousStem.length() - 1) + "k"; } } if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (stem.endsWith("ğ")) { stem = stem.substring(0, stem.length() - 1) + "k"; } if (!lemmas.contains(stem)) { lemmas.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return lemmas; } }
SingleAnalysis { public List<String> getLemmas() { List<String> lemmas = Lists.newArrayListWithCapacity(2); lemmas.add(item.root); String previousStem = getGroup(0).surfaceForm(); if (!previousStem.equals(item.root)) { if (previousStem.endsWith("ğ")) { previousStem = previousStem.substring(0, previousStem.length() - 1) + "k"; } } if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (stem.endsWith("ğ")) { stem = stem.substring(0, stem.length() - 1) + "k"; } if (!lemmas.contains(stem)) { lemmas.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return lemmas; } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); }
SingleAnalysis { public List<String> getLemmas() { List<String> lemmas = Lists.newArrayListWithCapacity(2); lemmas.add(item.root); String previousStem = getGroup(0).surfaceForm(); if (!previousStem.equals(item.root)) { if (previousStem.endsWith("ğ")) { previousStem = previousStem.substring(0, previousStem.length() - 1) + "k"; } } if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (stem.endsWith("ğ")) { stem = stem.substring(0, stem.length() - 1) + "k"; } if (!lemmas.contains(stem)) { lemmas.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return lemmas; } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
SingleAnalysis { public List<String> getLemmas() { List<String> lemmas = Lists.newArrayListWithCapacity(2); lemmas.add(item.root); String previousStem = getGroup(0).surfaceForm(); if (!previousStem.equals(item.root)) { if (previousStem.endsWith("ğ")) { previousStem = previousStem.substring(0, previousStem.length() - 1) + "k"; } } if (groupBoundaries.length > 1) { for (int i = 1; i < groupBoundaries.length; i++) { MorphemeGroup ig = getGroup(i); MorphemeData suffixData = ig.morphemes.get(0); String surface = suffixData.surface; String stem = previousStem + surface; if (stem.endsWith("ğ")) { stem = stem.substring(0, stem.length() - 1) + "k"; } if (!lemmas.contains(stem)) { lemmas.add(stem); } previousStem = previousStem + ig.surfaceForm(); } } return lemmas; } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
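To contrast getStems and getLemmas above on the same surface form, the following sketch reuses "kitapçığa" from the tests: stems keep the surface consonant ("kitapçığ") while lemmas restore the lexical form ("kitapçık"). Loading the default lexicon and the omitted imports are assumptions.

// Minimal sketch, assuming TurkishMorphology and SingleAnalysis from Zemberek are available.
public class StemLemmaDemo {
  public static void main(String[] args) throws Exception {
    TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
    for (SingleAnalysis analysis : morphology.analyze("kitapçığa")) {
      System.out.println("stems  = " + analysis.getStems());   // e.g. [kitap, kitapçığ]
      System.out.println("lemmas = " + analysis.getLemmas());  // e.g. [kitap, kitapçık]
    }
  }
}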
@Test public void getLemmasAfterZeroMorphemeTest_Issue_175() { RuleBasedAnalyzer analyzer = getAnalyzer("gün"); List<SingleAnalysis> analyses = analyzer.analyze("günlüğüm"); boolean found = false; for (SingleAnalysis analysis : analyses) { if (analysis.formatLong().contains("Ness→Noun+A3sg|Zero→Verb")) { found = true; Assert.assertEquals(toList("gün", "günlük"), analysis.getLemmas()); } } if (!found) { Assert.fail("Could not find an analysis with `Ness→Noun+A3sg|Zero→Verb` in it"); } }
public String formatLong() { return AnalysisFormatters.DEFAULT.format(this); }
SingleAnalysis { public String formatLong() { return AnalysisFormatters.DEFAULT.format(this); } }
SingleAnalysis { public String formatLong() { return AnalysisFormatters.DEFAULT.format(this); } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); }
SingleAnalysis { public String formatLong() { return AnalysisFormatters.DEFAULT.format(this); } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
SingleAnalysis { public String formatLong() { return AnalysisFormatters.DEFAULT.format(this); } SingleAnalysis( DictionaryItem item, List<MorphemeData> morphemeDataList, int[] groupBoundaries); static SingleAnalysis unknown(String input); static SingleAnalysis dummy(String input, DictionaryItem item); String surfaceForm(); boolean containsInformalMorpheme(); String getEnding(); String getStem(); boolean containsMorpheme(Morpheme morpheme); StemAndEnding getStemAndEnding(); DictionaryItem getDictionaryItem(); boolean isUnknown(); boolean isRuntime(); List<MorphemeData> getMorphemeDataList(); List<Morpheme> getMorphemes(); MorphemeGroup getGroup(int groupIndex); MorphemeGroup getLastGroup(); MorphemeGroup[] getGroups(); static SingleAnalysis fromSearchPath(SearchPath searchPath); boolean containsAnyMorpheme(Morpheme... morphemes); List<String> getStems(); List<String> getLemmas(); @Override String toString(); String formatLexical(); String formatMorphemesLexical(); PrimaryPos getPos(); String formatLong(); int groupCount(); @Override boolean equals(Object o); @Override int hashCode(); }
@Test public void formatNonProperNoun() { TurkishMorphology morphology = TurkishMorphology.builder() .disableCache() .setLexicon("elma", "kitap", "demek", "evet") .build(); String[] inputs = {"elmamadaki", "elma", "kitalarımdan", "kitabımızsa", "diyebileceğimiz", "dedi", "evet"}; WordAnalysisSurfaceFormatter formatter = new WordAnalysisSurfaceFormatter(); for (String input : inputs) { WordAnalysis results = morphology.analyze(input); for (SingleAnalysis result : results) { Assert.assertEquals(input, formatter.format(result, null)); } } }
public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
@Test public void formatNumerals() { TurkishMorphology morphology = TurkishMorphology.builder().disableCache().build(); String[] inputs = {"1e", "4ten", "123ü", "12,5ten"}; String[] expected = {"1'e", "4'ten", "123'ü", "12,5ten"}; WordAnalysisSurfaceFormatter formatter = new WordAnalysisSurfaceFormatter(); int i = 0; for (String input : inputs) { WordAnalysis results = morphology.analyze(input); for (SingleAnalysis result : results) { if (result.getDictionaryItem().primaryPos == PrimaryPos.Numeral) { Assert.assertEquals(expected[i], formatter.format(result, "'")); } } i++; } }
public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
WordAnalysisSurfaceFormatter { public String format(SingleAnalysis analysis, String apostrophe) { DictionaryItem item = analysis.getDictionaryItem(); String ending = analysis.getEnding(); if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return ending.length() > 0 ? item.normalizedLemma() + apostrophe + ending : item.normalizedLemma(); } else { if (item.attributes.contains(RootAttribute.NoQuote)) { return item.normalizedLemma() + ending; } else { return analysis.getStem() + ending; } } } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
@Test public void formatToCase() { TurkishMorphology morphology = TurkishMorphology.builder() .disableCache() .setLexicon("kış", "şiir", "Aydın", "Google [Pr:gugıl]") .build(); String[] inputs = {"aydında", "googledan", "Google", "şiirde", "kışçığa", "kış"}; String[] expectedDefaultCase = {"Aydın'da", "Google'dan", "Google", "şiirde", "kışçığa", "kış"}; String[] expectedLowerCase = {"aydın'da", "google'dan", "google", "şiirde", "kışçığa", "kış"}; String[] expectedUpperCase = {"AYDIN'DA", "GOOGLE'DAN", "GOOGLE", "ŞİİRDE", "KIŞÇIĞA", "KIŞ"}; String[] expectedCapitalCase = {"Aydın'da", "Google'dan", "Google", "Şiirde", "Kışçığa", "Kış"}; String[] expectedUpperRootLowerEndingCase = {"AYDIN'da", "GOOGLE'dan", "GOOGLE", "ŞİİRde", "KIŞçığa", "KIŞ"}; testCaseType(morphology, inputs, expectedDefaultCase, DEFAULT_CASE); testCaseType(morphology, inputs, expectedLowerCase, LOWER_CASE); testCaseType(morphology, inputs, expectedUpperCase, UPPER_CASE); testCaseType(morphology, inputs, expectedCapitalCase, TITLE_CASE); testCaseType(morphology, inputs, expectedUpperRootLowerEndingCase, UPPER_CASE_ROOT_LOWER_CASE_ENDING); }
public String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe) { String formatted = format(analysis, apostrophe); Locale locale = analysis.getDictionaryItem().hasAttribute(RootAttribute.LocaleEn) ? Locale.ENGLISH : Turkish.LOCALE; switch (type) { case DEFAULT_CASE: return formatted; case LOWER_CASE: return formatted.toLowerCase(locale); case UPPER_CASE: return formatted.toUpperCase(locale); case TITLE_CASE: return Turkish.capitalize(formatted); case UPPER_CASE_ROOT_LOWER_CASE_ENDING: String ending = analysis.getEnding(); String lemmaUpper = analysis.getDictionaryItem().normalizedLemma().toUpperCase(locale); if (ending.length() == 0) { return lemmaUpper; } if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return lemmaUpper + apostrophe + ending; } else { return lemmaUpper + ending; } default: return ""; } }
WordAnalysisSurfaceFormatter { public String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe) { String formatted = format(analysis, apostrophe); Locale locale = analysis.getDictionaryItem().hasAttribute(RootAttribute.LocaleEn) ? Locale.ENGLISH : Turkish.LOCALE; switch (type) { case DEFAULT_CASE: return formatted; case LOWER_CASE: return formatted.toLowerCase(locale); case UPPER_CASE: return formatted.toUpperCase(locale); case TITLE_CASE: return Turkish.capitalize(formatted); case UPPER_CASE_ROOT_LOWER_CASE_ENDING: String ending = analysis.getEnding(); String lemmaUpper = analysis.getDictionaryItem().normalizedLemma().toUpperCase(locale); if (ending.length() == 0) { return lemmaUpper; } if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return lemmaUpper + apostrophe + ending; } else { return lemmaUpper + ending; } default: return ""; } } }
WordAnalysisSurfaceFormatter { public String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe) { String formatted = format(analysis, apostrophe); Locale locale = analysis.getDictionaryItem().hasAttribute(RootAttribute.LocaleEn) ? Locale.ENGLISH : Turkish.LOCALE; switch (type) { case DEFAULT_CASE: return formatted; case LOWER_CASE: return formatted.toLowerCase(locale); case UPPER_CASE: return formatted.toUpperCase(locale); case TITLE_CASE: return Turkish.capitalize(formatted); case UPPER_CASE_ROOT_LOWER_CASE_ENDING: String ending = analysis.getEnding(); String lemmaUpper = analysis.getDictionaryItem().normalizedLemma().toUpperCase(locale); if (ending.length() == 0) { return lemmaUpper; } if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return lemmaUpper + apostrophe + ending; } else { return lemmaUpper + ending; } default: return ""; } } }
WordAnalysisSurfaceFormatter { public String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe) { String formatted = format(analysis, apostrophe); Locale locale = analysis.getDictionaryItem().hasAttribute(RootAttribute.LocaleEn) ? Locale.ENGLISH : Turkish.LOCALE; switch (type) { case DEFAULT_CASE: return formatted; case LOWER_CASE: return formatted.toLowerCase(locale); case UPPER_CASE: return formatted.toUpperCase(locale); case TITLE_CASE: return Turkish.capitalize(formatted); case UPPER_CASE_ROOT_LOWER_CASE_ENDING: String ending = analysis.getEnding(); String lemmaUpper = analysis.getDictionaryItem().normalizedLemma().toUpperCase(locale); if (ending.length() == 0) { return lemmaUpper; } if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return lemmaUpper + apostrophe + ending; } else { return lemmaUpper + ending; } default: return ""; } } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
WordAnalysisSurfaceFormatter { public String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe) { String formatted = format(analysis, apostrophe); Locale locale = analysis.getDictionaryItem().hasAttribute(RootAttribute.LocaleEn) ? Locale.ENGLISH : Turkish.LOCALE; switch (type) { case DEFAULT_CASE: return formatted; case LOWER_CASE: return formatted.toLowerCase(locale); case UPPER_CASE: return formatted.toUpperCase(locale); case TITLE_CASE: return Turkish.capitalize(formatted); case UPPER_CASE_ROOT_LOWER_CASE_ENDING: String ending = analysis.getEnding(); String lemmaUpper = analysis.getDictionaryItem().normalizedLemma().toUpperCase(locale); if (ending.length() == 0) { return lemmaUpper; } if (apostrophe != null || apostropheRequired(analysis)) { if (apostrophe == null) { apostrophe = "'"; } return lemmaUpper + apostrophe + ending; } else { return lemmaUpper + ending; } default: return ""; } } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
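A hedged sketch of the two formatter entry points above: format restores the surface with an apostrophe when one is required (proper nouns, numerals), and formatToCase additionally re-cases the result using the nested CaseType enum referenced in the tests. The small custom lexicon mirrors the test above; imports are omitted as an assumption about the library layout.

// Minimal sketch, assuming Zemberek's TurkishMorphology, SingleAnalysis and
// WordAnalysisSurfaceFormatter are on the classpath (imports omitted).
public class FormatterDemo {
  public static void main(String[] args) {
    TurkishMorphology morphology = TurkishMorphology.builder()
        .disableCache()
        .setLexicon("Aydın", "şiir")
        .build();
    WordAnalysisSurfaceFormatter formatter = new WordAnalysisSurfaceFormatter();
    for (SingleAnalysis analysis : morphology.analyze("aydında")) {
      // null lets the formatter decide whether an apostrophe is required; proper nouns get one.
      System.out.println(formatter.format(analysis, null));                    // Aydın'da
      System.out.println(formatter.formatToCase(
          analysis, WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE, null));  // AYDIN'DA
    }
  }
}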
@Test public void suggestVerb1() { TurkishMorphology morphology = TurkishMorphology.builder().setLexicon("okumak").build(); List<String> endings = Lists.newArrayList("dum"); StemEndingGraph graph = new StemEndingGraph(morphology, endings); TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology, graph.stemGraph); List<String> res = spellChecker.suggestForWord("okudm"); Assert.assertTrue(res.contains("okudum")); }
public List<String> suggestForWord(String word, NgramLanguageModel lm) { List<String> unRanked = getUnrankedSuggestions(word); return rankWithUnigramProbability(unRanked, lm); }
TurkishSpellChecker { public List<String> suggestForWord(String word, NgramLanguageModel lm) { List<String> unRanked = getUnrankedSuggestions(word); return rankWithUnigramProbability(unRanked, lm); } }
TurkishSpellChecker { public List<String> suggestForWord(String word, NgramLanguageModel lm) { List<String> unRanked = getUnrankedSuggestions(word); return rankWithUnigramProbability(unRanked, lm); } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); }
TurkishSpellChecker { public List<String> suggestForWord(String word, NgramLanguageModel lm) { List<String> unRanked = getUnrankedSuggestions(word); return rankWithUnigramProbability(unRanked, lm); } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
TurkishSpellChecker { public List<String> suggestForWord(String word, NgramLanguageModel lm) { List<String> unRanked = getUnrankedSuggestions(word); return rankWithUnigramProbability(unRanked, lm); } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
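A hedged sketch of the spell checker above using only the constructor that takes a TurkishMorphology (the custom StemEndingGraph in the test is specific to that test setup): check tests whether a word is acceptable and suggestForWord produces ranked candidates. createWithDefaults and the omitted imports are assumptions about the runtime environment.

// Minimal sketch, assuming TurkishMorphology and TurkishSpellChecker from Zemberek are available.
public class SpellCheckDemo {
  public static void main(String[] args) throws Exception {
    TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
    TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology);
    String word = "okudm";
    if (!spellChecker.check(word)) {
      // Ranked suggestions; "okudum" is expected to be among them.
      System.out.println(spellChecker.suggestForWord(word));
    }
  }
}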
@Test public void guessCaseTest() { String[] inputs = {"abc", "Abc", "ABC", "Abc'de", "ABC'DE", "ABC.", "ABC'de", "a", "12", "A", "A1"}; WordAnalysisSurfaceFormatter.CaseType[] expected = { LOWER_CASE, TITLE_CASE, UPPER_CASE, TITLE_CASE, UPPER_CASE, UPPER_CASE, UPPER_CASE_ROOT_LOWER_CASE_ENDING, LOWER_CASE, DEFAULT_CASE, UPPER_CASE, UPPER_CASE, }; WordAnalysisSurfaceFormatter formatter = new WordAnalysisSurfaceFormatter(); int i = 0; for (String input : inputs) { Assert.assertEquals(expected[i], formatter.guessCase(input)); i++; } }
public CaseType guessCase(String input) { boolean firstLetterUpperCase = false; int lowerCaseCount = 0; int upperCaseCount = 0; int letterCount = 0; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if (!Character.isAlphabetic(c)) { continue; } if (i == 0) { firstLetterUpperCase = Character.isUpperCase(c); if (firstLetterUpperCase) { upperCaseCount++; } else { lowerCaseCount++; } } else { if (Character.isUpperCase(c)) { upperCaseCount++; } else if (Character.isLowerCase(c)) { lowerCaseCount++; } } letterCount++; } if (letterCount == 0) { return CaseType.DEFAULT_CASE; } if (letterCount == lowerCaseCount) { return CaseType.LOWER_CASE; } if (letterCount == upperCaseCount) { return CaseType.UPPER_CASE; } if (firstLetterUpperCase && letterCount == lowerCaseCount + 1) { return letterCount == 1 ? CaseType.UPPER_CASE : CaseType.TITLE_CASE; } int apostropheIndex = input.indexOf('\''); if (apostropheIndex > 0 && apostropheIndex < input.length() - 1) { if (guessCase(input.substring(0, apostropheIndex)) == CaseType.UPPER_CASE && guessCase(input.substring(apostropheIndex + 1)) == CaseType.LOWER_CASE) { return CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING; } } return CaseType.MIXED_CASE; }
WordAnalysisSurfaceFormatter { public CaseType guessCase(String input) { boolean firstLetterUpperCase = false; int lowerCaseCount = 0; int upperCaseCount = 0; int letterCount = 0; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if (!Character.isAlphabetic(c)) { continue; } if (i == 0) { firstLetterUpperCase = Character.isUpperCase(c); if (firstLetterUpperCase) { upperCaseCount++; } else { lowerCaseCount++; } } else { if (Character.isUpperCase(c)) { upperCaseCount++; } else if (Character.isLowerCase(c)) { lowerCaseCount++; } } letterCount++; } if (letterCount == 0) { return CaseType.DEFAULT_CASE; } if (letterCount == lowerCaseCount) { return CaseType.LOWER_CASE; } if (letterCount == upperCaseCount) { return CaseType.UPPER_CASE; } if (firstLetterUpperCase && letterCount == lowerCaseCount + 1) { return letterCount == 1 ? CaseType.UPPER_CASE : CaseType.TITLE_CASE; } int apostropheIndex = input.indexOf('\''); if (apostropheIndex > 0 && apostropheIndex < input.length() - 1) { if (guessCase(input.substring(0, apostropheIndex)) == CaseType.UPPER_CASE && guessCase(input.substring(apostropheIndex + 1)) == CaseType.LOWER_CASE) { return CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING; } } return CaseType.MIXED_CASE; } }
WordAnalysisSurfaceFormatter { public CaseType guessCase(String input) { boolean firstLetterUpperCase = false; int lowerCaseCount = 0; int upperCaseCount = 0; int letterCount = 0; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if (!Character.isAlphabetic(c)) { continue; } if (i == 0) { firstLetterUpperCase = Character.isUpperCase(c); if (firstLetterUpperCase) { upperCaseCount++; } else { lowerCaseCount++; } } else { if (Character.isUpperCase(c)) { upperCaseCount++; } else if (Character.isLowerCase(c)) { lowerCaseCount++; } } letterCount++; } if (letterCount == 0) { return CaseType.DEFAULT_CASE; } if (letterCount == lowerCaseCount) { return CaseType.LOWER_CASE; } if (letterCount == upperCaseCount) { return CaseType.UPPER_CASE; } if (firstLetterUpperCase && letterCount == lowerCaseCount + 1) { return letterCount == 1 ? CaseType.UPPER_CASE : CaseType.TITLE_CASE; } int apostropheIndex = input.indexOf('\''); if (apostropheIndex > 0 && apostropheIndex < input.length() - 1) { if (guessCase(input.substring(0, apostropheIndex)) == CaseType.UPPER_CASE && guessCase(input.substring(apostropheIndex + 1)) == CaseType.LOWER_CASE) { return CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING; } } return CaseType.MIXED_CASE; } }
WordAnalysisSurfaceFormatter { public CaseType guessCase(String input) { boolean firstLetterUpperCase = false; int lowerCaseCount = 0; int upperCaseCount = 0; int letterCount = 0; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if (!Character.isAlphabetic(c)) { continue; } if (i == 0) { firstLetterUpperCase = Character.isUpperCase(c); if (firstLetterUpperCase) { upperCaseCount++; } else { lowerCaseCount++; } } else { if (Character.isUpperCase(c)) { upperCaseCount++; } else if (Character.isLowerCase(c)) { lowerCaseCount++; } } letterCount++; } if (letterCount == 0) { return CaseType.DEFAULT_CASE; } if (letterCount == lowerCaseCount) { return CaseType.LOWER_CASE; } if (letterCount == upperCaseCount) { return CaseType.UPPER_CASE; } if (firstLetterUpperCase && letterCount == lowerCaseCount + 1) { return letterCount == 1 ? CaseType.UPPER_CASE : CaseType.TITLE_CASE; } int apostropheIndex = input.indexOf('\''); if (apostropheIndex > 0 && apostropheIndex < input.length() - 1) { if (guessCase(input.substring(0, apostropheIndex)) == CaseType.UPPER_CASE && guessCase(input.substring(apostropheIndex + 1)) == CaseType.LOWER_CASE) { return CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING; } } return CaseType.MIXED_CASE; } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
WordAnalysisSurfaceFormatter { public CaseType guessCase(String input) { boolean firstLetterUpperCase = false; int lowerCaseCount = 0; int upperCaseCount = 0; int letterCount = 0; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if (!Character.isAlphabetic(c)) { continue; } if (i == 0) { firstLetterUpperCase = Character.isUpperCase(c); if (firstLetterUpperCase) { upperCaseCount++; } else { lowerCaseCount++; } } else { if (Character.isUpperCase(c)) { upperCaseCount++; } else if (Character.isLowerCase(c)) { lowerCaseCount++; } } letterCount++; } if (letterCount == 0) { return CaseType.DEFAULT_CASE; } if (letterCount == lowerCaseCount) { return CaseType.LOWER_CASE; } if (letterCount == upperCaseCount) { return CaseType.UPPER_CASE; } if (firstLetterUpperCase && letterCount == lowerCaseCount + 1) { return letterCount == 1 ? CaseType.UPPER_CASE : CaseType.TITLE_CASE; } int apostropheIndex = input.indexOf('\''); if (apostropheIndex > 0 && apostropheIndex < input.length() - 1) { if (guessCase(input.substring(0, apostropheIndex)) == CaseType.UPPER_CASE && guessCase(input.substring(apostropheIndex + 1)) == CaseType.LOWER_CASE) { return CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING; } } return CaseType.MIXED_CASE; } String format(SingleAnalysis analysis, String apostrophe); String format(SingleAnalysis analysis); String formatToCase(SingleAnalysis analysis, CaseType type, String apostrophe); String formatToCase(SingleAnalysis analysis, CaseType type); boolean canBeFormatted(SingleAnalysis analysis, CaseType type); CaseType guessCase(String input); }
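A small sketch of guessCase above, mapping a few raw tokens to the CaseType the formatter infers for them; the expected values mirror the test and nothing beyond the class itself is assumed (imports omitted).

// Minimal sketch, assuming WordAnalysisSurfaceFormatter is on the classpath.
public class GuessCaseDemo {
  public static void main(String[] args) {
    WordAnalysisSurfaceFormatter formatter = new WordAnalysisSurfaceFormatter();
    String[] tokens = {"abc", "Abc", "ABC", "ABC'de", "12"};
    for (String token : tokens) {
      // Expected: LOWER_CASE, TITLE_CASE, UPPER_CASE, UPPER_CASE_ROOT_LOWER_CASE_ENDING, DEFAULT_CASE
      System.out.println(token + " -> " + formatter.guessCase(token));
    }
  }
}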
@Test public void test() throws IOException { String input = "4 Neden önemli?"; TurkishMorphology analyzer = TurkishMorphology.createWithDefaults(); RuleBasedDisambiguator disambiguator = new RuleBasedDisambiguator(analyzer, Rules.fromResources()); ResultSentence resultSentence = disambiguator.disambiguate(input); System.out.println(resultSentence.allIgnoredCount()); for (AmbiguityAnalysis a : resultSentence.results) { a.getForTrainingOutput().forEach(System.out::println); } }
public ResultSentence disambiguate(String sentence) { List<WordAnalysis> ambiguous = analyzer.analyzeSentence(sentence); ResultSentence s = new ResultSentence(sentence, ambiguous); s.makeDecisions(rules); return s; }
RuleBasedDisambiguator { public ResultSentence disambiguate(String sentence) { List<WordAnalysis> ambiguous = analyzer.analyzeSentence(sentence); ResultSentence s = new ResultSentence(sentence, ambiguous); s.makeDecisions(rules); return s; } }
RuleBasedDisambiguator { public ResultSentence disambiguate(String sentence) { List<WordAnalysis> ambiguous = analyzer.analyzeSentence(sentence); ResultSentence s = new ResultSentence(sentence, ambiguous); s.makeDecisions(rules); return s; } RuleBasedDisambiguator(TurkishMorphology analyzer, Rules rules); }
RuleBasedDisambiguator { public ResultSentence disambiguate(String sentence) { List<WordAnalysis> ambiguous = analyzer.analyzeSentence(sentence); ResultSentence s = new ResultSentence(sentence, ambiguous); s.makeDecisions(rules); return s; } RuleBasedDisambiguator(TurkishMorphology analyzer, Rules rules); ResultSentence disambiguate(String sentence); }
RuleBasedDisambiguator { public ResultSentence disambiguate(String sentence) { List<WordAnalysis> ambiguous = analyzer.analyzeSentence(sentence); ResultSentence s = new ResultSentence(sentence, ambiguous); s.makeDecisions(rules); return s; } RuleBasedDisambiguator(TurkishMorphology analyzer, Rules rules); ResultSentence disambiguate(String sentence); }
@Test public void testTitleWithDoubleQuotes() { String meta = "<doc id=\"http: String content = "Fernandao yeni yıldan şampiyonluk bekliyor\n" + "30.12.2016 Cuma 17:04 (Güncellendi: 30.12.2016 Cuma 17:09)\n" + "Fernandao yeni yıldan şampiyonluk bekliyor\n" + "</doc>"; WebDocument d = WebDocument.fromText(meta, Splitter.on("\n").splitToList(content)); Assert.assertEquals("Fernandao \"yeni yıldan şampiyonluk\" bekliyor", d.getTitle()); }
public static WebDocument fromText(String meta, List<String> pageData) { String url = Regexps.firstMatch(urlPattern, meta, 2); String id = url.replaceAll("http: String source = Regexps.firstMatch(sourcePattern, meta, 2); String crawlDate = Regexps.firstMatch(crawlDatePattern, meta, 2); String labels = getAttribute(labelPattern, meta); String category = getAttribute(categoryPattern, meta); String title = getAttribute(titlePattern, meta); int i = source.lastIndexOf("/"); if (i >= 0 && i < source.length()) { source = source.substring(i + 1); } return new WebDocument(source, id, title, pageData, url, crawlDate, labels, category); }
WebDocument { public static WebDocument fromText(String meta, List<String> pageData) { String url = Regexps.firstMatch(urlPattern, meta, 2); String id = url.replaceAll("http: String source = Regexps.firstMatch(sourcePattern, meta, 2); String crawlDate = Regexps.firstMatch(crawlDatePattern, meta, 2); String labels = getAttribute(labelPattern, meta); String category = getAttribute(categoryPattern, meta); String title = getAttribute(titlePattern, meta); int i = source.lastIndexOf("/"); if (i >= 0 && i < source.length()) { source = source.substring(i + 1); } return new WebDocument(source, id, title, pageData, url, crawlDate, labels, category); } }
WebDocument { public static WebDocument fromText(String meta, List<String> pageData) { String url = Regexps.firstMatch(urlPattern, meta, 2); String id = url.replaceAll("http: String source = Regexps.firstMatch(sourcePattern, meta, 2); String crawlDate = Regexps.firstMatch(crawlDatePattern, meta, 2); String labels = getAttribute(labelPattern, meta); String category = getAttribute(categoryPattern, meta); String title = getAttribute(titlePattern, meta); int i = source.lastIndexOf("/"); if (i >= 0 && i < source.length()) { source = source.substring(i + 1); } return new WebDocument(source, id, title, pageData, url, crawlDate, labels, category); } WebDocument(String source, String id, String title, List<String> lines, String url, String crawlDate, String labels, String category); }
WebDocument { public static WebDocument fromText(String meta, List<String> pageData) { String url = Regexps.firstMatch(urlPattern, meta, 2); String id = url.replaceAll("http: String source = Regexps.firstMatch(sourcePattern, meta, 2); String crawlDate = Regexps.firstMatch(crawlDatePattern, meta, 2); String labels = getAttribute(labelPattern, meta); String category = getAttribute(categoryPattern, meta); String title = getAttribute(titlePattern, meta); int i = source.lastIndexOf("/"); if (i >= 0 && i < source.length()) { source = source.substring(i + 1); } return new WebDocument(source, id, title, pageData, url, crawlDate, labels, category); } WebDocument(String source, String id, String title, List<String> lines, String url, String crawlDate, String labels, String category); static WebDocument fromText(String meta, List<String> pageData); int contentLength(); void removeDuplicateLines(); String getDocumentHeader(); WebDocument emptyContent(); String getLabelString(); String getCategory(); List<String> getLabels(); String getTitle(); long getHash(); String getContentAsString(); List<String> getLines(); void setContent(List<String> lines); @Override boolean equals(Object o); String getSource(); String getId(); String getUrl(); @Override int hashCode(); }
WebDocument { public static WebDocument fromText(String meta, List<String> pageData) { String url = Regexps.firstMatch(urlPattern, meta, 2); String id = url.replaceAll("http: String source = Regexps.firstMatch(sourcePattern, meta, 2); String crawlDate = Regexps.firstMatch(crawlDatePattern, meta, 2); String labels = getAttribute(labelPattern, meta); String category = getAttribute(categoryPattern, meta); String title = getAttribute(titlePattern, meta); int i = source.lastIndexOf("/"); if (i >= 0 && i < source.length()) { source = source.substring(i + 1); } return new WebDocument(source, id, title, pageData, url, crawlDate, labels, category); } WebDocument(String source, String id, String title, List<String> lines, String url, String crawlDate, String labels, String category); static WebDocument fromText(String meta, List<String> pageData); int contentLength(); void removeDuplicateLines(); String getDocumentHeader(); WebDocument emptyContent(); String getLabelString(); String getCategory(); List<String> getLabels(); String getTitle(); long getHash(); String getContentAsString(); List<String> getLines(); void setContent(List<String> lines); @Override boolean equals(Object o); String getSource(); String getId(); String getUrl(); @Override int hashCode(); }
@Test public void saveLoadDocument() throws IOException, SQLException { Path tempDir = Files.createTempDirectory("foo"); CorpusDb storage = new CorpusDb(tempDir); Map<Integer, CorpusDocument> docMap = saveDocuments(storage); for (Integer key : docMap.keySet()) { CorpusDocument expected = docMap.get(key); CorpusDocument actual = storage.loadDocumentByKey(key); Assert.assertEquals(expected.id, actual.id); Assert.assertEquals(expected.content, actual.content); } IOUtil.deleteTempDir(tempDir); }
public CorpusDocument loadDocumentByKey(int key) { String sql = "SELECT ID, DOC_ID, SOURCE_ID, SOURCE_DATE, PROCESS_DATE, CONTENT FROM DOCUMENT_TABLE " + "WHERE ID = " + key; return getDocument(sql); }
CorpusDb { public CorpusDocument loadDocumentByKey(int key) { String sql = "SELECT ID, DOC_ID, SOURCE_ID, SOURCE_DATE, PROCESS_DATE, CONTENT FROM DOCUMENT_TABLE " + "WHERE ID = " + key; return getDocument(sql); } }
CorpusDb { public CorpusDocument loadDocumentByKey(int key) { String sql = "SELECT ID, DOC_ID, SOURCE_ID, SOURCE_DATE, PROCESS_DATE, CONTENT FROM DOCUMENT_TABLE " + "WHERE ID = " + key; return getDocument(sql); } CorpusDb(Path dbRoot); private CorpusDb(JdbcConnectionPool connectionPool); }
CorpusDb { public CorpusDocument loadDocumentByKey(int key) { String sql = "SELECT ID, DOC_ID, SOURCE_ID, SOURCE_DATE, PROCESS_DATE, CONTENT FROM DOCUMENT_TABLE " + "WHERE ID = " + key; return getDocument(sql); } CorpusDb(Path dbRoot); private CorpusDb(JdbcConnectionPool connectionPool); void addAll(List<CorpusDocument> docs); void generateTables(); void saveSentences(int docKey, List<String> sentences); List<SentenceSearchResult> search(String text); CorpusDocument loadDocumentByKey(int key); void addDocs(Path corpusFile); }
CorpusDb { public CorpusDocument loadDocumentByKey(int key) { String sql = "SELECT ID, DOC_ID, SOURCE_ID, SOURCE_DATE, PROCESS_DATE, CONTENT FROM DOCUMENT_TABLE " + "WHERE ID = " + key; return getDocument(sql); } CorpusDb(Path dbRoot); private CorpusDb(JdbcConnectionPool connectionPool); void addAll(List<CorpusDocument> docs); void generateTables(); void saveSentences(int docKey, List<String> sentences); List<SentenceSearchResult> search(String text); CorpusDocument loadDocumentByKey(int key); void addDocs(Path corpusFile); }
@Test public void search() throws IOException, SQLException { Path tempDir = Files.createTempDirectory("foo"); CorpusDb storage = new CorpusDb(tempDir); Map<Integer, CorpusDocument> docMap = saveDocuments(storage); for (Integer key : docMap.keySet()) { CorpusDocument doc = docMap.get(key); List<String> paragraphs = Splitter.on("\n").splitToList(doc.content); List<String> sentences = TurkishSentenceExtractor.DEFAULT.fromParagraphs(paragraphs); storage.saveSentences(key, sentences); } List<SentenceSearchResult> searchResults = storage.search("milyar"); for (SentenceSearchResult searchResult : searchResults) { System.out.println(searchResult); } IOUtil.deleteTempDir(tempDir); }
public List<SentenceSearchResult> search(String text) { try (Connection connection = connectionPool.getConnection()) { String sql = "SELECT T.* FROM FT_SEARCH_DATA('" + text + "', 0, 0) FT, SENTENCE_TABLE T " + "WHERE FT.TABLE='SENTENCE_TABLE' AND T.ID=FT.KEYS[0];"; Statement stat = connection.createStatement(); ResultSet set = stat.executeQuery(sql); List<SentenceSearchResult> result = new ArrayList<>(); while (set.next()) { result.add(new SentenceSearchResult(set.getInt(1), set.getInt(2), set.getString(3))); } return result; } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } }
CorpusDb { public List<SentenceSearchResult> search(String text) { try (Connection connection = connectionPool.getConnection()) { String sql = "SELECT T.* FROM FT_SEARCH_DATA('" + text + "', 0, 0) FT, SENTENCE_TABLE T " + "WHERE FT.TABLE='SENTENCE_TABLE' AND T.ID=FT.KEYS[0];"; Statement stat = connection.createStatement(); ResultSet set = stat.executeQuery(sql); List<SentenceSearchResult> result = new ArrayList<>(); while (set.next()) { result.add(new SentenceSearchResult(set.getInt(1), set.getInt(2), set.getString(3))); } return result; } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } } }
CorpusDb { public List<SentenceSearchResult> search(String text) { try (Connection connection = connectionPool.getConnection()) { String sql = "SELECT T.* FROM FT_SEARCH_DATA('" + text + "', 0, 0) FT, SENTENCE_TABLE T " + "WHERE FT.TABLE='SENTENCE_TABLE' AND T.ID=FT.KEYS[0];"; Statement stat = connection.createStatement(); ResultSet set = stat.executeQuery(sql); List<SentenceSearchResult> result = new ArrayList<>(); while (set.next()) { result.add(new SentenceSearchResult(set.getInt(1), set.getInt(2), set.getString(3))); } return result; } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } } CorpusDb(Path dbRoot); private CorpusDb(JdbcConnectionPool connectionPool); }
CorpusDb { public List<SentenceSearchResult> search(String text) { try (Connection connection = connectionPool.getConnection()) { String sql = "SELECT T.* FROM FT_SEARCH_DATA('" + text + "', 0, 0) FT, SENTENCE_TABLE T " + "WHERE FT.TABLE='SENTENCE_TABLE' AND T.ID=FT.KEYS[0];"; Statement stat = connection.createStatement(); ResultSet set = stat.executeQuery(sql); List<SentenceSearchResult> result = new ArrayList<>(); while (set.next()) { result.add(new SentenceSearchResult(set.getInt(1), set.getInt(2), set.getString(3))); } return result; } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } } CorpusDb(Path dbRoot); private CorpusDb(JdbcConnectionPool connectionPool); void addAll(List<CorpusDocument> docs); void generateTables(); void saveSentences(int docKey, List<String> sentences); List<SentenceSearchResult> search(String text); CorpusDocument loadDocumentByKey(int key); void addDocs(Path corpusFile); }
CorpusDb { public List<SentenceSearchResult> search(String text) { try (Connection connection = connectionPool.getConnection()) { String sql = "SELECT T.* FROM FT_SEARCH_DATA('" + text + "', 0, 0) FT, SENTENCE_TABLE T " + "WHERE FT.TABLE='SENTENCE_TABLE' AND T.ID=FT.KEYS[0];"; Statement stat = connection.createStatement(); ResultSet set = stat.executeQuery(sql); List<SentenceSearchResult> result = new ArrayList<>(); while (set.next()) { result.add(new SentenceSearchResult(set.getInt(1), set.getInt(2), set.getString(3))); } return result; } catch (Exception e) { e.printStackTrace(); throw new RuntimeException(e); } } CorpusDb(Path dbRoot); private CorpusDb(JdbcConnectionPool connectionPool); void addAll(List<CorpusDocument> docs); void generateTables(); void saveSentences(int docKey, List<String> sentences); List<SentenceSearchResult> search(String text); CorpusDocument loadDocumentByKey(int key); void addDocs(Path corpusFile); }
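A sketch of the CorpusDb full-text search flow exercised by the two tests above: load a stored document, index its sentences, then query. The document key and the newline-splitting of content are assumptions; only the id/content fields of CorpusDocument are visible here. Note that search(...) concatenates the query text directly into the SQL string, so only trusted input should be passed to it; a PreparedStatement would be the safer choice in production code.

// Sketch only; docKey and the content layout are assumptions.
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class CorpusDbSearchSketch {
  public static void main(String[] args) throws Exception {
    Path dbRoot = Files.createTempDirectory("corpus-db");
    CorpusDb db = new CorpusDb(dbRoot);

    // Assumed: documents were already stored (e.g. via addAll(...) or addDocs(path))
    // and we know the integer key of one of them.
    int docKey = 1;
    CorpusDocument doc = db.loadDocumentByKey(docKey);

    // Index the document's sentences, mirroring the search test above.
    List<String> paragraphs = List.of(doc.content.split("\n"));
    List<String> sentences = TurkishSentenceExtractor.DEFAULT.fromParagraphs(paragraphs);
    db.saveSentences(docKey, sentences);

    // Query the full-text index; each hit carries the sentence and its document key.
    for (SentenceSearchResult r : db.search("milyar")) {
      System.out.println(r);
    }
  }
}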
@Test public void WriteStringTest() throws IOException { new SimpleTextWriter(tmpFile).write("Hello World!"); Assert.assertEquals(new SimpleTextReader(tmpFile).asString(), "Hello World!"); new SimpleTextWriter(tmpFile).write(null); Assert.assertEquals(new SimpleTextReader(tmpFile).asString(), ""); new SimpleTextWriter(tmpFile).write(""); Assert.assertEquals(new SimpleTextReader(tmpFile).asString(), ""); }
public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
@Test public void WriteStringKeepOpenTest() throws IOException { try (SimpleTextWriter sfw = new SimpleTextWriter .Builder(tmpFile) .keepOpen() .build()) { sfw.write("Hello"); sfw.write("Merhaba"); sfw.write(""); sfw.write(null); } Assert.assertEquals("HelloMerhaba", new SimpleTextReader(tmpFile).asString()); }
public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
@Test(expected = IOException.class) public void keepOpenExceptionTest() throws IOException { SimpleTextWriter sfw = new SimpleTextWriter .Builder(tmpFile) .build(); sfw.write("Hello"); sfw.write("Now it will throw an exception.."); }
public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter write(String s) throws IOException { try { if (s == null || s.length() == 0) { return this; } writer.write(s); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
@Test public void WriteMultiLineStringTest() throws IOException { List<String> strs = new ArrayList<>(Arrays.asList("Merhaba", "Dunya", "")); new SimpleTextWriter(tmpFile).writeLines(strs); List<String> read = new SimpleTextReader(tmpFile).asStringList(); for (int i = 0; i < read.size(); i++) { Assert.assertEquals(read.get(i), strs.get(i)); } }
public SimpleTextWriter writeLines(Collection<String> lines) throws IOException { try { IOs.writeLines(lines, writer); return this; } finally { if (!keepOpen) { close(); } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter writeLines(Collection<String> lines) throws IOException { try { IOs.writeLines(lines, writer); return this; } finally { if (!keepOpen) { close(); } } } }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter writeLines(Collection<String> lines) throws IOException { try { IOs.writeLines(lines, writer); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter writeLines(Collection<String> lines) throws IOException { try { IOs.writeLines(lines, writer); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
SimpleTextWriter implements AutoCloseable { public SimpleTextWriter writeLines(Collection<String> lines) throws IOException { try { IOs.writeLines(lines, writer); return this; } finally { if (!keepOpen) { close(); } } } private SimpleTextWriter( BufferedWriter writer, OutputStream os, String encoding, boolean keepOpen, boolean addNewLineBeforeClose); SimpleTextWriter(String fileName); SimpleTextWriter(String fileName, String encoding); SimpleTextWriter(File file, String encoding); SimpleTextWriter(File file); static Builder builder(File file); static Builder utf8Builder(File file); static SimpleTextWriter oneShotUTF8Writer(File file); static SimpleTextWriter keepOpenUTF8Writer(File file); static SimpleTextWriter keepOpenWriter(OutputStream os, String encoding); static SimpleTextWriter oneShotWriter(OutputStream os, String encoding); static SimpleTextWriter keepOpenWriter(OutputStream os); String getEncoding(); boolean isKeepOpen(); SimpleTextWriter writeLines(Collection<String> lines); SimpleTextWriter writeLines(String... lines); SimpleTextWriter writeToStringLines(Collection<?> objects); SimpleTextWriter write(String s); SimpleTextWriter writeLine(String s); SimpleTextWriter writeLine(); SimpleTextWriter writeLine(Object obj); SimpleTextWriter copyFromStream(InputStream is); SimpleTextWriter copyFromURL(String urlStr); void close(); }
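A sketch contrasting the two writer modes shown in the SimpleTextWriter rows above: a one-shot writer closes itself after the first write call (so a second write throws IOException, as the expected-exception test shows), while a keep-open writer stays open until close(). The temp-file path is illustrative.

// Sketch only; output file is a throwaway temp file.
import java.io.File;
import java.io.IOException;
import java.util.Arrays;

public class SimpleTextWriterSketch {
  public static void main(String[] args) throws IOException {
    File out = File.createTempFile("stw-demo", ".txt");

    // One-shot mode: the writer closes itself in the finally block of write().
    new SimpleTextWriter.Builder(out).build().write("Hello World!");

    // Keep-open mode: safe to chain several writes; try-with-resources works
    // because SimpleTextWriter implements AutoCloseable.
    try (SimpleTextWriter w = new SimpleTextWriter.Builder(out).keepOpen().build()) {
      w.write("Hello");
      w.writeLines(Arrays.asList("Merhaba", "Dunya"));
    }
  }
}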
@Test public void testReader() throws IOException { Map<String, String> map = new KeyValueReader(":") .loadFromFile(new File(key_value_colon_separator.getFile())); Assert.assertEquals(map.size(), 4); Assert.assertTrue(TestUtil.containsAllKeys(map, "1", "2", "3", "4")); Assert.assertTrue(TestUtil.containsAllValues(map, "bir", "iki", "uc", "dort")); }
public Map<String, String> loadFromFile(File file) throws IOException { return loadFromFile(new SimpleTextReader. Builder(file) .trim() .ignoreIfStartsWith(ignorePrefix) .ignoreWhiteSpaceLines() .build()); }
KeyValueReader { public Map<String, String> loadFromFile(File file) throws IOException { return loadFromFile(new SimpleTextReader. Builder(file) .trim() .ignoreIfStartsWith(ignorePrefix) .ignoreWhiteSpaceLines() .build()); } }
KeyValueReader { public Map<String, String> loadFromFile(File file) throws IOException { return loadFromFile(new SimpleTextReader. Builder(file) .trim() .ignoreIfStartsWith(ignorePrefix) .ignoreWhiteSpaceLines() .build()); } KeyValueReader(String seperator); KeyValueReader(String seperator, String ignorePrefix); }
KeyValueReader { public Map<String, String> loadFromFile(File file) throws IOException { return loadFromFile(new SimpleTextReader. Builder(file) .trim() .ignoreIfStartsWith(ignorePrefix) .ignoreWhiteSpaceLines() .build()); } KeyValueReader(String seperator); KeyValueReader(String seperator, String ignorePrefix); Map<String, String> loadFromFile(File file); Map<String, String> loadFromFile(File file, String encoding); Map<String, String> loadFromStream(InputStream is); Map<String, String> loadFromStream(InputStream is, String encoding); Map<String, String> loadFromFile(SimpleTextReader sfr); }
KeyValueReader { public Map<String, String> loadFromFile(File file) throws IOException { return loadFromFile(new SimpleTextReader. Builder(file) .trim() .ignoreIfStartsWith(ignorePrefix) .ignoreWhiteSpaceLines() .build()); } KeyValueReader(String seperator); KeyValueReader(String seperator, String ignorePrefix); Map<String, String> loadFromFile(File file); Map<String, String> loadFromFile(File file, String encoding); Map<String, String> loadFromStream(InputStream is); Map<String, String> loadFromStream(InputStream is, String encoding); Map<String, String> loadFromFile(SimpleTextReader sfr); }
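A sketch of reading a colon-separated key-value file with KeyValueReader, as the test above does. The sample file content and the "#" ignore prefix are illustrative; the two-argument constructor and the ignoreIfStartsWith(...) call in loadFromFile are visible above.

// Sketch only; file content is illustrative.
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.Map;

public class KeyValueReaderSketch {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("kv", ".txt");
    Files.write(f.toPath(),
        "1:bir\n2:iki\n# a comment line\n3:uc\n".getBytes(StandardCharsets.UTF_8));

    // ":" is the separator; lines starting with "#" are skipped via the ignore prefix.
    Map<String, String> map = new KeyValueReader(":", "#").loadFromFile(f);
    System.out.println(map); // expected: {1=bir, 2=iki, 3=uc} (order depends on the map type)
  }
}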
@Test public void checkVerb1() { TurkishMorphology morphology = TurkishMorphology.builder().setLexicon("okumak").build(); List<String> endings = Lists.newArrayList("dum"); StemEndingGraph graph = new StemEndingGraph(morphology, endings); TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology, graph.stemGraph); Assert.assertTrue(spellChecker.check("okudum")); }
public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
TurkishSpellChecker { public boolean check(String input) { WordAnalysis analyses = morphology.analyze(input); WordAnalysisSurfaceFormatter.CaseType caseType = formatter.guessCase(input); for (SingleAnalysis analysis : analyses) { if (analysis.isUnknown()) { continue; } if (analysisPredicate != null && !analysisPredicate.test(analysis)) { continue; } String apostrophe = getApostrophe(input); if (formatter.canBeFormatted(analysis, caseType)) { String formatted = formatter.formatToCase(analysis, caseType, apostrophe); if (input.equals(formatted)) { return true; } } } return false; } TurkishSpellChecker(TurkishMorphology morphology); TurkishSpellChecker(TurkishMorphology morphology, CharacterGraph graph); TurkishSpellChecker( TurkishMorphology morphology, CharacterGraphDecoder decoder, CharMatcher matcher); NgramLanguageModel getUnigramLanguageModel(); void setAnalysisPredicate(Predicate<SingleAnalysis> analysisPredicate); static List<String> tokenizeForSpelling(String sentence); boolean check(String input); List<String> suggestForWord(String word, NgramLanguageModel lm); List<String> suggestForWord( String word, String leftContext, String rightContext, NgramLanguageModel lm); List<String> suggestForWord(String word); CharacterGraphDecoder getDecoder(); List<String> rankWithUnigramProbability(List<String> strings, NgramLanguageModel lm); }
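A sketch following the checkVerb1 test above: build a morphology over a tiny lexicon, restrict the stem-ending graph to a single ending, and run check(...) and suggestForWord(...). The misspelled word and the suggestion call are illustrative; suggestion output depends on the graph.

// Sketch only; the tiny lexicon and ending list come from the test above.
import com.google.common.collect.Lists;
import java.util.List;

public class SpellCheckerSketch {
  public static void main(String[] args) throws Exception {
    TurkishMorphology morphology = TurkishMorphology.builder().setLexicon("okumak").build();

    // Restrict the ending graph to "dum", exactly as the test does.
    StemEndingGraph graph = new StemEndingGraph(morphology, Lists.newArrayList("dum"));
    TurkishSpellChecker spellChecker = new TurkishSpellChecker(morphology, graph.stemGraph);

    System.out.println(spellChecker.check("okudum")); // expected: true, as asserted above
    System.out.println(spellChecker.check("okudm"));  // likely false; not asserted above

    // suggestForWord(String) is part of the API listed above; results are graph-dependent.
    List<String> suggestions = spellChecker.suggestForWord("okudm");
    System.out.println(suggestions);
  }
}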
@Test public void isEmptyTest() { assertTrue(isNullOrEmpty(null)); assertTrue(isNullOrEmpty("")); assertFalse(isNullOrEmpty("\n")); assertFalse(isNullOrEmpty("\t")); assertFalse(isNullOrEmpty(" ")); assertFalse(isNullOrEmpty("a")); assertFalse(isNullOrEmpty("as")); }
public static boolean isNullOrEmpty(String str) { return str == null || str.length() == 0; }
Strings { public static boolean isNullOrEmpty(String str) { return str == null || str.length() == 0; } }
Strings { public static boolean isNullOrEmpty(String str) { return str == null || str.length() == 0; } private Strings(); }
Strings { public static boolean isNullOrEmpty(String str) { return str == null || str.length() == 0; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static boolean isNullOrEmpty(String str) { return str == null || str.length() == 0; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test public void hasTextTest() { assertFalse(hasText(null)); assertTrue(hasText("a")); assertTrue(hasText("abc")); assertFalse(hasText("")); assertFalse(hasText(null)); assertFalse(hasText(" ")); assertFalse(hasText("\t")); assertFalse(hasText("\n")); assertFalse(hasText(" \t")); }
public static boolean hasText(String s) { return s != null && s.length() > 0 && s.trim().length() > 0; }
Strings { public static boolean hasText(String s) { return s != null && s.length() > 0 && s.trim().length() > 0; } }
Strings { public static boolean hasText(String s) { return s != null && s.length() > 0 && s.trim().length() > 0; } private Strings(); }
Strings { public static boolean hasText(String s) { return s != null && s.length() > 0 && s.trim().length() > 0; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static boolean hasText(String s) { return s != null && s.length() > 0 && s.trim().length() > 0; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test public void testIfAllHasText() { assertTrue(allHasText("fg", "a", "hyh")); assertFalse(allHasText("fg", null, "hyh")); assertFalse(allHasText("fg", " ", "hyh")); }
public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } private Strings(); }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test(expected = IllegalArgumentException.class) public void testIfAllHasTextExceptionIAE() { allHasText(); }
public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } private Strings(); }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static boolean allHasText(String... strings) { checkVarargString(strings); for (String s : strings) { if (!hasText(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test public void testAllEmpty() { assertTrue(allNullOrEmpty("", "", null)); assertFalse(allNullOrEmpty("", null, "hyh")); assertFalse(allNullOrEmpty(" ", "", "")); }
public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } private Strings(); }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test(expected = IllegalArgumentException.class) public void testAllEmptyExceptionIAE() { allNullOrEmpty(); }
public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } private Strings(); }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static boolean allNullOrEmpty(String... strings) { checkVarargString(strings); for (String s : strings) { if (!isNullOrEmpty(s)) { return false; } } return true; } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test public void leftTrimTest() { assertNull(leftTrim(null)); assertEquals(leftTrim(""), ""); assertEquals(leftTrim(" \t "), ""); assertEquals(leftTrim(" 123"), "123"); assertEquals(leftTrim("\t123"), "123"); assertEquals(leftTrim("\n123"), "123"); assertEquals(leftTrim("123"), "123"); assertEquals(leftTrim(" \n 123"), "123"); assertEquals(leftTrim("123 "), "123 "); assertEquals(leftTrim(" 3 123 "), "3 123 "); }
public static String leftTrim(String s) { if (s == null) { return null; } if (s.length() == 0) { return EMPTY_STRING; } int j = 0; for (int i = 0; i < s.length(); i++) { if (Character.isWhitespace(s.charAt(i))) { j++; } else { break; } } return s.substring(j); }
Strings { public static String leftTrim(String s) { if (s == null) { return null; } if (s.length() == 0) { return EMPTY_STRING; } int j = 0; for (int i = 0; i < s.length(); i++) { if (Character.isWhitespace(s.charAt(i))) { j++; } else { break; } } return s.substring(j); } }
Strings { public static String leftTrim(String s) { if (s == null) { return null; } if (s.length() == 0) { return EMPTY_STRING; } int j = 0; for (int i = 0; i < s.length(); i++) { if (Character.isWhitespace(s.charAt(i))) { j++; } else { break; } } return s.substring(j); } private Strings(); }
Strings { public static String leftTrim(String s) { if (s == null) { return null; } if (s.length() == 0) { return EMPTY_STRING; } int j = 0; for (int i = 0; i < s.length(); i++) { if (Character.isWhitespace(s.charAt(i))) { j++; } else { break; } } return s.substring(j); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static String leftTrim(String s) { if (s == null) { return null; } if (s.length() == 0) { return EMPTY_STRING; } int j = 0; for (int i = 0; i < s.length(); i++) { if (Character.isWhitespace(s.charAt(i))) { j++; } else { break; } } return s.substring(j); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
@Test public void rightTrimTest() { assertNull(rightTrim(null)); assertEquals(rightTrim(""), ""); assertEquals(rightTrim(" \t"), ""); assertEquals(rightTrim("aaa "), "aaa"); assertEquals(rightTrim("aaa \t "), "aaa"); assertEquals(rightTrim("aaa\n "), "aaa"); assertEquals(rightTrim("aaa"), "aaa"); assertEquals(rightTrim(" 123 "), " 123"); assertEquals(rightTrim(" 3 123 \t"), " 3 123"); }
public static String rightTrim(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } int j = str.length(); for (int i = str.length() - 1; i >= 0; --i) { if (Character.isWhitespace(str.charAt(i))) { j--; } else { break; } } return str.substring(0, j); }
Strings { public static String rightTrim(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } int j = str.length(); for (int i = str.length() - 1; i >= 0; --i) { if (Character.isWhitespace(str.charAt(i))) { j--; } else { break; } } return str.substring(0, j); } }
Strings { public static String rightTrim(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } int j = str.length(); for (int i = str.length() - 1; i >= 0; --i) { if (Character.isWhitespace(str.charAt(i))) { j--; } else { break; } } return str.substring(0, j); } private Strings(); }
Strings { public static String rightTrim(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } int j = str.length(); for (int i = str.length() - 1; i >= 0; --i) { if (Character.isWhitespace(str.charAt(i))) { j--; } else { break; } } return str.substring(0, j); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static String rightTrim(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } int j = str.length(); for (int i = str.length() - 1; i >= 0; --i) { if (Character.isWhitespace(str.charAt(i))) { j--; } else { break; } } return str.substring(0, j); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
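For contrast, rightTrim is the mirror image: it drops only trailing whitespace, unlike the JDK's String.trim(), which strips both ends. A small hedged sketch under the same assumptions as above (Strings visible; demo class name made up):

// Illustration only; compares rightTrim with String.trim().
public class RightTrimDemo {
  public static void main(String[] args) {
    String s = "  data \t";
    System.out.println("[" + Strings.rightTrim(s) + "]"); // [  data]  leading whitespace preserved
    System.out.println("[" + s.trim() + "]");             // [data]    JDK trim strips both ends
  }
}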
@Test public void repeatTest() { assertEquals(repeat('c', -1), ""); assertEquals(repeat('c', 3), "ccc"); assertEquals(repeat('c', 1), "c"); assertEquals(repeat('c', 0), ""); assertNull(repeat(null, 1)); assertEquals(repeat("ab", -1), ""); assertEquals(repeat("ab", 3), "ababab"); assertEquals(repeat("ab", 1), "ab"); assertEquals(repeat("ab", 0), ""); }
public static String repeat(char c, int count) { if (count < 1) { return EMPTY_STRING; } char[] chars = new char[count]; Arrays.fill(chars, c); return new String(chars); }
Strings { public static String repeat(char c, int count) { if (count < 1) { return EMPTY_STRING; } char[] chars = new char[count]; Arrays.fill(chars, c); return new String(chars); } }
Strings { public static String repeat(char c, int count) { if (count < 1) { return EMPTY_STRING; } char[] chars = new char[count]; Arrays.fill(chars, c); return new String(chars); } private Strings(); }
Strings { public static String repeat(char c, int count) { if (count < 1) { return EMPTY_STRING; } char[] chars = new char[count]; Arrays.fill(chars, c); return new String(chars); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static String repeat(char c, int count) { if (count < 1) { return EMPTY_STRING; } char[] chars = new char[count]; Arrays.fill(chars, c); return new String(chars); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
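The repeat test above pins down the edge cases: a count below 1 yields the empty string, and a null input string stays null. A short sketch restating that contract outside JUnit; only behavior asserted in the test or visible in the char overload's body is assumed, and the demo class name is invented:

// Illustration only.
public class RepeatDemo {
  public static void main(String[] args) {
    System.out.println(Strings.repeat('-', 5));              // -----
    System.out.println(Strings.repeat("ab", 3));             // ababab
    System.out.println("[" + Strings.repeat('x', 0) + "]");  // []    count < 1 gives the empty string
    System.out.println(Strings.repeat(null, 2));             // null  resolves to the String overload; per the test above
  }
}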
@Test public void reverseTest() { assertNull(reverse(null)); assertEquals(reverse(""), ""); assertEquals(reverse("a"), "a"); assertEquals(reverse("ab"), "ba"); assertEquals(reverse("ab cd "), " dc ba"); }
public static String reverse(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } return new StringBuilder(str).reverse().toString(); }
Strings { public static String reverse(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } return new StringBuilder(str).reverse().toString(); } }
Strings { public static String reverse(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } return new StringBuilder(str).reverse().toString(); } private Strings(); }
Strings { public static String reverse(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } return new StringBuilder(str).reverse().toString(); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); }
Strings { public static String reverse(String str) { if (str == null) { return null; } if (str.length() == 0) { return EMPTY_STRING; } return new StringBuilder(str).reverse().toString(); } private Strings(); static boolean isNullOrEmpty(String str); static boolean hasText(String s); static boolean allHasText(String... strings); static boolean allNullOrEmpty(String... strings); static String leftTrim(String s); static String rightTrim(String str); static boolean containsNone(String str, String invalidCharsStr); static boolean containsOnly(String str, String allowedChars); static String repeat(char c, int count); static String repeat(String str, int count); static String reverse(String str); static String insertFromLeft(String str, int interval, String stringToInsert); static String insertFromRight(String str, int interval, String stringToInsert); static String rightPad(String str, int size); static String rightPad(String str, int size, char padChar); static String rightPad(String str, int size, String padStr); static String leftPad(String str, int size); static String leftPad(String str, int size, char padChar); static String subStringUntilFirst(String str, String s); static String subStringUntilLast(String str, String s); static String subStringAfterFirst(String str, String s); static String subStringAfterLast(String str, String s); static String leftPad(String str, int size, String padStr); static String whiteSpacesToSingleSpace(String str); static String eliminateWhiteSpaces(String str); static String[] separateGrams(String word, int gramSize); static final String EMPTY_STRING; static final String[] EMPTY_STRING_ARRAY; }
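Because reverse delegates to StringBuilder.reverse, it reverses the whole sequence including whitespace, and (a property of StringBuilder.reverse itself, not asserted in the test above) surrogate pairs are kept intact rather than split. A tiny sketch; the demo class name is made up:

// Illustration only.
public class ReverseDemo {
  public static void main(String[] args) {
    System.out.println("[" + Strings.reverse("ab cd ") + "]"); // [ dc ba]
    System.out.println(Strings.reverse("a\uD83D\uDE00b"));     // b😀a - the surrogate pair is not split
    System.out.println(Strings.reverse(null));                 // null
  }
}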
@Test public void stemEndingTest1() { TurkishMorphology morphology = TurkishMorphology.builder() .setLexicon("bakmak", "gelmek").build(); List<String> endings = Lists.newArrayList("acak", "ecek"); StemEndingGraph graph = new StemEndingGraph(morphology, endings); CharacterGraphDecoder spellChecker = new CharacterGraphDecoder(graph.stemGraph); List<String> res = spellChecker.getSuggestions("bakcaak"); Assert.assertEquals(1, res.size()); Assert.assertEquals("bakacak", res.get(0)); }
public List<String> getSuggestions(String input) { return new Decoder().decode(input).getKeyList(); }
CharacterGraphDecoder { public List<String> getSuggestions(String input) { return new Decoder().decode(input).getKeyList(); } }
CharacterGraphDecoder { public List<String> getSuggestions(String input) { return new Decoder().decode(input).getKeyList(); } CharacterGraphDecoder(float maxPenalty); CharacterGraphDecoder(); CharacterGraphDecoder(CharacterGraph graph); CharacterGraphDecoder(float maxPenalty, Map<Character, String> nearKeyMap); }
CharacterGraphDecoder { public List<String> getSuggestions(String input) { return new Decoder().decode(input).getKeyList(); } CharacterGraphDecoder(float maxPenalty); CharacterGraphDecoder(); CharacterGraphDecoder(CharacterGraph graph); CharacterGraphDecoder(float maxPenalty, Map<Character, String> nearKeyMap); CharacterGraph getGraph(); void addWord(String word); void addWords(String... words); void addWords(List<String> vocabulary); List<ScoredItem<String>> getSuggestionsWithScores(String input); List<ScoredItem<String>> getSuggestionsWithScores(String input, CharMatcher matcher); FloatValueMap<String> decode(String input); List<String> getSuggestions(String input); List<String> getSuggestions(String input, CharMatcher matcher); List<String> getSuggestionsSorted(String input); }
CharacterGraphDecoder { public List<String> getSuggestions(String input) { return new Decoder().decode(input).getKeyList(); } CharacterGraphDecoder(float maxPenalty); CharacterGraphDecoder(); CharacterGraphDecoder(CharacterGraph graph); CharacterGraphDecoder(float maxPenalty, Map<Character, String> nearKeyMap); CharacterGraph getGraph(); void addWord(String word); void addWords(String... words); void addWords(List<String> vocabulary); List<ScoredItem<String>> getSuggestionsWithScores(String input); List<ScoredItem<String>> getSuggestionsWithScores(String input, CharMatcher matcher); FloatValueMap<String> decode(String input); List<String> getSuggestions(String input); List<String> getSuggestions(String input, CharMatcher matcher); List<String> getSuggestionsSorted(String input); static final Map<Character, String> TURKISH_FQ_NEAR_KEY_MAP; static final Map<Character, String> TURKISH_Q_NEAR_KEY_MAP; static final DiacriticsIgnoringMatcher DIACRITICS_IGNORING_MATCHER; final float maxPenalty; final boolean checkNearKeySubstitution; public Map<Character, String> nearKeyMap; }
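Beyond the stem/ending test above, the decoder can also be used directly as an in-memory fuzzy matcher. A hedged usage sketch that only calls constructors and methods listed in the class signature above (the float constructor, addWords, getSuggestions); the vocabulary, the misspelled input, and the expectation that one missing character falls within a maxPenalty of 1 are assumptions for illustration:

// Illustration only; assumes CharacterGraphDecoder is visible (imported from its package).
import java.util.List;

public class DecoderDemo {
  public static void main(String[] args) {
    CharacterGraphDecoder decoder = new CharacterGraphDecoder(1f); // assumed: allow ~1 unit of edit penalty
    decoder.addWords("bakacak", "gelecek", "gelir");               // build the vocabulary graph
    List<String> suggestions = decoder.getSuggestions("bakcak");   // input with one character missing
    System.out.println(suggestions); // expected to contain "bakacak"; order is unspecified (see getSuggestionsSorted)
  }
}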