repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/tagging/DeLFTTaggerIntegrationTest.java
package org.grobid.core.engines.tagging; import jep.Jep; import jep.JepConfig; import org.grobid.core.data.Date; import org.grobid.core.engines.DateParser; import org.grobid.core.engines.EngineParsers; import org.grobid.core.jni.JEPThreadPool; import org.grobid.core.main.LibraryLoader; import org.junit.Test; import java.util.List; import static org.junit.Assert.*; public class DeLFTTaggerIntegrationTest { DeLFTTagger target; @Test public void setUp() throws Exception { LibraryLoader.load(); // System.setProperty("java.library.path", System.getProperty("java.library.path") + ":" + LibraryLoader.getLibraryFolder()); // System.setProperty("java.library.path", System.getProperty("java.library.path") + ":" + "/anaconda3/envs/tensorflow/lib"); // System.setProperty("java.library.path", System.getProperty("java.library.path") + ":" + "/anaconda3/envs/tensorflow/lib/python3.6/site-packages/"); // System.out.println(System.getProperty("java.library.path")); // System.loadLibrary("python3.6m"); // System.loadLibrary("jep"); // JepConfig config = new JepConfig(); // config.setInteractive(false); // config.setClassLoader(this.getClass().getClassLoader()); // System.out.println(LibraryLoader.getLibraryFolder()); // Jep jep = JEPThreadPool.getInstance().getJEPInstance(); // jep.eval("import keras"); EngineParsers engineParsers = new EngineParsers(); DateParser dateParser = engineParsers.getDateParser(); List<Date> processing = dateParser.processing("23 november 2019"); System.out.println(processing.get(0).toString()); } }
1,690
32.156863
157
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/tagging/DummyTaggerTest.java
package org.grobid.core.engines.tagging; import org.grobid.core.GrobidModels; import org.grobid.core.exceptions.GrobidException; import org.grobid.core.utilities.GrobidProperties; import org.junit.Before; import org.junit.Test; import org.junit.BeforeClass; import java.util.Arrays; import static org.hamcrest.CoreMatchers.*; import static org.hamcrest.MatcherAssert.assertThat; public class DummyTaggerTest { GenericTagger target; @BeforeClass public static void init() { GrobidProperties.getInstance(); } @Before public void setUp() throws Exception { target = TaggerFactory.getTagger(GrobidModels.DUMMY); } @Test public void testDummyTagger_shouldReturnDummyLabel() { assertThat(target.label("bao"), is("<dummy>")); } @Test public void testDummyTagger() { assertThat(target.label(Arrays.asList("bao", "miao", "ciao")), is(equalTo("bao\t<dummy>\nmiao\t<dummy>\nciao\t<dummy>"))); } @Test(expected = GrobidException.class) public void testWrongModelInitialisation_shouldThrowException() { target = new DummyTagger(GrobidModels.HEADER); } }
1,166
25.522727
71
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/tagging/GenericTaggerUtilsTest.java
package org.grobid.core.engines.tagging; import org.junit.Test; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.*; public class GenericTaggerUtilsTest { @Test public void testGetPlainLabel_normalValue() throws Exception { assertThat(GenericTaggerUtils.getPlainLabel("<status>"), is("<status>")); } @Test public void testGetPlainLabel_startingValue() throws Exception { assertThat(GenericTaggerUtils.getPlainLabel("I-<status>"), is("<status>")); } @Test public void testGetPlainLabel_I_startingValue() throws Exception { assertThat(GenericTaggerUtils.getPlainLabel("I-<status>"), is("<status>")); } @Test public void testGetPlainLabel_nullValue() throws Exception { assertNull(GenericTaggerUtils.getPlainLabel(null)); } @Test public void testIsBeginningOfEntity_true() throws Exception { assertTrue(GenericTaggerUtils.isBeginningOfEntity("I-<status>")); } @Test public void testIsBeginningOfEntity_false() throws Exception { assertFalse(GenericTaggerUtils.isBeginningOfEntity("<status>")); } @Test public void testIsBeginningOfEntity_false2() throws Exception { assertFalse(GenericTaggerUtils.isBeginningOfEntity("<I-status>")); } @Test public void testIsBeginningOfIOBEntity_B_true() throws Exception { assertTrue(GenericTaggerUtils.isBeginningOfIOBEntity("B-<status>")); } @Test public void testIsBeginningOfEntity_B_false2() throws Exception { assertFalse(GenericTaggerUtils.isBeginningOfEntity("<B-status>")); } }
1,635
29.296296
83
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/tagging/TaggerFactoryTest.java
package org.grobid.core.engines.tagging; import org.grobid.core.GrobidModels; import org.grobid.core.main.LibraryLoader; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.powermock.reflect.Whitebox; import java.util.HashMap; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; public class TaggerFactoryTest { @Before public void setUp() throws Exception { LibraryLoader.load(); Whitebox.setInternalState(TaggerFactory.class, "cache", new HashMap<>()); } @After public void tearDown() throws Exception { Whitebox.setInternalState(TaggerFactory.class, "cache", new HashMap<>()); } @Test public void testGetTagger_shouldReturnDummyTagger() { GenericTagger tagger = TaggerFactory.getTagger(GrobidModels.DUMMY); assertThat(tagger instanceof DummyTagger, is(true)); } @Test public void testGetDelftTagger_existingModel_shouldReturn() { GenericTagger tagger = TaggerFactory.getTagger(GrobidModels.HEADER, GrobidCRFEngine.DELFT); assertThat(tagger instanceof DeLFTTagger, is(true)); } @Test public void testGetWapitiTagger_existingModel_shouldReturn() { GenericTagger tagger = TaggerFactory.getTagger(GrobidModels.DATE, GrobidCRFEngine.WAPITI); assertThat(tagger instanceof WapitiTagger, is(true)); } }
1,411
26.153846
99
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/patent/ReferenceExtractorTest.java
package org.grobid.core.engines.patent; import static org.grobid.core.utilities.TextUtilities.delimiters; import static org.junit.Assert.assertEquals; import java.io.File; import java.util.ArrayList; import java.util.List; import java.util.StringTokenizer; import org.grobid.core.data.BibDataSet; import org.grobid.core.data.PatentItem; import org.grobid.core.factory.AbstractEngineFactory; import org.grobid.core.utilities.counters.GrobidTimer; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ReferenceExtractorTest { public static final Logger LOGGER = LoggerFactory .getLogger(ReferenceExtractorTest.class); @BeforeClass public static void setInitialContext() throws Exception { AbstractEngineFactory.init(); } @AfterClass public static void destroyInitialContext() throws Exception { } // extractor.extractAllReferencesXMLFile(new // File("src/test/resources/org/grobid/core/engines/patent/ReferenceExtractor/sample-24514352.tei.xml").getAbsolutePath(), // false, false, patents, articles); @Test public void extractAllReferencesStringNull() { ReferenceExtractor extractor = new ReferenceExtractor(); String res = extractor .extractAllReferencesString( "Economic Development Quarterly November 2011 25: 353-365, first published on August 25, 2011.", false, 0, false, null, null); //assertEquals(0, nbRes); } @Test public void extractAllReferencesStringArticles() { ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); List<BibDataSet> articles = new ArrayList<BibDataSet>(); String toExtract = "Some other description includes ref. US 2011/0155847 A1 in aerodynamic" + " and applied physics. " + "This patent, ref. US 7930197 says data mining of personal data is patented. 
" + "That article refers to Economic Development Quarterly November 2011 25: 353-365, first" + " published on August 25, 2011."; GrobidTimer timer = new GrobidTimer(true); extractor.extractAllReferencesString(toExtract, false, 0, false, patents, articles); timer.stop("STOP"); System.out.println(timer.getElapsedTimeFromStartFormated("STOP")); LOGGER.info("BibDataSet: " + articles.toString()); assertEquals(2, patents.size()); assertEquals(1, articles.size()); LOGGER.info(articles.get(0).getOffsets().toString()); } @Test public void extractAllReferencesStringArticles2() { ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); List<BibDataSet> articles = new ArrayList<BibDataSet>(); extractor .extractAllReferencesString( "That article It refers to Economic Development Quarterly November 2011 25: 353-365," + " first published on August 25, 2011.", false, 0, false, patents, articles); LOGGER.info("BibDataSet: " + articles.toString()); assertEquals(0, patents.size()); assertEquals(1, articles.size()); List<Integer> offsets = articles.get(0).getOffsets(); int startOffset = -1; int endOffset = -1; String rawBib = articles.get(0).getRawBib(); if (!offsets.isEmpty() && offsets.get(0) != null) { startOffset = offsets.get(0).intValue(); StringTokenizer stt = new StringTokenizer(rawBib, delimiters, true); int count2 = 0; int charCpt = 0; String token2; while (stt.hasMoreTokens()) { token2 = stt.nextToken(); if (token2.trim().length() != 0) { count2++; } charCpt += token2.length(); System.err.println(token2 + " count=" + count2 + " charCpt=" + charCpt); } endOffset = startOffset + count2; System.out.println("RawBib=" + rawBib); System.out.println("Start=" + startOffset + " offset=" + count2 + " end=" + endOffset); } LOGGER.info(articles.get(0).getOffsets().toString()); } //@Test public void extractAllReferencesStringPatents() { ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new 
ArrayList<PatentItem>(); List<BibDataSet> articles = new ArrayList<BibDataSet>(); String toExtract = "US-8303618, Intravascular filter and method A filter disposed at the distal end of an elongate guidewire. Catheters are provided for delivering the filter to, and retrieving the filter from, a treatment..."; toExtract = "this patent refers US-8303618, bla bla"; extractor.extractAllReferencesString(toExtract, false, 0, false, patents, articles); LOGGER.info("PatentItem: " + patents.toString()); assertEquals(1, patents.size()); assertEquals(0, articles.size()); PatentItem patent = patents.get(0); assertEquals("8303618", patent.getNumberEpoDoc()); System.out.println("context=" + patent.getContext()); System.out.println("offset start/end/raw=" + patent.getOffsetBegin() + "/" + patent.getOffsetEnd() + "/" + patent.getOffsetRaw()); } @Test public void extractAllReferencesXmlST36() { ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); List<BibDataSet> articles = new ArrayList<BibDataSet>(); extractor .extractAllReferencesXMLFile( new File( "src/test/resources/org/grobid/core/engines/patent/ReferenceExtractor/st36-sample-1.xml") .getAbsolutePath(), false, 0, false, patents, articles); LOGGER.info("PatentItem: " + patents.toString()); assertEquals(2, patents.size()); assertEquals(0, articles.size()); assertEquals("9937368", patents.get(0).getNumberEpoDoc()); assertEquals("6083121", patents.get(1).getNumberEpoDoc()); } @Test public void extractAllReferencesXml() { ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); List<BibDataSet> articles = new ArrayList<BibDataSet>(); extractor .extractAllReferencesXMLFile( new File( "src/test/resources/patents/006271747.xml") .getAbsolutePath(), false, 0, false, patents, articles); //LOGGER.info("PatentItem: " + patents.toString()); assertEquals("20050675311", patents.get(0).getNumberEpoDoc()); assertEquals("9202190", 
patents.get(1).getNumberEpoDoc()); } @Ignore public void extractAllReferencesPdf() { ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); List<BibDataSet> articles = new ArrayList<BibDataSet>(); extractor .extractAllReferencesPDFFile( new File( "src/test/resources/org/grobid/core/engines/patent/ReferenceExtractor/sample-1.pdf") .getAbsolutePath(), false, 0, false, patents, articles); } @Test public void jaProcessing() { String text_jp = "すなわち、相対的な頻度で、エポキシドをベースとする液体接着剤及び接着結合剤が、" + "例えばWO98/21287A1。これらの主な使用分野は、硬質装置のみならず適度に柔軟な装置における縁部の結合である。" + "硬化は、熱により又はUV照射により行われる。"; System.out.println(text_jp); ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); extractor.extractAllReferencesString(text_jp, false, 0, false, patents, null); LOGGER.info("PatentItem: " + patents.toString()); assertEquals(1, patents.size()); assertEquals("21287", patents.get(0).getNumberEpoDoc()); } @Test public void krProcessing() { String text_kr = "미국의 애플사의 미국 출원 2012/012710." + "따라서, 전기화학적 센서들의 제조, 특히 혈액 또는 간질액과 같은 신체 마커들(포도당, 프룩토사민, " + "하에마토크릿 등)의측정을 위한 전기화학적 센서들을 제조하기 위해 개선된 프로세스가 필요하다. 또한, 합리적인 가격으로 센서 스트립들을제조하기 " + "위한 고속의 예측가능하고 재생가능한 방법에 대한 필요성이 있다. 
또한, 각각의 완료된 스트립이 재생가능한 방법으로 체액의 분석 대상물들을 " + "신뢰성있고 예측가능하며 정밀하게 측정하는데 사용될 수 있는 매우 작은 특성들을 갖는센서 스트립들을 고속의 예측가능하고 반복가능한 방법으로 제조할 필요가 있다."; System.out.println(text_kr); ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); extractor.extractAllReferencesString(text_kr, false, 0, false, patents, null); LOGGER.info("PatentItem: " + patents.toString()); assertEquals(1, patents.size()); assertEquals("2012012710", patents.get(0).getNumberEpoDoc()); } @Test public void zhProcessing() { String text_zh = "在本申请的申请人于2008年8月26日提交的申请号为US2008/001534的PCT国际申请中," + "揭示了一种等截面三角形定向棱镜圆形反光板及由其制成的圆板灯。该圆板灯包括:等截面三角形微棱镜圆形导光板;" + "围绕导光板的散热框,该散热框与导光板之间形成间隙而构成环形灯槽;以及嵌装于环形灯槽内的环形灯组件," + "该环形灯组件由多个发光二极管(LED)贴片、电阻和线路板构成。该申请的全部内容,通过引用结合于此。"; System.out.println(text_zh); ReferenceExtractor extractor = new ReferenceExtractor(); List<PatentItem> patents = new ArrayList<PatentItem>(); extractor.extractAllReferencesString(text_zh, false, 0, false, patents, null); LOGGER.info("PatentItem: " + patents.toString()); assertEquals(1, patents.size()); assertEquals("2008001534", patents.get(0).getNumberEpoDoc()); } }
8,763
38.656109
229
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/citations/CalloutAnalyzerTest.java
package org.grobid.core.engines.citations; import org.junit.Before; import org.junit.BeforeClass; import org.junit.AfterClass; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertThat; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import org.grobid.core.engines.citations.CalloutAnalyzer.MarkerType; import org.grobid.core.analyzers.GrobidDefaultAnalyzer; import org.grobid.core.layout.LayoutToken; import java.util.List; public class CalloutAnalyzerTest { @Test public void testGetCalloutTypeText() throws Exception { String input = "(Dé&amps, C & Bidule, D., 2010)"; List<LayoutToken> inputCallout = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken(input); MarkerType result = MarkerType.PARENTHESIS_TEXT; assertThat(CalloutAnalyzer.getCalloutType(inputCallout), is(result)); } @Test public void testGetCalloutTypeTextFail() throws Exception { String input = "(1,3)"; List<LayoutToken> inputCallout = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken(input); MarkerType result = MarkerType.PARENTHESIS_TEXT; assertThat(CalloutAnalyzer.getCalloutType(inputCallout), not(result)); } @Test public void testGetCalloutTypeNumber() throws Exception { String input = "[1-5]"; List<LayoutToken> inputCallout = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken(input); MarkerType result = MarkerType.BRACKET_NUMBER; assertThat(CalloutAnalyzer.getCalloutType(inputCallout), is(result)); } @Test public void testGetCalloutTypeNumberFail() throws Exception { String input = "[Foppiano et. 
al, 2004]"; List<LayoutToken> inputCallout = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken(input); MarkerType result = MarkerType.BRACKET_NUMBER; assertThat(CalloutAnalyzer.getCalloutType(inputCallout), not(result)); } @Test public void testGetCalloutCatastrophicBacktracking() throws Exception { String input = "(1915-1919, 1920-1924, 1925-1929, 1930-1934, and 1935-1939)"; List<LayoutToken> inputCallout = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken(input); MarkerType result = MarkerType.PARENTHESIS_NUMBER; assertThat(CalloutAnalyzer.getCalloutType(inputCallout), is(result)); } @Test public void testGetCalloutCatastrophicBacktrackingFail() throws Exception { String input = "(1915-1919, 1920-1924, 1925-1929, 1930-1934, and 1935-1939)"; List<LayoutToken> inputCallout = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken(input); MarkerType result = MarkerType.BRACKET_NUMBER; assertThat(CalloutAnalyzer.getCalloutType(inputCallout), not(result)); } }
2,980
40.985915
108
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/lexicon/FastMatcherTest.java
package org.grobid.core.lexicon; import org.grobid.core.utilities.GrobidProperties; import org.grobid.core.utilities.OffsetPosition; import org.grobid.core.layout.LayoutToken; import org.grobid.core.analyzers.GrobidAnalyzer; import org.junit.*; import java.io.File; import java.util.Arrays; import java.util.List; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.hasSize; import static org.junit.Assert.assertThat; public class FastMatcherTest { FastMatcher target; @BeforeClass public static void setInitialContext() throws Exception { GrobidProperties.getInstance(); } @Before public void setUp() throws Exception { target = new FastMatcher(); } @Test public void testFastMatcher_InitFrom_GROBID_HOME() { new FastMatcher(new File( GrobidProperties.getGrobidHomePath() + "/lexicon/journals/abbrev_journals.txt")); } @Test public void testProcessToken_noSpace_shouldReturnToken() throws Exception { FastMatcher fastMatcher = new FastMatcher(); assertThat(fastMatcher.processToken("Hebrew"), is(" Hebrew")); } @Test public void testProcessToken_space_shouldReturnSpace() throws Exception { FastMatcher fastMatcher = new FastMatcher(); assertThat(fastMatcher.processToken(" "), is(" ")); } @Test public void testProcessToken_newLine_shouldBeIgnored() throws Exception { FastMatcher fastMatcher = new FastMatcher(); assertThat(fastMatcher.processToken("@newline"), is("")); } @Test public void testProcessToken_tabulation_shouldBecomeSpace() throws Exception { FastMatcher fastMatcher = new FastMatcher(); assertThat(fastMatcher.processToken("\t"), is(" ")); } @Test public void testMatcherList_location() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "I'm walking in The Bronx"; List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input); final List<OffsetPosition> offsetPositions = target.matchLayoutToken(tokenisedInput); assertThat(offsetPositions, 
hasSize(2)); assertThat(offsetPositions.get(0).start, is(8)); assertThat(offsetPositions.get(0).end, is(10)); assertThat(offsetPositions.get(1).start, is(10)); assertThat(offsetPositions.get(1).end, is(10)); } @Test public void testMatchString_location() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "I'm walking in The Bronx"; final List<OffsetPosition> positions = target.matchCharacter(input); assertThat(positions, hasSize(2)); //The Bronx assertThat(positions.get(0).start, is(15)); assertThat(positions.get(0).end, is(24)); //Bronx assertThat(positions.get(1).start, is(19)); assertThat(positions.get(1).end, is(24)); } @Ignore @Test public void testMatchStringWithTag_location() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "I'm walking <p> in The Bronx"; final List<OffsetPosition> positions = target.matchCharacter(input); assertThat(positions, hasSize(2)); //The Bronx assertThat(positions.get(0).start, is(19)); assertThat(positions.get(0).end, is(28)); //Bronx assertThat(positions.get(1).start, is(23)); assertThat(positions.get(1).end, is(28)); } @Ignore @Test public void testMatchStringOnlyTag_location_noMatch() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "<p>"; final List<OffsetPosition> positions = target.matchCharacter(input); assertThat(positions, hasSize(0)); } @Ignore @Test public void testMatchStringAndTag_location_noMatch() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "This is <p>"; final List<OffsetPosition> positions = target.matchCharacter(input); assertThat(positions, hasSize(0)); } @Ignore @Test public void testMatchList_location_noMatch() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "This 
is <p>"; // final List<OffsetPosition> offsetPositions = target.matchCharacter(Arrays.asList(input.split(" "))); //assertThat(offsetPositions, hasSize(0)); } @Test public void testMatchList_location_1Match() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "This is Bronx"; List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input); final List<OffsetPosition> offsetPositions = target.matchCharacterLayoutToken(tokenisedInput); assertThat(offsetPositions, hasSize(1)); assertThat(offsetPositions.get(0).start, is(4)); assertThat(offsetPositions.get(0).end, is(4)); } @Test public void testMatchList_location_2Matches() throws Exception { target = new FastMatcher(this.getClass().getResourceAsStream("location.txt")); final String input = "This is The Bronx"; List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input); final List<OffsetPosition> offsetPositions = target.matchCharacterLayoutToken(tokenisedInput); assertThat(offsetPositions, hasSize(2)); assertThat(offsetPositions.get(0).start, is(4)); assertThat(offsetPositions.get(0).end, is(6)); assertThat(offsetPositions.get(1).start, is(6)); assertThat(offsetPositions.get(1).end, is(6)); } }
6,115
34.149425
110
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/lexicon/LexiconIntegrationTest.java
package org.grobid.core.lexicon;

import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.layout.LayoutToken;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;

import java.util.List;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertNotNull;

/**
 * Integration tests for {@link Lexicon}: position lookup of journal names, locations,
 * organisations, person titles, publishers, and identifier patterns (DOI, arXiv, URL, email).
 *
 * <p>The {@code charPositions*} methods return character offsets; the {@code tokenPositions*}
 * methods return indices into the GrobidAnalyzer tokenization, in which whitespace is
 * kept as separate tokens.
 */
public class LexiconIntegrationTest {
    private Lexicon target = null;

    @Before
    public void setUp() {
        target = Lexicon.getInstance();
    }

    // journals
    @Test
    public void testInAbbrevJournalNames_case1() throws Exception {
        String input = "Nature";
        List<OffsetPosition> journalsPositions = target.tokenPositionsAbbrevJournalNames(input);

        assertNotNull(journalsPositions);
        assertThat(journalsPositions, hasSize(1));
        assertThat(journalsPositions.get(0).start, is(0));
    }

    @Test
    public void testInAbbrevJournalNames_case2() throws Exception {
        String input = "in Nature, volume";
        List<OffsetPosition> journalsPositions = target.tokenPositionsAbbrevJournalNames(input);

        assertNotNull(journalsPositions);
        assertThat(journalsPositions, hasSize(1));
        assertThat(journalsPositions.get(0).start, is(1));
    }

    @Test
    public void testJournalNames_case1() throws Exception {
        String input = "Taylor, et al., Nature 297:(1982)";
        List<OffsetPosition> journalsPositions = target.tokenPositionsJournalNames(input);

        assertNotNull(journalsPositions);
        assertThat(journalsPositions, hasSize(1));
        // "Nature" is a single token, so the match starts and ends at the same index.
        assertThat(journalsPositions.get(0).start, is(6));
        assertThat(journalsPositions.get(0).end, is(6));
    }

    @Test
    public void testJournalNames_case2() throws Exception {
        String input = "to be published in the official publication of the National Venereology Council " +
                "of Australia, volume 10, 2010.";
        List<OffsetPosition> journalsPositions = target.tokenPositionsJournalNames(input);

        assertNotNull(journalsPositions);
        assertThat(journalsPositions, hasSize(2));
    }

    @Test
    public void testCity() throws Exception {
        String input = "University of New-York, USA, bla bla City, bla";
        List<OffsetPosition> citiesPositions = target.tokenPositionsCityNames(input);

        assertNotNull(citiesPositions);
        assertThat(citiesPositions, hasSize(2));
    }

    @Test
    public void testInJournalNames() throws Exception {
        List<OffsetPosition> inJournalNames = target.tokenPositionsJournalNames("abc <p> Economics </p>");

        assertNotNull(inJournalNames);
        assertThat(inJournalNames, hasSize(1));
        assertThat(inJournalNames.get(0).start, is(2));
        assertThat(inJournalNames.get(0).end, is(2));
    }

    /**
     * Locations
     **/
    @Test
    public void testGetPositionInLocation_case1() throws Exception {
        final String input = "In retrospect, the National Archives of Belgium were established by the French law of October 26th 1796 (5 Brumair V), which, amongst others, foresaw in the organisation of departmental depots (amongst others, in Brussels), in which the archives of the disbanded institutions of the Ancien Régime would be stored.";

        final List<OffsetPosition> positions = target.charPositionsLocationNames(input);

        assertThat(positions, hasSize(15));
        assertThat(positions.get(0).start, is(0));
        assertThat(positions.get(0).end, is(2));
    }

    @Test
    public void testGetPositionInLocation_case1_tokenised() throws Exception {
        String input = "In retrospect, the National Archives of Belgium were established by the French law of October 26th 1796 (5 Brumair V), which, amongst others, foresaw in the organisation of departmental depots (amongst others, in Brussels), in which the archives of the disbanded institutions of the Ancien Régime would be stored.";
        List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);

        final List<OffsetPosition> positions = target.charPositionsLocationNames(tokenisedInput);

        assertThat(positions, hasSize(15));
        assertThat(positions.get(0).start, is(0));
        assertThat(positions.get(0).end, is(0));
    }

    @Test
    public void testGetPositionsInLocation_case2() throws Exception {
        final String input = "I'm walking in The Bronx";

        final List<OffsetPosition> positions = target.charPositionsLocationNames(input);

        assertThat(positions, hasSize(4));
        assertThat(positions.get(3).start, is(19));
        assertThat(positions.get(3).end, is(24));
    }

    @Test
    public void testGetPositionsInLocation_case2_tokenised() throws Exception {
        final String input = "I'm walking in The Bronx";
        List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);

        final List<OffsetPosition> positions = target.charPositionsLocationNames(tokenisedInput);

        assertThat(positions, hasSize(4));
        assertThat(positions.get(3).start, is(10));
        assertThat(positions.get(3).end, is(10));
        assertThat(positions.get(2).start, is(8));
        assertThat(positions.get(2).end, is(10));
    }

    /**
     * ORG Form
     **/
    @Test
    public void testGetPositionInOrgForm() throws Exception {
        final String input = "Matusa Inc. was bought by Bayer";

        final List<OffsetPosition> positions = target.charPositionsOrgForm(input);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(7));
        assertThat(positions.get(0).end, is(10));
    }

    @Test
    public void testGetPositionInOrgForm_tokenised() throws Exception {
        final String input = "Matusa Inc. was bought by Bayer";
        List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);

        final List<OffsetPosition> positions = target.charPositionsOrgForm(tokenisedInput);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(2));
        assertThat(positions.get(0).end, is(2));
    }

    /**
     * Organisation names
     */
    @Test
    public void testGetPositionInOrganisationNames() throws Exception {
        final String input = "Matusa Inc. was bought by Bayer";

        final List<OffsetPosition> positions = target.charPositionsOrganisationNames(input);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(26));
        assertThat(positions.get(0).end, is(31));
    }

    @Test
    public void testGetPositionInOrganisationNames_tokenised() throws Exception {
        final String input = "Matusa Inc. was bought by Bayer";
        List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);

        final List<OffsetPosition> positions = target.charPositionsOrganisationNames(tokenisedInput);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(11));
        assertThat(positions.get(0).end, is(11));
    }

    /**
     * Person title
     **/
    @Test
    public void testGetPositionInPersonTitleNames() throws Exception {
        final String input = "The president had a meeting with the vice president, duke and cto of the company.";

        final List<OffsetPosition> positions = target.charPositionsPersonTitle(input);

        assertThat(positions, hasSize(4));
        assertThat(positions.get(0).start, is(4));
        assertThat(positions.get(0).end, is(13));
        // "vice president" and the overlapping "president" match.
        assertThat(positions.get(1).start, is(37));
        assertThat(positions.get(1).end, is(51));
        assertThat(positions.get(2).start, is(42));
        assertThat(positions.get(2).end, is(51));
        assertThat(positions.get(3).start, is(53));
        assertThat(positions.get(3).end, is(57));
    }

    @Test
    public void testGetPositionInPersonTitleNames_tokenised() throws Exception {
        final String input = "The president had a meeting with the vice president, duke and cto of the company.";
        List<LayoutToken> tokenisedInput = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);

        final List<OffsetPosition> positions = target.charPositionsPersonTitle(tokenisedInput);

        assertThat(positions, hasSize(4));
        assertThat(positions.get(0).start, is(2));
        assertThat(positions.get(0).end, is(2));
        assertThat(positions.get(1).start, is(14));
        assertThat(positions.get(1).end, is(16));
        assertThat(positions.get(2).start, is(16));
        assertThat(positions.get(2).end, is(16));
        assertThat(positions.get(3).start, is(19));
        assertThat(positions.get(3).end, is(19));
    }

    @Test
    public void testInJournalNamesLayoutToken() {
        String piece = "Greaves M, Lawlor F. Angioedema: manifestations and management. J Am Acad Dermatol. 1991;25(1 Pt 2):155-161;";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsJournalNames(tokens);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(18));
        assertThat(positions.get(0).end, is(18));
    }

    @Test
    public void testInAbbrevJournalNamesLayoutToken() {
        String piece = "Greaves M, Lawlor F. Angioedema: manifestations and management. J Am Acad Dermatol. 1991;25(1 Pt 2):155-161;";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsAbbrevJournalNames(tokens);

        assertThat(positions, hasSize(2));
        assertThat(positions.get(0).start, is(18));
        assertThat(positions.get(0).end, is(18));
        assertThat(positions.get(1).start, is(21));
        assertThat(positions.get(1).end, is(27));
    }

    @Test
    public void testInLocationNamesLayoutToken() {
        String piece = "Academic Press, New York. 1987. Near Porosły-Kolonia.";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsLocationNames(tokens);

        assertThat(positions, hasSize(4));
        // "New York" (multi-token match), then the overlapping single-token matches.
        assertThat(positions.get(0).start, is(5));
        assertThat(positions.get(0).end, is(7));
        // Fixed: the first assertion of each pair below previously re-checked `.end`
        // instead of `.start` (copy-paste slip). Single-token matches have start == end,
        // so the expected values are unchanged.
        assertThat(positions.get(1).start, is(7));
        assertThat(positions.get(1).end, is(7));
        assertThat(positions.get(2).start, is(15));
        assertThat(positions.get(2).end, is(15));
        assertThat(positions.get(3).start, is(17));
        assertThat(positions.get(3).end, is(17));
    }

    @Test
    public void testInPublisherNamesLayoutToken() {
        String piece = "Academic Press, New York. 1987. Near Porosły-Kolonia.";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsPublisherNames(tokens);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(0));
        assertThat(positions.get(0).end, is(2));
    }

    @Test
    public void testInDOIPatternLayoutToken1() {
        String piece = "Garza, K., Goble, C., Brooke, J., & Jay, C. (2015). Framing the community data system interface. "+
            "In Proceedings of the 2015 British HCI Conference on - British HCI ’15. ACM Press. 10.1145/2783446.2783605";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);
        String text = LayoutTokensUtil.toText(tokens);

        List<OffsetPosition> positions = target.tokenPositionsDOIPattern(tokens, text);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(80));
        assertThat(positions.get(0).end, is(86));
    }

    @Test
    public void testInDOIPatternLayoutToken2() {
        String piece = "Morey, C. C., Cong, Y., Zheng, Y., Price, M., & Morey, R. D. (2015). The color-sharing bonus: Roles of "+
            "perceptual organization and attentive processes in visual working memory. Archives of Scientific Psychology, 3, 18–29. https://doi.org/10.1037/arc0000014";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);
        String text = LayoutTokensUtil.toText(tokens);

        List<OffsetPosition> positions = target.tokenPositionsDOIPattern(tokens, text);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(104));
        assertThat(positions.get(0).end, is(108));
    }

    @Test
    public void testInArXivPatternLayoutToken1() {
        String piece = "ATLAS collaboration, Measurements of the Nuclear Modification Factor for Jets in Pb+Pb Collisionsat √ "+
            "sNN = 2 . 76TeVwith the ATLAS Detector, Phys. Rev. Lett. 114(2015) 072302 [ arXiv: 1411.2357][INSPIRE] .";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);
        String text = LayoutTokensUtil.toText(tokens);

        List<OffsetPosition> positions = target.tokenPositionsArXivPattern(tokens, text);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(64));
        assertThat(positions.get(0).end, is(69));
    }

    @Test
    public void testInArXivPatternLayoutToken2() {
        String piece = "O .Suvorova arXiv .org:hep -ph/9911415( 1999).";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);
        String text = LayoutTokensUtil.toText(tokens);

        List<OffsetPosition> positions = target.tokenPositionsArXivPattern(tokens, text);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(5));
        assertThat(positions.get(0).end, is(15));
    }

    @Test
    public void testInIdentifierPatternLayoutToken() {
        String piece = "ATLAS collaboration, Measurements of the Nuclear Modification Factor for Jets in Pb+Pb Collisionsat √ "+
            "sNN = 2 . 76TeVwith the ATLAS Detector, Phys. Rev. Lett. 114(2015) 072302 [ arXiv: 1411.2357][INSPIRE] .";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsIdentifierPattern(tokens);

        assertThat(positions, hasSize(1));
        assertThat(positions.get(0).start, is(64));
        assertThat(positions.get(0).end, is(69));
    }

    @Test
    public void testInUrlPatternLayoutToken() {
        String piece = "ATLAS collaboration, . https://doi.org/10.1145/2783446.2783605, https://inria.fr/index.html, http://inria.fr/index.html. " +
            "wikipedia: httpS://en.wikipedia.org/wiki/Reich_(disambiguation), Ftp://pubmed.truc.edu";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsUrlPattern(tokens);

        // Scheme matching is case-insensitive ("httpS", "Ftp" are matched too).
        assertThat(positions, hasSize(5));
        assertThat(positions.get(0).start, is(7));
        assertThat(positions.get(0).end, is(21));
        assertThat(positions.get(1).start, is(24));
        assertThat(positions.get(1).end, is(34));
        assertThat(positions.get(2).start, is(37));
        assertThat(positions.get(2).end, is(47));
        assertThat(positions.get(3).start, is(53));
        assertThat(positions.get(3).end, is(68));
        assertThat(positions.get(4).start, is(71));
        assertThat(positions.get(4).end, is(79));
    }

    @Test
    public void testInEmailPatternLayoutToken() {
        String piece = "20000 NW Walker Rd, Beaverton, Oregon 97006 \nericwan @ece.ogi.edu, [email protected] \nAbstract \n";
        List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(piece);

        List<OffsetPosition> positions = target.tokenPositionsEmailPattern(tokens);

        assertThat(positions, hasSize(2));
        assertThat(positions.get(0).start, is(17));
        assertThat(positions.get(0).end, is(24));
        assertThat(positions.get(1).start, is(27));
        assertThat(positions.get(1).end, is(33));
    }
}
17,516
42.902256
345
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/analyzers/GrobidAnalyzerTest.java
package org.grobid.core.analyzers;

import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lang.Language;
import org.grobid.core.utilities.UnicodeUtil;
import org.junit.Before;
import org.junit.Test;

import java.util.List;

import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.*;

/**
 * Tests for {@link GrobidAnalyzer}: layout tokenization of Latin text and
 * tokenization / sub-digit retokenization of Korean, Japanese and Chinese input.
 */
public class GrobidAnalyzerTest {

    GrobidAnalyzer target;

    @Before
    public void setUp() throws Exception {
        target = GrobidAnalyzer.getInstance();
    }

    @Test
    public void testTokenizeWithLayoutToken() {
        final List<LayoutToken> tokens =
            target.tokenizeWithLayoutToken("This is a normal \ntext,\n\n\n on several lines.\n");

        assertThat(tokens, hasSize(22));

        // Word and whitespace tokens at selected positions; whitespace is kept
        // as separate tokens, and newLineAfter flags the token preceding a break.
        assertThat(tokens.get(0).getText(), is("This"));
        assertThat(tokens.get(1).getText(), is(" "));
        assertThat(tokens.get(6).getText(), is("normal"));
        assertThat(tokens.get(7).getText(), is(" "));
        assertThat(tokens.get(7).isNewLineAfter(), is(true));
        assertThat(tokens.get(8).getText(), is("\n"));
        assertThat(tokens.get(8).isNewLineAfter(), is(false));
        assertThat(tokens.get(10).getText(), is(","));
        assertThat(tokens.get(10).isNewLineAfter(), is(true));
        assertThat(tokens.get(11).getText(), is("\n"));
        assertThat(tokens.get(11).isNewLineAfter(), is(true));
        assertThat(tokens.get(12).getText(), is("\n"));
        assertThat(tokens.get(12).isNewLineAfter(), is(true));
        assertThat(tokens.get(13).getText(), is("\n"));
        assertThat(tokens.get(13).isNewLineAfter(), is(false));
    }

    @Test
    public void testTokenizeWithLayoutToken_emptyText() {
        assertThat(target.tokenizeWithLayoutToken(""), hasSize(0));
    }

    @Test
    public void testTokenize_Korean() {
        checkCjkTokenization(
            "최지수. 윤석민 (2019), 가짜뉴스 거버넌스: 정부규제, 자율규제, 공동규제 모형에 대한 비교를 중심으로, 사이버커뮤니케이션학보, 제36권 제1호, 127-180쪽.",
            "kr", 35, 36);
    }

    @Test
    public void testTokenize_Japanese() {
        checkCjkTokenization(
            "오다쇼고(小田省吾), 京城尊都の 由來こ 基の 城壁に就て</title>, 朝鮮 제197호, 1931",
            "jp", 23, 25);
    }

    @Test
    public void testTokenize_Chinese() {
        checkCjkTokenization(
            "郭宏奇. 中藥辨証治療灼口綜合征臨床觀察. 疾病監測与控制雜誌2009;8:484-5.",
            "zh", 35, 35);
    }

    /**
     * Shared checks for CJK input: plain tokenization, layout tokenization
     * (exercised three times to confirm repeated calls keep yielding the same
     * segmentation), and the three sub-digit retokenization variants.
     *
     * @param rawText                raw (not yet normalised) input text
     * @param lang                   language code handed to the analyzer
     * @param expectedTokens         token count before sub-digit retokenization
     * @param expectedAfterSubdigits token count after sub-digit retokenization
     */
    private void checkCjkTokenization(String rawText, String lang, int expectedTokens, int expectedAfterSubdigits) {
        String input = UnicodeUtil.normaliseText(rawText);

        List<String> tokenStrings = target.tokenize(input, new Language(lang));
        assertThat(tokenStrings, hasSize(expectedTokens));

        for (int i = 0; i < 3; i++) {
            List<LayoutToken> layoutTokens = target.tokenizeWithLayoutToken(input, new Language(lang));
            assertThat(layoutTokens, hasSize(expectedTokens));
        }

        assertThat(target.retokenizeSubdigits(target.tokenize(input, new Language(lang))),
            hasSize(expectedAfterSubdigits));
        assertThat(target.retokenizeSubdigitsWithLayoutToken(target.tokenize(input, new Language(lang))),
            hasSize(expectedAfterSubdigits));
        assertThat(target.retokenizeSubdigitsFromLayoutToken(target.tokenizeWithLayoutToken(input, new Language(lang))),
            hasSize(expectedAfterSubdigits));
    }
}
5,555
39.852941
132
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/analyzers/GrobidDefaultAnalyzerTest.java
package org.grobid.core.analyzers;

import org.grobid.core.layout.LayoutToken;
import org.junit.Before;
import org.junit.Test;

import java.util.List;

import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.*;

/**
 * Tests for {@link GrobidDefaultAnalyzer}: layout tokenization, including how
 * newline characters set the newLineAfter flag on the preceding token.
 */
public class GrobidDefaultAnalyzerTest {

    GrobidDefaultAnalyzer target;

    @Before
    public void setUp() throws Exception {
        target = GrobidDefaultAnalyzer.getInstance();
    }

    @Test
    public void testTokenizeWithLayoutToken() {
        final List<LayoutToken> tokens =
            target.tokenizeWithLayoutToken("This is a normal \ntext,\n\n\n on several lines.\n");

        assertThat(tokens, hasSize(22));

        // Token text at selected positions (whitespace is kept as separate tokens).
        assertThat(tokens.get(0).getText(), is("This"));
        assertThat(tokens.get(1).getText(), is(" "));
        assertThat(tokens.get(6).getText(), is("normal"));
        assertThat(tokens.get(7).getText(), is(" "));
        assertThat(tokens.get(8).getText(), is("\n"));
        assertThat(tokens.get(10).getText(), is(","));
        assertThat(tokens.get(11).getText(), is("\n"));
        assertThat(tokens.get(12).getText(), is("\n"));
        assertThat(tokens.get(13).getText(), is("\n"));

        // newLineAfter is set on the token *before* each line break.
        assertThat(tokens.get(7).isNewLineAfter(), is(true));
        assertThat(tokens.get(8).isNewLineAfter(), is(false));
        assertThat(tokens.get(10).isNewLineAfter(), is(true));
        assertThat(tokens.get(11).isNewLineAfter(), is(true));
        assertThat(tokens.get(12).isNewLineAfter(), is(true));
        assertThat(tokens.get(13).isNewLineAfter(), is(false));
    }

    @Test
    public void testTokenizeWithLayoutToken_emptyText() {
        assertThat(target.tokenizeWithLayoutToken(""), hasSize(0));
    }
}
1,823
37.808511
132
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/data/PersonDeduplicationTest.java
package org.grobid.core.data;

import org.junit.Test;

import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;

import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.hamcrest.CoreMatchers.*;
import static org.hamcrest.core.IsNull.notNullValue;

/**
 * Tests for {@link Person#deduplicate(List)}: merging of name variants such as
 * "O", "OJ" and "Orenthal James", keeping the most detailed form, and carrying
 * over affiliations from removed duplicates.
 */
public class PersonDeduplicationTest {

    Person target;

    /** Builds a normalized Person; pass {@code null} to leave the middle name unset. */
    private static Person person(String firstName, String middleName, String lastName) {
        Person p = new Person();
        p.setFirstName(firstName);
        if (middleName != null) {
            p.setMiddleName(middleName);
        }
        p.setLastName(lastName);
        p.normalizeName();
        return p;
    }

    /** Mutable list wrapper — deduplicate() removes entries in place. */
    private static List<Person> listOf(Person... entries) {
        return new ArrayList<Person>(Arrays.asList(entries));
    }

    @Test
    public void testDeduplication0() {
        // nothing to deduplicate
        target = person("OJ", null, "Simpson");
        List<Person> persons = listOf(target);

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
    }

    @Test
    public void testDeduplication1() {
        // simple deduplication, removal of second
        target = person("OJ", null, "Simpson");
        Person other = person("O", null, "Simpson");
        List<Person> persons = listOf(target, other);

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
        assertThat(persons.get(0).getFirstName(), is("O"));
        assertThat(persons.get(0).getMiddleName(), is("J"));
        assertThat(persons.get(0).getLastName(), is("Simpson"));
    }

    @Test
    public void testDeduplication2() {
        // simple deduplication, removal of first
        target = person("O", null, "Simpson");
        Person other = person("O", "J", "Simpson");
        List<Person> persons = listOf(target, other);

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
        assertThat(persons.get(0).getFirstName(), is("O"));
        assertThat(persons.get(0).getMiddleName(), is("J"));
        assertThat(persons.get(0).getLastName(), is("Simpson"));
    }

    @Test
    public void testDeduplication3() {
        // less simple deduplication: the most detailed first name wins
        target = person("O", "J", "Simpson");
        Person other = person("Orenthal", "James", "Simpson");
        List<Person> persons = listOf(target, other);

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
        assertThat(persons.get(0).getFirstName(), is("Orenthal"));
        assertThat(persons.get(0).getMiddleName(), is("James"));
        assertThat(persons.get(0).getLastName(), is("Simpson"));
    }

    @Test
    public void testDeduplication4() {
        // less simple deduplication: the most detailed middle name wins
        target = person("Orenthal", "J", "Simpson");
        Person other = person("Orenthal", "James", "Simpson");
        List<Person> persons = listOf(target, other);

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
        assertThat(persons.get(0).getFirstName(), is("Orenthal"));
        assertThat(persons.get(0).getMiddleName(), is("James"));
        assertThat(persons.get(0).getLastName(), is("Simpson"));
    }

    @Test
    public void testDeduplication5() {
        // deduplication across several duplicated entries
        target = person("O", "J", "Simpson");
        List<Person> persons = listOf(
            target,
            person("Orenthal", "James", "Simpson"),
            person("Orenthal", null, "Simpson"),
            person("O", null, "Simpson"),
            person("Orenthal", "J", "Simpson"));

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
        assertThat(persons.get(0).getFirstName(), is("Orenthal"));
        assertThat(persons.get(0).getMiddleName(), is("James"));
        assertThat(persons.get(0).getLastName(), is("Simpson"));
    }

    @Test
    public void testDeduplication6() {
        // the affiliation of a removed duplicate must be kept on the survivor
        target = person("O", "J", "Simpson");
        Person other = person("O", null, "Simpson");
        Affiliation aff = new Affiliation();
        aff.addInstitution("National Football League");
        other.addAffiliation(aff);
        List<Person> persons = listOf(target, other);

        target.deduplicate(persons);

        assertThat(persons, hasSize(1));
        assertThat(persons.get(0).getFirstName(), is("O"));
        assertThat(persons.get(0).getMiddleName(), is("J"));
        assertThat(persons.get(0).getLastName(), is("Simpson"));
        assertThat(persons.get(0).getAffiliations(), notNullValue());
    }

    @Test
    public void testDeduplication7() {
        // no deduplication: middle names clash
        target = person("O", "J", "Simpson");
        Person other = person("O", "P", "Simpson");
        List<Person> persons = listOf(target, other);

        target.deduplicate(persons);

        assertThat(persons, hasSize(2));
    }
}
7,255
30.68559
72
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/data/AuthorEmailAssignerTest.java
package org.grobid.core.data;

import org.grobid.core.data.util.AuthorEmailAssigner;
import org.grobid.core.data.util.ClassicAuthorEmailAssigner;
import org.junit.Test;

import java.util.Arrays;
import java.util.List;

/**
 * Exercises {@link ClassicAuthorEmailAssigner}: matching a list of email
 * addresses against a list of authors.
 */
public class AuthorEmailAssignerTest {

    // NOTE(review): this test contains no assertions — it only prints the result,
    // so it can never fail if email assignment is wrong. Consider asserting the
    // expected author/email pairing once the intended mapping is confirmed.
    @Test
    public void testEmailAssignment() {
        AuthorEmailAssigner assigner = new ClassicAuthorEmailAssigner();
        // assign() is presumed to mutate the authors in place — TODO confirm
        List<Person> authors = l(p("Jalal Al-Muhtadi"), p("Manish Anand"), p("M.", "Dennis Mickunas"), p("Roy Campbell"));
        assigner.assign(
            authors,
            l("[email protected]", "[email protected]", "[email protected]", "[email protected]"));
        System.out.println(authors);
    }

    // Splits "First Last" on the first space; assumes exactly two name parts.
    private Person p(String name) {
        String[] split = name.split(" ");
        return p(split[0], split[1]);
    }

    // Builds a Person from trimmed first and last name.
    private Person p(String fistName, String lastName) {
        Person p = new Person();
        p.setFirstName(fistName.trim());
        p.setLastName(lastName.trim());
        return p;
    }

    // Varargs-to-list convenience helper.
    public static <T> List<T> l(T... els) {
        return Arrays.asList(els);
    }
}
1,106
25.357143
122
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/data/EmailSanitizerTest.java
package org.grobid.core.data;

import org.grobid.core.data.util.EmailSanitizer;
import org.junit.Test;

import java.util.Arrays;
import java.util.List;

import static org.junit.Assert.assertEquals;

/**
 * Tests for {@link EmailSanitizer#splitAndClean(List)}: cleaning raw email
 * strings and splitting slash-joined addresses into separate entries.
 */
public class EmailSanitizerTest {

    EmailSanitizer sanitizer = new EmailSanitizer();

    @Test
    public void testEmailSanitizer() {
        a("[email protected]", "[email protected]");
        a(l("[email protected]", "[email protected]"), "abc/[email protected]");
    }

    /** Asserts that sanitizing {@code rawEmail} yields exactly one cleaned address. */
    private void a(String expectedEmail, String rawEmail) {
        a(l(expectedEmail), rawEmail);
    }

    /** Asserts that sanitizing {@code rawEmail} yields the given list of addresses. */
    private void a(List<String> expectedEmails, String rawEmail) {
        assertEquals(expectedEmails, sanitizer.splitAndClean(l(rawEmail)));
    }

    /** Varargs-to-list convenience helper. */
    private static <T> List<T> l(T... elements) {
        return Arrays.asList(elements);
    }
}
825
23.294118
74
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/data/DateTest.java
package org.grobid.core.data; import org.junit.Test; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; public class DateTest { Date target; Date other; @Test public void testDateMerging_yearVsYearMonth_shouldReturnYearMonth() { // "2010" "2010-10" -> "2010-10" target = new Date(); target.setYear(2010); other = new Date(); other.setYear(2010); other.setMonth(10); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2010)); assertThat(merged.getMonth(), is(10)); } @Test public void testDateMerging_yearVsYearMonthDay_shouldReturnYearMonthDay() { // "2010" "2010-10-27" -> "2010-10-27" target = new Date(); target.setYear(2010); other = new Date(); other.setYear(2010); other.setMonth(10); other.setDay(27); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2010)); assertThat(merged.getMonth(), is(10)); assertThat(merged.getDay(), is(27)); } @Test public void testDateMerging_yearMonthVsYearMonthDay_shouldReturnYearMonthDay() { // "2010-10" "2010-10-27" -> "2010-10-27" target = new Date(); target.setYear(2010); target.setMonth(10); other = new Date(); other.setYear(2010); other.setMonth(10); other.setDay(27); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2010)); assertThat(merged.getMonth(), is(10)); assertThat(merged.getDay(), is(27)); } @Test public void testDateMerging_YearMonthDayVsYearMonth_shouldReturnYearMonthDay() { // "2010-10-27" "2010-10" -> "2010-10-27" target = new Date(); target.setYear(2010); target.setMonth(10); target.setDay(27); other = new Date(); other.setYear(2010); other.setMonth(10); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2010)); assertThat(merged.getMonth(), is(10)); assertThat(merged.getDay(), is(27)); } @Test public void testDateMerging_differentDates_yearMonth_shouldReturnOriginal() { // "2011-10" "2010-10-27" -> "2011-10" target = new Date(); target.setYear(2011); target.setMonth(10); other = new Date(); 
other.setYear(2010); other.setMonth(10); other.setDay(27); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2011)); assertThat(merged.getMonth(), is(10)); } @Test public void testDateMerging_differentDates_year_shouldReturnOriginal() { // "2010" "2016-10-27" -> "2010" target = new Date(); target.setYear(2010); other = new Date(); other.setYear(2016); other.setMonth(10); other.setDay(27); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2010)); } @Test public void testDateMerging_differentDates_missingYearFromTarget() { // "" "2016-10-27" -> "2016-10-27" target = new Date(); other = new Date(); other.setYear(2016); other.setMonth(10); other.setDay(27); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2016)); assertThat(merged.getMonth(), is(10)); assertThat(merged.getDay(), is(27)); } @Test public void testDateMerging_differentDates_onlyYear_shouldReturnOriginal() { // "2011" "2010" -> 2011 target = new Date(); target.setYear(2011); other = new Date(); other.setYear(2010); Date merged = Date.merge(target, other); assertThat(merged.getYear(), is(2011)); } @Test public void testToISOString_onlyYear() { Date date = new Date(); date.setYear(2016); date.setMonth(10); date.setDay(27); assertThat(Date.toISOString(date), is("2016-10-27")); } @Test public void testToISOString_onlyYear_WithoutPrefix() { Date date = new Date(); date.setYear(16); date.setMonth(10); date.setDay(27); assertThat(Date.toISOString(date), is("0016-10-27")); } @Test public void testToISOString_completeDate_missingMonth() { Date date = new Date(); date.setYear(2016); date.setDay(27); assertThat(Date.toISOString(date), is("2016")); } @Test public void testToISOString_onlyDay() { Date date = new Date(); date.setDay(27); assertThat(Date.toISOString(date), is("")); } }
4,824
23.871134
84
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/data/BiblioItemTest.java
package org.grobid.core.data;

import org.grobid.core.main.LibraryLoader;
import org.hamcrest.Matchers;
import org.junit.Before;
import org.junit.Test;

import java.util.Arrays;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.empty;

/**
 * Unit tests for {@link BiblioItem}.
 *
 * Exercises:
 * - TEI serialization of raw affiliation strings (with/without the
 *   {@code includeRawAffiliations} config flag, with markers, with escaping),
 *   verified by parsing the produced TEI and querying it with XPath;
 * - identifier injection ({@code BiblioItem.injectIdentifiers});
 * - identifier escaping in TEI output;
 * - author/affiliation reconciliation via {@code BiblioItem.correct};
 * - DOI string cleaning ({@code BiblioItem.cleanDOI}).
 */
public class BiblioItemTest {
    public static final Logger LOGGER = LoggerFactory.getLogger(BiblioItemTest.class);

    @Before
    public void setUp() throws Exception {
        // Native CRF libraries are required before BiblioItem.toTEI can run.
        LibraryLoader.load();
    }

    // Shared builder for per-test GrobidAnalysisConfig instances.
    private GrobidAnalysisConfig.GrobidAnalysisConfigBuilder configBuilder = (
        new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder()
    );

    @BeforeClass
    public static void init() {
        GrobidProperties.getInstance();
    }

    /** Parses a TEI/XML string into a namespace-aware DOM document. */
    private static Document parseXml(String xml) throws ParserConfigurationException, SAXException, IOException {
        DocumentBuilderFactory domFactory = DocumentBuilderFactory.newInstance();
        domFactory.setNamespaceAware(true);
        DocumentBuilder builder = domFactory.newDocumentBuilder();
        return builder.parse(new InputSource(new StringReader(xml)));
    }

    /** Evaluates an XPath expression and returns the matching node values as strings. */
    private static List<String> getXpathStrings(
        Document doc, String xpath_expr
    ) throws XPathExpressionException {
        XPath xpath = XPathFactory.newInstance().newXPath();
        XPathExpression expr = xpath.compile(xpath_expr);
        NodeList nodes = (NodeList) expr.evaluate(doc, XPathConstants.NODESET);
        ArrayList<String> matchingStrings = new ArrayList<>();
        for (int i = 0; i < nodes.getLength(); i++) {
            matchingStrings.add(nodes.item(i).getNodeValue());
        }
        return matchingStrings;
    }

    @Test
    public void shouldGenerateRawAffiliationTextIfEnabled() throws Exception {
        GrobidAnalysisConfig config = configBuilder.includeRawAffiliations(true).build();
        Affiliation aff = new Affiliation();
        aff.setRawAffiliationString("raw affiliation 1");
        aff.setFailAffiliation(false);
        Person author = new Person();
        author.setLastName("Smith");
        author.setAffiliations(Arrays.asList(aff));
        BiblioItem biblioItem = new BiblioItem();
        biblioItem.setFullAuthors(Arrays.asList(author));
        biblioItem.setFullAffiliations(Arrays.asList(aff));
        String tei = biblioItem.toTEI(0, 2, config);
        LOGGER.debug("tei: {}", tei);
        Document doc = parseXml(tei);
        assertThat(
            "raw_affiliation",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/text()"),
            is(Arrays.asList("raw affiliation 1"))
        );
    }

    @Test
    public void shouldIncludeMarkerInRawAffiliationText() throws Exception {
        GrobidAnalysisConfig config = configBuilder.includeRawAffiliations(true).build();
        Affiliation aff = new Affiliation();
        aff.setMarker("A");
        aff.setRawAffiliationString("raw affiliation 1");
        aff.setFailAffiliation(false);
        Person author = new Person();
        author.setLastName("Smith");
        author.setAffiliations(Arrays.asList(aff));
        BiblioItem biblioItem = new BiblioItem();
        biblioItem.setFullAuthors(Arrays.asList(author));
        biblioItem.setFullAffiliations(Arrays.asList(aff));
        String tei = biblioItem.toTEI(0, 2, config);
        LOGGER.debug("tei: {}", tei);
        Document doc = parseXml(tei);
        assertThat(
            "raw_affiliation label",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/label/text()"),
            is(Arrays.asList("A"))
        );
        // Expected value keeps the leading space separating label and text.
        assertThat(
            "raw_affiliation",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/text()"),
            is(Arrays.asList(" raw affiliation 1"))
        );
    }

    @Test
    public void shouldIncludeEscapedMarkerInRawAffiliationText() throws Exception {
        // Marker "&" must be XML-escaped in the TEI but round-trip back to "&"
        // once the document is parsed again.
        GrobidAnalysisConfig config = configBuilder.includeRawAffiliations(true).build();
        Affiliation aff = new Affiliation();
        aff.setMarker("&");
        aff.setRawAffiliationString("raw affiliation 1");
        aff.setFailAffiliation(false);
        Person author = new Person();
        author.setLastName("Smith");
        author.setAffiliations(Arrays.asList(aff));
        BiblioItem biblioItem = new BiblioItem();
        biblioItem.setFullAuthors(Arrays.asList(author));
        biblioItem.setFullAffiliations(Arrays.asList(aff));
        String tei = biblioItem.toTEI(0, 2, config);
        LOGGER.debug("tei: {}", tei);
        Document doc = parseXml(tei);
        assertThat(
            "raw_affiliation label",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/label/text()"),
            is(Arrays.asList("&"))
        );
        assertThat(
            "raw_affiliation",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/text()"),
            is(Arrays.asList(" raw affiliation 1"))
        );
    }

    @Test
    public void shouldGenerateRawAffiliationTextForFailAffiliationsIfEnabled() throws Exception {
        // failAffiliation = true: affiliation is not attached to an author
        // but its raw string must still be serialized.
        GrobidAnalysisConfig config = configBuilder.includeRawAffiliations(true).build();
        Affiliation aff = new Affiliation();
        aff.setRawAffiliationString("raw affiliation 1");
        aff.setFailAffiliation(true);
        BiblioItem biblioItem = new BiblioItem();
        biblioItem.setFullAffiliations(Arrays.asList(aff));
        String tei = biblioItem.toTEI(0, 2, config);
        LOGGER.debug("tei: {}", tei);
        Document doc = parseXml(tei);
        assertThat(
            "raw_affiliation",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/text()"),
            is(Arrays.asList("raw affiliation 1"))
        );
    }

    @Test
    public void shouldNotGenerateRawAffiliationTextIfNotEnabled() throws Exception {
        GrobidAnalysisConfig config = configBuilder.includeRawAffiliations(false).build();
        Affiliation aff = new Affiliation();
        aff.setRawAffiliationString("raw affiliation 1");
        Person author = new Person();
        author.setLastName("Smith");
        author.setAffiliations(Arrays.asList(aff));
        BiblioItem biblioItem = new BiblioItem();
        biblioItem.setFullAuthors(Arrays.asList(author));
        biblioItem.setFullAffiliations(Arrays.asList(aff));
        String tei = biblioItem.toTEI(0, 2, config);
        LOGGER.debug("tei: {}", tei);
        Document doc = parseXml(tei);
        assertThat(
            "raw_affiliation",
            getXpathStrings(doc, "//note[@type=\"raw_affiliation\"]/text()"),
            is(empty())
        );
    }

    @Test
    public void injectIdentifiers() {
        // All identifiers from item1 must be copied into the empty item2.
        BiblioItem item1 = new BiblioItem();
        item1.setDOI("10.1233/23232/3232");
        item1.setPMID("pmid");
        item1.setPMCID("bao");
        item1.setPII("miao");
        item1.setIstexId("zao");
        item1.setArk("Noah!");
        BiblioItem item2 = new BiblioItem();
        BiblioItem.injectIdentifiers(item2, item1);
        assertThat(item2.getDOI(), is("10.1233/23232/3232"));
        assertThat(item2.getPMID(), is("pmid"));
        assertThat(item2.getPMCID(), is("bao"));
        assertThat(item2.getPII(), is("miao"));
        assertThat(item2.getIstexId(), is("zao"));
        assertThat(item2.getArk(), is("Noah!"));
    }

    @Test
    public void shouldEscapeIdentifiers() throws Exception {
        BiblioItem item1 = new BiblioItem();
        item1.setJournal("Dummy Journal Title");
        item1.setDOI("10.1233/23232&3232");
        item1.setPMID("pmid & 123");
        item1.setArk("Noah & !");
        item1.setISSN("0974&9756");
        GrobidAnalysisConfig config = configBuilder.build();
        String tei = item1.toTEI(0, 2, config);
        LOGGER.debug("tei: {}", tei);
        Document doc = parseXml(tei);
        assertThat(
            "DOI",
            getXpathStrings(doc, "//idno[@type=\"DOI\"]/text()"),
            is(Arrays.asList("10.1233/23232&3232"))
        );
        assertThat(
            "ISSN",
            getXpathStrings(doc, "//idno[@type=\"ISSN\"]/text()"),
            is(Arrays.asList("0974&9756"))
        );
        // Note the expected value: whitespace around "&" is dropped for PMID
        // ("pmid & 123" -> "pmid&123"), while the Ark keeps its spaces.
        assertThat(
            "PMID",
            getXpathStrings(doc, "//idno[@type=\"PMID\"]/text()"),
            is(Arrays.asList("pmid&123"))
        );
        assertThat(
            "Ark",
            getXpathStrings(doc, "//idno[@type=\"ark\"]/text()"),
            is(Arrays.asList("Noah & !"))
        );
    }

    @Test
    public void correct_empty_shouldNotFail() {
        BiblioItem.correct(new BiblioItem(), new BiblioItem());
    }

    @Test
    public void correct_1author_shouldWork() {
        BiblioItem biblio1 = new BiblioItem();
        List<Person> authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe"));
        biblio1.setFullAuthors(authors);
        BiblioItem biblio2 = new BiblioItem();
        authors = new ArrayList<>();
        authors.add(createPerson("John1", "Doe"));
        biblio2.setFullAuthors(authors);
        BiblioItem.correct(biblio1, biblio2);
        assertThat(biblio1.getFirstAuthorSurname(), is(biblio2.getFirstAuthorSurname()));
        assertThat(biblio1.getFullAuthors().get(0).getFirstName(), is(biblio2.getFullAuthors().get(0).getFirstName()));
    }

    @Test
    public void correct_2authors_shouldMatchFullName_shouldUpdateAffiliation() {
        BiblioItem biblio1 = new BiblioItem();
        List<Person> authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe"));
        authors.add(createPerson("Jane", "Will"));
        biblio1.setFullAuthors(authors);
        BiblioItem biblio2 = new BiblioItem();
        authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe", "UCLA"));
        authors.add(createPerson("Jane", "Will","Harward"));
        biblio2.setFullAuthors(authors);
        BiblioItem.correct(biblio1, biblio2);
        assertThat(biblio1.getFirstAuthorSurname(), is(biblio2.getFirstAuthorSurname()));
        assertThat(biblio1.getFullAuthors(), hasSize(2));
        assertThat(biblio1.getFullAuthors().get(0).getFirstName(), is(biblio2.getFullAuthors().get(0).getFirstName()));
        // biblio1 affiliations empty we update them with ones from biblio2
        assertThat(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString(),
            is(biblio2.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString()));
        assertThat(biblio1.getFullAuthors().get(1).getFirstName(), is(biblio2.getFullAuthors().get(1).getFirstName()));
        assertThat(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString(),
            is(biblio2.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString()));
    }

    @Test
    public void correct_2authors_shouldMatchFullName_shouldKeepAffiliation() {
        BiblioItem biblio1 = new BiblioItem();
        List<Person> authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe", "Stanford"));
        authors.add(createPerson("Jane", "Will", "Cambridge"));
        biblio1.setFullAuthors(authors);
        BiblioItem biblio2 = new BiblioItem();
        authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe" ));
        authors.add(createPerson("Jane", "Will", "UCLA"));
        biblio2.setFullAuthors(authors);
        BiblioItem.correct(biblio1, biblio2);
        assertThat(biblio1.getFirstAuthorSurname(), is(biblio2.getFirstAuthorSurname()));
        assertThat(biblio1.getFullAuthors(), hasSize(2));
        assertThat(biblio1.getFullAuthors().get(0).getFirstName(), is(biblio2.getFullAuthors().get(0).getFirstName()));
        // biblio1 affiliations not empty, we keep biblio1 as is
        assertThat(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString(),
            is(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString()));
        assertThat(biblio1.getFullAuthors().get(1).getFirstName(), is(biblio2.getFullAuthors().get(1).getFirstName()));
        assertThat(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString(),
            is(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString()));
        // NOTE(review): this last assertion compares biblio1's second
        // affiliation ("Cambridge" before correction) with biblio2's ("UCLA"),
        // which implies correct() overwrote it after all — verify intent.
        assertThat(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString(),
            is(biblio2.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString()));
    }

    @Test
    public void correct_2authors_initial_2_shouldUpdateAuthor() {
        BiblioItem biblio1 = new BiblioItem();
        List<Person> authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe", "ULCA"));
        authors.add(createPerson("J", "Will", "Harward"));
        biblio1.setFullAuthors(authors);
        BiblioItem biblio2 = new BiblioItem();
        authors = new ArrayList<>();
        authors.add(createPerson("John1", "Doe", "Stanford"));
        authors.add(createPerson("Jane", "Will", "Berkeley"));
        biblio2.setFullAuthors(authors);
        BiblioItem.correct(biblio1, biblio2);
        assertThat(biblio1.getFirstAuthorSurname(), is(biblio2.getFirstAuthorSurname()));
        assertThat(biblio1.getFullAuthors(), hasSize(2));
        assertThat(biblio1.getFullAuthors().get(0).getFirstName(), is(biblio2.getFullAuthors().get(0).getFirstName()));
        // affiliation should be kept though since not empty
        assertThat(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString(),
            is(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString()));
        assertThat(biblio1.getFullAuthors().get(1).getFirstName(), is(biblio2.getFullAuthors().get(1).getFirstName()));
        assertThat(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString(),
            is(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString()));
    }

    @Test
    public void correct_2authors_initial_shouldUpdateAuthor() {
        BiblioItem biblio1 = new BiblioItem();
        List<Person> authors = new ArrayList<>();
        authors.add(createPerson("John", "Doe", "ULCA"));
        authors.add(createPerson("Jane", "Will", "Harward"));
        biblio1.setFullAuthors(authors);
        BiblioItem biblio2 = new BiblioItem();
        authors = new ArrayList<>();
        authors.add(createPerson("John1", "Doe", "Stanford"));
        authors.add(createPerson("J", "Will", "Berkeley"));
        biblio2.setFullAuthors(authors);
        BiblioItem.correct(biblio1, biblio2);
        assertThat(biblio1.getFirstAuthorSurname(), is(biblio2.getFirstAuthorSurname()));
        assertThat(biblio1.getFullAuthors(), hasSize(2));
        assertThat(biblio1.getFullAuthors().get(0).getFirstName(), is(biblio2.getFullAuthors().get(0).getFirstName()));
        // affiliation should be kept though
        assertThat(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString(),
            is(biblio1.getFullAuthors().get(0).getAffiliations().get(0).getAffiliationString()));
        //assertThat(biblio1.getFullAuthors().get(1).getFirstName(), is(biblio2.getFullAuthors().get(0).getFirstName()));
        assertThat(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString(),
            is(biblio1.getFullAuthors().get(1).getAffiliations().get(0).getAffiliationString()));
    }

    @Test
    public void testCleanDOIxPrefix1_shouldRemovePrefix() throws Exception {
        // "doi:" scheme prefix must be stripped.
        String doi = "doi:10.1063/1.1905789";
        String cleanDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanDoi, Matchers.is("10.1063/1.1905789"));
    }

    @Test
    public void testCleanDOIPrefix2_shouldRemovePrefix() throws Exception {
        // "doi/" path-style prefix must be stripped.
        String doi = "doi/10.1063/1.1905789";
        String cleanDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanDoi, Matchers.is("10.1063/1.1905789"));
    }

    @Test
    public void testCleanDOI_cleanCommonExtractionPatterns() throws Exception {
        // Page-range residue fused to the DOI by PDF extraction must be removed.
        String doi = "43-61.DOI:10.1093/jpepsy/14.1.436/7";
        String cleanedDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanedDoi, is("10.1093/jpepsy/14.1.436/7"));
    }

    @Test
    public void testCleanDOI_removeURL_http() throws Exception {
        String doi = "http://doi.org/10.1063/1.1905789";
        String cleanDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanDoi, Matchers.is("10.1063/1.1905789"));
    }

    @Test
    public void testCleanDOI_removeURL_https() throws Exception {
        String doi = "https://doi.org/10.1063/1.1905789";
        String cleanDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanDoi, Matchers.is("10.1063/1.1905789"));
    }

    @Test
    public void testCleanDOI_removeURL_file() throws Exception {
        String doi = "file://doi.org/10.1063/1.1905789";
        String cleanDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanDoi, Matchers.is("10.1063/1.1905789"));
    }

    @Test
    public void testCleanDOI_diactric() throws Exception {
        // Trailing combining/diacritic character from PDF extraction is removed.
        String doi = "10.1063/1.1905789͔";
        String cleanDoi = BiblioItem.cleanDOI(doi);
        assertThat(cleanDoi, Matchers.is("10.1063/1.1905789"));
    }

    /** Builds a Person with the given first and last name, no affiliation. */
    private Person createPerson(String firstName, String secondName) {
        final Person person = new Person();
        person.setFirstName(firstName);
        person.setLastName(secondName);
        return person;
    }

    /** Builds a Person with a single affiliation given as a plain string. */
    private Person createPerson(String firstName, String secondName, String affiliation) {
        final Person person = createPerson(firstName, secondName);
        final Affiliation affiliation1 = new Affiliation();
        affiliation1.setAffiliationString(affiliation);
        List<Affiliation> affiliations = new ArrayList<>();
        affiliations.add(affiliation1);
        person.setAffiliations(affiliations);
        return person;
    }
}
18,680
39.699346
185
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/data/PersonNormalisationTest.java
package org.grobid.core.data;

import org.junit.Test;

import java.util.Arrays;
import java.util.List;

import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.hamcrest.CoreMatchers.*;

/**
 * Unit tests for {@link Person} name normalisation:
 * - {@code normalizeName()}: splits agglutinated initials in the first name;
 * - {@code normalizeCrossRefFirstName()}: normalises first names as delivered
 *   by CrossRef — strips dots, splits trailing initials into the middle name,
 *   and preserves hyphenated initials (e.g. "J.-L." -> "J-L").
 */
public class PersonNormalisationTest {

    Person target;

    @Test
    public void testAgglitinatedInitialsNormalisation1() {
        // "OJ" is split into first initial "O" and middle initial "J".
        target = new Person();
        target.setFirstName("OJ");
        target.setLastName("Simpson");
        target.normalizeName();
        assertThat(target.getFirstName(), is("O"));
        assertThat(target.getMiddleName(), is("J"));
        assertThat(target.getLastName(), is("Simpson"));
    }

    @Test
    public void testCrossRefNameNormlisation1() {
        // Dotted, space-separated initials.
        target = new Person();
        target.setFirstName("M. L.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("M"));
        assertThat(target.getMiddleName(), is("L"));
    }

    @Test
    public void testCrossRefNameNormlisation2() {
        // Dotted initials without a separating space.
        target = new Person();
        target.setFirstName("L.S.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("L"));
        assertThat(target.getMiddleName(), is("S"));
    }

    @Test
    public void testCrossRefNameNormlisation3() {
        // Full first name followed by a dotted middle initial.
        target = new Person();
        target.setFirstName("Nicholas J.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("Nicholas"));
        assertThat(target.getMiddleName(), is("J"));
    }

    @Test
    public void testCrossRefNameNormlisation4() {
        // Two agglutinated middle initials -> space separated.
        target = new Person();
        target.setFirstName("John W.S.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("John"));
        assertThat(target.getMiddleName(), is("W S"));
    }

    @Test
    public void testCrossRefNameNormlisation5() {
        // Same as above but already space separated.
        target = new Person();
        target.setFirstName("John W. S.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("John"));
        assertThat(target.getMiddleName(), is("W S"));
    }

    @Test
    public void testCrossRefNameNormlisation6() {
        // Initial followed by a full middle name.
        target = new Person();
        target.setFirstName("G. Arjen");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("G"));
        assertThat(target.getMiddleName(), is("Arjen"));
    }

    @Test
    public void testCrossRefNameNormlisation7() {
        // Trailing capitals glued to a full first name become initials.
        target = new Person();
        target.setFirstName("HermanHG");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("Herman"));
        assertThat(target.getMiddleName(), is("H G"));
    }

    @Test
    public void testCrossRefNameNormlisation8() {
        target = new Person();
        target.setFirstName("HaHGP");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("Ha"));
        assertThat(target.getMiddleName(), is("H G P"));
    }

    @Test
    public void testCrossRefNameNormlisation9() {
        target = new Person();
        target.setFirstName("HaP");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("Ha"));
        assertThat(target.getMiddleName(), is("P"));
    }

    @Test
    public void testCrossRefNameNormlisation10() {
        // Dotted two-letter abbreviation "Zs." is split into two initials;
        // non-ASCII last name must pass through untouched.
        target = new Person();
        target.setFirstName("Zs.");
        target.setLastName("Biró");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("Z"));
        assertThat(target.getMiddleName(), is("S"));
        assertThat(target.getLastName(), is("Biró"));
    }

    @Test
    public void testCrossRefNameNormlisation11() {
        // Hyphenated initials stay one compound first name, no middle name.
        target = new Person();
        target.setFirstName("J.-L.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("J-L"));
        assertThat(target.getMiddleName(), is(nullValue()));
    }

    @Test
    public void testCrossRefNameNormlisation12() {
        // Variant without the dot before the hyphen.
        target = new Person();
        target.setFirstName("J-L.");
        target.normalizeCrossRefFirstName();
        assertThat(target.getFirstName(), is("J-L"));
        assertThat(target.getMiddleName(), is(nullValue()));
    }
}
4,310
30.467153
60
java
grobid
grobid-master/grobid-core/src/test/java/org/grobid/core/transformation/xslt/JatsTransformerTest.java
package org.grobid.core.transformation.xslt; import org.apache.commons.io.IOUtils; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; public class JatsTransformerTest { JATSTransformer target; @Before public void setUp() throws Exception { target = new JATSTransformer(); } @Test @Ignore("Not ready yet") public void testTransform_teiHeader() throws Exception { String teiInput = IOUtils.toString(this.getClass().getResourceAsStream("/xslt/sample1.tei.header.xml"), "UTF-8"); String output = target.transform(teiInput); System.out.println(output); } @Test @Ignore("Not ready yet") public void testTransform_teiFulltext() throws Exception { String teiInput = IOUtils.toString(this.getClass().getResourceAsStream("/xslt/sample2.tei.fulltext.xml"), "UTF-8"); String output = target.transform(teiInput); System.out.println(output); } }
963
29.125
123
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/GrobidModel.java
package org.grobid.core; import java.io.Serializable; public interface GrobidModel extends Serializable { String getFolderName(); String getModelPath(); String getModelName(); String getTemplateName(); String toString(); }
250
13.764706
51
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/GrobidModels.java
package org.grobid.core;

import org.apache.commons.lang3.StringUtils;
import org.grobid.core.utilities.GrobidProperties;

import java.io.File;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import static org.grobid.core.engines.EngineParsers.LOGGER;

/**
 * This enum class acts as a registry for all Grobid models.
 */
public enum GrobidModels implements GrobidModel {

    // models are declared with a enumerated unique name associated to a **folder name** for the model
    // the folder name is where we will find the model implementation and its resources under grobid-home
    AFFILIATION_ADDRESS("affiliation-address"),
    SEGMENTATION("segmentation"),
    CITATION("citation"),
    REFERENCE_SEGMENTER("reference-segmenter"),
    DATE("date"),
    DICTIONARIES_LEXICAL_ENTRIES("dictionaries-lexical-entries"),
    DICTIONARIES_SENSE("dictionaries-sense"),
    MONOGRAPH("monograph"),
    ENTITIES_CHEMISTRY("entities/chemistry"),
    // ENTITIES_CHEMISTRY("chemistry"),
    FULLTEXT("fulltext"),
    SHORTTEXT("shorttext"),
    FIGURE("figure"),
    TABLE("table"),
    HEADER("header"),
    NAMES_CITATION("name/citation"),
    NAMES_HEADER("name/header"),
    PATENT_PATENT("patent/patent"),
    PATENT_NPL("patent/npl"),
    PATENT_CITATION("patent/citation"),
    PATENT_STRUCTURE("patent/structure"),
    PATENT_EDIT("patent/edit"),
    ENTITIES_NER("ner"),
    ENTITIES_NERFR("nerfr"),
    ENTITIES_NERSense("nersense"),
    // ENTITIES_BIOTECH("entities/biotech"),
    ENTITIES_BIOTECH("bio"),
    ASTRO("astro"),
    SOFTWARE("software"),
    DATASEER("dataseer"),
    DUMMY("none");

    // Sentinel folder label for the DUMMY model; declared after the constants
    // because enum constants must come first ("I cannot declare it before").
    public static final String DUMMY_FOLDER_LABEL = "none";

    // Collections are dedicated models variant, but using the same base parser.
    // This is used in particular for scientific or technical documents like standards (SDO)
    // which have a particular overall zoning and/or header, while the rest of the content
    // is similar to other general technical and scientific document
    public enum Collection {
        IETF("sdo/ietf");

        public final String label;

        private Collection(String label) {
            this.label = label;
        }

        public String getLabel() {
            return label;
        }
    };

    /**
     * Absolute path to the model.
     */
    private String modelPath;

    // Folder name of the model under grobid-home (may contain '/').
    private String folderName;

    // Lazily populated registry used by modelFor(); thread-safe map,
    // populated on first lookup.
    private static final ConcurrentMap<String, GrobidModel> models = new ConcurrentHashMap<>();

    GrobidModels(String folderName) {
        // DUMMY short-circuits: no path resolution is attempted for it.
        if (StringUtils.equals(DUMMY_FOLDER_LABEL, folderName)) {
            modelPath = DUMMY_FOLDER_LABEL;
            this.folderName = DUMMY_FOLDER_LABEL;
            return;
        }
        this.folderName = folderName;
        File path = GrobidProperties.getModelPath(this);
        // modelPath stays null when the path cannot be resolved.
        if (path != null)
            modelPath = path.getAbsolutePath();
    }

    public String getFolderName() {
        return folderName;
    }

    public String getModelPath() {
        return modelPath;
    }

    public String getModelName() {
        return folderName.replaceAll("/", "-");
    }

    public String getTemplateName() {
        return StringUtils.substringBefore(folderName, "/") + ".template";
    }

    @Override
    public String toString() {
        return folderName;
    }

    /**
     * Looks up a model by folder name, registering an ad-hoc
     * {@link GrobidModel} instance for names that do not match any enum
     * constant. Unlike the enum constants, the ad-hoc instance resolves its
     * model path lazily on each {@code getModelPath()} call.
     */
    public static GrobidModel modelFor(final String name) {
        // First call: seed the registry with all enum constants.
        if (models.isEmpty()) {
            for (GrobidModel model : values())
                models.putIfAbsent(model.getFolderName(), model);
        }

        models.putIfAbsent(name.toString(/* null-check */), new GrobidModel() {
            @Override
            public String getFolderName() {
                return name;
            }

            @Override
            public String getModelPath() {
                File path = GrobidProperties.getModelPath(this);
                if (path == null) {
                    LOGGER.warn("The file path to the " + name + " model is invalid, path is null");
                } else if (!path.exists()) {
                    LOGGER.warn("The file path to the " + name + " model is invalid: " + path.getAbsolutePath());
                }

                // A missing path is only warned about, not treated as an error.
                if (path == null)
                    return null;
                else
                    return path.getAbsolutePath();
            }

            @Override
            public String getModelName() {
                return getFolderName().replaceAll("/", "-");
            }

            @Override
            public String getTemplateName() {
                return StringUtils.substringBefore(getFolderName(), "/") + ".template";
            }
        });

        return models.get(name);
    }

    public String getName() {
        return name();
    }
}
4,811
29.075
113
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/factory/GrobidPoolingFactory.java
package org.grobid.core.factory; import java.util.NoSuchElementException; import org.apache.commons.pool.PoolableObjectFactory; import org.apache.commons.pool.impl.GenericObjectPool; import org.grobid.core.engines.Engine; import org.grobid.core.exceptions.GrobidException; import org.grobid.core.utilities.GrobidProperties; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class GrobidPoolingFactory extends AbstractEngineFactory implements PoolableObjectFactory<Engine> { /** * A pool which contains objects of type Engine for the conversion. */ private static volatile GenericObjectPool<Engine> grobidEnginePool = null; private static volatile Object grobidEnginePoolControl = new Object(); private static final Logger LOGGER = LoggerFactory .getLogger(GrobidPoolingFactory.class); private static volatile Boolean preload = false; /** * Constructor. */ protected GrobidPoolingFactory() { //fullInit(); init(); } /** * Creates a pool for {@link Engine} objects. So a number of objects is * always available and ready to start immediatly. 
* * @return GenericObjectPool */ protected static GenericObjectPool<Engine> newPoolInstance() { if (grobidEnginePool == null) { // initialize grobidEnginePool LOGGER.debug("synchronized newPoolInstance"); synchronized (grobidEnginePoolControl) { if (grobidEnginePool == null) { grobidEnginePool = new GenericObjectPool<>(GrobidPoolingFactory.newInstance()); //grobidEnginePool.setFactory(GrobidPoolingFactory.newInstance()); grobidEnginePool .setWhenExhaustedAction(GenericObjectPool.WHEN_EXHAUSTED_BLOCK); grobidEnginePool.setMaxWait(GrobidProperties.getPoolMaxWait()); grobidEnginePool.setMaxActive(GrobidProperties.getMaxConcurrency()); grobidEnginePool.setTestWhileIdle(false); grobidEnginePool.setLifo(false); grobidEnginePool.setTimeBetweenEvictionRunsMillis(2000); grobidEnginePool.setMaxIdle(0); } } } return grobidEnginePool; } /** * Obtains an instance from this pool.<br> * * By contract, clients must call {@link GrobidPoolingFactory#returnEngine} * when they finish to use the engine. */ public static synchronized Engine getEngineFromPool(boolean preloadModels) { preload = preloadModels; if (grobidEnginePool == null) { grobidEnginePool = newPoolInstance(); } Engine engine = null; try { engine = grobidEnginePool.borrowObject(); } catch (NoSuchElementException nseExp) { throw new NoSuchElementException(); } catch (Exception exp) { throw new GrobidException("An error occurred while getting an engine from the engine pool", exp); } LOGGER.info("Number of Engines in pool active/max: " + grobidEnginePool.getNumActive() + "/" + grobidEnginePool.getMaxActive()); return engine; } /** * By contract, engine must have been obtained using * {@link GrobidPoolingFactory#getEngineFromPool}.<br> */ public static void returnEngine(Engine engine) { try { //engine.close(); if (grobidEnginePool == null) LOGGER.error("grobidEnginePool is null !"); grobidEnginePool.returnObject(engine); } catch (Exception exp) { throw new GrobidException( "An error occurred while returning an engine from 
the engine pool", exp); } } /** * Creates and returns an instance of GROBIDFactory. The init() method will * be called. * * @return */ protected static GrobidPoolingFactory newInstance() { return new GrobidPoolingFactory(); } @Override public void activateObject(Engine arg0) throws Exception { } @Override public void destroyObject(Engine engine) throws Exception { } @Override public Engine makeObject() throws Exception { return (createEngine(this.preload)); } @Override public void passivateObject(Engine arg0) throws Exception { } @Override public boolean validateObject(Engine arg0) { return false; } }
3,923
27.642336
100
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/factory/AbstractEngineFactory.java
package org.grobid.core.factory; import org.grobid.core.engines.Engine; import org.grobid.core.engines.ModelMap; import org.grobid.core.engines.tagging.GrobidCRFEngine; import org.grobid.core.lexicon.Lexicon; import org.grobid.core.main.LibraryLoader; import org.grobid.core.utilities.GrobidProperties; import java.util.Collections; import java.util.Set; import org.apache.commons.collections4.CollectionUtils; /** * * Abstract factory to get engine instance. * */ public class AbstractEngineFactory { /** * The engine. */ private static Engine engine; /** * Return a new instance of engine if it doesn't exist, the existing * instance else. * * @return Engine */ protected synchronized Engine getEngine() { return getEngine(false); } /** * Return a new instance of engine if it doesn't exist, the existing * instance else. * * @return Engine */ protected synchronized Engine getEngine(boolean preload) { if (engine == null) { engine = createEngine(preload); } return engine; } /** * Return a new instance of engine. * * @return Engine */ protected Engine createEngine() { return createEngine(false); } /** * Return a new instance of engine. * * @return Engine */ protected Engine createEngine(boolean preload) { return new Engine(preload); } /** * Initializes all necessary things for starting grobid */ public static void init() { GrobidProperties.getInstance(); LibraryLoader.load(); Lexicon.getInstance(); } /** * Initializes all the models */ @Deprecated public static void fullInit() { init(); Set<GrobidCRFEngine> distinctModels = GrobidProperties.getDistinctModels(); if (CollectionUtils.containsAny(distinctModels, Collections.singletonList(GrobidCRFEngine.CRFPP))) { ModelMap.initModels(); } //Lexicon.getInstance(); } }
1,868
20
104
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/factory/GrobidFactory.java
package org.grobid.core.factory; import org.grobid.core.engines.Engine; /** * Factory to get engine instances. */ public class GrobidFactory extends AbstractEngineFactory { /** * The instance of GrobidFactory. */ private static GrobidFactory factory = null; /** * Constructor. */ protected GrobidFactory() { init(); } /** * Return a new instance of GrobidFactory if it doesn't exist, the existing * instance else. * * @return GrobidFactory */ public static GrobidFactory getInstance() { if (factory == null) { factory = newInstance(); } return factory; } /** * {@inheritDoc} */ @Override public synchronized Engine getEngine() { return super.getEngine(false); } /** * {@inheritDoc} */ @Override public synchronized Engine getEngine(boolean preload) { return super.getEngine(preload); } /** * {@inheritDoc} */ @Override public Engine createEngine() { return createEngine(false); } /** * {@inheritDoc} */ @Override public Engine createEngine(boolean preload) { return super.createEngine(preload); } /** * Creates a new instance of GrobidFactory. * * @return GrobidFactory */ protected static GrobidFactory newInstance() { return new GrobidFactory(); } /** * Resets this class and all its static fields. For instance sets the * current object to null. */ public static void reset() { factory = null; } }
1,459
16.176471
76
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorName.java
package org.grobid.core.features; import org.apache.commons.lang3.StringUtils; import org.apache.commons.collections4.CollectionUtils; import org.grobid.core.utilities.OffsetPosition; import org.grobid.core.utilities.TextUtilities; import org.grobid.core.utilities.UnicodeUtil; import org.grobid.core.utilities.TextUtilities; import org.grobid.core.layout.LayoutToken; import java.util.List; import java.util.regex.Matcher; /** * Class for features used for parsing sequence of names. * */ public class FeaturesVectorName { public String string = null; // lexical feature public String label = null; // label if known public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT public boolean singleChar = false; public boolean commonName = false; public boolean firstName = false; public boolean lastName = false; public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default) public boolean isKnownTitle = false; public boolean isKnownSuffix = false; public String printVector() { if (string == null) return null; if (string.length() == 0) return null; StringBuffer res = new StringBuffer(); // token string (1) res.append(string); // lowercase string res.append(" " + string.toLowerCase()); // prefix (4) res.append(" " + TextUtilities.prefix(string, 1)); res.append(" " + TextUtilities.prefix(string, 2)); res.append(" " + TextUtilities.prefix(string, 3)); res.append(" " + TextUtilities.prefix(string, 4)); // suffix (4) res.append(" " + TextUtilities.suffix(string, 1)); res.append(" " + TextUtilities.suffix(string, 2)); res.append(" " + TextUtilities.suffix(string, 3)); res.append(" " + TextUtilities.suffix(string, 4)); // line information (1) res.append(" " + lineStatus); // capitalisation (1) if (digit.equals("ALLDIGIT")) res.append(" NOCAPS"); else res.append(" " + capitalisation); 
// digit information (1) res.append(" " + digit); // character information (1) if (singleChar) res.append(" 1"); else res.append(" 0"); // lexical information (3) if (commonName) res.append(" 1"); else res.append(" 0"); if (firstName) res.append(" 1"); else res.append(" 0"); if (lastName) res.append(" 1"); else res.append(" 0"); if (isKnownTitle) res.append(" 1"); else res.append(" 0"); if (isKnownSuffix) res.append(" 1"); else res.append(" 0"); // punctuation information (1) res.append(" " + punctType); // in case the token is a punctuation (NO otherwise) // label - for training data (1) if (label != null) res.append(" " + label + "\n"); else res.append(" 0\n"); return res.toString(); } /** * Add feature for name parsing. */ static public String addFeaturesName(List<LayoutToken> tokens, List<String> labels, List<OffsetPosition> titlePosition, List<OffsetPosition> suffixPosition) throws Exception { FeatureFactory featureFactory = FeatureFactory.getInstance(); StringBuffer header = new StringBuffer(); boolean newline = true; String previousTag = null; String previousText = null; FeaturesVectorName features = null; LayoutToken token = null; int currentTitlePosition = 0; int currentSuffixPosition = 0; boolean isTitleToken; boolean isSuffixToken; boolean skipTest; for(int n=0; n<tokens.size(); n++) { boolean outputLineStatus = false; isTitleToken = false; isSuffixToken = false; skipTest = false; token = tokens.get(n); /*if (line == null) { header.append("\n \n"); newBlock = true; newline = true; n++; continue; } line = line.trim(); if (line.length() == 0) { header.append("\n \n"); newBlock = true; newline = true; n++; continue; } if (line.equals("@newline")) { if (newline) { newBlock = true; } newline = true; n++; continue; }*/ //int ind = line.indexOf(" "); String text = token.getText(); if (text.equals(" ")) { continue; } newline = false; if (text.equals("\n")) { newline = true; continue; } // parano normalisation text = 
UnicodeUtil.normaliseTextAndRemoveSpaces(text); if (text.trim().length() == 0 ) { continue; } // check the position of matches for journals if ((titlePosition != null) && (titlePosition.size() > 0)) { if (currentTitlePosition == titlePosition.size() - 1) { if (titlePosition.get(currentTitlePosition).end < n) { skipTest = true; } } if (!skipTest) { for (int i = currentTitlePosition; i < titlePosition.size(); i++) { if ((titlePosition.get(i).start <= n) && (titlePosition.get(i).end >= n)) { isTitleToken = true; currentTitlePosition = i; break; } else if (titlePosition.get(i).start > n) { isTitleToken = false; currentTitlePosition = i; break; } } } } // check the position of matches for abbreviated journals skipTest = false; if (suffixPosition != null) { if (currentSuffixPosition == suffixPosition.size() - 1) { if (suffixPosition.get(currentSuffixPosition).end < n) { skipTest = true; } } if (!skipTest) { for (int i = currentSuffixPosition; i < suffixPosition.size(); i++) { if ((suffixPosition.get(i).start <= n) && (suffixPosition.get(i).end >= n)) { isSuffixToken = true; currentSuffixPosition = i; break; } else if (suffixPosition.get(i).start > n) { isSuffixToken = false; currentSuffixPosition = i; break; } } } } String tag = null; if (!CollectionUtils.isEmpty(labels) && (labels.size() > n)) { tag = labels.get(n); } if (TextUtilities.filterLine(text)) { continue; } features = new FeaturesVectorName(); features.string = text; if (newline) { features.lineStatus = "LINESTART"; outputLineStatus = true; } Matcher m0 = featureFactory.isPunct.matcher(text); if (m0.find()) { features.punctType = "PUNCT"; } if ((text.equals("(")) | (text.equals("["))) { features.punctType = "OPENBRACKET"; } else if ((text.equals(")")) | (text.equals("]"))) { features.punctType = "ENDBRACKET"; } else if (text.equals(".")) { features.punctType = "DOT"; } else if (text.equals(",")) { features.punctType = "COMMA"; } else if (text.equals("-")) { features.punctType = "HYPHEN"; } else if 
(text.equals("\"") | text.equals("\'") | text.equals("`")) { features.punctType = "QUOTE"; } if (n == 0) { if (!outputLineStatus) { features.lineStatus = "LINESTART"; outputLineStatus = true; } } else if (tokens.size() == n + 1) { if (!outputLineStatus) { features.lineStatus = "LINEEND"; outputLineStatus = true; } } else { // look ahead... boolean endline = false; int i = 1; boolean endloop = false; while ((tokens.size() > n + i) & (!endloop)) { String newLine = tokens.get(n + i).getText(); if (newLine != null) { if (newLine.equals("\n")) { endline = true; if (!outputLineStatus) { features.lineStatus = "LINEEND"; outputLineStatus = true; } endloop = true; } else if (!newLine.equals(" ")) { endloop = true; } } /*if ((endline) & (!outputLineStatus)) { features.lineStatus = "LINEEND"; outputLineStatus = true; }*/ i++; } } if (!outputLineStatus) { features.lineStatus = "LINEIN"; outputLineStatus = true; } if (text.length() == 1) { features.singleChar = true; } if (Character.isUpperCase(text.charAt(0))) { features.capitalisation = "INITCAP"; } if (featureFactory.test_all_capital(text)) { features.capitalisation = "ALLCAP"; } if (features.capitalisation == null) features.capitalisation = "NOCAPS"; if (featureFactory.test_digit(text)) { features.digit = "CONTAINSDIGITS"; } if (featureFactory.test_common(text)) { features.commonName = true; } if (featureFactory.test_first_names(text)) { features.firstName = true; } if (featureFactory.test_last_names(text)) { features.lastName = true; } Matcher m = featureFactory.isDigit.matcher(text); if (m.find()) { features.digit = "ALLDIGIT"; } if (features.digit == null) features.digit = "NODIGIT"; if (features.punctType == null) features.punctType = "NOPUNCT"; if (isTitleToken) { features.isKnownTitle = true; } if (isSuffixToken) { features.isKnownSuffix = true; } features.label = tag; header.append(features.printVector()); previousTag = tag; previousText = text; } return header.toString(); } }
12,022
31.233244
103
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorDate.java
package org.grobid.core.features; import org.grobid.core.utilities.TextUtilities; import java.util.List; import java.util.regex.Matcher; /** * Class for features used for parsing date chunk. * */ public class FeaturesVectorDate { public String string = null; // lexical feature public String label = null; // label if known public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT public boolean singleChar = false; public boolean containDash = false; public boolean year = false; public boolean month = false; public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default) public boolean containPunct = false; public String printVector() { if (string == null) return null; if (string.length() == 0) return null; StringBuffer res = new StringBuffer(); // token string (1) res.append(string); // lowercase string res.append(" " + string.toLowerCase()); // prefix (4) res.append(" " + TextUtilities.prefix(string, 1)); res.append(" " + TextUtilities.prefix(string, 2)); res.append(" " + TextUtilities.prefix(string, 3)); res.append(" " + TextUtilities.prefix(string, 4)); // suffix (4) res.append(" " + TextUtilities.suffix(string, 1)); res.append(" " + TextUtilities.suffix(string, 2)); res.append(" " + TextUtilities.suffix(string, 3)); res.append(" " + TextUtilities.suffix(string, 4)); // line information (1) res.append(" " + lineStatus); // capitalisation (1) if (digit.equals("ALLDIGIT")) res.append(" NOCAPS"); else res.append(" " + capitalisation); // digit information (1) res.append(" " + digit); // character information (1) if (singleChar) res.append(" 1"); else res.append(" 0"); // lexical information (2) if (year) res.append(" 1"); else res.append(" 0"); if (month) res.append(" 1"); else res.append(" 0"); // punctuation information (2) res.append(" " + punctType); // in case 
the token is a punctuation (NO otherwise) // label - for training data (1) if (label != null) res.append(" " + label + "\n"); else res.append(" 0\n"); return res.toString(); } /** * Add feature for date parsing. */ public static String addFeaturesDate(List<String> lines) throws Exception { FeatureFactory featureFactory = FeatureFactory.getInstance(); String line; StringBuilder stringBuilder = new StringBuilder(); boolean newline = true; boolean newBlock = true; String currentFont = null; int currentFontSize = -1; boolean endblock = false; String previousTag = null; String previousText = null; FeaturesVectorDate features = null; for (int n = 0; n < lines.size(); n++) { boolean outputLineStatus = false; boolean outputBlockStatus = false; line = lines.get(n); if (line == null) { stringBuilder.append(" \n"); newBlock = true; newline = true; continue; } line = line.trim(); if (line.length() == 0) { stringBuilder.append("\n \n"); newBlock = true; newline = true; continue; } if (line.equals("@newline")) { if (newline) { newBlock = true; } newline = true; continue; } int ind = line.indexOf(" "); String text = null; String tag = null; if (ind != -1) { text = line.substring(0, ind); tag = line.substring(ind + 1, line.length()); } boolean filter = false; if (text == null) { filter = true; } else if (text.length() == 0) { filter = true; } else if (text.startsWith("@IMAGE")) { filter = true; } else if (text.contains(".pbm")) { filter = true; } else if (text.contains(".svg")) { filter = true; } else if (text.contains(".jpg")) { filter = true; } else if (text.contains(".png")) { filter = true; } if (filter) { continue; } features = new FeaturesVectorDate(); features.string = text; if (newline) { features.lineStatus = "LINESTART"; outputLineStatus = true; } Matcher m0 = featureFactory.isPunct.matcher(text); if (m0.find()) { features.punctType = "PUNCT"; } if ((text.equals("(")) | (text.equals("["))) { features.punctType = "OPENBRACKET"; } else if ((text.equals(")")) | 
(text.equals("]"))) { features.punctType = "ENDBRACKET"; } else if (text.equals(".")) { features.punctType = "DOT"; } else if (text.equals(",")) { features.punctType = "COMMA"; } else if (text.equals("-")) { features.punctType = "HYPHEN"; } else if (text.equals("\"") | text.equals("\'") | text.equals("`")) { features.punctType = "QUOTE"; } if (n == 0) { if (!outputLineStatus) { features.lineStatus = "LINESTART"; outputLineStatus = true; } } else if (lines.size() == n + 1) { if (!outputLineStatus) { features.lineStatus = "LINEEND"; outputLineStatus = true; } } else { // look ahead... boolean endline = false; int i = 1; boolean endloop = false; while ((lines.size() > n + i) && (!endloop)) { String newLine = lines.get(n + i); if (newLine != null) { if (newLine.trim().length() == 0) { endline = true; endblock = true; if (!outputLineStatus) { features.lineStatus = "LINEEND"; outputLineStatus = true; } } else if (newLine.equals("@newline")) { endline = true; if (!outputLineStatus) { features.lineStatus = "LINEEND"; outputLineStatus = true; } } else { endloop = true; } } if (endline && !outputLineStatus) { features.lineStatus = "LINEEND"; outputLineStatus = true; } i++; } } newline = false; if (!outputLineStatus) { features.lineStatus = "LINEIN"; outputLineStatus = true; } if (text.length() == 1) { features.singleChar = true; ; } if (Character.isUpperCase(text.charAt(0))) { features.capitalisation = "INITCAP"; } if (featureFactory.test_all_capital(text)) { features.capitalisation = "ALLCAP"; } if (features.capitalisation == null) features.capitalisation = "NOCAPS"; if (featureFactory.test_digit(text)) { features.digit = "CONTAINSDIGITS"; } if (featureFactory.test_month(text)) { features.month = true; } Matcher m = featureFactory.isDigit.matcher(text); if (m.find()) { features.digit = "ALLDIGIT"; } if (features.digit == null) features.digit = "NODIGIT"; Matcher m2 = featureFactory.year.matcher(text); if (m2.find()) { features.year = true; } if (features.punctType == null) 
features.punctType = "NOPUNCT"; features.label = tag; stringBuilder.append(features.printVector()); previousTag = tag; previousText = text; } return stringBuilder.toString(); } }
9,248
30.566553
90
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorChemicalEntity.java
package org.grobid.core.features; import org.grobid.core.utilities.TextUtilities; import java.util.StringTokenizer; import java.util.regex.Matcher; /** * Class for features used for chemical entity identification in raw texts such as scientific articles * and patent descriptions. * */ public class FeaturesVectorChemicalEntity { // default bins for relative position, set experimentally static private int nbBins = 12; public String string = null; // lexical feature public String label = null; // label if known public String capitalisation = null;// one of INITCAP, ALLCAPS, NOCAPS public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT public boolean singleChar = false; public boolean properName = false; public boolean commonName = false; public boolean firstName = false; public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default) // OPENQUOTE, ENDQUOTE public boolean isKnownChemicalToken = false; public boolean isKnownChemicalNameToken = false; public int relativeDocumentPosition = -1; public FeaturesVectorChemicalEntity() { } public String printVector() { if (string == null) return null; if (string.length() == 0) return null; StringBuffer res = new StringBuffer(); // token string (1) res.append(string); // lowercase string res.append(" " + string.toLowerCase()); // prefix (4) res.append(" " + TextUtilities.prefix(string, 1)); res.append(" " + TextUtilities.prefix(string, 2)); res.append(" " + TextUtilities.prefix(string, 3)); res.append(" " + TextUtilities.prefix(string, 4)); // suffix (4) res.append(" " + TextUtilities.suffix(string, 1)); res.append(" " + TextUtilities.suffix(string, 2)); res.append(" " + TextUtilities.suffix(string, 3)); res.append(" " + TextUtilities.suffix(string, 4)); // capitalisation (1) if (digit.equals("ALLDIGIT")) res.append(" NOCAPS"); else res.append(" " + capitalisation); // digit information (1) res.append(" " + digit); // character information (1) if (singleChar) 
res.append(" 1"); else res.append(" 0"); // lexical information (2) if (properName) res.append(" 1"); else res.append(" 0"); if (commonName) res.append(" 1"); else res.append(" 0"); // chemistry vocabulary information (2) if (isKnownChemicalToken) res.append(" 1"); else res.append(" 0"); if (isKnownChemicalNameToken) res.append(" 1"); else res.append(" 0"); // punctuation information (1) res.append(" " + punctType); // in case the token is a punctuation (NO otherwise) // token length res.append(" " + string.length()); // relative document position res.append(" " + relativeDocumentPosition); // label - for training data (1) if (label != null) res.append(" " + label + "\n"); else res.append(" 0\n"); return res.toString(); } /** * Add the features for the chemical entity extraction model. */ static public FeaturesVectorChemicalEntity addFeaturesChemicalEntities(String line, int totalLength, int position, boolean isChemicalToken, boolean isChemicalNameToken) { FeatureFactory featureFactory = FeatureFactory.getInstance(); FeaturesVectorChemicalEntity featuresVector = new FeaturesVectorChemicalEntity(); StringTokenizer st = new StringTokenizer(line, "\t"); if (st.hasMoreTokens()) { String word = st.nextToken(); String label = null; if (st.hasMoreTokens()) label = st.nextToken(); featuresVector.string = word; featuresVector.label = label; if (word.length() == 1) { featuresVector.singleChar = true; } if (featureFactory.test_all_capital(word)) featuresVector.capitalisation = "ALLCAPS"; else if (featureFactory.test_first_capital(word)) featuresVector.capitalisation = "INITCAP"; else featuresVector.capitalisation = "NOCAPS"; if (featureFactory.test_number(word)) featuresVector.digit = "ALLDIGIT"; else if (featureFactory.test_digit(word)) featuresVector.digit = "CONTAINDIGIT"; else featuresVector.digit = "NODIGIT"; if (featureFactory.test_common(word)) featuresVector.commonName = true; if (featureFactory.test_names(word)) featuresVector.properName = true; Matcher m0 = 
featureFactory.isPunct.matcher(word); if (m0.find()) { featuresVector.punctType = "PUNCT"; } if ((word.equals("(")) | (word.equals("["))) { featuresVector.punctType = "OPENBRACKET"; } else if ((word.equals(")")) | (word.equals("]"))) { featuresVector.punctType = "ENDBRACKET"; } else if (word.equals(".")) { featuresVector.punctType = "DOT"; } else if (word.equals(",")) { featuresVector.punctType = "COMMA"; } else if (word.equals("-")) { featuresVector.punctType = "HYPHEN"; } else if (word.equals("\"") | word.equals("\'") | word.equals("`")) { featuresVector.punctType = "QUOTE"; } if (featuresVector.capitalisation == null) featuresVector.capitalisation = "NOCAPS"; if (featuresVector.digit == null) featuresVector.digit = "NODIGIT"; if (featuresVector.punctType == null) featuresVector.punctType = "NOPUNCT"; featuresVector.relativeDocumentPosition = featureFactory.linearScaling(position, totalLength, nbBins); if (isChemicalToken) { featuresVector.isKnownChemicalToken = true; } if (isChemicalNameToken) { featuresVector.isKnownChemicalNameToken = true; } } return featuresVector; } }
6,899
32.333333
105
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorReferenceSegmenter.java
package org.grobid.core.features; import java.util.List; import java.util.regex.Matcher; import org.grobid.core.layout.LayoutToken; import org.grobid.core.utilities.TextUtilities; /** * Class for features used for header parsing. * */ public class FeaturesVectorReferenceSegmenter { // default bins for relative position, set experimentally public LayoutToken token = null; // not a feature, reference value public String string = null; // lexical feature public String label = null; // label if known public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENT, CENTERED, applied to the whole line public String fontStatus = null; // one of NEWFONT, SAMEFONT public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT public boolean bold = false; public boolean italic = false; public boolean rotation = false; public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT public boolean singleChar = false; public boolean containDash = false; public boolean properName = false; public boolean commonName = false; public boolean firstName = false; public boolean locationName = false; public boolean year = false; public boolean month = false; public boolean email = false; public boolean http = false; //public boolean acronym = false; public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT //public boolean containPunct = false; public int relativePosition = -1; public int lineLength = 0; public String punctuationProfile = null; // the punctuations of the current line of the token // true if the token is part of a predefinied name (single or multi-token) public String printVector() { if (string == null) return null; if (string.length() == 0) return null; StringBuilder res = new 
StringBuilder(); // token string (1) res.append(string); // lowercase string (1) res.append(" ").append(string.toLowerCase()); // prefix (4) res.append(" " + TextUtilities.prefix(string, 1)); res.append(" " + TextUtilities.prefix(string, 2)); res.append(" " + TextUtilities.prefix(string, 3)); res.append(" " + TextUtilities.prefix(string, 4)); // suffix (4) res.append(" " + TextUtilities.suffix(string, 1)); res.append(" " + TextUtilities.suffix(string, 2)); res.append(" " + TextUtilities.suffix(string, 3)); res.append(" " + TextUtilities.suffix(string, 4)); // line information (1) res.append(" ").append(lineStatus); // line position/indentation (1) res.append(" " + alignmentStatus); // capitalisation (1) if (digit.equals("ALLDIGIT")) res.append(" NOCAPS"); else res.append(" ").append(capitalisation); // digit information (1) res.append(" ").append(digit); // character information (1) if (singleChar) res.append(" 1"); else res.append(" 0"); // lexical information (8) if (properName) res.append(" 1"); else res.append(" 0"); if (commonName) res.append(" 1"); else res.append(" 0"); /* TODO: to review, never set! */ if (firstName) res.append(" 1"); else res.append(" 0"); /* TODO: to review, never set! 
*/ if (locationName) res.append(" 1"); else res.append(" 0"); if (year) res.append(" 1"); else res.append(" 0"); if (month) res.append(" 1"); else res.append(" 0"); /*if (email) res.append(" 1"); else res.append(" 0"); */ if (http) res.append(" 1"); else res.append(" 0"); // punctuation information (1) res.append(" ").append(punctType); // in case the token is a punctuation (NO otherwise) // relative length on the line as compared to the max line length on a predefined scale (1) res.append(" ").append(relativePosition); // relative position in the line on a predefined scale (1) res.append(" " + lineLength); // block information (1) //if (blockStatus != null) res.append(" " + blockStatus); // punctuation profile if ( (punctuationProfile == null) || (punctuationProfile.length() == 0) ) res.append(" no"); else { int theLength = punctuationProfile.length(); if (theLength > 10) theLength = 10; res.append(" " + theLength); } // label - for training data (1) if (label != null) res.append(" ").append(label).append("\n"); else res.append(" 0\n"); return res.toString(); } }
5,181
30.216867
112
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorMonograph.java
package org.grobid.core.features; import org.grobid.core.layout.LayoutToken; import org.grobid.core.utilities.TextUtilities; /** * Class for features used for high level segmentation of a monograph. * */ public class FeaturesVectorMonograph { public LayoutToken token = null; // not a feature, reference value public String line = null; // not a feature, the complete processed line public String block = null; // not a feature, the complete processed block public String string = null; // first lexical feature public String secondString = null; // second lexical feature public String label = null; // label if known public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND public String fontStatus = null; // one of NEWFONT, SAMEFONT public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT public String pageStatus = null; // one of PAGESTART, PAGEIN, PAGEEND public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENT, CENTERED public boolean bold = false; public boolean italic = false; public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT public boolean singleChar = false; public boolean properName = false; public boolean commonName = false; public boolean firstName = false; public boolean locationName = false; public boolean year = false; public boolean month = false; public boolean email = false; public boolean http = false; //public boolean acronym = false; public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default) public int relativeDocumentPosition = -1; public int relativePagePosition = -1; public int relativePagePositionChar = -1; // not used public String punctuationProfile = null; // the punctuations of the current line of the token public boolean firstPageBlock = false; public boolean lastPageBlock = false; 
public int lineLength = 0; public boolean bitmapAround = false; public boolean vectorAround = false; public boolean inMainArea = true; public boolean repetitivePattern = false; // if true, the textual pattern is repeated at the same position on other pages public boolean firstRepetitivePattern = false; // if true, this is a repetitive textual pattern and this is its first occurrence in the doc public int spacingWithPreviousBlock = 0; // discretized public int characterDensity = 0; // discretized public String printVector() { if (string == null) return null; if (string.length() == 0) return null; StringBuffer res = new StringBuffer(); // token string (1) res.append(string); // second token string if (secondString != null) res.append(" " + secondString); else res.append(" " + string); // lowercase string res.append(" " + string.toLowerCase()); // prefix (4) res.append(" " + TextUtilities.prefix(string, 1)); res.append(" " + TextUtilities.prefix(string, 2)); res.append(" " + TextUtilities.prefix(string, 3)); res.append(" " + TextUtilities.prefix(string, 4)); // block information (1) if (blockStatus != null) res.append(" " + blockStatus); //res.append(" 0"); // line information (1) if (lineStatus != null) res.append(" " + lineStatus); // line alignment/identation information (1) //res.append(" " + alignmentStatus); // page information (1) res.append(" " + pageStatus); // font information (1) res.append(" " + fontStatus); // font size information (1) res.append(" " + fontSize); // string type information (3) if (bold) res.append(" 1"); else res.append(" 0"); if (italic) res.append(" 1"); else res.append(" 0"); // capitalisation (1) if (digit.equals("ALLDIGIT")) res.append(" NOCAPS"); else res.append(" " + capitalisation); // digit information (1) res.append(" " + digit); // character information (1) if (singleChar) res.append(" 1"); else res.append(" 0"); // lexical information (9) if (properName) res.append(" 1"); else res.append(" 0"); if (commonName) res.append(" 
1"); else res.append(" 0"); if (firstName) res.append(" 1"); else res.append(" 0"); if (year) res.append(" 1"); else res.append(" 0"); if (month) res.append(" 1"); else res.append(" 0"); if (email) res.append(" 1"); else res.append(" 0"); if (http) res.append(" 1"); else res.append(" 0"); // punctuation information (1) if (punctType != null) res.append(" " + punctType); // in case the token is a punctuation (NO otherwise) // relative document position (1) res.append(" " + relativeDocumentPosition); // relative page position coordinate (1) //res.append(" " + relativePagePosition); // relative page position characters (1) res.append(" " + relativePagePositionChar); // punctuation profile if ( (punctuationProfile == null) || (punctuationProfile.length() == 0) ) { // string profile res.append(" no"); // number of punctuation symbols in the line res.append(" 0"); } else { // string profile res.append(" " + punctuationProfile); // number of punctuation symbols in the line res.append(" "+punctuationProfile.length()); } // current line length on a predefined scale and relative to the longest line of the current block res.append(" " + lineLength); if (bitmapAround) { res.append(" 1"); } else { res.append(" 0"); } if (vectorAround) { res.append(" 1"); } else { res.append(" 0"); } if (repetitivePattern) { res.append(" 1"); } else { res.append(" 0"); } if (firstRepetitivePattern) { res.append(" 1"); } else { res.append(" 0"); } // if the block is in the page main area (1) if (inMainArea) { res.append(" 1"); } else { res.append(" 0"); } // space with previous block, discretised (1) //res.append(" " + spacingWithPreviousBlock); //res.append(" " + 0); // character density of the previous block, discretised (1) //res.append(" " + characterDensity); //res.append(" " + 0); // label - for training data (1) /*if (label != null) res.append(" " + label + "\n"); else res.append(" 0\n"); */ res.append("\n"); return res.toString(); } }
7,323
29.139918
143
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorAffiliationAddress.java
package org.grobid.core.features;

import java.util.List;
import java.util.StringTokenizer;
import java.util.regex.Matcher;

import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.layout.LayoutToken;

/**
 * Feature vector for one token of an affiliation/address block.
 *
 * Each instance describes a single token; {@link #printVector()} serializes it as one
 * space-separated line whose column order is fixed (the downstream sequence-labelling
 * model consumes features positionally — do not reorder the appends).
 */
public class FeaturesVectorAffiliationAddress {
    public String string = null;          // lexical feature: the token itself
    public String label = null;           // expected label, only set for training data
    public String lineStatus = null;      // one of LINESTART, LINEIN, LINEEND
    public boolean bold = false;          // not emitted by printVector()
    public boolean italic = false;        // not emitted by printVector()
    public String capitalisation = null;  // one of INITCAP, ALLCAPS, NOCAPS
    public String digit;                  // one of ALLDIGIT, CONTAINDIGIT, NODIGIT
    public boolean singleChar = false;    // token is exactly one character long
    public boolean properName = false;    // token found in the proper-name lexicon
    public boolean commonName = false;    // token found in the common-word lexicon
    public boolean firstName = false;     // never set in this class; emitted as 0 unless set externally
    public boolean locationName = false;  // token matched a gazetteer place-name span
    public boolean countryName = false;   // token found in the country lexicon
    public String punctType = null;       // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT
    public String wordShape = null;       // shape string produced by TextUtilities.wordShape()

    /**
     * Serialize this vector as one feature line.
     *
     * @return the space-separated feature line terminated by '\n',
     *         or null when {@link #string} is null or empty
     */
    public String printVector() {
        if (string == null) return null;
        if (string.length() == 0) return null;
        StringBuffer res = new StringBuffer();

        // token string (1)
        res.append(string);

        // lowercase string (1)
        res.append(" " + string.toLowerCase());

        // prefix (4)
        res.append(" " + TextUtilities.prefix(string, 1));
        res.append(" " + TextUtilities.prefix(string, 2));
        res.append(" " + TextUtilities.prefix(string, 3));
        res.append(" " + TextUtilities.prefix(string, 4));

        // suffix (4)
        res.append(" " + TextUtilities.suffix(string, 1));
        res.append(" " + TextUtilities.suffix(string, 2));
        res.append(" " + TextUtilities.suffix(string, 3));
        res.append(" " + TextUtilities.suffix(string, 4));

        // line information (1)
        res.append(" " + lineStatus);

        // capitalisation (1) — all-digit tokens are forced to NOCAPS
        if (digit.equals("ALLDIGIT"))
            res.append(" NOCAPS");
        else
            res.append(" " + capitalisation);

        // digit information (1)
        res.append(" " + digit);

        // character information (1)
        if (singleChar)
            res.append(" 1");
        else
            res.append(" 0");

        // lexical information (5)
        if (properName)
            res.append(" 1");
        else
            res.append(" 0");

        if (commonName)
            res.append(" 1");
        else
            res.append(" 0");

        if (firstName)
            res.append(" 1");
        else
            res.append(" 0");

        if (locationName)
            res.append(" 1");
        else
            res.append(" 0");

        if (countryName)
            res.append(" 1");
        else
            res.append(" 0");

        // punctuation information (1) — NOPUNCT when the token is not a punctuation
        res.append(" " + punctType);

        // word shape (1)
        res.append(" ").append(wordShape);

        // label - for training data (1); "0" placeholder when unlabeled
        if (label != null)
            res.append(" " + label + "\n");
        else
            res.append(" 0\n");

        return res.toString();
    }

    /**
     * Build the feature file content for the affiliation+address model from pre-tokenized lines.
     *
     * Blank lines in {@code lines} separate affiliation blocks; each block is expected to have
     * its own entry in {@code allTokens} and {@code locationPlaces}, consumed in order via
     * {@code locPlace}.
     *
     * @param lines          one token (optionally followed by its label) per entry; "@newline"
     *                       entries and blank entries reset the line status
     * @param allTokens      layout tokens per affiliation block, parallel to the blocks in lines
     * @param locationPlaces gazetteer place-name match spans per block, expressed as token
     *                       offsets into the corresponding allTokens entry
     * @return the concatenated feature lines
     * @throws GrobidException when locationPlaces is null or empty
     */
    static public String addFeaturesAffiliationAddress(List<String> lines,
                                                       List<List<LayoutToken>> allTokens,
                                                       List<List<OffsetPosition>> locationPlaces) throws Exception {
        if (locationPlaces == null) {
            throw new GrobidException("At least one list of gazetter matches positions is null.");
        }
        if (locationPlaces.size() == 0) {
            throw new GrobidException("At least one list of gazetter matches positions is empty.");
        }

        StringBuffer result = new StringBuffer();
        List<String> block = null; // NOTE(review): never used — candidate for removal
        boolean isPlace = false;
        String lineStatus = "LINESTART";
        int locPlace = 0;
        List<OffsetPosition> currentLocationPlaces = locationPlaces.get(locPlace);
        List<LayoutToken> tokens = allTokens.get(locPlace);
        int currentPosPlaces = 0;
        int mm = 0; // position of the token in the current sentence
        String line = null;

        for (int i = 0; i < lines.size(); i++) {
            line = lines.get(i);
            isPlace = false;

            if (line.equals("\n")) {
                result.append("\n \n");
                continue;
            }

            // skip pure whitespace layout tokens so that mm stays aligned with the
            // token offsets used in the gazetteer match spans
            while ( (tokens != null) && (mm < tokens.size()) ) {
                LayoutToken token = tokens.get(mm);
                if (token.getText().equals(" ") || token.getText().equals("\n"))
                    mm++;
                else
                    break;
            }

            // check the position of matches for place names; currentPosPlaces is a cursor
            // into the (ordered) span list so each span is scanned at most once
            boolean skipTest = false;
            if ((currentLocationPlaces != null) && (currentLocationPlaces.size() > 0)) {
                if (currentPosPlaces == currentLocationPlaces.size() - 1) {
                    if (currentLocationPlaces.get(currentPosPlaces).end < mm) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int j = currentPosPlaces; j < currentLocationPlaces.size(); j++) {
                        if ((currentLocationPlaces.get(j).start <= mm) &&
                                (currentLocationPlaces.get(j).end >= mm)) {
                            isPlace = true;
                            currentPosPlaces = j;
                            break;
                        } else if (currentLocationPlaces.get(j).start > mm) {
                            isPlace = false;
                            currentPosPlaces = j;
                            break;
                        }
                    }
                }
            }

            if (line.trim().contains("@newline")) {
                lineStatus = "LINESTART";
                continue;
            }

            if (line.trim().length() == 0) {
                // blank entry: end of the current affiliation block — move to the next
                // token/span group.
                // NOTE(review): locPlace is incremented without a bounds check; if lines
                // contains more blank separators than allTokens/locationPlaces have entries,
                // the next iteration throws IndexOutOfBoundsException — confirm callers
                // always supply one entry per block.
                result.append("\n");
                lineStatus = "LINESTART";
                currentLocationPlaces = locationPlaces.get(locPlace);
                tokens = allTokens.get(locPlace);
                currentPosPlaces = 0;
                locPlace++;
                mm = 0;
            } else {
                // look ahead for line status update
                if (!lineStatus.equals("LINESTART")) {
                    if ((i + 1) < lines.size()) {
                        String nextLine = lines.get(i + 1);
                        if ((nextLine.trim().length() == 0) || (nextLine.trim().contains("@newline"))) {
                            lineStatus = "LINEEND";
                        }
                    } else if ((i + 1) == lines.size()) {
                        lineStatus = "LINEEND";
                    }
                }

                FeaturesVectorAffiliationAddress vector =
                        addFeaturesAffiliationAddress(line, lineStatus, isPlace);
                result.append(vector.printVector());

                if (lineStatus.equals("LINESTART")) {
                    lineStatus = "LINEIN";
                } else if (lineStatus.equals("LINEEND")) {
                    lineStatus = "LINESTART";
                }
            }
            mm++;
        }
        return result.toString();
    }

    /**
     * Build the feature vector for a single token line ("token[\t ]label").
     *
     * @param line       one token optionally followed by its label, tab/space separated
     * @param lineStatus LINESTART/LINEIN/LINEEND for this token's line
     * @param isPlace    true when the token lies inside a gazetteer place-name span
     * @return the populated vector; fields stay at defaults when line is blank
     */
    static private FeaturesVectorAffiliationAddress addFeaturesAffiliationAddress(String line,
                                                                                  String lineStatus,
                                                                                  boolean isPlace) {
        FeatureFactory featureFactory = FeatureFactory.getInstance();
        FeaturesVectorAffiliationAddress featuresVector = new FeaturesVectorAffiliationAddress();
        StringTokenizer st = new StringTokenizer(line.trim(), "\t ");
        if (st.hasMoreTokens()) {
            String word = st.nextToken();
            String label = null;
            if (st.hasMoreTokens())
                label = st.nextToken();
            featuresVector.string = word;
            featuresVector.label = label;
            featuresVector.lineStatus = lineStatus;

            if (word.length() == 1) {
                featuresVector.singleChar = true;
            }

            // capitalisation class
            if (featureFactory.test_all_capital(word))
                featuresVector.capitalisation = "ALLCAPS";
            else if (featureFactory.test_first_capital(word))
                featuresVector.capitalisation = "INITCAP";
            else
                featuresVector.capitalisation = "NOCAPS";

            // digit class
            if (featureFactory.test_number(word))
                featuresVector.digit = "ALLDIGIT";
            else if (featureFactory.test_digit(word))
                featuresVector.digit = "CONTAINDIGIT";
            else
                featuresVector.digit = "NODIGIT";

            // lexicon lookups
            if (featureFactory.test_common(word))
                featuresVector.commonName = true;
            if (featureFactory.test_names(word))
                featuresVector.properName = true;

            // punctuation class: generic PUNCT first, then refined by exact match
            Matcher m0 = featureFactory.isPunct.matcher(word);
            if (m0.find()) {
                featuresVector.punctType = "PUNCT";
            }
            if ((word.equals("(")) | (word.equals("["))) {
                featuresVector.punctType = "OPENBRACKET";
            } else if ((word.equals(")")) | (word.equals("]"))) {
                featuresVector.punctType = "ENDBRACKET";
            } else if (word.equals(".")) {
                featuresVector.punctType = "DOT";
            } else if (word.equals(",")) {
                featuresVector.punctType = "COMMA";
            } else if (word.equals("-")) {
                featuresVector.punctType = "HYPHEN";
            } else if (word.equals("\"") | word.equals("\'") | word.equals("`")) {
                featuresVector.punctType = "QUOTE";
            }

            // defaults for anything still unset
            if (featuresVector.capitalisation == null)
                featuresVector.capitalisation = "NOCAPS";
            if (featuresVector.digit == null)
                featuresVector.digit = "NODIGIT";
            if (featuresVector.punctType == null)
                featuresVector.punctType = "NOPUNCT";
            if (featureFactory.test_country(word)) {
                featuresVector.countryName = true;
            }
            if (isPlace) {
                featuresVector.locationName = true;
            }
            featuresVector.wordShape = TextUtilities.wordShape(word);
        }
        return featuresVector;
    }
}
10,926
34.70915
116
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorHeader.java
package org.grobid.core.features; import java.io.*; import java.util.List; import java.util.ArrayList; import java.util.regex.Matcher; import org.grobid.core.layout.LayoutToken; import org.grobid.core.utilities.TextUtilities; /** * Class for features used for header parsing. * */ public class FeaturesVectorHeader { public LayoutToken token = null; // not a feature, reference value public String string = null; // lexical feature public String label = null; // label if known public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENTED, CENTERED - applied to the whole line public String fontStatus = null; // one of NEWFONT, SAMEFONT public boolean bold = false; public boolean italic = false; public boolean rotation = false; public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT public boolean singleChar = false; //public boolean containDash = false; public boolean properName = false; public boolean commonName = false; public boolean firstName = false; public boolean locationName = false; public boolean year = false; public boolean month = false; public boolean email = false; public boolean http = false; //public boolean acronym = false; public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default) //public boolean containPunct = false; public String punctuationProfile = null; // the punctuations of the current line of the token public int spacingWithPreviousBlock = 0; // discretized public int characterDensity = 0; // discretized // font size related public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT public boolean largestFont = false; public boolean smallestFont = false; public boolean largerThanAverageFont = false; //public boolean superscript = false; 
public String printVector() { if (string == null) return null; if (string.length() == 0) return null; StringBuffer res = new StringBuffer(); // token string (1) res.append(string); // lowercase string res.append(" " + string.toLowerCase()); // prefix (4) res.append(" " + TextUtilities.prefix(string, 1)); res.append(" " + TextUtilities.prefix(string, 2)); res.append(" " + TextUtilities.prefix(string, 3)); res.append(" " + TextUtilities.prefix(string, 4)); // suffix (4) res.append(" " + TextUtilities.suffix(string, 1)); res.append(" " + TextUtilities.suffix(string, 2)); res.append(" " + TextUtilities.suffix(string, 3)); res.append(" " + TextUtilities.suffix(string, 4)); // 10 first features written at this stage // block information (1) res.append(" " + blockStatus); // line information (1) res.append(" " + lineStatus); // line position/indentation (1) res.append(" " + alignmentStatus); // font information (1) res.append(" " + fontStatus); // font size information (1) res.append(" " + fontSize); // string type information (2) if (bold) res.append(" 1"); else res.append(" 0"); if (italic) res.append(" 1"); else res.append(" 0"); // capitalisation (1) if (digit.equals("ALLDIGIT")) res.append(" NOCAPS"); else res.append(" " + capitalisation); // digit information (1) res.append(" " + digit); // character information (1) if (singleChar) res.append(" 1"); else res.append(" 0"); // 20 first features written at this stage // lexical information (7) if (properName) res.append(" 1"); else res.append(" 0"); if (commonName) res.append(" 1"); else res.append(" 0"); /*if (firstName) res.append(" 1"); else res.append(" 0");*/ if (year) res.append(" 1"); else res.append(" 0"); if (month) res.append(" 1"); else res.append(" 0"); if (locationName) res.append(" 1"); else res.append(" 0"); if (email) res.append(" 1"); else res.append(" 0"); if (http) res.append(" 1"); else res.append(" 0"); // punctuation information (1) res.append(" " + punctType); // in case the token is a 
punctuation (NO otherwise) // 28 features written at this point // space with previous block, discretised (1) //res.append(" " + spacingWithPreviousBlock); //res.append(" " + 0); // character density of the previous block, discretised (1) //res.append(" " + characterDensity); //res.append(" " + 0); if (largestFont) res.append(" 1"); else res.append(" 0"); if (smallestFont) res.append(" 1"); else res.append(" 0"); if (largerThanAverageFont) res.append(" 1"); else res.append(" 0"); /*if (superscript) res.append(" 1"); else res.append(" 0");*/ // 30 features written at this point // label - for training data (1) if (label != null) res.append(" " + label + "\n"); /*else res.append("\n");*/ else res.append(" 0\n"); return res.toString(); } }
6,001
27.580952
122
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorCitation.java
package org.grobid.core.features;

import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.layout.LayoutToken;

import java.util.List;
import java.util.regex.Matcher;

/**
 * Feature vector for one token of a bibliographical reference (citation model).
 *
 * Each instance describes a single token; {@link #printVector()} serializes it as one
 * space-separated line whose column order is fixed (features are consumed positionally
 * by the sequence-labelling model) — do not reorder the appends.
 */
public class FeaturesVectorCitation {
    // default bins for relative position, set experimentally
    static private int nbBins = 12;

    public String string = null;          // lexical feature: the token itself
    public String label = null;           // expected label, only set for training data
    public String blockStatus = null;     // one of BLOCKSTART, BLOCKIN, BLOCKEND (not emitted)
    public String lineStatus = null;      // one of LINESTART, LINEIN, LINEEND
    public String fontStatus = null;      // one of NEWFONT, SAMEFONT (not emitted)
    public String fontSize = null;        // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT (not emitted)
    public boolean bold = false;          // not emitted by printVector()
    public boolean italic = false;        // not emitted by printVector()
    public String capitalisation = null;  // one of INITCAP, ALLCAP, NOCAPS
    public String digit;                  // one of ALLDIGIT, CONTAINSDIGITS, NODIGIT
    public boolean singleChar = false;
    public boolean properName = false;
    public boolean commonName = false;
    public boolean firstName = false;
    public boolean lastName = false;
    public boolean year = false;
    public boolean month = false;
    public boolean http = false;          // token lies inside a recognized URL span
    public String punctType = null;       // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT
    public boolean containPunct = false;  // not emitted by printVector()
    public int relativePosition = -1;     // token position scaled to nbBins over the sequence length

    // true if the token is part of a predefinied name (single or multi-token)
    public boolean isKnownJournalTitle = false;
    public boolean isKnownAbbrevJournalTitle = false;
    public boolean isKnownConferenceTitle = false;
    public boolean isKnownPublisher = false;
    public boolean isKnownLocation = false;
    public boolean isKnownCollaboration = false;
    public boolean isKnownIdentifier = false;

    /**
     * Serialize this vector as one feature line.
     *
     * @return the space-separated feature line terminated by '\n',
     *         or null when {@link #string} is null or empty
     */
    public String printVector() {
        if (string == null) return null;
        if (string.length() == 0) return null;
        StringBuilder res = new StringBuilder();

        // token string (1)
        res.append(string);

        // lowercase string (1)
        res.append(" ").append(string.toLowerCase());

        // prefix (4)
        res.append(" " + TextUtilities.prefix(string, 1));
        res.append(" " + TextUtilities.prefix(string, 2));
        res.append(" " + TextUtilities.prefix(string, 3));
        res.append(" " + TextUtilities.prefix(string, 4));

        // suffix (4)
        res.append(" " + TextUtilities.suffix(string, 1));
        res.append(" " + TextUtilities.suffix(string, 2));
        res.append(" " + TextUtilities.suffix(string, 3));
        res.append(" " + TextUtilities.suffix(string, 4));

        // line information (1)
        res.append(" ").append(lineStatus);

        // capitalisation (1) — all-digit tokens are forced to NOCAPS
        if (digit.equals("ALLDIGIT"))
            res.append(" NOCAPS");
        else
            res.append(" ").append(capitalisation);

        // digit information (1)
        res.append(" ").append(digit);

        // character information (1)
        if (singleChar)
            res.append(" 1");
        else
            res.append(" 0");

        // lexical information (9)
        if (properName)
            res.append(" 1");
        else
            res.append(" 0");

        if (commonName)
            res.append(" 1");
        else
            res.append(" 0");

        if (firstName)
            res.append(" 1");
        else
            res.append(" 0");

        if (lastName)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownLocation)
            res.append(" 1");
        else
            res.append(" 0");

        if (year)
            res.append(" 1");
        else
            res.append(" 0");

        if (month)
            res.append(" 1");
        else
            res.append(" 0");

        if (http)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownCollaboration)
            res.append(" 1");
        else
            res.append(" 0");

        // bibliographical information (4): journal (full or abbreviated), conference,
        // publisher, identifier
        if (isKnownJournalTitle || isKnownAbbrevJournalTitle)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownConferenceTitle)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownPublisher)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownIdentifier)
            res.append(" 1");
        else
            res.append(" 0");

        // punctuation information (1) — NOPUNCT when the token is not a punctuation
        res.append(" ").append(punctType);

        // relative position in the sequence (1)
        res.append(" ").append(relativePosition);

        // label - for training data (1); "0" placeholder when unlabeled
        if (label != null)
            res.append(" ").append(label).append("\n");
        else
            res.append(" 0\n");

        return res.toString();
    }

    /**
     * Build the feature file content for the citation model from a tokenized reference.
     *
     * Each of the eight position lists holds ordered token-offset spans of gazetteer matches
     * (journals, abbreviated journals, conferences, publishers, locations, collaborations,
     * identifiers, URLs). A per-list cursor (currentXxxPositions) advances monotonically so
     * every span list is scanned at most once across the whole token sequence — the spans
     * are assumed sorted by start offset.
     *
     * @param tokens the tokenized reference string
     * @param labels expected label per token (may be null/shorter than tokens for prediction)
     * @return the concatenated feature lines, one per retained token
     * @throws GrobidException when any of the gazetteer position lists is null
     */
    static public String addFeaturesCitation(List<LayoutToken> tokens,
                                             List<String> labels,
                                             List<OffsetPosition> journalPositions,
                                             List<OffsetPosition> abbrevJournalPositions,
                                             List<OffsetPosition> conferencePositions,
                                             List<OffsetPosition> publisherPositions,
                                             List<OffsetPosition> locationPositions,
                                             List<OffsetPosition> collaborationPositions,
                                             List<OffsetPosition> identifierPositions,
                                             List<OffsetPosition> urlPositions) throws Exception {
        if ((journalPositions == null) ||
                (abbrevJournalPositions == null) ||
                (conferencePositions == null) ||
                (publisherPositions == null) ||
                (locationPositions == null) ||
                (collaborationPositions == null) ||
                (identifierPositions == null) ||
                (urlPositions == null)) {
            throw new GrobidException("At least one list of gazetter matches positions is null.");
        }

        FeatureFactory featureFactory = FeatureFactory.getInstance();

        StringBuilder citation = new StringBuilder();

        // one monotonically advancing cursor per gazetteer span list
        int currentJournalPositions = 0;
        int currentAbbrevJournalPositions = 0;
        int currentConferencePositions = 0;
        int currentPublisherPositions = 0;
        int currentLocationPositions = 0;
        int currentCollaborationPositions = 0;
        int currentIdentifierPositions = 0;
        int currentUrlPositions = 0;

        boolean isJournalToken;
        boolean isAbbrevJournalToken;
        boolean isConferenceToken;
        boolean isPublisherToken;
        boolean isLocationToken;
        boolean isCollaborationToken;
        boolean isIdentifierToken;
        boolean isUrlToken;
        boolean skipTest;

        // NOTE(review): previousTag/previousText are assigned at the end of each iteration
        // but never read — dead state, candidates for removal
        String previousTag = null;
        String previousText = null;
        FeaturesVectorCitation features = null;
        int sentenceLenth = tokens.size(); // length of the current sentence (sic: identifier typo kept)

        for (int n=0; n < tokens.size(); n++) {
            LayoutToken token = tokens.get(n);

            String tag = null;
            if ( (labels != null) && (labels.size() > 0) && (n < labels.size()) )
                tag = labels.get(n);

            boolean outputLineStatus = false;
            isJournalToken = false;
            isAbbrevJournalToken = false;
            isConferenceToken = false;
            isPublisherToken = false;
            isLocationToken = false;
            isCollaborationToken = false;
            isIdentifierToken = false;
            isUrlToken = false;
            skipTest = false;

            String text = token.getText();
            if (text.equals(" ")) {
                continue;
            }

            if (text.equals("\n")) {
                // should not be the case for citation model
                continue;
            }

            // paranoid normalisation — strip spaces and normalise unicode before featurizing
            text = UnicodeUtil.normaliseTextAndRemoveSpaces(text);
            if (text.trim().length() == 0 ) {
                continue;
            }

            // check the position of matches for journals; skipTest short-circuits once the
            // last span is fully behind the current token offset
            if ((journalPositions != null) && (journalPositions.size() > 0)) {
                if (currentJournalPositions == journalPositions.size() - 1) {
                    if (journalPositions.get(currentJournalPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentJournalPositions; i < journalPositions.size(); i++) {
                        if ((journalPositions.get(i).start <= n) &&
                                (journalPositions.get(i).end >= n)) {
                            isJournalToken = true;
                            currentJournalPositions = i;
                            break;
                        } else if (journalPositions.get(i).start > n) {
                            isJournalToken = false;
                            currentJournalPositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for abbreviated journals
            // NOTE(review): no size() > 0 guard here unlike the journal case — harmless,
            // the empty-list loop simply never runs
            skipTest = false;
            if (abbrevJournalPositions != null) {
                if (currentAbbrevJournalPositions == abbrevJournalPositions.size() - 1) {
                    if (abbrevJournalPositions.get(currentAbbrevJournalPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentAbbrevJournalPositions; i < abbrevJournalPositions.size(); i++) {
                        if ((abbrevJournalPositions.get(i).start <= n) &&
                                (abbrevJournalPositions.get(i).end >= n)) {
                            isAbbrevJournalToken = true;
                            currentAbbrevJournalPositions = i;
                            break;
                        } else if (abbrevJournalPositions.get(i).start > n) {
                            isAbbrevJournalToken = false;
                            currentAbbrevJournalPositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for conferences
            skipTest = false;
            if (conferencePositions != null) {
                if (currentConferencePositions == conferencePositions.size() - 1) {
                    if (conferencePositions.get(currentConferencePositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentConferencePositions; i < conferencePositions.size(); i++) {
                        if ((conferencePositions.get(i).start <= n) &&
                                (conferencePositions.get(i).end >= n)) {
                            isConferenceToken = true;
                            currentConferencePositions = i;
                            break;
                        } else if (conferencePositions.get(i).start > n) {
                            isConferenceToken = false;
                            currentConferencePositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for publishers
            skipTest = false;
            if (publisherPositions != null) {
                if (currentPublisherPositions == publisherPositions.size() - 1) {
                    if (publisherPositions.get(currentPublisherPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentPublisherPositions; i < publisherPositions.size(); i++) {
                        if ((publisherPositions.get(i).start <= n) &&
                                (publisherPositions.get(i).end >= n)) {
                            isPublisherToken = true;
                            currentPublisherPositions = i;
                            break;
                        } else if (publisherPositions.get(i).start > n) {
                            isPublisherToken = false;
                            currentPublisherPositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for locations
            skipTest = false;
            if (locationPositions != null) {
                if (currentLocationPositions == locationPositions.size() - 1) {
                    if (locationPositions.get(currentLocationPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentLocationPositions; i < locationPositions.size(); i++) {
                        if ((locationPositions.get(i).start <= n) &&
                                (locationPositions.get(i).end >= n)) {
                            isLocationToken = true;
                            currentLocationPositions = i;
                            break;
                        } else if (locationPositions.get(i).start > n) {
                            isLocationToken = false;
                            currentLocationPositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for collaboration
            skipTest = false;
            if (collaborationPositions != null) {
                if (currentCollaborationPositions == collaborationPositions.size() - 1) {
                    if (collaborationPositions.get(currentCollaborationPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentCollaborationPositions; i < collaborationPositions.size(); i++) {
                        if ((collaborationPositions.get(i).start <= n) &&
                                (collaborationPositions.get(i).end >= n)) {
                            isCollaborationToken = true;
                            currentCollaborationPositions = i;
                            break;
                        } else if (collaborationPositions.get(i).start > n) {
                            isCollaborationToken = false;
                            currentCollaborationPositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for identifier
            skipTest = false;
            if (identifierPositions != null) {
                if (currentIdentifierPositions == identifierPositions.size() - 1) {
                    if (identifierPositions.get(currentIdentifierPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentIdentifierPositions; i < identifierPositions.size(); i++) {
                        if ((identifierPositions.get(i).start <= n) &&
                                (identifierPositions.get(i).end >= n)) {
                            isIdentifierToken = true;
                            currentIdentifierPositions = i;
                            break;
                        } else if (identifierPositions.get(i).start > n) {
                            isIdentifierToken = false;
                            currentIdentifierPositions = i;
                            break;
                        }
                    }
                }
            }

            // check the position of matches for url
            skipTest = false;
            if (urlPositions != null) {
                if (currentUrlPositions == urlPositions.size() - 1) {
                    if (urlPositions.get(currentUrlPositions).end < n) {
                        skipTest = true;
                    }
                }
                if (!skipTest) {
                    for (int i = currentUrlPositions; i < urlPositions.size(); i++) {
                        if ((urlPositions.get(i).start <= n) &&
                                (urlPositions.get(i).end >= n)) {
                            isUrlToken = true;
                            currentUrlPositions = i;
                            break;
                        } else if (urlPositions.get(i).start > n) {
                            isUrlToken = false;
                            currentUrlPositions = i;
                            break;
                        }
                    }
                }
            }

            // drop tokens filtered out as noise
            if (TextUtilities.filterLine(text)) {
                continue;
            }

            features = new FeaturesVectorCitation();
            features.string = text;
            features.relativePosition = featureFactory.linearScaling(n, sentenceLenth, nbBins);

            if (n == 0) {
                features.lineStatus = "LINESTART";
                outputLineStatus = true;
            }

            // punctuation class: generic PUNCT first, then refined by exact match
            Matcher m0 = featureFactory.isPunct.matcher(text);
            if (m0.find()) {
                features.punctType = "PUNCT";
            }
            if ((text.equals("(")) | (text.equals("["))) {
                features.punctType = "OPENBRACKET";
            } else if ((text.equals(")")) | (text.equals("]"))) {
                features.punctType = "ENDBRACKET";
            } else if (text.equals(".")) {
                features.punctType = "DOT";
            } else if (text.equals(",")) {
                features.punctType = "COMMA";
            } else if (text.equals("-")) {
                features.punctType = "HYPHEN";
            } else if (text.equals("\"") | text.equals("\'") | text.equals("`")) {
                features.punctType = "QUOTE";
            }

            // line status: LINESTART for the first token, LINEEND for the last,
            // LINEIN otherwise (outputLineStatus guards against double assignment)
            if (n == 0) {
                if (!outputLineStatus) {
                    features.lineStatus = "LINESTART";
                    outputLineStatus = true;
                }
            } else if (tokens.size() == n+1) {
                if (!outputLineStatus) {
                    features.lineStatus = "LINEEND";
                    outputLineStatus = true;
                }
            }
            if (!outputLineStatus) {
                features.lineStatus = "LINEIN";
                outputLineStatus = true;
            }

            if (text.length() == 1) {
                features.singleChar = true;
            }

            // capitalisation class (later checks override earlier ones)
            if (Character.isUpperCase(text.charAt(0))) {
                features.capitalisation = "INITCAP";
            }
            if (featureFactory.test_all_capital(text)) {
                features.capitalisation = "ALLCAP";
            }

            if (featureFactory.test_digit(text)) {
                features.digit = "CONTAINSDIGITS";
            }

            // lexicon lookups
            if (featureFactory.test_common(text)) {
                features.commonName = true;
            }
            if (featureFactory.test_names(text)) {
                features.properName = true;
            }
            if (featureFactory.test_month(text)) {
                features.month = true;
            }
            if (featureFactory.test_last_names(text)) {
                features.lastName = true;
            }
            if (featureFactory.test_first_names(text)) {
                features.firstName = true;
            }

            Matcher m = featureFactory.isDigit.matcher(text);
            if (m.find()) {
                features.digit = "ALLDIGIT";
            }

            Matcher m2 = featureFactory.year.matcher(text);
            if (m2.find()) {
                features.year = true;
            }

            if (isCollaborationToken)
                features.isKnownCollaboration = true;

            // defaults for anything still unset
            if (features.capitalisation == null)
                features.capitalisation = "NOCAPS";

            if (features.digit == null)
                features.digit = "NODIGIT";

            if (features.punctType == null)
                features.punctType = "NOPUNCT";

            // transfer the gazetteer-match flags computed above
            if (isJournalToken) {
                features.isKnownJournalTitle = true;
            }
            if (isAbbrevJournalToken) {
                features.isKnownAbbrevJournalTitle = true;
            }
            if (isConferenceToken) {
                features.isKnownConferenceTitle = true;
            }
            if (isPublisherToken) {
                features.isKnownPublisher = true;
            }
            if (isLocationToken) {
                features.isKnownLocation = true;
            }
            if (isIdentifierToken) {
                features.isKnownIdentifier = true;
            }
            if (isUrlToken) {
                features.http = true;
            }

            features.label = tag;
            citation.append(features.printVector());

            previousTag = tag;
            previousText = text;
        }

        return citation.toString();
    }
}
22,072
36.097479
122
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorSegmentation.java
package org.grobid.core.features;

import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.TextUtilities;

/**
 * Class for features used for high level segmentation of document.
 *
 * Each instance holds the feature values computed for one token/line; {@link #printVector()}
 * serializes them as one space-separated line suitable for a sequence-labeling engine.
 */
public class FeaturesVectorSegmentation {
    public LayoutToken token = null; // not a feature, reference value
    public String line = null; // not a feature, the complete processed line
    public String string = null; // first lexical feature
    public String secondString = null; // second lexical feature
    public String label = null; // label if known
    public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND
    public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND
    public String fontStatus = null; // one of NEWFONT, SAMEFONT
    public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT
    public String pageStatus = null; // one of PAGESTART, PAGEIN, PAGEEND
    public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENT, CENTERED
    public boolean bold = false;
    public boolean italic = false;
    public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS
    public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT
    public boolean singleChar = false;
    public boolean properName = false;
    public boolean commonName = false;
    public boolean firstName = false;
    public boolean locationName = false; // NOTE(review): set but never emitted by printVector()
    public boolean year = false;
    public boolean month = false;
    public boolean email = false;
    public boolean http = false;
    //public boolean acronym = false;
    public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default)
    public int relativeDocumentPosition = -1;
    public int relativePagePosition = -1;
    public int relativePagePositionChar = -1; // not used
    public String punctuationProfile = null; // the punctuations of the current line of the token
    public boolean firstPageBlock = false;
    public boolean lastPageBlock = false;
    public int lineLength = 0;
    public boolean bitmapAround = false;
    public boolean vectorAround = false;
    public boolean inMainArea = true;
    public boolean repetitivePattern = false; // if true, the textual pattern is repeated at the same position on other pages
    public boolean firstRepetitivePattern = false; // if true, this is a repetitive textual pattern and this is its first occurrence in the doc
    public int spacingWithPreviousBlock = 0; // discretized
    public int characterDensity = 0; // discretized

    /**
     * Serializes this feature vector as a single newline-terminated, space-separated line.
     *
     * The order and number of appended columns is presumably consumed positionally by the
     * trained segmentation model — do not reorder without retraining (TODO confirm against
     * the model's feature template).
     *
     * @return the serialized vector, or null when {@code string} is null or empty
     */
    public String printVector() {
        if (string == null) return null;
        if (string.length() == 0) return null;
        StringBuffer res = new StringBuffer();

        // token string (1)
        res.append(string);

        // second token string (falls back to the first token when absent)
        if (secondString != null)
            res.append(" " + secondString);
        else
            res.append(" " + string);

        // lowercase string
        res.append(" " + string.toLowerCase());

        // prefix (4)
        res.append(" " + TextUtilities.prefix(string, 1));
        res.append(" " + TextUtilities.prefix(string, 2));
        res.append(" " + TextUtilities.prefix(string, 3));
        res.append(" " + TextUtilities.prefix(string, 4));

        // block information (1) — skipped entirely when blockStatus is null
        if (blockStatus != null)
            res.append(" " + blockStatus);
        //res.append(" 0");

        // line information (1) — skipped entirely when lineStatus is null
        if (lineStatus != null)
            res.append(" " + lineStatus);

        // line alignment/identation information (1)
        //res.append(" " + alignmentStatus);

        // page information (1)
        res.append(" " + pageStatus);

        // font information (1)
        res.append(" " + fontStatus);

        // font size information (1)
        res.append(" " + fontSize);

        // string type information (3)
        if (bold)
            res.append(" 1");
        else
            res.append(" 0");

        if (italic)
            res.append(" 1");
        else
            res.append(" 0");

        // capitalisation (1) — all-digit tokens are forced to NOCAPS
        if (digit.equals("ALLDIGIT"))
            res.append(" NOCAPS");
        else
            res.append(" " + capitalisation);

        // digit information (1)
        res.append(" " + digit);

        // character information (1)
        if (singleChar)
            res.append(" 1");
        else
            res.append(" 0");

        // lexical information — NOTE(review): original comment said (9) but only 7 flags
        // are emitted here (locationName is never appended); verify against the template
        if (properName)
            res.append(" 1");
        else
            res.append(" 0");

        if (commonName)
            res.append(" 1");
        else
            res.append(" 0");

        /* TODO: to review, never set! */
        if (firstName)
            res.append(" 1");
        else
            res.append(" 0");

        if (year)
            res.append(" 1");
        else
            res.append(" 0");

        if (month)
            res.append(" 1");
        else
            res.append(" 0");

        if (email)
            res.append(" 1");
        else
            res.append(" 0");

        if (http)
            res.append(" 1");
        else
            res.append(" 0");

        // punctuation information (1) — skipped entirely when punctType is null
        if (punctType != null)
            res.append(" " + punctType); // in case the token is a punctuation (NO otherwise)

        // relative document position (1)
        res.append(" " + relativeDocumentPosition);

        // relative page position coordinate (1)
        //res.append(" " + relativePagePosition);

        // relative page position characters (1)
        res.append(" " + relativePagePositionChar);

        // punctuation profile
        if ( (punctuationProfile == null) || (punctuationProfile.length() == 0) ) {
            // string profile
            res.append(" no");
            // number of punctuation symbols in the line
            res.append(" 0");
        }
        else {
            // string profile
            res.append(" " + punctuationProfile);
            // number of punctuation symbols in the line
            res.append(" "+punctuationProfile.length());
        }

        // current line length on a predefined scale and relative to the longest line of the current block
        res.append(" " + lineLength);

        if (bitmapAround) {
            res.append(" 1");
        }
        else {
            res.append(" 0");
        }

        if (vectorAround) {
            res.append(" 1");
        }
        else {
            res.append(" 0");
        }

        if (repetitivePattern) {
            res.append(" 1");
        }
        else {
            res.append(" 0");
        }

        if (firstRepetitivePattern) {
            res.append(" 1");
        }
        else {
            res.append(" 0");
        }

        // if the block is in the page main area (1)
        if (inMainArea) {
            res.append(" 1");
        }
        else {
            res.append(" 0");
        }

        // space with previous block, discretised (1)
        //res.append(" " + spacingWithPreviousBlock);
        //res.append(" " + 0);

        // character density of the previous block, discretised (1)
        //res.append(" " + characterDensity);
        //res.append(" " + 0);

        // label - for training data (1)
        /*if (label != null)
            res.append(" " + label + "\n");
        else
            res.append(" 0\n");
        */
        res.append("\n");

        return res.toString();
    }
}
7,287
28.99177
143
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeatureFactory.java
package org.grobid.core.features; import org.grobid.core.lexicon.Lexicon; import org.grobid.core.utilities.OffsetPosition; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.regex.Pattern; /** * Class providing a toolkit for managing and creating features or string sequence tagging problems. * */ public class FeatureFactory { private static FeatureFactory instance; public boolean newline = true; public Lexicon lexicon = Lexicon.getInstance(); public Pattern year = Pattern.compile("[1,2][0-9][0-9][0-9]"); public Pattern http = Pattern.compile("http(s)?"); public Pattern isDigit = Pattern.compile("^\\d+$"); public Pattern email2 = Pattern.compile("\\w+((\\.|\\-|_)\\w+)*@\\w+((\\.|\\-)\\w+)+"); public Pattern email = Pattern.compile("^(?:[a-zA-Z0-9_'^&amp;/+-])+(?:\\.(?:[a-zA-Z0-9_'^&amp;/+-])+)*@(?:(?:\\[?(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))\\.){3}(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\]?)|(?:[a-zA-Z0-9-]+\\.)+(?:[a-zA-Z]){2,}\\.?)$"); public Pattern acronym = Pattern.compile("[A-Z]\\.([A-Z]\\.)*"); public Pattern isPunct = Pattern.compile("^[\\,\\:;\\?\\.]+$"); static public List<String> KEYWORDSPUB = Arrays.asList( new String[]{ "Journal", "journal", "Proceedings", "proceedings", "Conference", "conference", "Workshop", "workshop", "Symposium", "symposium" }); static public List<String> MONTHS = new ArrayList<String>() {{ add("january"); add("february"); add("march"); add("april"); add("may"); add("june"); add("july"); add("august"); add("september"); add("october"); add("november"); add("december"); add("jan"); add("feb"); add("mar"); add("apr"); add("may"); add("jun"); add("jul"); add("aug"); add("sep"); add("oct"); add("nov"); add("dec"); }}; static public List<String> COUNTRY_CODES = new ArrayList<String>() {{ add("US"); add("EP"); add("WO"); add("DE"); add("AU"); add("GB"); add("DK"); add("BE"); add("AT"); add("CN"); add("KR"); add("EA"); add("CH"); add("JP"); add("FR"); add("UK"); add("RU"); add("CA"); 
add("NL"); add("DD"); add("SE"); add("FI"); add("MX"); add("OA"); add("AP"); add("AR"); add("BR"); add("BG"); add("CL"); add("GR"); add("HU"); add("IS"); add("IN"); add("IE"); add("IL"); add("IT"); add("LU"); add("NO"); add("NZ"); add("PL"); add("RU"); add("ES"); add("TW"); add("TR"); }}; static public List<String> KIND_CODES = new ArrayList<String>() {{ add("A"); add("B"); add("C"); add("U"); add("P"); }}; ; // hidden constructor private FeatureFactory() { } public static FeatureFactory getInstance() { if (instance == null) { synchronized (FeatureFactory.class) { if (instance == null) { instance = new FeatureFactory(); } } } return instance; } /** * Test if the first letter of the string is a capital letter */ public boolean test_first_capital(String tok) { if (tok == null) return false; if (tok.length() == 0) return false; char a = tok.charAt(0); if (Character.isUpperCase(a)) return true; else return false; } /** * Test if all the letters of the string are capital letters * (characters can be also digits which are then ignored) */ public boolean test_all_capital(String tok) { if (tok == null) return false; if (tok.length() == 0) return false; char a; for (int i = 0; i < tok.length(); i++) { a = tok.charAt(i); if (Character.isLowerCase(a)) return false; } return true; } /** * Test for a given character occurrence in the string */ public boolean test_char(String tok, char c) { if (tok == null) return false; if (tok.length() == 0) return false; int i = tok.indexOf(c); if (i == -1) return false; else return true; } /** * Test for the current string contains at least one digit */ public boolean test_digit(String tok) { if (tok == null) return false; if (tok.length() == 0) return false; char a; for (int i = 0; i < tok.length(); i++) { a = tok.charAt(i); if (Character.isDigit(a)) return true; } return false; } /** * Test for the current string contains only digit */ public boolean test_number(String tok) { if (tok == null) return false; if (tok.length() == 0) return 
false; char a; for (int i = 0; i < tok.length(); i++) { a = tok.charAt(i); if (!Character.isDigit(a)) return false; } return true; } /** * Test for the current string is a number or a decimal number, i.e. containing only digits or ",", "." */ public boolean test_complex_number(String tok) { if (tok == null) return false; if (tok.length() == 0) return false; char a; for (int i = 0; i < tok.length(); i++) { a = tok.charAt(i); if ((!Character.isDigit(a)) & (a != ',') & (a != '.')) return false; } return true; } /** * Test if the current string is a common name */ public boolean test_common(String tok) { if (tok == null) return false; else if (tok.length() == 0) return false; else return lexicon.inDictionary(tok.trim().toLowerCase()); } /** * Test if the current string is a first name or family name */ public boolean test_names(String tok) { return (lexicon.inFirstNames(tok.toLowerCase()) || lexicon.inLastNames(tok.toLowerCase())); } /** * Test if the current string is a family name */ public boolean test_first_names(String tok) { return lexicon.inFirstNames(tok.toLowerCase()); } /** * Test if the current string is a family name */ public boolean test_last_names(String tok) { return lexicon.inLastNames(tok.toLowerCase()); } /** * Test if the current string refers to a month */ public boolean test_month(String tok) { return MONTHS.contains(tok.toLowerCase()); } /** * Test if the current string refers to country code */ public boolean test_country_codes(String tok) { return COUNTRY_CODES.contains(tok); } /** * Test if the current string refers to a kind code */ public boolean test_kind_codes(String tok) { return KIND_CODES.contains(tok); } /** * Test if the current string refers to a country */ public boolean test_country(String tok) { return lexicon.isCountry(tok.toLowerCase()); } /** * Test if the current string refers to a known city */ public boolean test_city(String tok) { List<OffsetPosition> pos = lexicon.tokenPositionsCityNames(tok.toLowerCase()); if ((pos != 
null) && (pos.size() > 0)) return true; else return false; } /** * Given an integer value between 0 and total, discretized into nbBins following a linear scale */ public int linearScaling(int pos, int total, int nbBins) { if (pos >= total) return nbBins; if (pos <= 0) return 0; float rel = (float) pos / total; float rel2 = (rel * nbBins);// + 1; return ((int) rel2); } /** * Given an double value between 0.0 and total, discretized into nbBins following a linear scale */ public int linearScaling(double pos, double total, int nbBins) { if (pos >= total) return nbBins; if (pos <= 0) return 0; double rel = pos / total; double rel2 = (rel * nbBins);// + 1; return ((int) rel2); } /** * Given an double value between 0.0 and total, discretized into nbBins following a log scale */ public int logScaling(double pos, double total, int nbBins) { //System.out.println("total: " + total + " / pos: " + pos); if (pos >= total) return nbBins; if (pos <= 0) return 0; double max = Math.log(total + 1); double val = Math.log(pos + 1); //System.out.println("max: " + max + " / val: " + val); double rel = val / max; double rel2 = (rel * nbBins); return ((int) rel2); } /** * Transform a text in a text pattern where punctuations are ignored and * remaining text in lowercase */ public String getPattern(String text) { String pattern = text.replaceAll("[^a-zA-Z]", "").toLowerCase(); //pattern = pattern.replaceAll("[0-9]", "X"); return pattern; } }
9,929
26.057221
264
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorReference.java
package org.grobid.core.features;

import org.grobid.core.utilities.TextUtilities;

import java.util.StringTokenizer;
import java.util.regex.Matcher;

/**
 * Class for features used for reference identification in raw texts such as patent descriptions.
 * It covers references to scholar works and to patent publications.
 */
public class FeaturesVectorReference {
    // default bins for relative position, set experimentally
    static private int nbBins = 12;

    public String string = null; // lexical feature
    public String label = null; // label if known
    public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS
    public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT
    public boolean singleChar = false;

    public boolean properName = false;
    public boolean commonName = false;
    public boolean firstName = false;
    public boolean locationName = false;
    public boolean year = false;
    public boolean month = false;
    public boolean http = false;

    public String punctType = null;
    // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT,
    // COMMA, HYPHEN, QUOTE, PUNCT (default)
    // OPENQUOTE, ENDQUOTE

    public boolean isKnownJournalTitle = false;
    public boolean isKnownAbbrevJournalTitle = false;
    public boolean isKnownConferenceTitle = false;
    public boolean isKnownPublisher = false;
    public boolean isCountryCode = false;
    public boolean isKindCode = false;

    public int relativeDocumentPosition = -1;

    public FeaturesVectorReference() {
    }

    /**
     * Serializes this feature vector as a single space-separated line. The column order
     * is presumably a positional contract with the trained model — do not reorder
     * without retraining (TODO confirm against the feature template).
     *
     * @return the serialized vector (newline-terminated only when {@code label} is set),
     *         or null when {@code string} is null or empty
     */
    public String printVector() {
        if (string == null) return null;
        if (string.length() == 0) return null;
        StringBuffer res = new StringBuffer();

        // token string (1)
        res.append(string);

        // lowercase string
        res.append(" " + string.toLowerCase());

        // prefix (4)
        res.append(" " + TextUtilities.prefix(string, 1));
        res.append(" " + TextUtilities.prefix(string, 2));
        res.append(" " + TextUtilities.prefix(string, 3));
        res.append(" " + TextUtilities.prefix(string, 4));

        // suffix (4)
        res.append(" " + TextUtilities.suffix(string, 1));
        res.append(" " + TextUtilities.suffix(string, 2));
        res.append(" " + TextUtilities.suffix(string, 3));
        res.append(" " + TextUtilities.suffix(string, 4));

        // capitalisation (1) — all-digit tokens are forced to NOCAPS
        if (digit.equals("ALLDIGIT"))
            res.append(" NOCAPS");
        else
            res.append(" " + capitalisation);

        // digit information (1)
        res.append(" " + digit);

        // character information (1)
        if (singleChar)
            res.append(" 1");
        else
            res.append(" 0");

        // lexical information (7)
        if (properName)
            res.append(" 1");
        else
            res.append(" 0");

        if (commonName)
            res.append(" 1");
        else
            res.append(" 0");

        if (firstName)
            res.append(" 1");
        else
            res.append(" 0");

        if (locationName)
            res.append(" 1");
        else
            res.append(" 0");

        if (year)
            res.append(" 1");
        else
            res.append(" 0");

        if (month)
            res.append(" 1");
        else
            res.append(" 0");

        if (http)
            res.append(" 1");
        else
            res.append(" 0");

        // bibliographical information (5) — original comment said (4) but five flags
        // are emitted; journal and abbreviated-journal share the first column
        if (isKnownJournalTitle || isKnownAbbrevJournalTitle)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownConferenceTitle)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKnownPublisher)
            res.append(" 1");
        else
            res.append(" 0");

        if (isCountryCode)
            res.append(" 1");
        else
            res.append(" 0");

        if (isKindCode)
            res.append(" 1");
        else
            res.append(" 0");

        // punctuation information (1)
        res.append(" " + punctType); // in case the token is a punctuation (NO otherwise)

        // token length
        res.append(" " + string.length());

        // relative document position
        res.append(" " + relativeDocumentPosition);

        // label - for training data (1)
        if (label != null)
            res.append(" " + label + "\n");
        //else
        //    res.append(" 0\n");

        return res.toString();
    }

    /**
     * Add the features for the patent reference extraction model.
     *
     * @param line tab-separated input: token, optionally followed by its label
     * @param totalLength total token count of the document, for position scaling
     * @param position position of this token in the document
     * @param isJournalToken true when the token belongs to a known journal title
     * @param isAbbrevJournalToken true when it belongs to a known abbreviated journal title
     * @param isConferenceToken true when it belongs to a known conference title
     * @param isPublisherToken true when it belongs to a known publisher name
     * @return the populated feature vector (fields stay at their defaults when the
     *         input line contains no token)
     */
    public static FeaturesVectorReference addFeaturesPatentReferences(String line,
                                                                      int totalLength,
                                                                      int position,
                                                                      boolean isJournalToken,
                                                                      boolean isAbbrevJournalToken,
                                                                      boolean isConferenceToken,
                                                                      boolean isPublisherToken) {
        FeatureFactory featureFactory = FeatureFactory.getInstance();

        FeaturesVectorReference featuresVector = new FeaturesVectorReference();
        StringTokenizer st = new StringTokenizer(line, "\t");
        if (st.hasMoreTokens()) {
            String word = st.nextToken();

            String label = null;
            if (st.hasMoreTokens())
                label = st.nextToken();

            featuresVector.string = word;
            featuresVector.label = label;

            if (word.length() == 1) {
                featuresVector.singleChar = true;
            }

            if (featureFactory.test_all_capital(word))
                featuresVector.capitalisation = "ALLCAPS";
            else if (featureFactory.test_first_capital(word))
                featuresVector.capitalisation = "INITCAP";
            else
                featuresVector.capitalisation = "NOCAPS";

            if (featureFactory.test_number(word))
                featuresVector.digit = "ALLDIGIT";
            else if (featureFactory.test_digit(word))
                featuresVector.digit = "CONTAINDIGIT";
            else
                featuresVector.digit = "NODIGIT";

            if (featureFactory.test_common(word))
                featuresVector.commonName = true;

            if (featureFactory.test_names(word))
                featuresVector.properName = true;

            if (featureFactory.test_month(word))
                featuresVector.month = true;

            // punctuation classification: specific types below override the generic PUNCT
            Matcher m0 = featureFactory.isPunct.matcher(word);
            if (m0.find()) {
                featuresVector.punctType = "PUNCT";
            }
            if ((word.equals("(")) | (word.equals("["))) {
                featuresVector.punctType = "OPENBRACKET";
            } else if ((word.equals(")")) | (word.equals("]"))) {
                featuresVector.punctType = "ENDBRACKET";
            } else if (word.equals(".")) {
                featuresVector.punctType = "DOT";
            } else if (word.equals(",")) {
                featuresVector.punctType = "COMMA";
            } else if (word.equals("-")) {
                featuresVector.punctType = "HYPHEN";
            } else if (word.equals("\"") | word.equals("\'") | word.equals("`")) {
                featuresVector.punctType = "QUOTE";
            }

            Matcher m2 = featureFactory.year.matcher(word);
            if (m2.find()) {
                featuresVector.year = true;
            }

            Matcher m4 = featureFactory.http.matcher(word);
            if (m4.find()) {
                featuresVector.http = true;
            }

            if (featureFactory.test_city(word)) {
                featuresVector.locationName = true;
            }

            // defaults for anything left unset above
            if (featuresVector.capitalisation == null)
                featuresVector.capitalisation = "NOCAPS";

            if (featuresVector.digit == null)
                featuresVector.digit = "NODIGIT";

            if (featuresVector.punctType == null)
                featuresVector.punctType = "NOPUNCT";

            if (featureFactory.test_country_codes(word))
                featuresVector.isCountryCode = true;

            if (featureFactory.test_kind_codes(word))
                featuresVector.isKindCode = true;

            featuresVector.relativeDocumentPosition =
                    featureFactory.linearScaling(position, totalLength, nbBins);

            if (isJournalToken) {
                featuresVector.isKnownJournalTitle = true;
            }
            if (isAbbrevJournalToken) {
                featuresVector.isKnownAbbrevJournalTitle = true;
            }
            if (isConferenceToken) {
                featuresVector.isKnownConferenceTitle = true;
            }
            if (isPublisherToken) {
                featuresVector.isKnownPublisher = true;
            }
        }

        return featuresVector;
    }
}
9,123
30.570934
99
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorFigure.java
package org.grobid.core.features;

import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.TextUtilities;

/**
 * Class for features used for figure/table parsing.
 *
 * Each instance holds the feature values for one token; {@link #printVector()} serializes
 * them as one space-separated line for the sequence-labeling engine.
 */
public class FeaturesVectorFigure {
    public LayoutToken token = null; // not a feature, reference value
    public String string = null; // lexical feature
    public String label = null; // label if known
    public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND
    public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND
    public String fontStatus = null; // one of NEWFONT, SAMEFONT
    public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT
    public String pageStatus = null; // one of PAGESTART, PAGEIN, PAGEEND
    public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENT, CENTERED
    public boolean bold = false;
    public boolean italic = false;
    public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS
    public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT
    public boolean singleChar = false;
    public boolean properName = false;
    public boolean commonName = false;
    public boolean firstName = false;
    public boolean locationName = false; // NOTE(review): set but never emitted by printVector()
    public boolean year = false;
    public boolean month = false;
    public boolean email = false;
    public boolean http = false;
    //public boolean acronym = false;
    public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default)
    public int relativeDocumentPosition = -1;
    public int relativePagePosition = -1;

    public boolean bitmapAround = false;
    public boolean vectorAround = false;

    // characteristics of the closest graphic, when present (not serialized below)
    public int closestGraphicHeight = -1;
    public int closestGraphicWidth = -1;
    public int closestGraphicSurface = -1;

    /**
     * Serializes this feature vector as a single newline-terminated, space-separated line.
     * Column order is presumably consumed positionally by the trained figure/table model —
     * do not reorder without retraining (TODO confirm against the feature template).
     *
     * @return the serialized vector, or null when {@code string} is null or empty
     */
    public String printVector() {
        if (string == null) return null;
        if (string.length() == 0) return null;
        StringBuffer res = new StringBuffer();

        // token string (1)
        res.append(string);

        // lowercase string
        res.append(" " + string.toLowerCase());

        // prefix (4)
        res.append(" " + TextUtilities.prefix(string, 1));
        res.append(" " + TextUtilities.prefix(string, 2));
        res.append(" " + TextUtilities.prefix(string, 3));
        res.append(" " + TextUtilities.prefix(string, 4));

        // suffix (4)
        res.append(" " + TextUtilities.suffix(string, 1));
        res.append(" " + TextUtilities.suffix(string, 2));
        res.append(" " + TextUtilities.suffix(string, 3));
        res.append(" " + TextUtilities.suffix(string, 4));

        // block information (1)
        res.append(" " + blockStatus);
        //res.append(" 0");

        // line information (1)
        res.append(" " + lineStatus);

        // page information (1)
        res.append(" " + pageStatus);

        // alignmet/horizontal position information (1)
        //res.append(" " + alignmentStatus);

        // font information (1)
        res.append(" " + fontStatus);

        // font size information (1)
        res.append(" " + fontSize);

        // string type information (3)
        if (bold)
            res.append(" 1");
        else
            res.append(" 0");

        if (italic)
            res.append(" 1");
        else
            res.append(" 0");

        // capitalisation (1) — all-digit tokens are forced to NOCAPS
        if (digit.equals("ALLDIGIT"))
            res.append(" NOCAPS");
        else
            res.append(" " + capitalisation);

        // digit information (1)
        res.append(" " + digit);

        // character information (1)
        if (singleChar)
            res.append(" 1");
        else
            res.append(" 0");

        // lexical information — NOTE(review): original comment said (9) but only 7 flags
        // are emitted (locationName is never appended); verify against the template
        if (properName)
            res.append(" 1");
        else
            res.append(" 0");

        if (commonName)
            res.append(" 1");
        else
            res.append(" 0");

        if (firstName)
            res.append(" 1");
        else
            res.append(" 0");

        if (year)
            res.append(" 1");
        else
            res.append(" 0");

        if (month)
            res.append(" 1");
        else
            res.append(" 0");

        if (email)
            res.append(" 1");
        else
            res.append(" 0");

        if (http)
            res.append(" 1");
        else
            res.append(" 0");

        // punctuation information (1) — original comment said (2)
        res.append(" " + punctType); // in case the token is a punctuation (NO otherwise)

        // relative document position (1)
        res.append(" " + relativeDocumentPosition);

        // relative page position (1)
        res.append(" " + relativePagePosition);

        if (bitmapAround)
            res.append(" 1");
        else
            res.append(" 0");

        if (vectorAround)
            res.append(" 1");
        else
            res.append(" 0");

        // label - for training data (1)
        /*if (label != null)
            res.append(" " + label + "\n");
        else
            res.append(" 0\n");
        */
        res.append("\n");

        return res.toString();
    }
}
5,224
27.708791
90
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/features/FeaturesVectorFulltext.java
package org.grobid.core.features;

import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.TextUtilities;

/**
 * Class for features used for fulltext parsing.
 *
 * Each instance holds the feature values for one token; {@link #printVector()} serializes
 * them as one space-separated line for the sequence-labeling engine.
 */
public class FeaturesVectorFulltext {
    public LayoutToken token = null; // not a feature, reference value
    public String string = null; // lexical feature
    public String label = null; // label if known
    public String blockStatus = null; // one of BLOCKSTART, BLOCKIN, BLOCKEND
    public String lineStatus = null; // one of LINESTART, LINEIN, LINEEND
    public String fontStatus = null; // one of NEWFONT, SAMEFONT
    public String fontSize = null; // one of HIGHERFONT, SAMEFONTSIZE, LOWERFONT
    public String alignmentStatus = null; // one of ALIGNEDLEFT, INDENTED, CENTERED - applied to the whole line
    public boolean bold = false;
    public boolean italic = false;
    public String capitalisation = null; // one of INITCAP, ALLCAPS, NOCAPS
    public String digit; // one of ALLDIGIT, CONTAINDIGIT, NODIGIT
    public boolean singleChar = false;
    public String punctType = null; // one of NOPUNCT, OPENBRACKET, ENDBRACKET, DOT, COMMA, HYPHEN, QUOTE, PUNCT (default)
    public int relativeDocumentPosition = -1;
    public int relativePagePositionChar = -1;
    public int relativePagePosition = -1;

    // graphic in closed proximity of the current block
    public boolean bitmapAround = false;
    public boolean vectorAround = false;

    // if a graphic is in close proximity of the current block, characteristics of this graphic
    public int closestGraphicHeight = -1;
    public int closestGraphicWidth = -1;
    public int closestGraphicSurface = -1;

    public int spacingWithPreviousBlock = 0; // discretized
    public int characterDensity = 0; // discretized

    // how the reference callouts are expressed, if known
    public String calloutType = null; // one of UNKNOWN, NUMBER, AUTHOR
    public boolean calloutKnown = false; // true if the token match a known reference label
    public boolean superscript = false;

    /**
     * Serializes this feature vector as a single newline-terminated, space-separated line.
     * Column order is presumably consumed positionally by the trained fulltext model —
     * do not reorder without retraining (TODO confirm against the feature template).
     *
     * @return the serialized vector, or null when {@code string} is null or empty
     */
    public String printVector() {
        if (string == null) return null;
        if (string.length() == 0) return null;
        StringBuffer res = new StringBuffer();

        // token string (1)
        res.append(string);

        // lowercase string
        res.append(" " + string.toLowerCase());

        // prefix (4)
        res.append(" " + TextUtilities.prefix(string, 1));
        res.append(" " + TextUtilities.prefix(string, 2));
        res.append(" " + TextUtilities.prefix(string, 3));
        res.append(" " + TextUtilities.prefix(string, 4));

        // suffix (4)
        res.append(" " + TextUtilities.suffix(string, 1));
        res.append(" " + TextUtilities.suffix(string, 2));
        res.append(" " + TextUtilities.suffix(string, 3));
        res.append(" " + TextUtilities.suffix(string, 4));

        // at this stage, we have written 10 features

        // block information (1)
        res.append(" " + blockStatus);

        // line information (1)
        res.append(" " + lineStatus);

        // line position/identation (1)
        res.append(" " + alignmentStatus);

        // font information (1)
        res.append(" " + fontStatus);

        // font size information (1)
        res.append(" " + fontSize);

        // string type information (3)
        if (bold)
            res.append(" 1");
        else
            res.append(" 0");

        if (italic)
            res.append(" 1");
        else
            res.append(" 0");

        // capitalisation (1) — all-digit tokens are forced to NOCAPS
        if (digit.equals("ALLDIGIT"))
            res.append(" NOCAPS");
        else
            res.append(" " + capitalisation);

        // digit information (1)
        res.append(" " + digit);

        // character information (1)
        if (singleChar)
            res.append(" 1");
        else
            res.append(" 0");

        // at this stage, we have written 20 features

        // punctuation information (1)
        res.append(" " + punctType); // in case the token is a punctuation (NO otherwise)

        // relative document position (1)
        res.append(" " + relativeDocumentPosition);

        // relative page position (1)
        res.append(" " + relativePagePosition);

        // proximity of a graphic to the current block (2)
        if (bitmapAround)
            res.append(" 1");
        else
            res.append(" 0");

        /*if (vectorAround)
            res.append(" 1");
        else
            res.append(" 0");*/

        // space with previous block, discretised (1)
        //res.append(" " + spacingWithPreviousBlock);
        //res.append(" " + 0);

        // character density of the previous block, discretised (1)
        //res.append(" " + characterDensity);
        //res.append(" " + 0);

        // label - for training data (1)
        /*if (label != null)
            res.append(" " + label + "\n");
        else
            res.append(" 0\n");
        */

        // reference callout information (3)
        if (calloutType != null)
            res.append(" " + calloutType);
        else
            res.append(" UNKNOWN");

        if (calloutKnown)
            res.append(" 1");
        else
            res.append(" 0");

        if (superscript)
            res.append(" 1");
        else
            res.append(" 0");

        res.append("\n");

        return res.toString();
    }
}
5,369
30.22093
111
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/Language.java
package org.grobid.core.lang;

import org.grobid.core.exceptions.GrobidException;

/**
 * Language recognition result: a two- or three-letter language id (plus the special
 * ids "sorb", "zh-cn" and "zh-tw") together with a confidence score.
 */
public final class Language {
    //common language constants (TBD use an external ISO_639-1 reference lib.)
    public static final String EN = "en";
    public static final String DE = "de";
    public static final String FR = "fr";
    public static final String IT = "it";
    public static final String ES = "es";
    public static final String JA = "ja";
    public static final String AR = "ar";
    public static final String ZH = "zh";
    public static final String RU = "ru";
    public static final String PT = "pt";
    public static final String UK = "uk";
    // NOTE(review): named LN but holds "nl" (Dutch) — kept as-is for API compatibility
    public static final String LN = "nl";
    public static final String PL = "pl";
    public static final String SV = "sv";
    public static final String KR = "kr";

    private String lang;
    private double conf;

    // default construction for jackson mapping
    public Language() {}

    /**
     * Creates a language result with confidence 1.0.
     *
     * @param langId language id to validate and store
     * @throws GrobidException if langId is null or not a valid id
     */
    public Language(String langId) {
        // delegate to the full constructor; validation was previously duplicated here
        this(langId, 1.0);
    }

    /**
     * Creates a language result with an explicit confidence.
     *
     * @param langId language id to validate and store
     * @param confidence detection confidence (stored as-is, not range-checked)
     * @throws GrobidException if langId is null or not a valid id
     */
    public Language(String langId, double confidence) {
        validateLangId(langId);
        this.lang = langId;
        this.conf = confidence;
    }

    /**
     * Shared constructor validation (previously duplicated verbatim in both constructors).
     * Accepts 2- or 3-letter ids plus "sorb", "zh-cn", "zh-tw"; the first two characters
     * must be letters.
     *
     * @throws GrobidException on null or malformed id
     */
    private static void validateLangId(String langId) {
        if (langId == null) {
            throw new GrobidException("Language id cannot be null");
        }

        if ((langId.length() != 3 && langId.length() != 2
                && (!langId.equals("sorb")) && (!langId.equals("zh-cn")) && (!langId.equals("zh-tw")))
                || !(Character.isLetter(langId.charAt(0)) && Character.isLetter(langId.charAt(1)))) {
            throw new GrobidException("Language id should consist of two or three letters, but was: " + langId);
        }
    }

    public boolean isChinese() {
        return "zh".equals(lang) || "zh-cn".equals(lang) || "zh-tw".equals(lang);
    }

    // NOTE(review): method-name typo ("isJapaneses") kept for backward compatibility
    public boolean isJapaneses() {
        return "ja".equals(lang);
    }

    public boolean isKorean() {
        return "kr".equals(lang);
    }

    public boolean isArabic() {
        return "ar".equals(lang);
    }

    public String getLang() {
        return lang;
    }

    // setter performs no validation — required for jackson mapping
    public void setLang(String lang) {
        this.lang = lang;
    }

    @SuppressWarnings({"UnusedDeclaration"})
    public double getConf() {
        return conf;
    }

    public void setConf(double conf) {
        this.conf = conf;
    }

    @Override
    public String toString() {
        return lang + ";" + conf;
    }

    public String toJSON() {
        return "{\"lang\":\""+lang+"\", \"conf\": "+conf+"}";
    }
}
3,004
27.894231
112
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/LanguageDetector.java
package org.grobid.core.lang;

/**
 * Contract for language identification components.
 */
public interface LanguageDetector {

    /**
     * Detects the language of the given text. The returned identifier is a
     * two-letter code together with a confidence coefficient; when no
     * confidence can be provided for some reason, implementations report 1.0.
     *
     * @param text text to detect a language from
     * @return the detected language with its confidence coefficient
     */
    Language detect(String text);
}
481
31.133333
100
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/SentenceDetectorFactory.java
package org.grobid.core.lang;

/**
 * Produces {@link SentenceDetector} instances.
 */
public interface SentenceDetectorFactory {

    /**
     * @return a sentence detector instance
     */
    SentenceDetector getInstance();
}
162
17.111111
42
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/LanguageDetectorFactory.java
package org.grobid.core.lang;

/**
 * Produces {@link LanguageDetector} instances.
 */
public interface LanguageDetectorFactory {

    /**
     * @return a language detector instance
     */
    LanguageDetector getInstance();
}
162
17.111111
42
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/SentenceDetector.java
package org.grobid.core.lang;

import org.grobid.core.utilities.OffsetPosition;

import java.util.List;

/**
 * Contract for sentence boundary detection components.
 */
public interface SentenceDetector {

    /**
     * Detects sentence boundaries in a text.
     *
     * @param text text to segment into sentences
     * @return start/end character positions of each recognized sentence in the text
     */
    List<OffsetPosition> detect(String text);

    /**
     * Detects sentence boundaries using the specified language.
     *
     * @param text text to segment into sentences
     * @param lang language driving the sentence boundary rules
     * @return start/end character positions of each recognized sentence in the text
     */
    List<OffsetPosition> detect(String text, Language lang);
}
919
30.724138
77
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/impl/OpenNLPSentenceDetectorFactory.java
package org.grobid.core.lang.impl;

import org.grobid.core.lang.SentenceDetector;
import org.grobid.core.lang.SentenceDetectorFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;

/**
 * Factory producing a single shared OpenNLP-based sentence detector.
 */
public class OpenNLPSentenceDetectorFactory implements SentenceDetectorFactory {
    private static final Logger LOGGER = LoggerFactory.getLogger(OpenNLPSentenceDetectorFactory.class);

    private static volatile SentenceDetector instance = null;

    /**
     * Returns the shared sentence detector, creating it lazily on first use.
     */
    public SentenceDetector getInstance() {
        if (instance == null) {
            // Lock on the class object: the guarded field is static, so the
            // previous instance-level lock ("this") would let two factory
            // objects race and create two detectors.
            synchronized (OpenNLPSentenceDetectorFactory.class) {
                if (instance == null) {
                    LOGGER.debug("synchronized getNewInstance");
                    instance = new OpenNLPSentenceDetector();
                }
            }
        }
        return instance;
    }
}
906
29.233333
103
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/impl/PragmaticSentenceDetectorFactory.java
package org.grobid.core.lang.impl;

import org.grobid.core.lang.SentenceDetector;
import org.grobid.core.lang.SentenceDetectorFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;

/**
 * Factory producing a single shared Pragmatic Segmenter based sentence detector.
 */
public class PragmaticSentenceDetectorFactory implements SentenceDetectorFactory {
    private static final Logger LOGGER = LoggerFactory.getLogger(PragmaticSentenceDetectorFactory.class);

    private static volatile SentenceDetector instance = null;

    /**
     * Returns the shared sentence detector, creating it lazily on first use.
     */
    public SentenceDetector getInstance() {
        if (instance == null) {
            // Lock on the class object: the guarded field is static, so the
            // previous instance-level lock ("this") would let two factory
            // objects race and create two detectors.
            synchronized (PragmaticSentenceDetectorFactory.class) {
                if (instance == null) {
                    LOGGER.debug("synchronized getNewInstance");
                    instance = new PragmaticSentenceDetector();
                }
            }
        }
        return instance;
    }
}
912
29.433333
105
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/impl/CybozuLanguageDetector.java
package org.grobid.core.lang.impl;

import com.cybozu.labs.langdetect.Detector;
import com.cybozu.labs.langdetect.DetectorFactory;
import com.cybozu.labs.langdetect.LangDetectException;
import org.grobid.core.lang.Language;
import org.grobid.core.lang.LanguageDetector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;

/**
 * Language detector backed by the Cybozu langdetect library.
 */
public class CybozuLanguageDetector implements LanguageDetector {
    private static final Logger LOGGER = LoggerFactory.getLogger(CybozuLanguageDetector.class);

    /**
     * Detects the most probable language of the given text.
     *
     * @param text text to analyze
     * @return the first candidate language with its probability, or null when
     *         no candidate is produced or the detector fails
     */
    @Override
    public Language detect(String text) {
        try {
            Detector detector = DetectorFactory.create();
            detector.append(text);
            ArrayList<com.cybozu.labs.langdetect.Language> probabilities = detector.getProbabilities();
            if (probabilities == null || probabilities.isEmpty()) {
                return null;
            }
            LOGGER.debug("{}", probabilities);
            // take the first candidate — assumed to be the most probable one (TODO confirm against langdetect docs)
            com.cybozu.labs.langdetect.Language best = probabilities.get(0);
            return new Language(best.lang, best.prob);
        } catch (LangDetectException e) {
            // Pass the exception object to the logger so the stack trace is
            // preserved instead of being flattened to getMessage() only.
            LOGGER.warn("Cannot detect language", e);
            return null;
        }
    }
}
1,292
33.945946
112
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/impl/PragmaticSentenceDetector.java
package org.grobid.core.lang.impl;

import org.jruby.embed.PathType;
import org.jruby.embed.ScriptingContainer;
import org.jruby.embed.LocalContextScope;
import org.jruby.embed.LocalVariableBehavior;

import org.grobid.core.lang.SentenceDetector;
import org.grobid.core.lang.Language;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.GrobidProperties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.io.*;

/**
 * Sentence segmentation via the Pragmatic Segmenter, a Ruby library executed
 * through an embedded JRuby scripting container. The Ruby script returns the
 * sentences as strings; this class maps them back to character offsets in the
 * original text, with heuristic recovery when the segmenter has altered the
 * sentence strings (e.g. removed whitespace).
 */
public class PragmaticSentenceDetector implements SentenceDetector {
    private static final Logger LOGGER = LoggerFactory.getLogger(PragmaticSentenceDetector.class);

    // Embedded JRuby runtime hosting the pragmatic_segmenter script, reused across calls
    private ScriptingContainer instance = null;

    /**
     * Loads the pragmatic_segmenter Ruby script located under GROBID_HOME into
     * a JRuby scripting container.
     */
    public PragmaticSentenceDetector() {
        String segmenterRbFile = GrobidProperties.getGrobidHomePath() + File.separator + "sentence-segmentation" +
            File.separator + "pragmatic_segmenter" + File.separator + "segmenter.rb";
        String segmenterLoadPath = GrobidProperties.getGrobidHomePath() + File.separator + "sentence-segmentation";
        String unicodeLoadPath = GrobidProperties.getGrobidHomePath() + File.separator + "sentence-segmentation" +
            File.separator + "pragmatic_segmenter" + File.separator + "lib";

        List<String> loadPaths = new ArrayList();
        loadPaths.add(segmenterLoadPath);
        loadPaths.add(unicodeLoadPath);

        instance = new ScriptingContainer(LocalContextScope.CONCURRENT, LocalVariableBehavior.PERSISTENT);
        instance.setClassLoader(instance.getClass().getClassLoader());
        instance.setLoadPaths(loadPaths);
        // evaluate the segmenter script once so later scriptlets can use PragmaticSegmenter
        instance.runScriptlet(PathType.ABSOLUTE, segmenterRbFile);
    }

    @Override
    public List<OffsetPosition> detect(String text) {
        // default to English segmentation rules
        return detect(text, new Language(Language.EN));
    }

    @Override
    public List<OffsetPosition> detect(String text, Language lang) {
        instance.put("text", text);
        String script = null;
        if (lang == null || "en".equals(lang.getLang()))
            script = "ps = PragmaticSegmenter::Segmenter.new(text: text, clean: false)\nps.segment";
        else
            script = "ps = PragmaticSegmenter::Segmenter.new(text: text, language: '" + lang.getLang() + "', clean: false)\nps.segment";
        Object ret = instance.runScriptlet(script);

        // build offset positions from the string chunks returned by the Ruby side
        List<OffsetPosition> result = new ArrayList<>();
        int pos = 0;           // search position in the original text
        int previousEnd = 0;   // end offset of the previously emitted sentence
        // set when the sentence string returned by the segmenter does not match the
        // original text and we had to approximate its position heuristically
        boolean recovered = false;
        List<String> retList = (List<String>) ret;
        for (int i = 0; i < retList.size(); i++) {
            String chunk = retList.get(i);
            recovered = false;
            int start = text.indexOf(chunk, pos);
            if (start == -1) {
                LOGGER.warn("Extracted sentence does not match orginal text - " + chunk);
                // The Pragmatic Segmenter can modify the sentence strings it returns
                // (typically removed whitespace, or duplicated fragments on noisy input),
                // so the sentence cannot always be located verbatim. Recovery heuristics:
                // 1) re-anchor the search at the previous sentence end; if the current
                //    sentence is found there, the previous sentence was made too long by
                //    the segmenter and its end offset is corrected;
                // 2) otherwise approximate the start right after the previous sentence.
                if (previousEnd != pos) {
                    // previous sentence was "recovered": its end offset is unreliable
                    start = text.indexOf(chunk, previousEnd);
                    if (start != -1) {
                        // current sentence matches before the previous recorded end:
                        // shrink the previous sentence accordingly
                        if (result.size() > 0) {
                            int newPreviousEnd = start;
                            while (newPreviousEnd >= 1 && text.charAt(newPreviousEnd-1) == ' ') {
                                newPreviousEnd--;
                                if (start - newPreviousEnd > 10) {
                                    // safety break to avoid walking too far back
                                    newPreviousEnd = start;
                                    // but look back one character to cover the general case
                                    if (newPreviousEnd >= 1 && text.charAt(newPreviousEnd-1) == ' ') {
                                        newPreviousEnd--;
                                    }
                                }
                            }
                            result.get(result.size()-1).end = newPreviousEnd;
                        }
                    }
                }
                // still no match: the sentence string really diverged from the original
                // text; approximate its start from the previous end, skipping spaces
                if (start == -1) {
                    start = previousEnd;
                    while (text.charAt(start) == ' ') {
                        start++;
                        if (start - previousEnd > 10) {
                            // safety break to avoid going too far
                            start = previousEnd+1;
                        }
                    }
                    recovered = true;
                }
            }
            int end = start + chunk.length();
            // the segmenter may also have lengthened the last sentence: clamp to the text
            if (end > text.length() && i == retList.size()-1)
                end = text.length();
            result.add(new OffsetPosition(start, end));
            pos = start + chunk.length();
            if (recovered)
                previousEnd += 1;
            else
                previousEnd = pos;
        }
        return result;
    }
}
9,514
59.99359
1,511
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/impl/CybozuLanguageDetectorFactory.java
package org.grobid.core.lang.impl;

import com.cybozu.labs.langdetect.DetectorFactory;
import com.cybozu.labs.langdetect.LangDetectException;
import org.grobid.core.lang.LanguageDetector;
import org.grobid.core.lang.LanguageDetectorFactory;
import org.grobid.core.utilities.GrobidProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;

/**
 * Factory producing a single shared Cybozu-based language detector.
 */
public class CybozuLanguageDetectorFactory implements LanguageDetectorFactory {
    private static final Logger LOGGER = LoggerFactory.getLogger(CybozuLanguageDetectorFactory.class);

    private static volatile LanguageDetector instance = null;

    /**
     * Loads the Cybozu language profiles. DetectorFactory.loadProfile is a
     * static call, so this must run exactly once before the detector is used.
     *
     * @throws IllegalStateException if the profile directory is missing or unreadable
     */
    private static void init() {
        File profilePath = new File(GrobidProperties.getLanguageDetectionResourcePath(), "cybozu/profiles").getAbsoluteFile();
        if (!profilePath.exists() || !profilePath.isDirectory()) {
            throw new IllegalStateException("Profiles path for cybozu language detection does not exist or not a directory: " + profilePath);
        }
        try {
            DetectorFactory.loadProfile(profilePath);
        } catch (LangDetectException e) {
            throw new IllegalStateException("Cannot read profiles for cybozu language detection from: " + profilePath, e);
        }
    }

    /**
     * Returns the shared detector, lazily loading the profiles on first use.
     */
    public LanguageDetector getInstance() {
        if (instance == null) {
            // Lock on the class object: the guarded field is static, so the
            // previous instance-level lock ("this") would let two factory
            // objects race and run init()/construction twice.
            synchronized (CybozuLanguageDetectorFactory.class) {
                if (instance == null) {
                    init();
                    LOGGER.debug("synchronized getNewInstance");
                    instance = new CybozuLanguageDetector();
                }
            }
        }
        return instance;
    }
}
1,717
34.791667
141
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/lang/impl/OpenNLPSentenceDetector.java
package org.grobid.core.lang.impl;

import opennlp.tools.sentdetect.SentenceDetectorME;
import opennlp.tools.sentdetect.SentenceModel;
import opennlp.tools.util.Span;
import org.grobid.core.lang.SentenceDetector;
import org.grobid.core.lang.Language;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.GrobidProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.io.*;

/**
 * Sentence segmentation based on OpenNLP.
 */
public class OpenNLPSentenceDetector implements SentenceDetector {
    private static final Logger LOGGER = LoggerFactory.getLogger(OpenNLPSentenceDetector.class);

    // Shared sentence model; per the OpenNLP usage here, only the model (not the
    // detector) can be shared across threads
    private SentenceModel model = null;

    /**
     * Loads the English sentence model from GROBID_HOME. On failure the model
     * stays null and a warning is logged.
     */
    public OpenNLPSentenceDetector() {
        String openNLPModelFile = GrobidProperties.getGrobidHomePath() + File.separator + "sentence-segmentation" +
            File.separator + "openNLP" + File.separator + "en-sent.bin";
        try (InputStream inputStream = new FileInputStream(openNLPModelFile)) {
            model = new SentenceModel(inputStream);
        } catch (IOException e) {
            LOGGER.warn("Problem when loading the sentence segmenter", e);
        }
    }

    @Override
    public List<OffsetPosition> detect(String text) {
        return detect(text, new Language(Language.EN));
    }

    /**
     * Detects sentence boundaries. Note: only the English model is loaded, so
     * the lang parameter is currently not used to select a model.
     */
    @Override
    public List<OffsetPosition> detect(String text, Language lang) {
        // OpenNLP's SentenceDetectorME is not thread safe: create a fresh one
        // per call, sharing only the model.
        SentenceDetectorME detector = new SentenceDetectorME(model);
        Span[] spans = detector.sentPosDetect(text);

        // convert each Span to an OffsetPosition
        List<OffsetPosition> result = new ArrayList<>(spans.length);
        for (Span span : spans) {
            result.add(new OffsetPosition(span.getStart(), span.getEnd()));
        }
        return result;
    }
}
2,045
33.1
115
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/CountryCodeSaxParser.java
package org.grobid.core.sax;

import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

import java.util.Map;
import java.util.Set;

/**
 * SAX parser for the XML description of country codes in ISO 3166.
 * Fills a map from lower-cased country name to its two-letter code, and a set
 * of the country names encountered.
 */
public class CountryCodeSaxParser extends DefaultHandler {

    // StringBuilder: SAX parsing is single-threaded, StringBuffer locking is unnecessary
    private StringBuilder accumulator = new StringBuilder(); // Accumulate parsed text

    private String code = null;
    private String country = null;

    // Raw collection types kept for backward compatibility with existing callers.
    private Map countryCodes = null;
    private Set countries = null;

    // flags telling what the current <cell> holds, set from its "role" attribute
    private boolean isCode = false;
    private boolean isName = false;

    public CountryCodeSaxParser() {
    }

    public CountryCodeSaxParser(Map cc, Set co) {
        countryCodes = cc;
        countries = co;
    }

    @Override
    public void characters(char[] buffer, int start, int length) {
        accumulator.append(buffer, start, length);
    }

    public String getText() {
        return accumulator.toString().trim();
    }

    @Override
    @SuppressWarnings("unchecked")
    public void endElement(String uri, String localName, String qName) throws SAXException {
        if (qName.equals("row")) {
            // new row: reset the state of the previous country entry
            code = null;
            country = null;
            isCode = false;
            isName = false;
        } else if (qName.equals("cell")) {
            if (isCode) {
                code = getText();
            } else if (isName) {
                country = getText();
                if (country != null) {
                    country = country.toLowerCase();
                }
                countryCodes.put(country, code);
                // Set.add is a no-op on duplicates: no contains() check needed
                countries.add(country);
            }
        }
        accumulator.setLength(0);
    }

    @Override
    public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException {
        if (qName.equals("cell")) {
            int length = atts.getLength();
            // Process each attribute, looking for the "role" of this cell
            for (int i = 0; i < length; i++) {
                String name = atts.getQName(i);
                String value = atts.getValue(i);
                if (name != null && name.equals("role")) {
                    if (value.equals("a2code")) {
                        isCode = true;
                        isName = false;
                    } else if (value.equals("name") || value.equals("nameAlt")) {
                        // was a non-short-circuit "|": fixed to the conventional "||"
                        isCode = false;
                        isName = true;
                    } else if (value.equals("a3code")) {
                        // three-letter codes are ignored
                        isCode = false;
                        isName = false;
                    }
                }
            }
        }
        accumulator.setLength(0);
    }
}
3,031
29.019802
88
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/CrossrefSaxParser.java
package org.grobid.core.sax;

import org.grobid.core.data.BiblioItem;

import java.io.*;
import org.xml.sax.*;
import org.xml.sax.helpers.*;

/**
 * SAX parser for XML crossref DOI metadata descriptions.
 * See http://www.crossref.org/openurl_info.html
 *
 * -> This is not used anymore, we use the JSON REST API from
 * CrossRef or from biblio-glutton, which is a good thing too.
 */
public class CrossrefSaxParser extends DefaultHandler {

    private BiblioItem biblio;
    private String author = null;

    private CharArrayWriter text = new CharArrayWriter();

    public CrossrefSaxParser() {
    }

    public CrossrefSaxParser(BiblioItem b) {
        biblio = b;
    }

    /**
     * NOTE(review): this keeps only the latest character chunk — SAX may split
     * element text into several characters() calls, so long values could be
     * truncated. Kept as-is since this parser is no longer used.
     */
    @Override
    public void characters(char[] ch, int start, int length) {
        text.reset();
        text.write(ch, start, length);
    }

    /** @return the trimmed content of the current element (debug println removed). */
    public String getText() {
        return text.toString().trim();
    }

    @Override
    public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName)
            throws SAXException {
        // debug println of qName removed
        if (qName.equals("article_title")) {
            biblio.setArticleTitle(getText());
        }
        if (qName.equals("journal_title")) {
            biblio.setTitle(getText());
        }
        if (qName.equals("ISSN")) {
            biblio.setISSN(getText());
        }
        if (qName.equals("volume")) {
            String volume = getText();
            if (volume != null && volume.length() > 0) {
                biblio.setVolume(volume);
            }
        }
        if (qName.equals("issue")) {
            String issue = getText();
            if (issue != null && issue.length() > 0) {
                biblio.setNumber(issue);
            }
        }
        if (qName.equals("year")) {
            String year = getText();
            biblio.setPublicationDate(year);
            biblio.setYear(year);
        }
        if (qName.equals("first_page")) {
            String page = getText();
            if (page != null && page.length() > 0) {
                // NOTE(review): a non-numeric page value raises NumberFormatException,
                // exactly as in the original implementation
                biblio.setBeginPage(Integer.parseInt(page));
            }
        }
        if (qName.equals("contributor")) {
            // surname arrives after given_name, so the author string is complete here
            biblio.addAuthor(author);
            author = null;
        }
        if (qName.equals("given_name")) {
            author = getText();
        }
        if (qName.equals("surname")) {
            author = author + " " + getText();
        }
        //biblio.setDOIRetrieval(true);
    }

    @Override
    public void startElement(String namespaceURI, String localName, String qName, Attributes atts)
            throws SAXException {
        // no-op: the former read of the "query" element's status attribute was
        // dead code (the value was assigned to a local and never used)
    }
}
3,002
26.550459
72
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/PatentAnnotationSaxParser.java
package org.grobid.core.sax;

import java.io.*;
import org.xml.sax.*;
import org.xml.sax.helpers.*;
import java.util.*;

import org.grobid.core.data.PatentItem;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.exceptions.GrobidException;

/**
 * This SAX parser mirror the input XML document, and add as extra annotation identified reference to
 * patent and NPL. The possible tags within the chunk are removed to avoid hierarchical invalid documents.
 *
 * The mirrored document is written to {@link #writer}; annotation only happens
 * while inside the &lt;description&gt; element (the {@code counting} flag),
 * where character offsets are tracked against the previously extracted
 * {@link PatentItem} and {@link BibDataSet} reference positions.
 */
public class PatentAnnotationSaxParser extends DefaultHandler {

    StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text

    private Writer writer = null;          // output for the mirrored, annotated document
    private int offset = 0;                // character offset walked so far inside <description>
    private boolean counting = false;      // true while inside <description>

    private ArrayList<PatentItem> patents = null;   // patent references with their offsets
    private ArrayList<BibDataSet> articles = null;  // NPL (non-patent literature) references

    // for getting track of the offset walk
    private int currentPatentIndex = 0;
    private int currentArticleIndex = 0;

    //private static String delimiters = " \n\t" + TextUtilities.fullPunctuations;

    public PatentAnnotationSaxParser() {}

    public void characters(char[] buffer, int start, int length) {
        accumulator.append(buffer, start, length);
    }

    /** Sets the destination of the mirrored, annotated XML. */
    public void setWriter(Writer writer) {
        this.writer = writer;
    }

    /** Sets the patent references (with offsets) to inject as &lt;ref&gt; tags. */
    public void setPatents(ArrayList<PatentItem> patents) {
        this.patents = patents;
    }

    /** Sets the NPL references (with offsets) to inject as &lt;ref&gt; tags. */
    public void setArticles(ArrayList<BibDataSet> articles) {
        this.articles = articles;
    }

    /**
     * Returns the accumulated text; while inside &lt;description&gt; the text
     * is additionally annotated: every patent/NPL reference whose recorded
     * offsets fall inside the current chunk is wrapped in a
     * &lt;ref type="patent"&gt; or &lt;ref type="npl"&gt; element, and the
     * running {@link #offset} is advanced by the chunk length.
     */
    public String getText() {
        String text = accumulator.toString();
        if (text.trim().length() == 0) {
            return "";
        }

        if (counting) {
            // first pass: patent references located inside the current chunk
            int i = currentPatentIndex;
            int count = text.length();
            while(i < patents.size() ) {
                PatentItem currentPatent = patents.get(i);
                if (currentPatent != null) {
                    int startOffset = currentPatent.getOffsetBegin();
                    int endOffset = currentPatent.getOffsetEnd();
                    if ( (startOffset >= offset) && (endOffset <= offset+count) ) {
                        String context = currentPatent.getContext();
                        String target = "";
                        // keep a leading space outside the <ref> element
                        if (context.charAt(0) == ' ') {
                            target = " <ref type=\"patent\">"+context.substring(1,context.length())+"</ref>";
                        } else {
                            target = "<ref type=\"patent\">"+context+"</ref>";
                        }
                        text = text.replace(context, target);
                        currentPatentIndex = i;
                    }
                }
                i++;
            }
            // second pass: NPL references; note the scan restarts from 0, not
            // from currentArticleIndex (the resumed-scan variant is disabled)
            i = 0;
            while(i < articles.size() ) {
                BibDataSet currentArticle = articles.get(i);
                if (currentArticle != null) {
                    List<Integer> offsets = currentArticle.getOffsets();
                    int startOffset = -1;
                    // endOffset is computed but no longer used: the upper-bound
                    // check on the offsets was disabled
                    int endOffset = -1;
                    String context = currentArticle.getRawBib().trim();
                    if (offsets.size() > 0) {
                        if (offsets.get(0) != null) {
                            startOffset = offsets.get(0).intValue();
                            endOffset = startOffset + context.length();
                        }
                    }
                    if ( (startOffset >= offset) ) {
                        String target = " <ref type=\"npl\">"+context+"</ref> ";
                        text = text.replace(context, target);
                        currentArticleIndex = i;
                    }
                }
                i++;
            }
            offset += count;
        }
        return text;
    }

    public void endElement(java.lang.String uri,
                           java.lang.String localName,
                           java.lang.String qName) throws SAXException {
        try {
            // flush the (possibly annotated) text collected under <p>/<description>
            if (qName.equals("p") || qName.equals("description")) {
                writer.write(getText());
                accumulator.setLength(0);
            }
            if (qName.equals("description")) {
                counting = false;
            }
            if (!counting) {
                // outside <description>: mirror the closing tag verbatim
                writer.write(getText());
                accumulator.setLength(0);
                writer.write("</"+qName+">\n");
            } else {
                // inside <description>: tags are dropped, but keep the text of
                // adjacent rows/paragraphs separated by a space
                if (qName.equals("row")) {
                    accumulator.append(" ");
                }
                if (qName.equals("p")) {
                    writer.write("\n");
                    accumulator.append(" ");
                }
            }
        } catch (Exception e) {
            throw new GrobidException("An exception occured while running Grobid.",e);
        }
    }

    public void startElement(String namespaceURI,
                             String localName,
                             String qName,
                             Attributes atts) throws SAXException {
        try {
            // we output the remaining text
            if (!counting) {
                writer.write(getText());
                accumulator.setLength(0);
            }
            if (!counting) {
                // outside <description>: mirror the opening tag and its attributes
                writer.write("<"+qName);
                int length = atts.getLength();
                // Process each attribute
                for (int i=0; i<length; i++) {
                    // Get names and values for each attribute
                    String name = atts.getQName(i);
                    String value = atts.getValue(i);
                    if ( (name != null) && (value != null) ) {
                        writer.write(" "+name+"=\""+value+"\"");
                    }
                }
                writer.write(">");
            }
            if (qName.equals("description")) {
                // start of the annotated section: restart the offset walk
                offset = 0;
                counting = true;
            } else if (qName.equals("patent-document")) {
                counting = false;
            }
        } catch (Exception e) {
            throw new GrobidException("An exception occured while running Grobid.",e);
        }
    }
}
6,408
25.704167
107
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/TextSaxParser.java
package org.grobid.core.sax;

import org.xml.sax.*;
import org.xml.sax.helpers.*;

/**
 * Minimal SAX handler that simply accumulates textual content.
 * <p/>
 * When a filter element name is set (for instance "description"), only the
 * text found below that element is accumulated; everything else is ignored.
 * The handler also records the publication number found on a
 * "patent-document" root element.
 */
public class TextSaxParser extends DefaultHandler {

    StringBuffer accumulator = new StringBuffer(); // collected character data

    // element name gating the accumulation, or null for "take everything"
    private String filter = null;
    private boolean accumule = true;

    public String currentPatentNumber = null;
    public String country = null;

    public TextSaxParser() {
    }

    @Override
    public void characters(char[] buffer, int start, int length) {
        if (accumule) {
            accumulator.append(buffer, start, length);
        }
    }

    /** Restricts accumulation to the content of the given element. */
    public void setFilter(String filt) {
        filter = filt;
        accumule = false;
    }

    /**
     * @return the accumulated text, trimmed, with all whitespace runs
     *         collapsed into single spaces
     */
    public String getText() {
        String collected = accumulator.toString().trim();
        collected = collected.replace("\n", " ").replace("\t", " ");
        return collected.replaceAll("\\p{Space}+", " ");
    }

    @Override
    public void endElement(String uri, String localName, String qName) throws SAXException {
        if (qName.equals(filter)) {
            // leaving the filtered element: stop accumulating
            accumule = false;
            return;
        }
        if (accumule && (qName.equals("row") || qName.equals("p"))) {
            // keep words of adjacent rows/paragraphs separated
            accumulator.append(" ");
        }
    }

    @Override
    public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException {
        if (qName.equals("patent-document")) {
            String docID = null;
            String docNumber = null;
            String kindCode = null;
            // harvest the identification attributes of the root element
            for (int i = 0; i < atts.getLength(); i++) {
                String attName = atts.getQName(i);
                String attValue = atts.getValue(i);
                if (attName == null) {
                    continue;
                }
                switch (attName) {
                    case "country":
                        country = attValue;
                        break;
                    case "kind":
                        kindCode = attValue;
                        break;
                    case "doc-number":
                    case "docnumber":
                        docNumber = attValue;
                        break;
                    case "id":
                    case "ID":
                        docID = attValue;
                        break;
                }
            }
            // build the publication number: country + number (+ kind code when present)
            if (country != null && docNumber != null) {
                currentPatentNumber = (kindCode != null) ? country + docNumber + kindCode : country + docNumber;
            } else if (docID != null) {
                currentPatentNumber = docID;
            }
        }
        if (qName.equals(filter)) {
            // entering the filtered element: start accumulating
            accumule = true;
        }
    }
}
2,562
22.731481
77
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/CrossrefUnixrefSaxParser.java
package org.grobid.core.sax; import org.grobid.core.data.BiblioItem; import org.apache.commons.lang3.StringUtils; import org.xml.sax.*; import org.xml.sax.helpers.*; import java.util.*; /** * SAX parser for XML crossref DOI metadata descriptions. * See http://www.crossref.org/openurl_info.html * */ public class CrossrefUnixrefSaxParser extends DefaultHandler { private BiblioItem biblio = null; private BiblioItem biblioParent = null; private List<BiblioItem> biblios = null; private List<String> authors = null; private List<String> editors = null; private String author = null; private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text private String media = null; // print or electronic, for ISSN public CrossrefUnixrefSaxParser() { } public CrossrefUnixrefSaxParser(BiblioItem b) { biblio = b; } public CrossrefUnixrefSaxParser(List<BiblioItem> b) { biblios = b; } public boolean journalMetadataBlock = false; public boolean journalIssueBlock = false; public boolean journalArticleBlock = false; public boolean conferencePaperBlock = false; public boolean proceedingsMetadataBlock = false; public boolean contentItemBlock = false; public boolean eventMetadataBlock = false; public boolean bookMetadataBlock = false; public boolean serieMetadataBlock = false; public boolean doiDataBlock = false; public boolean online = false; public boolean authorBlock = false; public boolean editorBlock = false; public boolean firstAuthor = false; public void characters(char[] ch, int start, int length) { accumulator.append(ch, start, length); } public String getText() { return accumulator.toString().trim(); } public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException { if (qName.equals("journal_metadata")) { journalMetadataBlock = false; biblio.setItem(BiblioItem.Periodical); } else if (qName.equals("journal_issue")) { journalIssueBlock = false; biblio.setItem(BiblioItem.Periodical); } else if 
(qName.equals("journal_article")) { journalArticleBlock = false; biblio.setItem(BiblioItem.Article); // biblio.setItem(BiblioItem.Periodical); } else if (qName.equals("proceedings_metadata")) { proceedingsMetadataBlock = false; biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("content_item")) { contentItemBlock = false; } else if (qName.equals("event_metadata")) { eventMetadataBlock = false; } else if (qName.equals("conference_paper")) { conferencePaperBlock = false; biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("doi_data")){ doiDataBlock = false; } else if (qName.equals("title")) { if (journalArticleBlock || contentItemBlock || conferencePaperBlock) { biblio.setArticleTitle(getText()); } else if (serieMetadataBlock) { biblio.setSerieTitle(getText()); } else { biblio.setTitle(getText()); } } else if (qName.equals("full_title")) { biblio.setJournal(getText()); } else if (qName.equals("abbrev_title")) { biblio.setJournalAbbrev(getText()); } else if (qName.equals("issn")) { String issn = getText(); if (media != null) { if (media.equals("print")) biblio.setISSN(issn); else biblio.setISSNe(issn); } else biblio.setISSN(issn); } else if (qName.equals("isbn")) { biblio.setISBN13(getText()); } else if (qName.equals("volume")) { String volume = getText(); if (volume != null) { if (volume.length() > 0) { biblio.setVolume(volume); biblio.setVolumeBlock(volume, true); } } } else if (qName.equals("issue")) { String issue = getText(); // issue can be of the form 4-5 if (issue != null) if (issue.length() > 0) { biblio.setNumber(issue); biblio.setIssue(issue); //biblio.setNumber(Integer.parseInt(issue)); } } else if (qName.equals("year")) { String year = getText(); biblio.setPublicationDate(year); if (online) biblio.setE_Year(year); else biblio.setYear(year); } else if (qName.equals("month")) { String month = getText(); if (online) biblio.setE_Month(month); else biblio.setMonth(month); } else if (qName.equals("day")) { String day = getText(); if 
(online) biblio.setE_Day(day); else biblio.setDay(day); } else if (qName.equals("first_page")) { String page = getText(); if (StringUtils.isNotEmpty(page)) { /*if (page.startsWith("L") | page.startsWith("l")) { page = page.substring(1, page.length()); }*/ page = cleanPage(page); try { biblio.setBeginPage(Integer.parseInt(page)); } catch (Exception e) { // warning message to be logged here } } } else if (qName.equals("last_page")) { String page = getText(); if (StringUtils.isNotEmpty(page)) { page = cleanPage(page); try { biblio.setEndPage(Integer.parseInt(page)); } catch (Exception e) { // warning message to be logged here } } } else if (qName.equals("doi")) { String doi = getText(); if (doiDataBlock) biblio.setDOI(doi); biblio.setError(false); } else if (qName.equals("given_name")) { author = getText(); } else if (qName.equals("surname")) { String sauce = getText(); if (!sauce.equals("Unknown")) { if (author == null) author = sauce; else author = author + " " + sauce; authors.add(author); if (authorBlock) biblio.addAuthor(author); else if (editorBlock) biblio.addEditor(author); author = null; } } else if (qName.equals("person_name")) { firstAuthor = false; authorBlock = false; } else if (qName.equals("conference_name")) { String event = getText(); biblio.setEvent(event); biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("conference_location")) { String location = getText(); biblio.setLocation(location); biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("conference_acronym")) { String acro = getText(); if (biblio.getEvent() == null) biblio.setEvent(acro); else biblio.setEvent(biblio.getEvent() + ", " + acro); biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("proceedings_title")) { String proc = getText(); if (proc != null) proc = proc.replaceAll(" - ", ", "); biblio.setBookTitle(proc); biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("doi_record")) { if (biblios != null) { biblios.add(biblio); biblio 
= null; } } else if (qName.equals("publisher_name")) { String publisher = getText(); biblio.setPublisher(publisher); } else if (qName.equals("publisher_place")) { String location = getText(); biblio.setLocationPublisher(location); } else if (qName.equals("series_metadata")) { serieMetadataBlock = false; biblio.setItem(BiblioItem.InCollection); } else if (qName.equals("book_metadata")) { bookMetadataBlock = false; biblio.setItem(BiblioItem.InBook); } accumulator.setLength(0); } public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException { if (qName.equals("journal_metadata")) { journalMetadataBlock = true; biblio.setItem(BiblioItem.Periodical); } else if (qName.equals("proceedings_metadata")) { proceedingsMetadataBlock = true; biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("book_metadata")) { bookMetadataBlock = true; biblio.setItem(BiblioItem.InBook); } else if (qName.equals("series_metadata")) { serieMetadataBlock = true; if (bookMetadataBlock) biblio.setItem(BiblioItem.InCollection); } else if (qName.equals("content_item")) { BiblioItem biblio2 = new BiblioItem(); biblio2.setParentItem(biblio); biblio = biblio2; contentItemBlock = true; } else if (qName.equals("event_metadata")) { eventMetadataBlock = true; } else if (qName.equals("conference_paper")) { conferencePaperBlock = true; biblio.setItem(BiblioItem.InProceedings); } else if (qName.equals("journal_issue")) { journalIssueBlock = true; biblio.setItem(BiblioItem.Periodical); } else if (qName.equals("journal_article")) { journalArticleBlock = true; biblio.setItem(BiblioItem.Periodical); } else if (qName.equals("doi_data")){ doiDataBlock = true; } else if (qName.equals("contributors")) { authors = new ArrayList<>(0); editors = new ArrayList<>(0); } else if (qName.equals("error")) { biblio.setError(true); } else if (qName.equals("person_name")) { int length = atts.getLength(); // Process each attribute for (int i = 0; i < length; i++) { 
// Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) & (value != null)) { if (name.equals("sequence")) { if (value.equals("firstAuthor")) firstAuthor = true; else firstAuthor = false; } if (name.equals("contributor_role")) { if (value.equals("author")) { authorBlock = true; editorBlock = true; } else if (value.equals("editor")) { authorBlock = false; editorBlock = true; } else { authorBlock = false; editorBlock = false; } } } } } else if (qName.equals("doi_record")) { if (biblios != null) { biblio = new BiblioItem(); } } else if (qName.equals("publication_date")) { int length = atts.getLength(); // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) & (value != null)) { if (name.equals("media_type")) { if (value.equals("online")) online = true; else online = false; } } } } else if (qName.equals("issn")) { int length = atts.getLength(); // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) & (value != null)) { if (name.equals("media_type")) { media = value; } } } } accumulator.setLength(0); } protected static String cleanPage(String page) { return StringUtils.stripStart(page, "Ll"); } }
13,357
36.522472
122
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/MarecSaxParser.java
package org.grobid.core.sax; import org.grobid.core.exceptions.GrobidException; import org.grobid.core.utilities.TextUtilities; import org.grobid.core.lexicon.Lexicon; import org.grobid.core.utilities.OffsetPosition; import org.grobid.core.analyzers.GrobidAnalyzer; import org.xml.sax.*; import org.xml.sax.helpers.*; import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * SAX parser initially made for XML CLEF IP data (collection, training and topics). * But it works also fine for parsing ST.36 stuff as the formats are similar. * */ public class MarecSaxParser extends DefaultHandler { public static final Logger LOGGER = LoggerFactory.getLogger(MarecSaxParser.class); private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text private StringBuffer accumulatorRef = new StringBuffer(); // Accumulate parsed text private String PatentNumber = null; private int PatentID = -1; private String PublicDate = null; private String PriorityDate = null; private String CodeType = null; private String PublicationDate = null; private String Content = null; private List<String> CitedPatentNumber = null; private List<Integer> CitationID = null; private String Classification = null; // working variables private String cited_number = null; public Map<String, ArrayList<String>> referencesPatent = null; public List<String> referencesNPL = null; public List<String> citations = null; // search report citations private boolean npl = false; // indicate if the current reference is to patent or to a npl private boolean ref = false; // are we reading a ref? 
private boolean refFound = false; private int nbNPLRef = 0; private int nbPatentRef = 0; public int nbAllRef = 0; private int N = -1; // window of text to be output around the reference strings // value at -1 means no window considered - everything will be outputed public boolean patentReferences = false; public boolean nplReferences = false; private String currentFileName = null; public Lexicon lexicon = Lexicon.getInstance(); public List<OffsetPosition> journalsPositions = null; public List<OffsetPosition> abbrevJournalsPositions = null; public List<OffsetPosition> conferencesPositions = null; public List<OffsetPosition> publishersPositions = null; public StringBuffer accumulatedText = null; private StringBuffer allContent = null; private GrobidAnalyzer analyzer = GrobidAnalyzer.getInstance(); public MarecSaxParser() { } public void setN(int n) { N = n; } public void characters(char[] buffer, int start, int length) { if (ref) { accumulatorRef.append(buffer, start, length); } else { accumulator.append(buffer, start, length); } } public String getText() { //System.out.println(accumulator.toString().trim()); return accumulator.toString().trim(); } public int getNbNPLRef() { return nbNPLRef; } public int getNbPatentRef() { return nbPatentRef; } public String getRefText() { //System.out.println(accumulator.toString().trim()); return accumulatorRef.toString().trim(); } public void setFileName(String name) { currentFileName = name; if (referencesPatent == null) { referencesPatent = new HashMap<String, ArrayList<String>>(); } referencesPatent.put(name, new ArrayList<String>()); } public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException { if (qName.equals("date")) { accumulator.setLength(0); } else if (qName.equals("ref") || qName.equals("bibl")) { String refString = getRefText(); refString = refString.replace("\n", " "); refString = refString.replace("\t", " "); refString = refString.replace(" ", " "); if (npl 
&& ref) { if (referencesNPL == null) referencesNPL = new ArrayList<String>(); referencesNPL.add(refString); refFound = true; if (nplReferences) nbNPLRef++; } else if (ref) { if (referencesPatent == null) { referencesPatent = new HashMap<String, ArrayList<String>>(); } ArrayList<String> refss = referencesPatent.get(currentFileName); if (refss == null) { refss = new ArrayList<String>(); } refss.add(refString); referencesPatent.put(currentFileName, refss); refFound = true; if (patentReferences) { nbPatentRef++; } } if (refFound) { // we tokenize the text //ArrayList<String> tokens = TextUtilities.segment(refString, "[("+TextUtilities.punctuations); //StringTokenizer st = new StringTokenizer(refString, delimiters, true); List<String> tokenizations = new ArrayList<String>(); try { // TBD: pass a language object to the tokenize method call tokenizations = analyzer.tokenize(refString); } catch(Exception e) { LOGGER.debug("Tokenization for XML patent document has failed."); } int i = 0; //String token = null; //for(String token : tokens) { //while (st.hasMoreTokens()) { for(String token : tokenizations) { //token = st.nextToken().trim(); if ( (token.trim().length() == 0) || (token.equals(" ")) || (token.equals("\t")) || (token.equals("\n")) || (token.equals("\r")) ) { continue; } try { accumulatedText.append(token + "\t"); allContent.append(token + " "); if (npl) { if (nplReferences) { if (i == 0) { //accumulatedText.append("refNPLBegin\n"); accumulatedText.append("I-<refNPL>\n"); } else if (token == null) { //accumulatedText.append("refNPLEnd\n"); accumulatedText.append("E-<refNPL>\n"); } else { accumulatedText.append("<refNPL>\n"); } } else accumulatedText.append("<other>\n"); } else { if (patentReferences) { if (i == 0) accumulatedText.append("I-<refPatent>\n"); else if (token == null) accumulatedText.append("E-<refPatent>\n"); else accumulatedText.append("<refPatent>\n"); } else accumulatedText.append("<other>\n"); } } catch (Exception e) { // e.printStackTrace(); 
throw new GrobidException("An exception occured while running Grobid.", e); } i++; } } ref = false; } else if (qName.equals("classification-ipcr")) { accumulator.setLength(0); } else if (qName.equals("classification-symbol")) { accumulator.setLength(0); } else if (qName.equals("abstract")) { accumulator.setLength(0); } else if (qName.equals("heading")) { accumulator.append(" "); } else if (qName.equals("description")) { if (refFound) { String content = getText(); // we tokenize the text //ArrayList<String> tokens = TextUtilities.segment(content, "[("+TextUtilities.punctuations); //StringTokenizer st = new StringTokenizer(content, delimiters, true); List<String> tokenizations = new ArrayList<String>(); try { // TBD: pass a language object to the tokenize method call tokenizations = analyzer.tokenize(content); } catch(Exception e) { LOGGER.debug("Tokenization for XML patent document has failed."); } int i = 0; //String token = null; //for(String token : tokens) { //while (st.hasMoreTokens()) { for(String token : tokenizations) { //token = st.nextToken().trim(); if ( (token.trim().length() == 0) || (token.equals(" ")) || (token.equals("\t")) || (token.equals("\n")) || (token.equals("\r")) ) { continue; } // we print only a window of N words if ( (i > N) && (N != -1) ) { //break; token = token.trim(); if (token.length() > 0) { accumulatedText.append(token + "\t" + "<ignore>\n"); allContent.append(token + " "); } } else { try { token = token.trim(); if (token.length() > 0) { accumulatedText.append(token + "\t" + "<other>\n"); allContent.append(token + " "); } } catch (Exception e) { throw new GrobidException("An exception occured while running Grobid.", e); } } i++; } accumulator.setLength(0); refFound = false; } } else if (qName.equals("patcit")) { // we register the citation, the citation context will be marked in a later stage if (citations == null) citations = new ArrayList<String>(); citations.add(cited_number); accumulator.setLength(0); } else if 
(qName.equals("invention-title")) { accumulator.setLength(0); } else if (qName.equals("applicants")) { accumulator.setLength(0); } else if (qName.equals("inventors")) { accumulator.setLength(0); } else if (qName.equals("document-id")) { accumulator.setLength(0); } else if (qName.equals("legal-status")) { accumulator.setLength(0); } else if (qName.equals("bibliographic-data")) { accumulator.setLength(0); } else if (qName.equals("doc-number")) { accumulator.setLength(0); } else if (qName.equals("country")) { accumulator.setLength(0); } else if (qName.equals("kind")) { accumulator.setLength(0); } else if (qName.equals("classification-symbol")) { accumulator.setLength(0); } else if (qName.equals("classification-ecla")) { accumulator.setLength(0); } else if (qName.equals("patent-document") || qName.equals("fulltext-document")) { String allString = allContent.toString(); journalsPositions = lexicon.tokenPositionsJournalNames(allString); abbrevJournalsPositions = lexicon.tokenPositionsAbbrevJournalNames(allString); conferencesPositions = lexicon.tokenPositionsConferenceNames(allString); publishersPositions = lexicon.tokenPositionsPublisherNames(allString); allContent = null; allString = null; } else if (qName.equals("row")) { accumulator.append(" "); } else if (qName.equals("p")) { accumulator.append("\n"); } } public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException { if (qName.equals("patent-document") || qName.equals("fulltext-document")) { nbNPLRef = 0; nbPatentRef = 0; nbAllRef = 0; int length = atts.getLength(); // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if (name != null) { if (name.equals("lang")) { //Global_Language_Code = value.toLowerCase(); } if (name.equals("doc-number")) { PatentNumber = "EP" + value; } if (name.equals("kind")) { CodeType = value; } if (name.equals("date")) { 
PublicDate = value; } } } CitedPatentNumber = new ArrayList<String>(); accumulatedText = new StringBuffer(); allContent = new StringBuffer(); accumulator.setLength(0); } else if (qName.equals("description")) { accumulator.setLength(0); } else if (qName.equals("ref") || qName.equals("bibl")) { int length = atts.getLength(); nbAllRef++; // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if (name != null) { if (name.equals("type") || name.equals("typ")) { if (value.equals("npl") || value.equals("book") || value.equals("journal")) { String content = getText(); // we output what has been read so far in the description // we tokenize the text //ArrayList<String> tokens = //StringTokenizer st = new StringTokenizer(content, delimiters, true); List<String> tokenizations = new ArrayList<String>(); try { // TBD: pass a language object to the tokenize method call tokenizations = analyzer.tokenize(content); } catch(Exception e) { LOGGER.debug("Tokenization for XML patent document has failed."); } //int nbTokens = st.countTokens(); int nbTokens = tokenizations.size(); int j = 0; //while (st.hasMoreTokens()) { for(String token : tokenizations) { //String token = st.nextToken().trim(); if ( (token.trim().length() == 0) || (token.equals(" ")) || (token.equals("\t")) || (token.equals("\n")) || (token.equals("\r")) ) { continue; } if ((j > (nbTokens - N) && (N != -1)) || (refFound && (j < N) && (N != -1))) { try { accumulatedText.append(token + "\t" + "<other>\n"); allContent.append(token + " "); } catch (Exception e) { // e.printStackTrace(); throw new GrobidException( "An exception occured while running Grobid.", e); } } else { try { accumulatedText.append(token + "\t" + "<ignore>\n"); allContent.append(token + " "); } catch (Exception e) { // e.printStackTrace(); throw new GrobidException( "An exception occured while running Grobid.", e); } } j++; } 
accumulator.setLength(0); npl = true; ref = true; } else if (value.equals("patent") || value.equals("pl")) { String content = getText(); // we output what has been read so far in the description // we tokenize the text //ArrayList<String> tokens = // TextUtilities.segment(content,"[("+TextUtilities.punctuations); //StringTokenizer st = new StringTokenizer(content, delimiters, true); List<String> tokenizations = new ArrayList<String>(); try { // TBD: pass a language object to the tokenize method call tokenizations = analyzer.tokenize(content); } catch(Exception e) { LOGGER.debug("Tokenization for XML patent document has failed."); } //int nbTokens = st.countTokens(); int nbTokens = tokenizations.size(); int j = 0; for(String token : tokenizations) { //while (st.hasMoreTokens()) { //String token = st.nextToken().trim(); if ( (token.trim().length() == 0) || (token.equals(" ")) || (token.equals("\t")) || (token.equals("\n")) || (token.equals("\r")) ) { continue; } if ((j > (nbTokens - N)) | (refFound & (j < N))) { try { accumulatedText.append(token + "\t" + "<other>\n"); allContent.append(token + " "); } catch (Exception e) { // e.printStackTrace(); throw new GrobidException( "An exception occured while running Grobid.", e); } } else { try { accumulatedText.append(token + "\t" + "<ignore>\n"); allContent.append(token + " "); } catch (Exception e) { // e.printStackTrace(); throw new GrobidException( "An exception occured while running Grobid.", e); } } j++; } accumulator.setLength(0); npl = false; ref = true; } else { System.out.println("Warning: unknown attribute value for ref or bibl: " + value); ref = false; npl = false; } } } } accumulatorRef.setLength(0); } else if (qName.equals("claim")) { accumulator.setLength(0); } else if (qName.equals("invention-title")) { accumulator.setLength(0); } else if (qName.equals("patcit")) { int length = atts.getLength(); // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String 
name = atts.getQName(i); String value = atts.getValue(i); if (name != null) { if (name.equals("ucid")) { cited_number = value; // we normally need to normalize a little bit this patent nummer } } } } } }
21,154
39.998062
111
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/PDFALTOAnnotationSaxHandler.java
package org.grobid.core.sax; import org.grobid.core.layout.Block; import org.grobid.core.layout.Page; import org.grobid.core.layout.PDFAnnotation; import org.grobid.core.document.Document; import org.grobid.core.layout.LayoutToken; import org.grobid.core.layout.GraphicObject; import org.grobid.core.layout.BoundingBox; import org.grobid.core.utilities.TextUtilities; import org.grobid.core.analyzers.GrobidAnalyzer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xml.sax.*; import org.xml.sax.helpers.*; import java.util.*; /** * SAX parser for ALTO XML representation of the annotations present on PDF files * obtained via pdfalto. We only consider here link annotations, other type * of annotations (e.g. highlight) are ignored. * */ public class PDFALTOAnnotationSaxHandler extends DefaultHandler { public static final Logger LOGGER = LoggerFactory.getLogger(PDFALTOAnnotationSaxHandler.class); private StringBuilder accumulator = new StringBuilder(); // Accumulate parsed text private Document doc = null; private List<PDFAnnotation> annotations = null; private PDFAnnotation currentAnnotation = null; private List<Double> x_points = null; private List<Double> y_points = null; public PDFALTOAnnotationSaxHandler(Document doc, List<PDFAnnotation> annotations) { this.doc = doc; this.annotations = annotations; } public void characters(char[] ch, int start, int length) { accumulator.append(ch, start, length); } public String getText() { return accumulator.toString().trim(); } public List<PDFAnnotation> getPDFAnnotations() { return annotations; } public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException { if (qName.equals("ANNOTATION")) { if (currentAnnotation != null) { annotations.add(currentAnnotation); } currentAnnotation = null; } else if (qName.equals("DEST") && (currentAnnotation != null)) { currentAnnotation.setDestination(getText()); } else if (qName.equals("QUADRILATERAL") && (currentAnnotation 
!= null)) { // create the bounding box double x = -1.0; double y = -1.0; double width = -1.0; double height = -1.0; double max = -1.0; double min = 1000.0; for(double val : x_points) { if (val < min) min = val; if (val > max) max = val; } x = min; width = max - min; max = -1.0; min = 1000.0; for(double val : y_points) { if (val < min) min = val; if (val > max) max = val; } y = min; height = max - min; BoundingBox box = BoundingBox .fromPointAndDimensions(currentAnnotation.getPageNumber(), x, y, width, height); currentAnnotation.addBoundingBox(box); } } public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException { if (qName.equals("ANNOTATION")) { // we only consider annotation with attribute @subtype of value "Link" int length = atts.getLength(); // Process attributes for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { if (name.equals("subtype")) { if (value.equals("Link")) { currentAnnotation = new PDFAnnotation(); } } else if (name.equals("pagenum")) { int page = -1; try { page = Integer.parseInt(value); } catch(Exception e) { LOGGER.error("The page number attribute for PDF annotation is not a valid integer: " + value); } if (page != -1) currentAnnotation.setPageNumber(page); } } } } else if (qName.equals("ACTION") && (currentAnnotation != null)) { int length = atts.getLength(); // Process attributes for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { if (name.equals("type")) { if (value.equals("uri")) { currentAnnotation.setType(PDFAnnotation.Type.URI); } else if (value.equals("goto")) { currentAnnotation.setType(PDFAnnotation.Type.GOTO); } else if (value.equals("gotor")) { currentAnnotation.setType(PDFAnnotation.Type.GOTOR); } else { 
LOGGER.info("the link annotation type is not recognized: " + value); currentAnnotation.setType(PDFAnnotation.Type.UNKNOWN); } } } } } else if (qName.equals("POINT") && (currentAnnotation != null)) { int length = atts.getLength(); // Process attributes for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { if (name.equals("HPOS")) { double val = -1.0; try { val = Double.parseDouble(value); } catch(Exception e) { LOGGER.error("The value for x coordinate attribute is not a valid double: " + value); } if (val != -1.0) x_points.add(val); } else if (name.equals("VPOS")) { double val = -1.0; try { val = Double.parseDouble(value); } catch(Exception e) { LOGGER.error("The value for y coordinate attribute is not a valid double: " + value); } if (val != -1.0) y_points.add(val); } } } } else if (qName.equals("QUADRILATERAL") && (currentAnnotation != null)) { x_points = new ArrayList<Double>(); y_points = new ArrayList<Double>(); } accumulator.setLength(0); } }
5,707
29.524064
101
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/PDFALTOSaxHandler.java
package org.grobid.core.sax; import org.apache.commons.lang3.StringUtils; import org.grobid.core.analyzers.Analyzer; import org.grobid.core.analyzers.GrobidAnalyzer; import org.grobid.core.document.Document; import org.grobid.core.layout.*; import org.grobid.core.utilities.TextUtilities; import org.grobid.core.utilities.UnicodeUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.xml.sax.Attributes; import org.xml.sax.SAXException; import org.xml.sax.helpers.DefaultHandler; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import static org.apache.commons.lang3.StringUtils.isNotBlank; /** * SAX parser for XML ALTO representation of fixed layout documents. Typographical and layout information * are defined at token level, with line and block structures. * * This parser supports: * - pdfalto output from PDF * * Expected coverage includes ALTO files generated by OCR (e.g. ABBYY) and ALTO files generated * from docx files. * * TBD: character-level ALTO files * */ public class PDFALTOSaxHandler extends DefaultHandler { public static final Logger LOGGER = LoggerFactory.getLogger(PDFALTOSaxHandler.class); private StringBuilder accumulator = new StringBuilder(); // Accumulate parsed text private String previousToken = null; private LayoutToken previousTok = null; private double currentX = 0.0; private double currentY = 0.0; private double currentWidth = 0.0; private double currentHeight = 0.0; private Block block = null; // current block private int nbTokens = 0; // nb tokens in the current block private List<GraphicObject> images = null; private HashMap<String, TextStyle> textStyles = new HashMap<String, TextStyle>(); private boolean currentRotation = false; private List<LayoutToken> tokenizations = null; private Document doc = null; //starting page count from 1 since most of the PDF-related software count pages from 1 private int currentPage = 0; private Page page = null; // the current page object private Analyzer analyzer 
= GrobidAnalyzer.getInstance(); // use the default one by default ;) private int currentOffset = 0; public PDFALTOSaxHandler(Document d, List<GraphicObject> im) { doc = d; images = im; tokenizations = new ArrayList<>(); } public void setAnalyzer(Analyzer analyzer) { this.analyzer = analyzer; } public Analyzer getAnalyzer() { return this.analyzer; } private void addToken(LayoutToken layoutToken) { layoutToken.setOffset(currentOffset); currentOffset += layoutToken.getText().length(); tokenizations.add(layoutToken); if (doc.getBlocks() == null) { layoutToken.setBlockPtr(0); } else { layoutToken.setBlockPtr(doc.getBlocks().size()); } if (block == null) { LOGGER.info("addToken called with null block object: " + layoutToken.toString()); } else { block.addToken(layoutToken); } } private void addBlock(Block block) { if (block == null) LOGGER.info("addBlock called with null block object"); if (!block.isNull() && (block.getStartToken() != block.getEndToken())) { block.setPage(page); doc.addBlock(block); page.addBlock(block); } } public List<LayoutToken> getTokenization() { return tokenizations; } public void characters(char[] ch, int start, int length) { accumulator.append(ch, start, length); } public String trimAndNormaliseText(String content) { String res = content.trim(); //res = res.replace("\u00A0", " "); // stdandard NO-BREAK SPACE are viewed // as space //res = res.replaceAll("\\p{javaSpaceChar}", " "); // replace all unicode space separators // by a usual SPACE //res = res.replace("\t"," "); // case where tabulation are used as separator // -> replace tabulation with a usual space res = UnicodeUtil.normaliseText(res); return res.trim(); } public void endElement(String uri, String localName, String qName) throws SAXException { if (qName.equals("TextLine")) { LayoutToken token = new LayoutToken(); token.setText("\n"); token.setPage(currentPage); nbTokens++; accumulator.setLength(0); addToken(token); } else if (qName.equals("Description")) { accumulator.setLength(0); } 
else if (qName.equals("String")) { accumulator.setLength(0); } else if (qName.equals("Page")) { // page marker are useful to detect headers (same first line(s) // appearing on each page) if (block != null) { LayoutToken localTok = new LayoutToken("\n"); localTok.setPage(currentPage); addToken(localTok); addBlock(block); } nbTokens = 0; doc.addPage(page); } else if (qName.equals("TextBlock")) { LayoutToken localTok = new LayoutToken("\n"); localTok.setPage(currentPage); addToken(localTok); //PL //block.setWidth(currentX - block.getX() + currentWidth); //block.setHeight(currentY - block.getY() + currentHeight); addBlock(block); nbTokens = 0; block = null; } else if (qName.equals("Illustration")) { // this is normally the bitmap images and vector graphics // such vector graphics are applied to the whole page, so there is no x,y coordinates available // in the xml - to get them we will need to parse the .vec files if (block != null) { addBlock(block); } //block = new Block(); //block.setStartToken(tokenizations.size()); int imagePos = images.size()-1; /*if (doc.getBlocks() != null) images.get(imagePos).setBlockNumber(doc.getBlocks().size()); else images.get(imagePos).setBlockNumber(0);*/ /*int startPos = 0; if (tokenizations.size() > 0) { startPos = tokenizations.size(); //startPos = tokenizations.size()-1; } int endPos = startPos;*/ images.get(imagePos).setStartPosition(tokenizations.size()); images.get(imagePos).setEndPosition(tokenizations.size()); images.get(imagePos).setPage(currentPage); //addBlock(block); nbTokens = 0; } } public void endDocument(){ doc.setImages(images); } public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException { if (qName.equals("Page")) { int length = atts.getLength(); currentPage++; page = new Page(currentPage); // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if 
((name != null) && (value != null)) { if (name.equals("WIDTH")) { double width = 0.0; try { width = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid WIDTH value: " + value); } page.setWidth(width); } else if (name.equals("HEIGHT")) { double height = 0.0; try { height = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HEIGHT value: " + value); } page.setHeight(height); } } } } else if (qName.equals("PrintSpace")) { int length = atts.getLength(); // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { switch (name) { case "HPOS": double x = 0.0; try { x = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HPOS value: " + value); } if (x != currentX && x != 0.0) { currentX = Math.abs(x); } break; case "VPOS": double y = 0.0; try { y = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid VPOS value: " + value); } if (y != currentY && y != 0.0) { currentY = Math.abs(y); } break; case "WIDTH": double width = 0.0; try { width = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid WIDTH value: " + value); } if (width != currentWidth && width != 0.0) { currentWidth = Math.abs(width); } break; case "HEIGHT": double height = 0.0; try { height = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HEIGHT value: " + value); } if (height != currentHeight && height != 0.0) { currentHeight = Math.abs(height); } break; } } } } else if (qName.equals("TextBlock")) { block = new Block(); nbTokens = 0; block.setStartToken(tokenizations.size()); } else if (qName.equals("Illustration")) { int length = atts.getLength(); GraphicObject image = new GraphicObject(); double x = 0, y = 0, width = 0, height = 0; // Process each attribute for (int i = 0; i < 
length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { switch (name) { case "FILEID": image.setFilePath(value); if (value.contains(".svg")) { image.setType(GraphicObjectType.VECTOR); } else { image.setType(GraphicObjectType.BITMAP); } break; case "HPOS": try { x = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HPOS value: " + value); } break; case "VPOS": try { y = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid VPOS value: " + value); } break; case "WIDTH": try { width = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid WIDTH value: " + value); } break; case "HEIGHT": try { height = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HEIGHT value: " + value); } break; case "TYPE": if (value.equals("svg")) { image.setType(GraphicObjectType.VECTOR); } else { image.setType(GraphicObjectType.BITMAP); } break; } } } image.setBoundingBox(BoundingBox.fromPointAndDimensions(currentPage, x, y, width, height)); image.setPage(currentPage); images.add(image); } else if (qName.equals("TextLine")) { int length = atts.getLength(); // Process each attribute /*for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { if (name.equals("id")) { } else if (name.equals("x")) { } else if (name.equals("y")) { } else if (name.equals("width")) { } else if (name.equals("height")) { } } }*/ } else if (qName.equals("String")) { int length = atts.getLength(); String content = null, fontId = null; TextStyle textStyle = null; // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if ((name != null) && (value != null)) { 
if (name.equals("ID")) { ; } else if (name.equals("CONTENT")) { content = value; } else if (name.equals("STYLEREFS")) { fontId = value; }else if (name.equals("rotation")) { if (value.equals("0")) currentRotation = false; else currentRotation = true; } else if (name.equals("HPOS")) { double x = 0.0; try { x = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HPOS value: " + value); } if (x != currentX && x != 0.0) { currentX = Math.abs(x); } } else if (name.equals("VPOS")) { double y = 0.0; try { y = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid VPOS value: " + value); } if (y != currentY && y != 0.0) { currentY = Math.abs(y); } } else if (name.equals("base")) { double base = 0.0; try { base = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid base value: " + value); } } else if (name.equals("WIDTH")) { double width = 0.0; try { width = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid WIDTH value: " + value); } if (width != currentWidth && width != 0.0) { currentWidth = Math.abs(width); } } else if (name.equals("HEIGHT")) { double height = 0.0; try { height = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid HEIGHT value: " + value); } if (height != currentHeight && height != 0.0) { currentHeight = Math.abs(height); } } } } // process ligatures String tok0 = TextUtilities.clean(trimAndNormaliseText(content)); textStyle = textStyles.get(fontId); if (block.getStartToken() == -1) { block.setStartToken(tokenizations.size()); } if (tok0.length() > 0) { //StringTokenizer st = new StringTokenizer(tok0, // TextUtilities.delimiters, true); List<String> subTokenizations = new ArrayList<>(); try { // TBD: pass a language object to the tokenize method call subTokenizations = analyzer.tokenize(tok0); } catch(Exception e) { LOGGER.debug("Sub-tokenization of pdfalto token has failed."); } if 
(subTokenizations.size() != 0) { //{ // WARNING: ROUGH APPROXIMATION (but better than the same coords) double totalLength = 0; for (String t : subTokenizations) { totalLength += t.length(); } double prevSubWidth = 0; for(String tok : subTokenizations) { // WARNING: ROUGH APPROXIMATION (but better than the same coords) // Here to get the right subTokWidth should use the content length. double subTokWidth = (currentWidth * (tok.length() / totalLength)); double subTokX = currentX + prevSubWidth; prevSubWidth += subTokWidth; //String tok = st.nextToken(); if (tok.length() > 0) { LayoutToken token = new LayoutToken(); token.setPage(currentPage); token.setText(tok); addToken(token); if (currentRotation) { // if the text is rotated, it appears that the font size is multiplied // by 2? we should have a look at pdf2xml/pdfalto for this textStyle.setFontSize(textStyle.getFontSize() / 2); } if (textStyle.getFontName() != null) token.setFont(textStyle.getFontName().toLowerCase()); else token.setFont("default"); token.setItalic(textStyle.isItalic()); token.setBold(textStyle.isBold()); token.setRotation(currentRotation); token.setPage(currentPage); token.setColorFont(textStyle.getFontColor()); token.setSubscript(textStyle.isSubscript()); token.setSuperscript(textStyle.isSuperscript()); token.setX(subTokX); token.setY(currentY); token.setWidth(subTokWidth); token.setHeight(currentHeight); // token.setX(currentX); // token.setY(currentY); // token.setWidth(currentWidth); // token.setHeight(currentHeight); token.setFontSize(textStyle.getFontSize()); // if (!diaresis && !accent) { // // block.addToken(token); // } if (block.getFont() == null) { if (textStyle.getFontName() != null) block.setFont(textStyle.getFontName().toLowerCase()); else token.setFont("default"); } if (nbTokens == 0) { block.setItalic(textStyle.isItalic()); block.setBold(textStyle.isBold()); } if (block.getColorFont() == null) block.setColorFont(textStyle.getFontColor()); /*if (block.getX() == 0.0) 
block.setX(currentX); if (block.getY() == 0.0) block.setY(currentY); if (block.getWidth() == 0.0) block.setWidth(currentWidth); if (block.getHeight() == 0.0) block.setHeight(currentHeight); if (block.getFontSize() == 0.0) block.setFontSize(currentFontSize);*/ previousToken = tok; previousTok = token; nbTokens++; } } } if (tokenizations.size() > 0) { String justBefore = tokenizations.get(tokenizations.size() - 1).t(); if (!justBefore.endsWith("-")) { LayoutToken localTok = new LayoutToken(" "); localTok.setPage(currentPage); addToken(localTok); } } } block.setEndToken(tokenizations.size()); } else if (qName.equals("TextStyle")) { int length = atts.getLength(); TextStyle textStyle = new TextStyle(); String fontId = null; // Process each attribute for (int i = 0; i < length; i++) { // Get names and values for each attribute String name = atts.getQName(i); String value = atts.getValue(i); if (isNotBlank(name) && isNotBlank(value)) { if (name.equals("ID")) { fontId = value; } else if (name.equals("FONTFAMILY")) { /*if (StringUtils.containsIgnoreCase(value, "bold") || StringUtils.endsWithIgnoreCase(value, "_bd")) { textStyle.setBold(true); } if (StringUtils.containsIgnoreCase(value, "italic") || StringUtils.endsWithIgnoreCase(value, "_it")) { textStyle.setItalic(true); }*/ textStyle.setFontName(value); } else if (name.equals("FONTSIZE")) { double fontSize = 0.0; try { fontSize = Double.parseDouble(value); } catch(NumberFormatException e) { LOGGER.warn("Invalid FONTSIZE value: " + value); } textStyle.setFontSize(fontSize); } else if (name.equals("FONTSTYLE")) { // font properties, we are interested by subscript or superscript if (StringUtils.containsIgnoreCase(value, "subscript")) { textStyle.setSubscript(true); } if (StringUtils.containsIgnoreCase(value, "superscript")) { textStyle.setSuperscript(true); } if (StringUtils.containsIgnoreCase(value, "bold")) { textStyle.setBold(true); } if (StringUtils.containsIgnoreCase(value, "italic") || 
StringUtils.containsIgnoreCase(value, "italics")) { textStyle.setItalic(true); } } else if (name.equals("FONTCOLOR")) { textStyle.setFontColor(value); } else if (name.equals("FONTTYPE")) { // value can be empty or a sequence of font properties separated by space, out of these /*if (value.equals("serif")) { textStyle.setSerif(true); } else { textStyle.setSerif(false); }*/ } // else if (name.equals("FONTWIDTH")) { // if (value.equals("proportional")) { // textStyle.setProportional(true); // } else { // textStyle.setProportional(false); // } // } // // else if (name.equals("rotation")) { // if (value.equals("0")) // textStyle.setRotation(false); // else // textStyle.setRotation(true); // } } } if(fontId != null) textStyles.put(fontId, textStyle); } } } class TextStyle { private double fontSize = 0.0; private String fontName = null; private String fontColor = null; private boolean bold = false; private boolean italic = false; private boolean subscript = false; private boolean superscript = false; //not used attributes private boolean proportional = false; private boolean serif = false; //private boolean rotation = false; public double getFontSize() { return fontSize; } public void setFontSize(double fontSize) { this.fontSize = fontSize; } public String getFontName() { return fontName; } public void setFontName(String fontName) { this.fontName = fontName; } public String getFontColor() { return fontColor; } public void setFontColor(String fontColor) { this.fontColor = fontColor; } public boolean isBold() { return bold; } public void setBold(boolean bold) { this.bold = bold; } public boolean isItalic() { return italic; } public void setItalic(boolean italic) { this.italic = italic; } // public boolean isRotation() { // return rotation; // } // // public void setRotation(boolean rotation) { // this.rotation = rotation; // } public boolean isProportional() { return proportional; } public void setProportional(boolean proportional) { this.proportional = proportional; } 
public boolean isSerif() { return serif; } public void setSerif(boolean serif) { this.serif = serif; } public boolean isSubscript() { return subscript; } public void setSubscript(boolean script) { this.subscript = script; } public boolean isSuperscript() { return superscript; } public void setSuperscript(boolean script) { this.superscript = script; } }
26,192
34.01738
130
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/PDFMetadataSaxHandler.java
package org.grobid.core.sax;

import org.grobid.core.document.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

import org.grobid.core.data.Metadata;

/**
 * SAX parser for the metadata of PDF files obtained via xpdf pdfalto.
 *
 * The expected input is a small, flat XML document whose root is METADATA and
 * whose child elements (TITLE, SUBJECT, KEYWORDS, AUTHOR, CREATOR, PRODUCER,
 * CREATIONDATE, MODIFICATIONDATE) each carry one metadata value as text.
 */
public class PDFMetadataSaxHandler extends DefaultHandler {
    public static final Logger LOGGER = LoggerFactory.getLogger(PDFMetadataSaxHandler.class);

    // Accumulates the character data of the element currently being parsed
    private StringBuilder accumulator = new StringBuilder();

    // Kept for interface compatibility with the other pdfalto handlers;
    // this handler does not read it.
    private Document doc = null;
    private Metadata metadata = null;

    public PDFMetadataSaxHandler(Document d) {
        doc = d;
        metadata = new Metadata();
    }

    public void characters(char[] ch, int start, int length) {
        accumulator.append(ch, start, length);
    }

    /**
     * @return the text accumulated for the current element, trimmed
     */
    public String getText() {
        // a single trim is sufficient (the previous version trimmed twice)
        return accumulator.toString().trim();
    }

    /**
     * Returns the trimmed accumulated text and resets the accumulator so it is
     * ready for the next element.
     */
    private String consumeText() {
        String text = getText();
        accumulator.setLength(0);
        return text;
    }

    public void endElement(String uri, String localName, String qName) throws SAXException {
        switch (qName) {
            case "TITLE":
                metadata.setTitle(consumeText());
                break;
            case "SUBJECT":
                metadata.setSubject(consumeText());
                break;
            case "KEYWORDS":
                metadata.setKeywords(consumeText());
                break;
            case "AUTHOR":
                metadata.setAuthor(consumeText());
                break;
            case "CREATOR":
                metadata.setCreator(consumeText());
                break;
            case "PRODUCER":
                metadata.setProducer(consumeText());
                break;
            case "CREATIONDATE":
                metadata.setCreateDate(consumeText());
                break;
            case "MODIFICATIONDATE":
                metadata.setModificationDate(consumeText());
                break;
            default:
                // METADATA (the root) and any unknown element: nothing to record
                break;
        }
    }

    public void endDocument() {
    }

    public void startElement(String namespaceURI, String localName, String qName,
                             Attributes atts) throws SAXException {
        // Reset the accumulator at the start of every element so that stray
        // text appearing between elements cannot leak into the next recorded
        // value (the previous version never cleared it here).
        accumulator.setLength(0);
    }

    public Metadata getMetadata() {
        return metadata;
    }
}
2,813
29.258065
90
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/sax/PDFALTOOutlineSaxHandler.java
package org.grobid.core.sax;

import org.grobid.core.document.DocumentNode;
import org.grobid.core.document.Document;
import org.grobid.core.layout.BoundingBox;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.xml.sax.*;
import org.xml.sax.helpers.*;

import java.util.*;

/**
 * SAX parser for ALTO XML representation of the outline/bookmark present in PDF files
 * obtained via pdfalto.
 *
 * The expected structure is a TOCITEMS root containing nested TOCITEMLIST/ITEM
 * elements; each ITEM carries a STRING label and a LINK with target coordinates.
 */
public class PDFALTOOutlineSaxHandler extends DefaultHandler {
    public static final Logger LOGGER = LoggerFactory.getLogger(PDFALTOOutlineSaxHandler.class);

    private StringBuilder accumulator = new StringBuilder(); // Accumulate parsed text

    private Document doc = null;         // not read here, kept for interface compatibility
    private DocumentNode root = null;    // root of the reconstructed outline tree
    private DocumentNode currentNode = null;
    private String label = null;         // unused placeholder, kept for compatibility
    private BoundingBox box = null;      // target area of the LINK being parsed
    private int currentLevel = -1;       // nesting level of the current TOCITEMLIST
    private int currentId = -1;
    private int currentParentId = -1;
    private Map<Integer, DocumentNode> nodes = null; // item id -> node, to resolve parents

    public PDFALTOOutlineSaxHandler(Document doc) {
        this.doc = doc;
    }

    public void characters(char[] ch, int start, int length) {
        accumulator.append(ch, start, length);
    }

    public String getText() {
        return accumulator.toString().trim();
    }

    public DocumentNode getRootNode() {
        return root;
    }

    /**
     * Parses an integer attribute value; on failure, logs a warning and
     * returns -1 (the "unset" marker used throughout this handler).
     */
    private static int parseIntOrWarn(String value, String description) {
        try {
            return Integer.parseInt(value);
        } catch (Exception e) {
            LOGGER.warn("Invalid " + description + " string (should be an integer): " + value);
            return -1;
        }
    }

    /**
     * Parses a double coordinate attribute value; on failure, logs an error
     * and returns -1.0 (the "unset" marker used throughout this handler).
     */
    private static double parseCoordinateOrError(String value, String name) {
        try {
            return Double.parseDouble(value);
        } catch (Exception e) {
            LOGGER.error("The value for " + name + " coordinate attribute is not a valid double: " + value);
            return -1.0;
        }
    }

    public void endElement(java.lang.String uri, java.lang.String localName,
                           java.lang.String qName) throws SAXException {
        if (qName.equals("STRING")) {
            // the text content of STRING is the outline item label
            currentNode.setLabel(getText());
        } else if (qName.equals("ITEM")) {
            // the box could come from a nested element
            if (box != null) {
                currentNode.setBoundingBox(box);
            }
            box = null;
            label = null;
        } else if (qName.equals("TOCITEMLIST")) {
            currentParentId = -1;
        } else if (qName.equals("LINK")) {
            // in case of nested item, we need to assign the box right away or we will lose it
            if (box != null) {
                currentNode.setBoundingBox(box);
            }
            box = null;
        }
    }

    public void startElement(String namespaceURI, String localName,
                             String qName, Attributes atts) throws SAXException {
        if (qName.equals("TOCITEMS")) {
            // this is the document root
            root = new DocumentNode();
            nodes = new HashMap<Integer, DocumentNode>();
        } else if (qName.equals("ITEM")) {
            startItem(atts);
        } else if (qName.equals("TOCITEMLIST")) {
            startTocItemList(atts);
        } else if (qName.equals("LINK")) {
            startLink(atts);
        }
        accumulator.setLength(0);
    }

    /** Creates the node for an ITEM element and attaches it to its parent. */
    private void startItem(Attributes atts) {
        currentNode = new DocumentNode();
        int length = atts.getLength();
        // Process attributes
        for (int i = 0; i < length; i++) {
            String name = atts.getQName(i);
            String value = atts.getValue(i);
            if ((name != null) && (value != null)) {
                if (name.equalsIgnoreCase("id")) {
                    currentId = parseIntOrWarn(value, "id");
                }
            }
        }
        currentNode.setId(currentId);
        nodes.put(currentId, currentNode);
        if (currentParentId != -1) {
            DocumentNode father = nodes.get(currentParentId);
            if (father == null)
                LOGGER.warn("Father not yet encountered! id is " + currentParentId);
            else {
                currentNode.setFather(father);
                father.addChild(currentNode);
            }
        } else {
            // parent is the root node
            currentNode.setFather(root);
            root.addChild(currentNode);
        }
    }

    /** Reads the nesting level and parent item id of a TOCITEMLIST element. */
    private void startTocItemList(Attributes atts) {
        int length = atts.getLength();
        for (int i = 0; i < length; i++) {
            String name = atts.getQName(i);
            String value = atts.getValue(i);
            if ((name != null) && (value != null)) {
                if (name.equals("level")) {
                    currentLevel = parseIntOrWarn(value, "level");
                } else if (name.equals("idItemParent")) {
                    currentParentId = parseIntOrWarn(value, "parent id");
                }
            }
        }
    }

    /** Builds the bounding box of a LINK element target from its coordinates. */
    private void startLink(Attributes atts) {
        int length = atts.getLength();
        int page = -1;
        double top = -1.0;
        double bottom = -1.0;
        double left = -1.0;
        double right = -1.0;
        for (int i = 0; i < length; i++) {
            String name = atts.getQName(i);
            String value = atts.getValue(i);
            if ((name != null) && (value != null)) {
                if (name.equals("page")) {
                    try {
                        page = Integer.parseInt(value);
                    } catch (Exception e) {
                        LOGGER.error("The value for page coordinate attribute is not a valid int: " + value);
                    }
                } else if (name.equals("top")) {
                    double val = parseCoordinateOrError(value, "top");
                    if (val != -1.0) {
                        top = val;
                    }
                } else if (name.equals("bottom")) {
                    double val = parseCoordinateOrError(value, "bottom");
                    if (val != -1.0) {
                        bottom = val;
                    }
                } else if (name.equals("left")) {
                    double val = parseCoordinateOrError(value, "left");
                    if (val != -1.0) {
                        left = val;
                    }
                } else if (name.equals("right")) {
                    double val = parseCoordinateOrError(value, "right");
                    if (val != -1.0) {
                        right = val;
                    }
                }
            }
        }
        // Create the bounding box: (x, y) is the top-left corner of the link area.
        // FIX: y was previously assigned "right" (an horizontal coordinate) instead
        // of "top" (the vertical one), producing wrong outline target boxes.
        double x = left;
        double y = top;
        double width = -1.0;
        double height = -1.0;
        if (right >= left)
            width = right - left;
        if (bottom >= top)
            height = bottom - top;
        box = BoundingBox.fromPointAndDimensions(page, x, y, width, height);
    }
}
6,501
27.393013
97
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/tokenization/TaggingTokenClusteror.java
package org.grobid.core.tokenization;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import org.grobid.core.GrobidModel;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.layout.LayoutToken;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Cluster tokens by label: groups consecutive LabeledTokensContainer items
 * carrying the same label into TaggingTokenCluster objects, starting a new
 * cluster whenever a beginning-of-entity label is seen or the label changes.
 */
public class TaggingTokenClusteror {
    private final TaggingTokenSynchronizer taggingTokenSynchronizer;

    /** Predicate keeping only clusters carrying a given label. */
    public static class LabelTypePredicate implements Predicate<TaggingTokenCluster> {
        private TaggingLabel label;

        public LabelTypePredicate(TaggingLabel label) {
            this.label = label;
        }

        @Override
        public boolean apply(TaggingTokenCluster taggingTokenCluster) {
            return taggingTokenCluster.getTaggingLabel() == label;
        }
    }

    /** Predicate discarding clusters carrying any of the given labels. */
    public static class LabelTypeExcludePredicate implements Predicate<TaggingTokenCluster> {
        private TaggingLabel[] labels;

        public LabelTypeExcludePredicate(TaggingLabel... labels) {
            this.labels = labels;
        }

        @Override
        public boolean apply(TaggingTokenCluster taggingTokenCluster) {
            for (TaggingLabel label : labels) {
                if (taggingTokenCluster.getTaggingLabel() == label) {
                    return false;
                }
            }
            return true;
        }
    }

    public TaggingTokenClusteror(GrobidModel grobidModel, String result, List<LayoutToken> tokenizations) {
        taggingTokenSynchronizer = new TaggingTokenSynchronizer(grobidModel, result, tokenizations);
    }

    public TaggingTokenClusteror(GrobidModel grobidModel, String result, List<LayoutToken> tokenizations,
                                 boolean computerFeatureBlock) {
        taggingTokenSynchronizer = new TaggingTokenSynchronizer(grobidModel, result, tokenizations,
                computerFeatureBlock);
    }

    /**
     * Builds the clusters in document order.
     *
     * @return the list of clusters; empty when the tagging result contains no token
     */
    public List<TaggingTokenCluster> cluster() {
        PeekingIterator<LabeledTokensContainer> it = Iterators.peekingIterator(taggingTokenSynchronizer);
        if (!it.hasNext() || (it.peek() == null)) {
            return Collections.emptyList();
        }

        List<TaggingTokenCluster> result = new ArrayList<>();
        // "begin" marks the start of the sequence, for the case where the first
        // label has no beginning indicator (e.g. I-). The previous version also
        // pre-built a cluster from it.peek() that was never added to the result
        // (dead code, removed here).
        boolean begin = true;
        TaggingTokenCluster curCluster = null;

        while (it.hasNext()) {
            LabeledTokensContainer cont = it.next();
            // A null container denotes an empty line in the tagging result
            // (see TaggingTokenSynchronizer#next); skip it instead of risking
            // a NullPointerException on cont.isBeginning() below.
            if (cont == null) {
                continue;
            }
            if (begin || cont.isBeginning() || cont.getTaggingLabel() != curCluster.getTaggingLabel()) {
                curCluster = new TaggingTokenCluster(cont.getTaggingLabel());
                result.add(curCluster);
                begin = false;
            }
            curCluster.addLabeledTokensContainer(cont);
        }
        return result;
    }
}
3,072
34.321839
122
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/tokenization/LabeledTokensContainer.java
package org.grobid.core.tokenization; import org.grobid.core.engines.label.TaggingLabel; import org.grobid.core.engines.tagging.GenericTaggerUtils; import org.grobid.core.layout.LayoutToken; import java.util.List; /** * Representing labeled tokens and stuff */ public class LabeledTokensContainer { private List<LayoutToken> layoutTokens; private String token; private TaggingLabel taggingLabel; private boolean beginning; private boolean trailingSpace; private boolean trailingNewLine; private String featureString; public LabeledTokensContainer(List<LayoutToken> layoutTokens, String token, TaggingLabel taggingLabel, boolean beginning) { this.layoutTokens = layoutTokens; this.token = token; this.taggingLabel = taggingLabel; this.beginning = beginning; } public List<LayoutToken> getLayoutTokens() { return layoutTokens; } public String getToken() { return token; } public TaggingLabel getTaggingLabel() { return taggingLabel; } public boolean isBeginning() { return beginning; } public String getPlainLabel() { return taggingLabel.getLabel(); } public String getFullLabel() { return isBeginning() ? GenericTaggerUtils.START_ENTITY_LABEL_PREFIX + taggingLabel.getLabel() : taggingLabel.getLabel(); } public boolean isTrailingSpace() { return trailingSpace; } public boolean isTrailingNewLine() { return trailingNewLine; } public void setTrailingSpace(boolean trailingSpace) { this.trailingSpace = trailingSpace; } public void setTrailingNewLine(boolean trailingNewLine) { this.trailingNewLine = trailingNewLine; } public String getFeatureString() { return featureString; } public void setFeatureString(String featureString) { this.featureString = featureString; } @Override public String toString() { return token + " (" + getFullLabel() + ")"; } }
2,069
23.939759
127
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/tokenization/TaggingTokenSynchronizer.java
package org.grobid.core.tokenization;

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import org.grobid.core.GrobidModel;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.Triple;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

/**
 * Synchronize tagging result and layout tokens
 *
 * Walks the (token, label, features) triples of a tagging result in lockstep
 * with the original layout tokens, emitting one LabeledTokensContainer per
 * result token. Whitespace/newline layout tokens are absorbed into the
 * current container and recorded as trailing-space/newline flags.
 */
public class TaggingTokenSynchronizer implements Iterator<LabeledTokensContainer>, Iterable<LabeledTokensContainer> {
    private final GrobidModel grobidModel;
    // iterator over the tagger output triples (token, label, feature string)
    private final Iterator<Triple<String, String, String>> tokensAndLabelsIt;
    // peeking iterator over the original layout tokens, advanced in sync
    private final PeekingIterator<LayoutToken> tokenizationsIt;
    // positions of both cursors, kept for diagnostics in prepareErrorMessage()
    private int tokensAndLabelsPtr;
    private int tokenizationsPtr;
    private List<Triple<String, String, String>> tokensAndLabels;
    private List<LayoutToken> tokenizations;

    public TaggingTokenSynchronizer(GrobidModel grobidModel, String result, List<LayoutToken> tokenizations) {
        this(grobidModel, result, tokenizations, false);
    }

    public TaggingTokenSynchronizer(GrobidModel grobidModel, String result, List<LayoutToken> tokenizations, boolean addFeatureStrings) {
        this(grobidModel, GenericTaggerUtils.getTokensWithLabelsAndFeatures(result, addFeatureStrings), tokenizations);
    }

    public TaggingTokenSynchronizer(GrobidModel grobidModel, List<Triple<String, String, String>> tokensAndLabels, List<LayoutToken> tokenizations) {
        this.grobidModel = grobidModel;
        this.tokensAndLabels = tokensAndLabels;
        tokensAndLabelsIt = this.tokensAndLabels.iterator();
        this.tokenizations = tokenizations;
        tokenizationsIt = Iterators.peekingIterator(this.tokenizations.iterator());
    }

    @Override
    public boolean hasNext() {
        return tokensAndLabelsIt.hasNext();
    }

    @Override
    //null value indicates an empty line in a tagging result
    public LabeledTokensContainer next() {
        Triple<String, String, String> p = tokensAndLabelsIt.next();
        if (p == null) {
            return null;
        }
        String resultToken = p.getA();
        String label = p.getB();
        String featureString = p.getC();

        List<LayoutToken> layoutTokenBuffer = new ArrayList<>();

        boolean stop = false;
        boolean addSpace = false;
        boolean newLine = false;
        // remember where we started, for the error dump if we get out of sync
        int preTokenizationPtr = tokenizationsPtr;

        // consume layout tokens until we match the current result token;
        // whitespace/newline tokens along the way are absorbed and flagged
        while ((!stop) && (tokenizationsIt.hasNext())) {
            LayoutToken layoutToken = tokenizationsIt.next();
            layoutToken.addLabel(TaggingLabels.labelFor(grobidModel, label));
            layoutTokenBuffer.add(layoutToken);
            String tokOriginal = layoutToken.t();

            if (LayoutTokensUtil.newLineToken(tokOriginal)) {
                newLine = true;
            } else if (LayoutTokensUtil.spaceyToken(tokOriginal)) {
                addSpace = true;
            } else if (tokOriginal.replaceAll("[ \n]","").equals(resultToken)) {
                // layout token (stripped of spaces/newlines) matches the
                // tagger token: the container is complete
                stop = true;
            } else if (tokOriginal.isEmpty()) {
                // no op
            } else {
                // the two streams diverged: this is a programming error
                throw new IllegalStateException(prepareErrorMessage(preTokenizationPtr));
            }
            tokenizationsPtr++;
        }

        //filling spaces to the end, instead of appending spaces to the next container
        while (tokenizationsIt.hasNext()) {
            LayoutToken nextToken = tokenizationsIt.peek();
            if (LayoutTokensUtil.spaceyToken(nextToken.t()) || LayoutTokensUtil.newLineToken(nextToken.t())) {
                LayoutToken layoutToken = tokenizationsIt.next();
                layoutTokenBuffer.add(layoutToken);
                tokenizationsPtr++;

                if (LayoutTokensUtil.newLineToken(layoutToken.t())) {
                    newLine = true;
                } else if (LayoutTokensUtil.spaceyToken(layoutToken.t())) {
                    addSpace = true;
                }
            } else {
                break;
            }
        }

        //resultToken = LayoutTokensUtil.removeSpecialVariables(resultToken);
        tokensAndLabelsPtr++;
        LabeledTokensContainer labeledTokensContainer = new LabeledTokensContainer(layoutTokenBuffer, resultToken,
                TaggingLabels.labelFor(grobidModel, label), GenericTaggerUtils.isBeginningOfEntity(label));
        labeledTokensContainer.setFeatureString(featureString);
        labeledTokensContainer.setTrailingSpace(addSpace);
        labeledTokensContainer.setTrailingNewLine(newLine);
        return labeledTokensContainer;
    }

    // Builds a diagnostic dump showing both cursors with some context on each
    // side, used when the two token streams get out of sync.
    private String prepareErrorMessage(int preTokenizationPtr) {
        int limit = 5;
        StringBuilder sb = new StringBuilder();
        for (int i = Math.max(0, tokensAndLabelsPtr - limit); i < Math.min(tokensAndLabelsPtr + limit, tokensAndLabels.size()); i++) {
            Triple<String, String, String> s = tokensAndLabels.get(i);
            // "-->" marks the position where the mismatch happened
            String str = i == tokensAndLabelsPtr ? "-->\t'" + s.getA() + "'" : "\t'" + s.getA() + "'";
            sb.append(str).append("\n");
        }

        StringBuilder sb2 = new StringBuilder();
        for (int i = Math.max(0, preTokenizationPtr - limit * 2); i < Math.min(preTokenizationPtr + limit * 2, tokenizations.size()); i++) {
            LayoutToken s = tokenizations.get(i);
            String str = i == preTokenizationPtr ? "-->\t'" + s.t() + "'" : "\t'" + s.t() + "'";
            sb2.append(str).append("\n");
        }

        return "IMPLEMENTATION ERROR: " + "tokens (at pos: " + tokensAndLabelsPtr + ") got dissynchronized with tokenizations (at pos: " + tokenizationsPtr + " )\n" +
                "labelsAndTokens +-: \n" + sb.toString() + "\n" + "tokenizations +-: " + sb2;
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException();
    }

    @Override
    public Iterator<LabeledTokensContainer> iterator() {
        return this;
    }
}
6,200
39.006452
149
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/tokenization/TaggingTokenCluster.java
package org.grobid.core.tokenization; import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import org.grobid.core.engines.label.TaggingLabel; import org.grobid.core.layout.LayoutToken; import java.util.ArrayList; import java.util.List; /** * Cluster of related tokens */ public class TaggingTokenCluster { public static final Function<LabeledTokensContainer, String> CONTAINERS_TO_FEATURE_BLOCK = new Function<LabeledTokensContainer, String>() { @Override public String apply(LabeledTokensContainer labeledTokensContainer) { if (labeledTokensContainer == null) { return "\n"; } if (labeledTokensContainer.getFeatureString() == null) { throw new IllegalStateException("This method must be called when feature string is not empty for " + "LabeledTokenContainers"); } return labeledTokensContainer.getFeatureString(); } }; private List<LabeledTokensContainer> labeledTokensContainers = new ArrayList<>(); private TaggingLabel taggingLabel; public TaggingTokenCluster(TaggingLabel taggingLabel) { this.taggingLabel = taggingLabel; } public void addLabeledTokensContainer(LabeledTokensContainer cont) { labeledTokensContainers.add(cont); } public List<LabeledTokensContainer> getLabeledTokensContainers() { return labeledTokensContainers; } public TaggingLabel getTaggingLabel() { return taggingLabel; } @Override public String toString() { StringBuilder sb = new StringBuilder(); for (LabeledTokensContainer c : labeledTokensContainers) { sb.append(c).append("\n"); } sb.append("\n"); return sb.toString(); } public LabeledTokensContainer getLastContainer() { if (labeledTokensContainers.isEmpty()) { return null; } return labeledTokensContainers.get(labeledTokensContainers.size() - 1); } public List<LayoutToken> concatTokens() { Iterable<LayoutToken> it = Iterables.concat(Iterables.transform(labeledTokensContainers, new Function<LabeledTokensContainer, List<LayoutToken>>() { @Override 
public List<LayoutToken> apply(LabeledTokensContainer labeledTokensContainer) { return labeledTokensContainer.getLayoutTokens(); } })); return Lists.newArrayList(it); } public String getFeatureBlock() { return Joiner.on("\n").join(Iterables.transform(labeledTokensContainers, CONTAINERS_TO_FEATURE_BLOCK)); } }
2,751
32.156627
156
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/process/ProcessRunner.java
package org.grobid.core.process; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.lang.reflect.Field; import java.util.List; import java.util.ArrayList; public class ProcessRunner extends Thread { private static final Logger LOGGER = LoggerFactory.getLogger(ProcessRunner.class); private List<String> cmd; private Integer exit; private Process process; public String getErrorStreamContents() { return errorStreamContents; } private String errorStreamContents; private boolean useStreamGobbler; StreamGobbler sgIn; StreamGobbler sgErr; public ProcessRunner(List<String> cmd, String name, boolean useStreamGobbler) { super(name); this.cmd = cmd; this.useStreamGobbler = useStreamGobbler; } // since we are limiting by ulimit, pdftoxml is actually a child process, therefore Process.destroy() won't work // killing harshly with pkill public void killProcess() { if (process != null) { try { Long pid = getPidOfProcess(process); if (pid != null) { LOGGER.info("Killing pdf to xml process with PID " + pid + " and its children"); Runtime.getRuntime().exec(new String[]{"pkill", "-9", "-P", String.valueOf(pid)}).waitFor(); } } catch (Exception e) { throw new RuntimeException(e); } } } //WARNING public static Long getPidOfProcess(Process p) { Long pid = null; try { if (p.getClass().getName().equals("java.lang.UNIXProcess")) { Field f = p.getClass().getDeclaredField("pid"); f.setAccessible(true); pid = f.getLong(p); f.setAccessible(false); } } catch (Exception e) { pid = null; } return pid; } public void run() { process = null; try { ProcessBuilder builder = new ProcessBuilder(cmd); process = builder.start(); if (useStreamGobbler) { sgIn = new StreamGobbler(process.getInputStream()); sgErr = new StreamGobbler(process.getErrorStream()); } exit = process.waitFor(); } catch (InterruptedException ignore) { //Process needs to be destroyed -- it's done in the finally block } catch (IOException e) { 
LOGGER.error("IOException while launching the command {} : {}", cmd.toString(), e.getMessage()); } finally { if (process != null) { IOUtils.closeQuietly(process.getInputStream()); IOUtils.closeQuietly(process.getOutputStream()); try { errorStreamContents = IOUtils.toString(process.getErrorStream()); } catch (IOException e) { LOGGER.error("Error retrieving error stream from process: {}", e); } IOUtils.closeQuietly(process.getErrorStream()); process.destroy(); } if (useStreamGobbler) { try { if (sgIn != null) { sgIn.close(); } } catch (IOException e) { LOGGER.error("IOException while closing the stream gobbler: {}", e); } try { if (sgErr != null) { sgErr.close(); } } catch (IOException e) { LOGGER.error("IOException while closing the stream gobbler: {}", e); } } } } public Integer getExitStatus() { return exit; } }
3,887
28.907692
116
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/process/StreamGobbler.java
package org.grobid.core.process;

import java.io.IOException;
import java.io.InputStream;

/**
 * Copyright (c) 2006-2011 Christian Plattner. All rights reserved.
 *
 * An InputStream wrapper that continuously drains the underlying stream on a
 * background daemon thread into a growable in-memory buffer, so the producing
 * process never blocks on a full pipe. Readers block on the internal monitor
 * until data, EOF, or an error is available.
 */
public class StreamGobbler extends InputStream {
    // Daemon thread that pumps bytes from the wrapped stream into `buffer`.
    class GobblerThread extends Thread {

        @Override
        public void run() {
            byte[] buff = new byte[8192];

            while (true) {
                try {
                    int avail = is.read(buff);

                    synchronized (synchronizer) {
                        if (avail <= 0) {
                            // EOF reached: flag it and wake all blocked readers
                            isEOF = true;
                            synchronizer.notifyAll();
                            break;
                        }

                        int space_available = buffer.length - write_pos;

                        if (space_available < avail) {
                            /* compact/resize buffer */

                            int unread_size = write_pos - read_pos;
                            int need_space = unread_size + avail;

                            byte[] new_buffer = buffer;

                            if (need_space > buffer.length) {
                                // grow by ~1/3 of the needed size, clamped to [256, 8192] extra bytes
                                int inc = need_space / 3;
                                inc = (inc < 256) ? 256 : inc;
                                inc = (inc > 8192) ? 8192 : inc;
                                new_buffer = new byte[need_space + inc];
                            }

                            // shift the unread window to the start of the (possibly new) buffer
                            if (unread_size > 0)
                                System.arraycopy(buffer, read_pos, new_buffer, 0, unread_size);

                            buffer = new_buffer;

                            read_pos = 0;
                            write_pos = unread_size;
                        }

                        System.arraycopy(buff, 0, buffer, write_pos, avail);
                        write_pos += avail;

                        synchronizer.notifyAll();
                    }
                } catch (IOException e) {
                    synchronized (synchronizer) {
                        // remember the error so read()/available() can rethrow it
                        exception = e;
                        synchronizer.notifyAll();
                        break;
                    }
                }
            }
        }
    }

    private InputStream is;

    // Single monitor guarding all mutable state below (buffer, positions, flags).
    private final Object synchronizer = new Object();

    private boolean isEOF = false;
    private boolean isClosed = false;
    private IOException exception = null;

    // Ring-less grow/compact buffer; valid data lives in [read_pos, write_pos).
    private byte[] buffer = new byte[2048];
    private int read_pos = 0;
    private int write_pos = 0;

    // Starts draining immediately on construction via a daemon thread.
    public StreamGobbler(InputStream is) {
        this.is = is;
        GobblerThread t = new GobblerThread();
        t.setDaemon(true);
        t.start();
    }

    @Override
    public int read() throws IOException {
        boolean wasInterrupted = false;

        try {
            synchronized (synchronizer) {
                if (isClosed)
                    throw new IOException("This StreamGobbler is closed.");

                // block until data arrives, an error is recorded, or EOF
                while (read_pos == write_pos) {
                    if (exception != null)
                        throw exception;

                    if (isEOF)
                        return -1;

                    try {
                        synchronizer.wait();
                    } catch (InterruptedException e) {
                        // swallow here; interrupt status restored in finally
                        wasInterrupted = true;
                    }
                }

                return buffer[read_pos++] & 0xff;
            }
        } finally {
            if (wasInterrupted)
                Thread.currentThread().interrupt();
        }
    }

    @Override
    public int available() throws IOException {
        synchronized (synchronizer) {
            if (isClosed)
                throw new IOException("This StreamGobbler is closed.");

            return write_pos - read_pos;
        }
    }

    @Override
    public int read(byte[] b) throws IOException {
        return read(b, 0, b.length);
    }

    @Override
    public void close() throws IOException {
        synchronized (synchronizer) {
            if (isClosed)
                return;
            isClosed = true;
            isEOF = true;
            // wake up any blocked readers so they observe the closed state
            synchronizer.notifyAll();
            is.close();
        }
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        // standard InputStream.read(byte[],int,int) argument validation
        if (b == null)
            throw new NullPointerException();

        if ((off < 0) || (len < 0) || ((off + len) > b.length) || ((off + len) < 0) || (off > b.length))
            throw new IndexOutOfBoundsException();

        if (len == 0)
            return 0;

        boolean wasInterrupted = false;

        try {
            synchronized (synchronizer) {
                if (isClosed)
                    throw new IOException("This StreamGobbler is closed.");

                // block until data arrives, an error is recorded, or EOF
                while (read_pos == write_pos) {
                    if (exception != null)
                        throw exception;

                    if (isEOF)
                        return -1;

                    try {
                        synchronizer.wait();
                    } catch (InterruptedException e) {
                        wasInterrupted = true;
                    }
                }

                // hand out at most `len` bytes of whatever is buffered
                int avail = write_pos - read_pos;

                avail = (avail > len) ? len : avail;

                System.arraycopy(buffer, read_pos, b, off, avail);
                read_pos += avail;

                return avail;
            }
        } finally {
            if (wasInterrupted)
                Thread.currentThread().interrupt();
        }
    }
}
5,543
27.875
104
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/process/ProcessPdfToXml.java
package org.grobid.core.process; import org.apache.commons.io.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.*; import java.util.List; import static java.nio.charset.StandardCharsets.UTF_8; public class ProcessPdfToXml { private static final Logger LOGGER = LoggerFactory.getLogger(ProcessPdfToXml.class); /** * Process the conversion. */ public static Integer process(List<String> cmd) { Process process = null; ProcessBuilder builder = null; Integer exit = null; String message = "error message cannot be retrieved"; try { builder = new ProcessBuilder(cmd); builder.redirectErrorStream(true); process = builder.start(); BufferedReader br = new BufferedReader(new InputStreamReader(process.getInputStream())); String output = null; String previousOutput = null; while (null != (output = br.readLine())) { // writing the pdfalto stderr in the GROBID logs as warning if (!output.equals(previousOutput)) { LOGGER.warn("pdfalto stderr: " + output); previousOutput = output; } } exit = process.waitFor(); message = IOUtils.toString(process.getErrorStream(), UTF_8); } catch (InterruptedException ignore) { // Process needs to be destroyed -- it's done in the finally block LOGGER.warn("pdfalto process is about to be killed."); } catch (IOException ioExp) { LOGGER.error("IOException while launching the command {} : {}", cmd, ioExp.getMessage()); } finally { if (process != null) { IOUtils.closeQuietly(process.getInputStream(), process.getOutputStream(), process.getErrorStream()); process.destroy(); if (exit == null || exit != 0) { LOGGER.error("pdfalto process finished with error code: " + exit + ". " + cmd); LOGGER.error("pdfalto return message: \n" + message); } } } return exit; } }
2,249
34.15625
116
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/process/StreamProcess.java
package org.grobid.core.process;

import java.io.IOException;
import java.io.InputStream;

/**
 * Single-threaded variant of StreamGobbler: the entire underlying stream is
 * drained synchronously in the constructor (via run()), after which reads are
 * served from the in-memory buffer. All synchronization of the original
 * StreamGobbler has been commented out, which is only safe because no
 * background thread is involved.
 *
 * NOTE(review): the constructor blocks until EOF and buffers the whole stream
 * in memory — unsuitable for large or never-ending streams; confirm callers
 * only use it on small, finite process output.
 */
public class StreamProcess extends InputStream {

    // Drains the wrapped stream to EOF (or error) into `buffer`.
    // Called synchronously from the constructor, not on a separate thread.
    protected void run() {
        byte[] buff = new byte[8192];

        while (true) {
            try {
                int avail = is.read(buff);

               // synchronized (synchronizer) {

                    if (avail <= 0) {
                        // EOF (or error return code) reached
                        isEOF = true;
                        // synchronizer.notifyAll();
                        break;
                    }

                    int space_available = buffer.length - write_pos;

                    if (space_available < avail) {
                        /* compact/resize buffer */

                        int unread_size = write_pos - read_pos;
                        int need_space = unread_size + avail;

                        byte[] new_buffer = buffer;

                        if (need_space > buffer.length) {
                            // grow by ~1/3 of needed size, clamped to [256, 8192] extra bytes
                            int inc = need_space / 3;
                            inc = (inc < 256) ? 256 : inc;
                            inc = (inc > 8192) ? 8192 : inc;
                            new_buffer = new byte[need_space + inc];
                        }

                        // shift the unread window to the start of the buffer
                        if (unread_size > 0)
                            System.arraycopy(buffer, read_pos, new_buffer, 0, unread_size);

                        buffer = new_buffer;

                        read_pos = 0;
                        write_pos = unread_size;
                    }

                    System.arraycopy(buff, 0, buffer, write_pos, avail);
                    write_pos += avail;

                    //synchronizer.notifyAll();
                //}
            } catch (IOException e) {
                //synchronized (synchronizer) {
                    // remember the error so read()/available() can rethrow it
                    exception = e;
                    //synchronizer.notifyAll();
                    break;
                }
            //}
        }
    }

    private InputStream is;

    // private final Object synchronizer = new Object();

    private boolean isEOF = false;
    private boolean isClosed = false;
    private IOException exception = null;

    // valid data lives in [read_pos, write_pos)
    private byte[] buffer = new byte[2048];
    private int read_pos = 0;
    private int write_pos = 0;

    // Reads the whole stream eagerly: this constructor blocks until EOF.
    public StreamProcess(InputStream is) {
        this.is = is;
        run();
    }

    @Override
    public int read() throws IOException {
        boolean wasInterrupted = false;

        //try {
            //synchronized (synchronizer) {
                if (isClosed)
                    throw new IOException("This StreamGobbler is closed.");

                // NOTE(review): without wait(), this loop would spin if neither
                // exception nor isEOF were set; in practice run() has already
                // completed in the constructor, so one of them always holds here
                while (read_pos == write_pos) {
                    if (exception != null)
                        throw exception;

                    if (isEOF)
                        return -1;

                    /*try {
                        synchronizer.wait();
                    } catch (InterruptedException e) {
                        wasInterrupted = true;
                    }*/
                }

                return buffer[read_pos++] & 0xff;
            }
       // }
        /*finally {
            if (wasInterrupted)
                Thread.currentThread().interrupt();
        }*/
    //}

    @Override
    public int available() throws IOException {
        //synchronized (synchronizer) {
            if (isClosed)
                throw new IOException("This StreamGobbler is closed.");

            return write_pos - read_pos;
        //}
    }

    @Override
    public int read(byte[] b) throws IOException {
        return read(b, 0, b.length);
    }

    @Override
    public void close() throws IOException {
        //synchronized (synchronizer) {
            if (isClosed)
                return;
            isClosed = true;
            isEOF = true;
            //synchronizer.notifyAll();
            is.close();
        //}
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        // standard InputStream.read(byte[],int,int) argument validation
        if (b == null)
            throw new NullPointerException();

        if ((off < 0) || (len < 0) || ((off + len) > b.length) || ((off + len) < 0) || (off > b.length))
            throw new IndexOutOfBoundsException();

        if (len == 0)
            return 0;

        boolean wasInterrupted = false;

        //try {
            //synchronized (synchronizer) {
                if (isClosed)
                    throw new IOException("This StreamGobbler is closed.");

                while (read_pos == write_pos) {
                    if (exception != null)
                        throw exception;

                    if (isEOF)
                        return -1;

                    /*try {
                        synchronizer.wait();
                    } catch (InterruptedException e) {
                        wasInterrupted = true;
                    }*/
                }

                // hand out at most `len` bytes of whatever is buffered
                int avail = write_pos - read_pos;

                avail = (avail > len) ? len : avail;

                System.arraycopy(buffer, read_pos, b, off, avail);
                read_pos += avail;

                return avail;
            }
        //}
        /*finally {
            if (wasInterrupted)
                Thread.currentThread().interrupt();
        }*/
    //}
}
3,783
19.021164
65
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/DeLFTClassifierModel.java
package org.grobid.core.jni; import org.grobid.core.GrobidModel; import org.grobid.core.GrobidModels; import org.grobid.core.exceptions.GrobidException; import org.grobid.core.utilities.GrobidProperties; import org.grobid.core.utilities.IOUtilities; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.*; import java.io.*; import java.lang.StringBuilder; import java.util.*; import java.util.regex.*; import jep.Jep; import jep.JepConfig; import jep.JepException; import java.util.function.Consumer; public class DeLFTClassifierModel { public static final Logger LOGGER = LoggerFactory.getLogger(DeLFTClassifierModel.class); // Exploit JNI CPython interpreter to execute load and execute a DeLFT deep learning model private String modelName; private String architecture; public DeLFTClassifierModel(String model, String architecture) { this.modelName = model; this.architecture = architecture; try { LOGGER.info("Loading DeLFT classification model for " + this.modelName + " in " + GrobidProperties.getInstance().getModelPath()); JEPThreadPoolClassifier.getInstance().run(new InitModel(this.modelName, GrobidProperties.getInstance().getModelPath(), this.architecture)); } catch(InterruptedException | RuntimeException e) { LOGGER.error("DeLFT model " + this.modelName + " initialization failed", e); } } class InitModel implements Runnable { private String modelName; private String architecture; private File modelPath; public InitModel(String modelName, File modelPath, String architecture) { this.modelName = modelName; this.modelPath = modelPath; this.architecture = architecture; } @Override public void run() { Jep jep = JEPThreadPoolClassifier.getInstance().getJEPInstance(); try { System.out.println("init classifier..."); // as dash characters are forbidden in python variable name... 
String model_variable = this.modelName.replace("-", "_"); jep.eval(model_variable+" = Classifier('" + this.modelName + "_" + this.architecture + "')"); jep.eval(model_variable+".load(dir_path='"+this.modelPath.getAbsolutePath()+"')"); if (GrobidProperties.getInstance().getDelftRuntimeMaxSequenceLength(this.modelName) != -1) jep.eval(this.modelName+".config.max_sequence_length="+ GrobidProperties.getInstance().getDelftRuntimeMaxSequenceLength(this.modelName)); if (GrobidProperties.getInstance().getDelftRuntimeBatchSize(this.modelName) != -1) jep.eval(this.modelName+".config.batch_size="+ GrobidProperties.getInstance().getDelftRuntimeBatchSize(this.modelName)); } catch(JepException e) { LOGGER.error("DeLFT classifier model initialization failed. ", e); throw new GrobidException("DeLFT classifier model initialization failed. ", e); } } } private class ClassificationTask implements Callable<String> { private List<String> data; private String modelName; public ClassificationTask(String modelName, List<String> data) { //System.out.println("label thread: " + Thread.currentThread().getId()); this.modelName = modelName; this.data = data; } private void setJepStringValueWithFileFallback( Jep jep, String name, List<String> values ) throws JepException, IOException { try { jep.set(name, values); // convert PyJList to normal python list (necessary for Hugging Face transformer tokenizer input) jep.eval(name + " = list("+name+")"); } catch(JepException e) { // we have normally the Java List as a PyJList in python, which should // be equivalent to a normal python list File tempFile = IOUtilities.newTempFile(name, ".data"); LOGGER.debug( "Falling back to file {} due to exception: {}", tempFile, e.toString() ); IOUtilities.writeListInFile(tempFile.getAbsolutePath(), values, "|"); jep.eval("from pathlib import Path"); jep.eval( name + " = Path('" + tempFile.getAbsolutePath() + "').read_text(encoding='utf-8').split(\"|\")" ); tempFile.delete(); } } @Override public String call() { Jep 
jep = JEPThreadPoolClassifier.getInstance().getJEPInstance(); StringBuilder labelledData = new StringBuilder(); String results = null; try { //System.out.println(this.data); // load and classify, input here is an array of texts to classify this.setJepStringValueWithFileFallback(jep, "input", this.data); String model_variable = this.modelName.replace("-", "_"); jep.eval("jsondict = "+model_variable+".predict(input, 'json', use_main_thread_only=True)"); //jep.eval("print(json.dumps(jsondict, sort_keys=False, indent=4, ensure_ascii=False))"); Object objectResult = jep.getValue("json.dumps(jsondict, sort_keys=True, indent=4, ensure_ascii=False)"); results = (String) objectResult; //System.out.println(results); // cleaning jep.eval("del jsondict"); jep.eval("del input"); } catch(JepException e) { LOGGER.error("DeLFT model classification via JEP failed", e); } catch(IOException e) { LOGGER.error("DeLFT model classification failed", e); } //System.out.println(labelledData.toString()); return results; } } /** * Classify an array of string in batch. The result is a json array giving * for each text the classification results. */ public String classify(List<String> data) { String result = null; try { result = JEPThreadPoolClassifier.getInstance().call(new ClassificationTask(this.modelName, data)); } catch(InterruptedException e) { LOGGER.error("DeLFT model " + this.modelName + " classification interrupted", e); } catch(ExecutionException e) { LOGGER.error("DeLFT model " + this.modelName + " classification failed", e); } return result; } /** * Training via JNI CPython interpreter (JEP). It appears that after some epochs, the JEP thread * usually hangs... Possibly issues with IO threads at the level of JEP (output not consumed because * of \r and no end of line?). 
*/ public static void trainJNI(String modelName, File trainingData, File outputModel) { try { LOGGER.info("Train DeLFT classification model " + modelName + "..."); JEPThreadPoolClassifier.getInstance().run( new TrainTask(modelName, trainingData, GrobidProperties.getInstance().getModelPath())); } catch(InterruptedException e) { LOGGER.error("Train DeLFT classification model " + modelName + " task failed", e); } } private static class TrainTask implements Runnable { private String modelName; private File trainPath; private File modelPath; private String architecture; private boolean incremental; public TrainTask(String modelName, File trainPath, File modelPath) { //public TrainTask(String modelName, File trainPath, File modelPath, String architecture, boolean incremental) { //System.out.println("train thread: " + Thread.currentThread().getId()); this.modelName = modelName; this.trainPath = trainPath; this.modelPath = modelPath; this.architecture = null; this.incremental = false; } @Override public void run() { Jep jep = JEPThreadPoolClassifier.getInstance().getJEPInstance(); try { // load data // to be reviewed for classification jep.eval("x_all, y_all, f_all = load_data_and_labels_crf_file('" + this.trainPath.getAbsolutePath() + "')"); jep.eval("x_train, x_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.1)"); jep.eval("print(len(x_train), 'train sequences')"); jep.eval("print(len(x_valid), 'validation sequences')"); String useELMo = "False"; if (GrobidProperties.getInstance().useELMo(this.modelName)) { useELMo = "True"; } String localArgs = ""; if (GrobidProperties.getInstance().getDelftTrainingMaxSequenceLength(this.modelName) != -1) localArgs += ", maxlen="+ GrobidProperties.getInstance().getDelftTrainingMaxSequenceLength(this.modelName); if (GrobidProperties.getInstance().getDelftTrainingBatchSize(this.modelName) != -1) localArgs += ", batch_size="+ GrobidProperties.getInstance().getDelftTrainingBatchSize(this.modelName); if 
(GrobidProperties.getInstance().getDelftTranformer(modelName) != null) { localArgs += ", transformer="+ GrobidProperties.getInstance().getDelftTranformer(modelName); } // init model to be trained if (this.architecture == null) { jep.eval("model = Classifier('"+this.modelName+ "', max_epoch=100, recurrent_dropout=0.50, embeddings_name='glove-840B', use_ELMo="+useELMo+localArgs+")"); } else { jep.eval("model = Classifier('"+this.modelName+ "', max_epoch=100, recurrent_dropout=0.50, embeddings_name='glove-840B', use_ELMo="+useELMo+localArgs+ ", architecture='"+architecture+"')"); } // actual training if (incremental) { // if incremental training, we need to load the existing model if (this.modelPath != null && this.modelPath.exists() && !this.modelPath.isDirectory()) { jep.eval("model.load('" + this.modelPath.getAbsolutePath() + "')"); jep.eval("model.train(x_train, y_train, x_valid, y_valid, incremental=True)"); } else { throw new GrobidException("the path to the model to be used for starting incremental training is invalid: " + this.modelPath.getAbsolutePath()); } } else jep.eval("model.train(x_train, y_train, x_valid, y_valid)"); // saving the model System.out.println(this.modelPath.getAbsolutePath()); jep.eval("model.save('"+this.modelPath.getAbsolutePath()+"')"); // cleaning jep.eval("del x_all"); jep.eval("del y_all"); jep.eval("del f_all"); jep.eval("del x_train"); jep.eval("del x_valid"); jep.eval("del y_train"); jep.eval("del y_valid"); jep.eval("del model"); } catch(JepException e) { LOGGER.error("DeLFT classification model training via JEP failed", e); } catch(GrobidException e) { LOGGER.error("GROBID call to DeLFT training via JEP failed", e); } } } /** * Train with an external process rather than with JNI, this approach appears to be more stable for the * training process (JNI approach hangs after a while) and does not raise any runtime/integration issues. 
*/ public static void train(String modelName, File trainingData, File outputModel) { try { LOGGER.info("Train DeLFT model " + modelName + "..."); List<String> command = Arrays.asList("python3", "dataseerClassifier.py", modelName, "train", "--input", trainingData.getAbsolutePath(), "--output", GrobidProperties.getInstance().getModelPath().getAbsolutePath()); if (GrobidProperties.getInstance().useELMo(modelName)) { command.add("--use-ELMo"); } ProcessBuilder pb = new ProcessBuilder(command); File delftPath = new File(GrobidProperties.getInstance().getDeLFTFilePath()); pb.directory(delftPath); Process process = pb.start(); //pb.inheritIO(); CustomStreamGobbler customStreamGobbler = new CustomStreamGobbler(process.getInputStream(), System.out); Executors.newSingleThreadExecutor().submit(customStreamGobbler); SimpleStreamGobbler streamGobbler = new SimpleStreamGobbler(process.getErrorStream(), System.err::println); Executors.newSingleThreadExecutor().submit(streamGobbler); int exitCode = process.waitFor(); //assert exitCode == 0; } catch(IOException e) { LOGGER.error("IO error when training DeLFT classification model " + modelName, e); } catch(InterruptedException e) { LOGGER.error("Train DeLFT classification model " + modelName + " task failed", e); } catch(GrobidException e) { LOGGER.error("GROBID call to DeLFT training via JEP failed", e); } } public synchronized void close() { try { LOGGER.info("Close DeLFT classification model " + this.modelName + "..."); JEPThreadPoolClassifier.getInstance().run(new CloseModel(this.modelName)); } catch(InterruptedException e) { LOGGER.error("Close DeLFT classification model " + this.modelName + " task failed", e); } } private class CloseModel implements Runnable { private String modelName; public CloseModel(String modelName) { this.modelName = modelName; } @Override public void run() { Jep jep = JEPThreadPoolClassifier.getInstance().getJEPInstance(); try { String model_variable = this.modelName.replace("-", "_"); jep.eval("del 
"+model_variable); } catch(JepException e) { LOGGER.error("Closing DeLFT classification model failed", e); } } } private static class SimpleStreamGobbler implements Runnable { private InputStream inputStream; private Consumer<String> consumer; public SimpleStreamGobbler(InputStream inputStream, Consumer<String> consumer) { this.inputStream = inputStream; this.consumer = consumer; } @Override public void run() { new BufferedReader(new InputStreamReader(inputStream)).lines() .forEach(consumer); } } /** * This is a custom gobbler that reproduces correctly the Keras training progress bar * by injecting a \r for progress line updates. */ private static class CustomStreamGobbler implements Runnable { public static final Logger LOGGER = LoggerFactory.getLogger(CustomStreamGobbler.class); private final InputStream is; private final PrintStream os; private Pattern pattern = Pattern.compile("\\d/\\d+ \\["); public CustomStreamGobbler(InputStream is, PrintStream os) { this.is = is; this.os = os; } @Override public void run() { try { InputStreamReader isr = new InputStreamReader(this.is); BufferedReader br = new BufferedReader(isr); String line = null; while ((line = br.readLine()) != null) { Matcher matcher = pattern.matcher(line); if (matcher.find()) { os.print("\r" + line); os.flush(); } else { os.println(line); } } } catch (IOException e) { LOGGER.warn("IO error between embedded python and java process", e); } } } }
17,054
43.069767
151
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/DeLFTModel.java
package org.grobid.core.jni; import org.grobid.core.GrobidModel; import org.grobid.core.engines.label.TaggingLabels; import org.grobid.core.exceptions.GrobidException; import org.grobid.core.utilities.GrobidProperties; import org.grobid.core.utilities.IOUtilities; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.*; import java.io.*; import java.lang.StringBuilder; import java.util.*; import java.util.regex.*; import jep.Jep; import jep.JepException; import java.util.function.Consumer; public class DeLFTModel { public static final Logger LOGGER = LoggerFactory.getLogger(DeLFTModel.class); // Exploit JNI CPython interpreter to execute load and execute a DeLFT deep learning model private String modelName; private String architecture; public DeLFTModel(GrobidModel model, String architecture) { this.modelName = model.getModelName().replace("-", "_"); this.architecture = architecture; try { LOGGER.info("Loading DeLFT model for " + model.getModelName() + " with architecture " + architecture + "..."); JEPThreadPool.getInstance().run(new InitModel(this.modelName, GrobidProperties.getInstance().getModelPath(), architecture)); } catch(InterruptedException | RuntimeException e) { LOGGER.error("DeLFT model " + this.modelName + " initialization failed", e); } } class InitModel implements Runnable { private String modelName; private File modelPath; private String architecture; public InitModel(String modelName, File modelPath, String architecture) { this.modelName = modelName; this.modelPath = modelPath; this.architecture = architecture; } @Override public void run() { Jep jep = JEPThreadPool.getInstance().getJEPInstance(); try { String fullModelName = this.modelName.replace("_", "-"); //if (architecture != null && !architecture.equals("BidLSTM_CRF")) if (architecture != null) fullModelName += "-" + this.architecture; if (GrobidProperties.getInstance().useELMo(this.modelName) && modelName.toLowerCase().indexOf("bert") == -1) fullModelName += 
"-with_ELMo"; jep.eval(this.modelName+" = Sequence('" + fullModelName + "')"); jep.eval(this.modelName+".load(dir_path='"+modelPath.getAbsolutePath()+"')"); if (GrobidProperties.getInstance().getDelftRuntimeMaxSequenceLength(this.modelName) != -1) { jep.eval(this.modelName+".model_config.max_sequence_length="+ GrobidProperties.getInstance().getDelftRuntimeMaxSequenceLength(this.modelName)); } if (GrobidProperties.getInstance().getDelftRuntimeBatchSize(this.modelName) != -1) { jep.eval(this.modelName+".model_config.batch_size="+ GrobidProperties.getInstance().getDelftRuntimeBatchSize(this.modelName)); } } catch(JepException e) { LOGGER.error("DeLFT model initialization failed. ", e); throw new GrobidException("DeLFT model initialization failed. ", e); } } } private class LabelTask implements Callable<String> { private String data; private String modelName; private String architecture; public LabelTask(String modelName, String data, String architecture) { //System.out.println("label thread: " + Thread.currentThread().getId()); this.modelName = modelName; this.data = data; this.architecture = architecture; } private void setJepStringValueWithFileFallback( Jep jep, String name, String value ) throws JepException, IOException { try { jep.set(name, value); } catch(JepException e) { File tempFile = IOUtilities.newTempFile(name, ".data"); LOGGER.debug( "Falling back to file {} due to exception: {}", tempFile, e.toString() ); IOUtilities.writeInFile(tempFile.getAbsolutePath(), value); jep.eval("from pathlib import Path"); jep.eval( name + " = Path('" + tempFile.getAbsolutePath() + "').read_text(encoding='utf-8')" ); tempFile.delete(); } } @Override public String call() { Jep jep = JEPThreadPool.getInstance().getJEPInstance(); StringBuilder labelledData = new StringBuilder(); try { //System.out.println(this.data); // load and tag this.setJepStringValueWithFileFallback(jep, "input", this.data); jep.eval("x_all, f_all = load_data_crf_string(input)"); Object objectResults = 
null; if (architecture.indexOf("FEATURE") != -1) { // model is expecting features objectResults = jep.getValue(this.modelName+".tag(x_all, None, features=f_all)"); } else { // no features used by the model objectResults = jep.getValue(this.modelName+".tag(x_all, None)"); } // inject back the labels List<List<List<String>>> results = (List<List<List<String>>>) objectResults; BufferedReader bufReader = new BufferedReader(new StringReader(data)); String inputLine; int i = 0; // sentence index int j = 0; // word index in the sentence if (results.size() > 0) { List<List<String>> result = results.get(0); while ((inputLine = bufReader.readLine()) != null) { inputLine = inputLine.trim(); if ((inputLine.length() == 0) && (j != 0)) { j = 0; i++; if (i == results.size()) break; result = results.get(i); continue; } if (inputLine.length() == 0) { labelledData.append("\n"); continue; } labelledData.append(inputLine); labelledData.append(" "); if (j >= result.size()) { labelledData.append(TaggingLabels.OTHER_LABEL); } else { List<String> pair = result.get(j); // first is the token, second is the label (DeLFT format) String token = pair.get(0); String label = pair.get(1); labelledData.append(DeLFTModel.delft2grobidLabel(label)); } labelledData.append("\n"); j++; } } // cleaning jep.eval("del input"); jep.eval("del x_all"); jep.eval("del f_all"); //jep.eval("K.clear_session()"); } catch(JepException e) { LOGGER.error("DeLFT model labelling via JEP failed", e); } catch(IOException e) { LOGGER.error("DeLFT model labelling failed", e); } //System.out.println(labelledData.toString()); return labelledData.toString(); } } public String label(String data) { String result = null; try { result = JEPThreadPool.getInstance().call(new LabelTask(this.modelName, data, this.architecture)); } catch(InterruptedException e) { LOGGER.error("DeLFT model " + this.modelName + " labelling interrupted", e); } catch(ExecutionException e) { LOGGER.error("DeLFT model " + this.modelName + " labelling failed", 
e); } // In some areas, GROBID currently expects tabs as feature separators. // (Same as in WapitiModel.label) if (result != null) result = result.replaceAll(" ", "\t"); return result; } /** * Training via JNI CPython interpreter (JEP). It appears that after some epochs, the JEP thread * usually hangs... Possibly issues with IO threads at the level of JEP (output not consumed because * of \r and no end of line?). */ public static void trainJNI(String modelName, File trainingData, File outputModel, String architecture, boolean incremental) { try { LOGGER.info("Train DeLFT model " + modelName + "..."); JEPThreadPool.getInstance().run( new TrainTask(modelName, trainingData, GrobidProperties.getInstance().getModelPath(), architecture, incremental)); } catch(InterruptedException e) { LOGGER.error("Train DeLFT model " + modelName + " task failed", e); } } private static class TrainTask implements Runnable { private String modelName; private File trainPath; private File modelPath; private String architecture; private boolean incremental; public TrainTask(String modelName, File trainPath, File modelPath, String architecture, boolean incremental) { //System.out.println("train thread: " + Thread.currentThread().getId()); this.modelName = modelName; this.trainPath = trainPath; this.modelPath = modelPath; this.architecture = architecture; this.incremental = incremental; } @Override public void run() { Jep jep = JEPThreadPool.getInstance().getJEPInstance(); try { // load data jep.eval("x_all, y_all, f_all = load_data_and_labels_crf_file('" + this.trainPath.getAbsolutePath() + "')"); jep.eval("x_train, x_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.1)"); jep.eval("print(len(x_train), 'train sequences')"); jep.eval("print(len(x_valid), 'validation sequences')"); String useELMo = "False"; if (GrobidProperties.getInstance().useELMo(this.modelName) && modelName.toLowerCase().indexOf("bert") == -1) { useELMo = "True"; } String localArgs = ""; if 
(GrobidProperties.getInstance().getDelftTrainingMaxSequenceLength(this.modelName) != -1) localArgs += ", max_sequence_length="+ GrobidProperties.getInstance().getDelftTrainingMaxSequenceLength(this.modelName); if (GrobidProperties.getInstance().getDelftTrainingBatchSize(this.modelName) != -1) localArgs += ", batch_size="+ GrobidProperties.getInstance().getDelftTrainingBatchSize(this.modelName); if (GrobidProperties.getInstance().getDelftTranformer(modelName) != null) { localArgs += ", transformer="+ GrobidProperties.getInstance().getDelftTranformer(modelName); } // init model to be trained if (architecture == null) jep.eval("model = Sequence('"+this.modelName+ "', max_epoch=100, recurrent_dropout=0.50, embeddings_name='glove-840B', use_ELMo="+useELMo+localArgs+")"); else jep.eval("model = Sequence('"+this.modelName+ "', max_epoch=100, recurrent_dropout=0.50, embeddings_name='glove-840B', use_ELMo="+useELMo+localArgs+ ", architecture='"+architecture+"')"); // actual training //start_time = time.time() if (incremental) { // if incremental training, we need to load the existing model if (this.modelPath != null && this.modelPath.exists() && this.modelPath.isDirectory()) { jep.eval("model.load('" + this.modelPath.getAbsolutePath() + "')"); jep.eval("model.train(x_train, y_train, x_valid, y_valid, incremental=True)"); } else { throw new GrobidException("the path to the model to be used for starting incremental training is invalid: " + this.modelPath.getAbsolutePath()); } } else jep.eval("model.train(x_train, y_train, x_valid, y_valid)"); //runtime = round(time.time() - start_time, 3) //print("training runtime: %s seconds " % (runtime)) // saving the model System.out.println(this.modelPath.getAbsolutePath()); jep.eval("model.save('"+this.modelPath.getAbsolutePath()+"')"); // cleaning jep.eval("del x_all"); jep.eval("del y_all"); jep.eval("del f_all"); jep.eval("del x_train"); jep.eval("del x_valid"); jep.eval("del y_train"); jep.eval("del y_valid"); jep.eval("del model"); 
} catch(JepException e) { LOGGER.error("DeLFT model training via JEP failed", e); } catch(GrobidException e) { LOGGER.error("GROBID call to DeLFT training via JEP failed", e); } } } /** * Train with an external process rather than with JNI, this approach appears to be more stable for the * training process (JNI approach hangs after a while) and does not raise any runtime/integration issues. */ public static void train(String modelName, File trainingData, File outputModel, String architecture, boolean incremental) { try { LOGGER.info("Train DeLFT model " + modelName + "..."); List<String> command = new ArrayList<>(); List<String> subcommands = Arrays.asList("python3", "delft/applications/grobidTagger.py", modelName, "train", "--input", trainingData.getAbsolutePath(), "--output", GrobidProperties.getInstance().getModelPath().getAbsolutePath()); command.addAll(subcommands); if (architecture != null) { command.add("--architecture"); command.add(architecture); } if (GrobidProperties.getInstance().getDelftTranformer(modelName) != null) { command.add("--transformer"); command.add(GrobidProperties.getInstance().getDelftTranformer(modelName)); } if (GrobidProperties.getInstance().useELMo(modelName) && modelName.toLowerCase().indexOf("bert") == -1) { command.add("--use-ELMo"); } if (GrobidProperties.getInstance().getDelftTrainingMaxSequenceLength(modelName) != -1) { command.add("--max-sequence-length"); command.add(String.valueOf(GrobidProperties.getInstance().getDelftTrainingMaxSequenceLength(modelName))); } if (GrobidProperties.getInstance().getDelftTrainingBatchSize(modelName) != -1) { command.add("--batch-size"); command.add(String.valueOf(GrobidProperties.getInstance().getDelftTrainingBatchSize(modelName))); } if (incremental) { command.add("--incremental"); // if incremental training, we need to load the existing model File modelPath = GrobidProperties.getInstance().getModelPath(); if (modelPath != null && modelPath.exists() && modelPath.isDirectory()) { 
command.add("--input-model"); command.add(GrobidProperties.getInstance().getModelPath().getAbsolutePath()); } else { throw new GrobidException("the path to the model to be used for starting incremental training is invalid: " + GrobidProperties.getInstance().getModelPath().getAbsolutePath()); } } ProcessBuilder pb = new ProcessBuilder(command); File delftPath = new File(GrobidProperties.getInstance().getDeLFTFilePath()); pb.directory(delftPath); Process process = pb.start(); //pb.inheritIO(); CustomStreamGobbler customStreamGobbler = new CustomStreamGobbler(process.getInputStream(), System.out); Executors.newSingleThreadExecutor().submit(customStreamGobbler); SimpleStreamGobbler streamGobbler = new SimpleStreamGobbler(process.getErrorStream(), System.err::println); Executors.newSingleThreadExecutor().submit(streamGobbler); int exitCode = process.waitFor(); //assert exitCode == 0; } catch(IOException e) { LOGGER.error("IO error when training DeLFT model " + modelName, e); } catch(InterruptedException e) { LOGGER.error("Train DeLFT model " + modelName + " task failed", e); } catch(GrobidException e) { LOGGER.error("GROBID call to DeLFT training via JEP failed", e); } } public synchronized void close() { try { LOGGER.info("Close DeLFT model " + this.modelName + "..."); JEPThreadPool.getInstance().run(new CloseModel(this.modelName)); } catch(InterruptedException e) { LOGGER.error("Close DeLFT model " + this.modelName + " task failed", e); } } private class CloseModel implements Runnable { private String modelName; public CloseModel(String modelName) { this.modelName = modelName; } @Override public void run() { Jep jep = JEPThreadPool.getInstance().getJEPInstance(); try { jep.eval("del "+this.modelName); } catch(JepException e) { LOGGER.error("Closing DeLFT model failed", e); } } } private static String delft2grobidLabel(String label) { if (label.equals(TaggingLabels.IOB_OTHER_LABEL)) { label = TaggingLabels.OTHER_LABEL; } else if 
(label.startsWith(TaggingLabels.IOB_START_ENTITY_LABEL_PREFIX)) { label = label.replace(TaggingLabels.IOB_START_ENTITY_LABEL_PREFIX, TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX); } else if (label.startsWith(TaggingLabels.IOB_INSIDE_LABEL_PREFIX)) { label = label.replace(TaggingLabels.IOB_INSIDE_LABEL_PREFIX, TaggingLabels.GROBID_INSIDE_ENTITY_LABEL_PREFIX); } return label; } private static class SimpleStreamGobbler implements Runnable { private InputStream inputStream; private Consumer<String> consumer; public SimpleStreamGobbler(InputStream inputStream, Consumer<String> consumer) { this.inputStream = inputStream; this.consumer = consumer; } @Override public void run() { new BufferedReader(new InputStreamReader(inputStream)).lines() .forEach(consumer); } } /** * This is a custom gobbler that reproduces correctly the Keras training progress bar * by injecting a \r for progress line updates. */ private static class CustomStreamGobbler implements Runnable { public static final Logger LOGGER = LoggerFactory.getLogger(CustomStreamGobbler.class); private final InputStream is; private final PrintStream os; private Pattern pattern = Pattern.compile("\\d/\\d+ \\["); public CustomStreamGobbler(InputStream is, PrintStream os) { this.is = is; this.os = os; } @Override public void run() { try { InputStreamReader isr = new InputStreamReader(this.is); BufferedReader br = new BufferedReader(isr); String line = null; while ((line = br.readLine()) != null) { Matcher matcher = pattern.matcher(line); if (matcher.find()) { os.print("\r" + line); os.flush(); } else { os.println(line); } } } catch (IOException e) { LOGGER.warn("IO error between embedded python and java process", e); } } } }
21,375
43.813417
136
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/WapitiWrapper.java
package org.grobid.core.jni;

import com.google.common.base.Throwables;
import fr.limsi.wapiti.SWIGTYPE_p_mdl_t;
import fr.limsi.wapiti.Wapiti;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidExceptionStatus;

import java.io.File;

/**
 * Thin static facade over the native Wapiti CRF bindings: model loading and
 * sequence labelling with basic input/output sanity checks.
 */
public class WapitiWrapper {

    /**
     * Label the given CRF-formatted data with a loaded Wapiti model.
     * Blank input yields an empty result (with a diagnostic stack trace on stderr);
     * a null result from the native layer is turned into a GrobidException.
     */
    public static String label(SWIGTYPE_p_mdl_t model, String data) {
        boolean blankInput = data.trim().isEmpty();
        if (blankInput) {
            // log where the empty call came from to help track down the caller
            System.err.println("Empty data is provided to Wapiti tagger: " + Throwables.getStackTraceAsString(new Throwable()));
            return "";
        }

        String tagged = Wapiti.labelFromModel(model, data);
        if (tagged != null) {
            return tagged;
        }
        throw new GrobidException("Wapiti tagging failed (null data returned) - Possibly mismatch between grobid-home and grobid-core",
            GrobidExceptionStatus.TAGGING_ERROR);
    }

    /**
     * Load a Wapiti model from file without label checking.
     */
    public static SWIGTYPE_p_mdl_t getModel(File model) {
        return getModel(model, false);
    }

    /**
     * Load a Wapiti model from file, optionally passing the --check flag.
     */
    public static SWIGTYPE_p_mdl_t getModel(File model, boolean checkLabels) {
        String options;
        if (checkLabels) {
            options = "--check";
        } else {
            options = "";
        }
        return Wapiti.loadModel("label " + options + " -m " + model.getAbsolutePath());
    }
}
1,185
33.882353
177
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/PythonEnvironmentConfig.java
package org.grobid.core.jni;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.utilities.GrobidProperties;

/**
 * Describes the python virtual environment (virtualenv or conda) used to run
 * DeLFT through JEP: its root path, site-packages path, jep install path and
 * the detected python version.
 */
public class PythonEnvironmentConfig {
    // root of the virtual environment; null when no environment is configured
    private Path virtualEnv;
    // <venv>/lib/pythonX.Y/site-packages of the selected python
    private Path sitePackagesPath;
    // site-packages/jep, where the jep native library lives
    private Path jepPath;
    // true when the configured env is also the currently activated one
    private boolean active;
    // python version string, e.g. "3.8"
    private String pythonVersion;

    public PythonEnvironmentConfig(
        Path virtualEnv,
        Path sitePackagesPath,
        Path jepPath,
        String pythonVersion,
        boolean active) {
        this.virtualEnv = virtualEnv;
        this.sitePackagesPath = sitePackagesPath;
        this.jepPath = jepPath;
        this.active = active;
        this.pythonVersion = pythonVersion;
    }

    /** True when no virtual environment was resolved. */
    public boolean isEmpty() {
        return this.virtualEnv == null;
    }

    public Path getVirtualEnv() {
        return this.virtualEnv;
    }

    public Path getSitePackagesPath() {
        return this.sitePackagesPath;
    }

    /** Path to the environment's native lib directory, or null when empty. */
    public Path getNativeLibPath() {
        if (this.virtualEnv == null) {
            return null;
        }
        return Paths.get(this.virtualEnv.toString(), "lib");
    }

    /** Native library search paths: the env lib directory plus the jep directory. */
    public Path[] getNativeLibPaths() {
        if (this.virtualEnv == null) {
            return new Path[0];
        }
        return new Path[]{
            this.getNativeLibPath(),
            this.getJepPath()
        };
    }

    public Path getJepPath() {
        return this.jepPath;
    }

    public boolean isActive() {
        return this.active;
    }

    /**
     * Resolve the environment layout for the given virtualenv path, falling back
     * to the currently activated environment when none is configured.
     *
     * @param virtualEnv       configured virtual environment path (may be empty)
     * @param activeVirtualEnv currently activated environment path (may be empty)
     * @throws GrobidResourceException when the environment cannot be inspected
     */
    public static PythonEnvironmentConfig getInstanceForVirtualEnv(String virtualEnv, String activeVirtualEnv) throws GrobidResourceException {
        if (StringUtils.isEmpty(virtualEnv) && StringUtils.isEmpty(activeVirtualEnv)) {
            // no environment at all: return an "empty" config
            return new PythonEnvironmentConfig(null, null, null, null, false);
        }

        if (StringUtils.isEmpty(virtualEnv)) {
            virtualEnv = activeVirtualEnv;
        }

        // look for python3* directories directly under <venv>/lib
        List<Path> pythons;
        try {
            pythons = Files.find(
                Paths.get(virtualEnv, "lib"),
                1,
                (path, attr) -> (
                    path.toFile().isDirectory()
                        && path.getFileName().toString().contains("python3")
                )
            ).collect(Collectors.toList());
        } catch (IOException e) {
            throw new GrobidResourceException("failed to get python versions from virtual environment", e);
        }

        // derive version strings ("3.7", "3.8", "3.9") from the directory names
        List<String> pythonVersions = pythons
            .stream()
            .map(path -> FilenameUtils.getName(path.getFileName().toString())
                .replace("libpython", "").replace("python", ""))
            .filter(version ->
                version.contains("3.7") || version.contains("3.8") || version.contains("3.9"))
            .distinct()
            .sorted()
            .collect(Collectors.toList());

        if (CollectionUtils.isEmpty(pythonVersions)) {
            throw new GrobidException(
                "Cannot find a suitable version (3.7, 3.8 or 3.9) of python in your virtual environment: "
                    + virtualEnv
            );
        }

        // NOTE(review): site-packages is taken from the first directory found while
        // the reported version is the first after sorting - these may refer to
        // different installs when several python3 versions coexist; confirm intent.
        Path sitePackagesPath = Paths.get(pythons.get(0).toString(), "site-packages");
        Path jepPath = Paths.get(sitePackagesPath.toString(), "jep");

        return new PythonEnvironmentConfig(
            Paths.get(virtualEnv),
            sitePackagesPath,
            jepPath,
            pythonVersions.get(0),
            StringUtils.equals(virtualEnv, activeVirtualEnv)
        );
    }

    /**
     * Path of the currently activated environment: VIRTUAL_ENV (virtualenv)
     * or, failing that, CONDA_PREFIX (conda). May be null.
     */
    public static String getActiveVirtualEnv() {
        String activeVirtualEnv = System.getenv("VIRTUAL_ENV");
        if (StringUtils.isEmpty(activeVirtualEnv)) {
            activeVirtualEnv = System.getenv("CONDA_PREFIX");
        }
        return activeVirtualEnv;
    }

    /**
     * Resolve the environment using the GROBID configuration and the currently
     * activated environment.
     */
    public static PythonEnvironmentConfig getInstance() throws GrobidResourceException {
        return getInstanceForVirtualEnv(
            GrobidProperties.getPythonVirtualEnv(),
            getActiveVirtualEnv()
        );
    }

    public String getPythonVersion() {
        return pythonVersion;
    }
}
4,461
29.772414
110
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/JEPThreadPool.java
package org.grobid.core.jni; import java.util.concurrent.*; import java.util.*; import java.io.*; import java.nio.file.Path; import org.grobid.core.utilities.GrobidProperties; import org.grobid.core.exceptions.GrobidResourceException; import jep.Jep; import jep.JepConfig; import jep.JepException; import jep.SubInterpreter; import jep.SharedInterpreter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * For using DeLFT deep learning models, we use JEP as JNI CPython interpreter. * JEP presents the following constraint: A thread that creates a JEP instance * must be reused for all method calls to that JEP instance. For ensuring this, * we pool the Jep instances in a singleton class. */ public class JEPThreadPool { private static final Logger LOGGER = LoggerFactory.getLogger(JEPThreadPool.class); private int POOL_SIZE = 1; private ExecutorService executor; private Map<Long, Jep> jepInstances; private static volatile JEPThreadPool instance; public static JEPThreadPool getInstance() { if (instance == null) { getNewInstance(); } return instance; } /** * Creates a new instance. 
*/ private static synchronized void getNewInstance() { LOGGER.debug("Get new instance of JEPThreadPool"); instance = new JEPThreadPool(); } /** * Hidden constructor */ private JEPThreadPool() { // creating a pool of POOL_SIZE threads //executor = Executors.newFixedThreadPool(POOL_SIZE); executor = Executors.newSingleThreadExecutor(); // each of these threads is associated to a JEP instance jepInstances = new ConcurrentHashMap<>(); } private File getAndValidateDelftPath() { File delftPath = new File(GrobidProperties.getDeLFTFilePath()); if (!delftPath.exists()) { throw new GrobidResourceException("DeLFT installation path does not exist"); } if (!delftPath.isDirectory()) { throw new GrobidResourceException("DeLFT installation path is not a directory"); } return delftPath; } private JepConfig getJepConfig(File delftPath, Path sitePackagesPath) { JepConfig config = new JepConfig(); config.addIncludePaths(delftPath.getAbsolutePath()); config.redirectStdout(System.out); config.redirectStdErr(System.err); if (sitePackagesPath != null) { config.addIncludePaths(sitePackagesPath.toString()); } config.setClassLoader(Thread.currentThread().getContextClassLoader()); return config; } private void initializeJepInstance(Jep jep, File delftPath) throws JepException { // import packages jep.eval("import os"); jep.eval("os.chdir('" + delftPath.getAbsolutePath() + "')"); jep.eval("from delft.utilities.Embeddings import Embeddings"); jep.eval("import delft.sequenceLabelling"); jep.eval("from delft.sequenceLabelling import Sequence"); jep.eval("from delft.sequenceLabelling.reader import load_data_and_labels_crf_file"); jep.eval("from delft.sequenceLabelling.reader import load_data_crf_string"); jep.eval("from sklearn.model_selection import train_test_split"); } private Jep createJEPInstance() { Jep jep = null; boolean success = false; try { File delftPath = this.getAndValidateDelftPath(); JepConfig config = this.getJepConfig( delftPath, 
PythonEnvironmentConfig.getInstance().getSitePackagesPath() ); //jep = new SubInterpreter(config); try { SharedInterpreter.setConfig(config); } catch(Exception e) { LOGGER.info("JEP interpreter already initialized"); } jep = new SharedInterpreter(); this.initializeJepInstance(jep, delftPath); success = true; return jep; } catch(JepException e) { LOGGER.error("JEP initialization failed", e); throw new RuntimeException("JEP initialization failed", e); } catch(GrobidResourceException e) { LOGGER.error("DeLFT installation path invalid, JEP initialization failed", e); throw new RuntimeException("DeLFT installation path invalid, JEP initialization failed", e); } catch (UnsatisfiedLinkError e) { LOGGER.error("JEP environment not correctly installed or has incompatible binaries, JEP initialization failed", e); throw new RuntimeException("JEP environment not correctly installed or has incompatible binaries, JEP initialization failed", e); } finally { if (!success) { if (jep != null) { try { jep.close(); } catch (JepException e) { LOGGER.error("failed to close JEP instance", e); } } else { LOGGER.error("JEP initialisation failed"); throw new RuntimeException("JEP initialisation failed"); } } } } /** * To be called by the thread executing python commands via JEP. * The method will return to the thread its dedicated Jep instance * (or create one the first time). 
*/ public synchronized Jep getJEPInstance() { long threadId = Thread.currentThread().getId(); Jep jep = jepInstances.get(threadId); if (jep == null) { LOGGER.info("Creating JEP instance for thread " + threadId); jep = this.createJEPInstance(); jepInstances.put(threadId, jep); } try { jep.isValidThread(); } catch (JepException e) { LOGGER.warn("JEP instance no longer usable, creating new instance", e); jep = this.createJEPInstance(); jepInstances.put(threadId, jep); } return jep; } public void run(Runnable task) throws InterruptedException { LOGGER.debug("running thread: " + Thread.currentThread().getId()); Future future = executor.submit(task); // wait until done (in ms) while (!future.isDone()) { Thread.sleep(1); } } public String call(Callable<String> task) throws InterruptedException, ExecutionException { Future<String> future = executor.submit(task); // block until done return future.get(); } }
6,598
35.865922
141
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/WapitiModel.java
package org.grobid.core.jni; import fr.limsi.wapiti.SWIGTYPE_p_mdl_t; import fr.limsi.wapiti.Wapiti; import org.grobid.core.GrobidModel; import org.grobid.core.GrobidModels; import org.grobid.core.exceptions.GrobidException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; public class WapitiModel { public static final Logger LOGGER = LoggerFactory.getLogger(WapitiModel.class); private SWIGTYPE_p_mdl_t model; private File modelFile; public WapitiModel(File modelFile) { this.modelFile = modelFile; init(); } public WapitiModel(GrobidModel grobidModel) { modelFile = new File(grobidModel.getModelPath()); init(); } private synchronized void init() { if (model != null) { return; } if (!modelFile.exists() || modelFile.isDirectory()) { throw new GrobidException("Model file does not exists or is a directory: " + modelFile.getAbsolutePath()); } LOGGER.info("Loading model: " + modelFile + " (size: " + modelFile.length() + ")"); model = WapitiWrapper.getModel(modelFile); } public String label(String data) { if (model == null) { LOGGER.warn("Model has been already closed, reopening: " + modelFile.getAbsolutePath()); init(); } String label = WapitiWrapper.label(model, data).trim(); //TODO: VZ: Grobid currently expects tabs as separators whereas wapiti uses spaces for separating features. 
// for now it is safer to replace, although it does not look nice label = label.replaceAll(" ", "\t"); return label; } public synchronized void close() { if (model != null) { Wapiti.freeModel(model); model = null; } } public static void train(File template, File trainingData, File outputModel) { train(template, trainingData, outputModel, ""); } public static void train(File template, File trainingData, File outputModel, String params) { String args = String.format("train " + params + " -p %s %s %s", template.getAbsolutePath(), trainingData.getAbsolutePath(), outputModel.getAbsolutePath()); //System.out.println("Training with equivalent command line: \n" + "wapiti " + args); Wapiti.runWapiti(args); } }
2,355
31.722222
157
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/jni/JEPThreadPoolClassifier.java
package org.grobid.core.jni;

import java.util.concurrent.*;
import java.util.*;
import java.io.*;
import java.nio.file.Path;

import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.exceptions.GrobidResourceException;

import jep.Jep;
import jep.JepConfig;
import jep.JepException;
import jep.SubInterpreter;
import jep.SharedInterpreter;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This is a classifier variant for using DeLFT deep learning models, we use
 * JEP as JNI CPython interpreter.
 * JEP presents the following constraint: a thread that creates a JEP instance
 * must be reused for all method calls to that JEP instance. To ensure this,
 * all Python work is funnelled through a single dedicated executor thread and
 * the Jep instances are pooled per thread id in this singleton class.
 */
public class JEPThreadPoolClassifier {
    private static final Logger LOGGER = LoggerFactory.getLogger(JEPThreadPoolClassifier.class);

    // JEP requires thread affinity, so the pool is limited to one thread
    private int POOL_SIZE = 1;

    private ExecutorService executor;
    // one Jep instance per executor thread, keyed by thread id
    private ConcurrentMap<Long, Jep> jepInstances;

    private static volatile JEPThreadPoolClassifier instance;

    /**
     * Return the singleton instance, creating it lazily on first use.
     */
    public static JEPThreadPoolClassifier getInstance() {
        if (instance == null) {
            getNewInstance();
        }
        return instance;
    }

    /**
     * Creates a new instance.
     */
    private static synchronized void getNewInstance() {
        LOGGER.debug("Get new instance of JEPThreadPoolClassifier");
        instance = new JEPThreadPoolClassifier();
    }

    /**
     * Hidden constructor
     */
    private JEPThreadPoolClassifier() {
        // single dedicated thread so every JEP call happens on the thread
        // that created the interpreter
        //executor = Executors.newFixedThreadPool(POOL_SIZE);
        executor = Executors.newSingleThreadExecutor();
        jepInstances = new ConcurrentHashMap<>();
    }

    /**
     * Resolve and validate the DeLFT installation directory.
     *
     * @throws GrobidResourceException if the path is missing or not a directory
     */
    private File getAndValidateDelftPath() {
        File delftPath = new File(GrobidProperties.getDeLFTFilePath());
        if (!delftPath.exists()) {
            throw new GrobidResourceException("DeLFT installation path does not exist");
        }
        if (!delftPath.isDirectory()) {
            throw new GrobidResourceException("DeLFT installation path is not a directory");
        }
        return delftPath;
    }

    /**
     * Build the JEP configuration: include paths, stdout/stderr redirection
     * and the current context class loader.
     *
     * @param sitePackagesPath optional Python site-packages path, may be null
     */
    private JepConfig getJepConfig(File delftPath, Path sitePackagesPath) {
        JepConfig config = new JepConfig();
        config.addIncludePaths(delftPath.getAbsolutePath());
        config.redirectStdout(System.out);
        config.redirectStdErr(System.err);
        if (sitePackagesPath != null) {
            config.addIncludePaths(sitePackagesPath.toString());
        }
        config.setClassLoader(Thread.currentThread().getContextClassLoader());
        return config;
    }

    /**
     * Run the Python-side initialization for a fresh interpreter: imports and
     * working-directory change into the DeLFT install.
     */
    private void initializeJepInstance(Jep jep, File delftPath) throws JepException {
        // import packages
        jep.eval("import os");
        jep.eval("import json");
        jep.eval("os.chdir('" + delftPath.getAbsolutePath() + "')");
        jep.eval("from delft.utilities.Embeddings import Embeddings");
        //jep.eval("from delft.utilities.Utilities import split_data_and_labels");
        jep.eval("import delft.textClassification");
        jep.eval("from delft.textClassification import Classifier");
        //jep.eval("from delft.textClassification.reader import load_dataseer_corpus_csv");
        //jep.eval("from delft.textClassification.reader import vectorize as vectorizer");
    }

    /**
     * Create and initialize a new shared JEP interpreter.
     *
     * On failure the partially-created interpreter is closed; the informative
     * exception from the catch clauses is propagated as-is (the previous
     * version threw a fresh RuntimeException from the finally block, which
     * masked the original cause).
     */
    private Jep createJEPInstance() {
        Jep jep = null;
        boolean success = false;
        try {
            File delftPath = this.getAndValidateDelftPath();
            JepConfig config = this.getJepConfig(
                delftPath,
                PythonEnvironmentConfig.getInstance().getSitePackagesPath()
            );
            //jep = new SubInterpreter(config);
            try {
                // setConfig can only be applied once per JVM
                SharedInterpreter.setConfig(config);
            } catch (Exception e) {
                LOGGER.info("JEP interpreter already initialized");
            }
            jep = new SharedInterpreter();
            this.initializeJepInstance(jep, delftPath);
            success = true;
            return jep;
        } catch (JepException e) {
            LOGGER.error("JEP initialization failed", e);
            throw new RuntimeException("JEP initialization failed", e);
        } catch (GrobidResourceException e) {
            LOGGER.error("DeLFT installation path invalid, JEP initialization failed", e);
            throw new RuntimeException("DeLFT installation path invalid, JEP initialization failed", e);
        } catch (UnsatisfiedLinkError e) {
            LOGGER.error("JEP environment not correctly installed or has incompatible binaries, JEP initialization failed", e);
            throw new RuntimeException("JEP environment not correctly installed or has incompatible binaries, JEP initialization failed", e);
        } finally {
            // never throw from finally: it would mask the original exception
            if (!success && jep != null) {
                try {
                    jep.close();
                } catch (JepException e) {
                    LOGGER.error("Failed to close JEP instance", e);
                }
            }
        }
    }

    /**
     * To be called by the thread executing python commands via JEP.
     * The method will return to the thread its dedicated Jep instance
     * (or create one the first time).
     */
    public synchronized Jep getJEPInstance() {
        long threadId = Thread.currentThread().getId();
        Jep jep = jepInstances.get(threadId);
        if (jep == null) {
            LOGGER.info("Creating JEP instance for thread {}", threadId);
            jep = this.createJEPInstance();
            jepInstances.put(threadId, jep);
        }
        try {
            jep.isValidThread();
        } catch (JepException e) {
            LOGGER.warn("JEP instance no longer usable, creating new instance", e);
            jep = this.createJEPInstance();
            jepInstances.put(threadId, jep);
        }
        return jep;
    }

    /**
     * Execute the given task on the dedicated JEP thread and wait for it to
     * complete. Task failures are logged but not propagated, matching the
     * historical contract of this method (it never rethrew task exceptions).
     */
    public void run(Runnable task) throws InterruptedException {
        LOGGER.debug("running thread: {}", Thread.currentThread().getId());
        Future<?> future = executor.submit(task);
        try {
            // block until completion instead of busy-polling isDone() every 1 ms
            future.get();
        } catch (ExecutionException e) {
            LOGGER.error("JEP task execution failed", e);
        }
    }

    /**
     * Execute the given task on the dedicated JEP thread and return its result.
     *
     * @throws ExecutionException if the task throws
     */
    public String call(Callable<String> task) throws InterruptedException, ExecutionException {
        Future<String> future = executor.submit(task);
        // block until done
        return future.get();
    }
}
6,761
36.359116
141
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/DataSetContextExtractor.java
package org.grobid.core.utilities;

import com.google.common.collect.ListMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.MultimapBuilder;
import net.sf.saxon.om.Item;
import net.sf.saxon.om.SequenceIterator;
import net.sf.saxon.trans.XPathException;
import org.apache.commons.io.IOUtils;
import org.grobid.core.data.DataSetContext;

import java.io.IOException;
import java.io.InputStream;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
 * Extracting citation callout contexts (bibliographical references, formulas,
 * figures and tables) from a TEI document via XQuery.
 *
 * Each public accessor runs a dedicated XQuery over the TEI and returns a
 * multimap from the referenced element's TEI id to the textual contexts in
 * which it is called out.
 */
public class DataSetContextExtractor {
    public static final Pattern REF_PATTERN = Pattern.compile("<ref>(.*)</ref>", Pattern.DOTALL);
    // number of characters kept on each side of the <ref> callout
    public static final int CUT_DEFAULT_LENGTH = 50;

    private static final String CONTEXT_EXTRACTION_BIB_XQ =
        loadResource("/xq/get-citation-context-from-tei.xq");
    private static final String CONTEXT_EXTRACTION_FORMULA_XQ =
        loadResource("/xq/get-formula-context-from-tei.xq");
    private static final String CONTEXT_EXTRACTION_FIGURE_XQ =
        loadResource("/xq/get-figure-context-from-tei.xq");
    private static final String CONTEXT_EXTRACTION_TABLE_XQ =
        loadResource("/xq/get-table-context-from-tei.xq");

    /**
     * Load a classpath resource as a UTF-8 string; used for the XQuery files
     * above. Replaces four copy-pasted try/closeQuietly stanzas.
     */
    private static String loadResource(String path) {
        try (InputStream is = DataSetContextExtractor.class.getResourceAsStream(path)) {
            return IOUtils.toString(is, UTF_8);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    static <K extends Comparable<? super K>, V> ListMultimap<K, V> multimap() {
        return MultimapBuilder.treeKeys().linkedListValues().build();
    }

    /**
     * Cut the context window to CUT_DEFAULT_LENGTH characters on each side of
     * the first {@code <ref>} element.
     *
     * @throws IllegalStateException if the input contains no {@code <ref>}
     */
    protected static String cutContextSimple(String cont) {
        Matcher m = REF_PATTERN.matcher(cont);
        if (m.find()) {
            String g = m.group(1);
            int index = m.start();
            return cont.substring(Math.max(0, index - CUT_DEFAULT_LENGTH),
                Math.min(cont.length(), index + g.length() + CUT_DEFAULT_LENGTH));
        } else {
            throw new IllegalStateException("Implementation error: no <ref> found in" + cont);
        }
    }

    /**
     * Replace every {@code <ref>...</ref>} element with its plain text content.
     *
     * @throws IllegalStateException if the input contains no {@code <ref>}
     */
    private static String extractContextSentence(String cont) {
        Matcher m = REF_PATTERN.matcher(cont);
        if (m.find()) {
            String g = m.group(1);
            return m.replaceAll(Matcher.quoteReplacement(g));
        } else {
            throw new IllegalStateException("Implementation error: no <ref> found in" + cont);
        }
    }

    /**
     * Shared extraction loop for the four reference kinds; they only differ by
     * the XQuery they run. Each query yields tuples of
     * (context, teiId, sectionName, position, coords) that are consumed
     * positionally from the sequence iterator.
     */
    private static Multimap<String, DataSetContext> extractReferences(String tei, String xQuery)
            throws XPathException, IOException {
        XQueryProcessor xQueryProcessor = new XQueryProcessor(tei);
        SequenceIterator it = xQueryProcessor.getSequenceIterator(xQuery);
        Item item;
        Multimap<String, DataSetContext> contexts = multimap();
        while ((item = it.next()) != null) {
            String val = item.getStringValue();
            String teiId = it.next().getStringValue();
            // section name is produced by the query but not used here; it must
            // still be consumed to keep the iterator aligned
            it.next().getStringValue();
            // position: unused, but parsed so a malformed value still fails fast
            Double.parseDouble(it.next().getStringValue());
            String coords = it.next().getStringValue();
            DataSetContext pcc = new DataSetContext();
            String context = cutContextSimple(val);
            pcc.setContext(extractContextSentence(context));
            pcc.setDocumentCoords(coords);
            pcc.setTeiId(teiId);
            contexts.put(teiId, pcc);
        }
        return contexts;
    }

    /**
     * @return callout contexts for bibliographical references, keyed by TEI id
     */
    public static Multimap<String, DataSetContext> getCitationReferences(String tei)
            throws XPathException, IOException {
        return extractReferences(tei, CONTEXT_EXTRACTION_BIB_XQ);
    }

    /**
     * @return callout contexts for formulas, keyed by TEI id
     */
    public static Multimap<String, DataSetContext> getFormulaReferences(String tei)
            throws XPathException, IOException {
        return extractReferences(tei, CONTEXT_EXTRACTION_FORMULA_XQ);
    }

    /**
     * @return callout contexts for figures, keyed by TEI id
     */
    public static Multimap<String, DataSetContext> getFigureReferences(String tei)
            throws XPathException, IOException {
        return extractReferences(tei, CONTEXT_EXTRACTION_FIGURE_XQ);
    }

    /**
     * @return callout contexts for tables, keyed by TEI id
     */
    public static Multimap<String, DataSetContext> getTableReferences(String tei)
            throws XPathException, IOException {
        return extractReferences(tei, CONTEXT_EXTRACTION_TABLE_XQ);
    }
}
7,521
36.237624
121
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/GrobidProperties.java
package org.grobid.core.utilities; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.grobid.core.GrobidModel; import org.grobid.core.engines.tagging.GrobidCRFEngine; import org.grobid.core.exceptions.GrobidPropertyException; import org.grobid.core.exceptions.GrobidResourceException; import org.grobid.core.utilities.GrobidConfig.ModelParameters; import org.grobid.core.utilities.GrobidConfig.DelftModelParameters; import org.grobid.core.utilities.GrobidConfig.DelftModelParameterSet; import org.grobid.core.utilities.GrobidConfig.WapitiModelParameters; import org.grobid.core.main.GrobidHomeFinder; import org.grobid.core.utilities.Consolidation.GrobidConsolidationService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Paths; import java.util.*; import java.util.stream.Collectors; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import com.fasterxml.jackson.databind.DeserializationFeature; /** * This class provide methods to set/load/access grobid config value from a yaml config file loaded * in the class {@link GrobidConfig}. * * New yaml parameters and former properties should be equivalent via this class. We keep the * class name "GrobidProperties" for compatibility with Grobid modules and other Java applications * using Grobid as a library. * * to be done: having parameters that can be overridden by a system property having a compatible name. 
*/ public class GrobidProperties { public static final Logger LOGGER = LoggerFactory.getLogger(GrobidProperties.class); static final String FOLDER_NAME_MODELS = "models"; static final String FILE_NAME_MODEL = "model"; private static final String GROBID_VERSION_FILE = "/grobid-version.txt"; static final String UNKNOWN_VERSION_STR = "unknown"; private static GrobidProperties grobidProperties = null; // indicate if GROBID is running in server mode or not private static boolean contextExecutionServer = false; /** * {@link GrobidConfig} object containing all config parameters used by grobid. */ private static GrobidConfig grobidConfig = null; /** * Map models specified inthe config file to their parameters */ private static Map<String, ModelParameters> modelMap = null; /** * Path to pdf to xml converter. */ private static File pathToPdfalto = null; private static File grobidHome = null; /** * Path to the yaml config file */ static File GROBID_CONFIG_PATH = null; private static String GROBID_VERSION = null; /** * Returns an instance of {@link GrobidProperties} object. If no one is set, then * it creates one */ public static GrobidProperties getInstance() { if (grobidProperties == null) { return getNewInstance(); } else { return grobidProperties; } } /** * Returns an instance of {@link GrobidProperties} object based on a custom grobid-home directory. * If no one is set, then it creates one. */ public static GrobidProperties getInstance(GrobidHomeFinder grobidHomeFinder) { synchronized (GrobidProperties.class) { if (grobidHome == null) { grobidHome = grobidHomeFinder.findGrobidHomeOrFail(); } } return getInstance(); } /** * Reload grobid config */ public static void reload() { getNewInstance(); } public static void reset() { getNewInstance(); } /** * Creates a new {@link GrobidProperties} object, initializes and returns it. 
* * @return GrobidProperties */ protected static synchronized GrobidProperties getNewInstance() { LOGGER.debug("synchronized getNewInstance"); grobidProperties = new GrobidProperties(); return grobidProperties; } /** * Load the path to GROBID_HOME from the env-entry set in web.xml. */ private static void assignGrobidHomePath() { if (grobidHome == null) { synchronized (GrobidProperties.class) { if (grobidHome == null) { grobidHome = new GrobidHomeFinder().findGrobidHomeOrFail(); } } } } /** * Return the grobid-home path. * * @return grobid home path */ public static File getGrobidHome() { return grobidHome; } public static File getGrobidHomePath() { return grobidHome; } /** * For back compatibility */ @Deprecated public static File get_GROBID_HOME_PATH() { return grobidHome; } /** * Set the grobid-home path. */ public static void setGrobidHome(final String pGROBID_HOME_PATH) { if (StringUtils.isBlank(pGROBID_HOME_PATH)) throw new GrobidPropertyException("Cannot set property grobidHome to null or empty."); grobidHome = new File(pGROBID_HOME_PATH); // exception if prop file does not exist if (!grobidHome.exists()) { throw new GrobidPropertyException("Could not read GROBID_HOME, the directory '" + pGROBID_HOME_PATH + "' does not exist."); } try { grobidHome = grobidHome.getCanonicalFile(); } catch (IOException e) { throw new GrobidPropertyException("Cannot set grobid home path to the given one '" + pGROBID_HOME_PATH + "', because it does not exist."); } } /** * Load the path to grobid config yaml from the env-entry set in web.xml. 
*/ static void loadGrobidConfigPath() { LOGGER.debug("loading grobid config yaml"); if (GROBID_CONFIG_PATH == null) { synchronized (GrobidProperties.class) { if (GROBID_CONFIG_PATH == null) { GROBID_CONFIG_PATH = new GrobidHomeFinder().findGrobidConfigOrFail(grobidHome); } } } } /** * Return the path to the GROBID yaml config file * * @return grobid properties path */ public static File getGrobidConfigPath() { return GROBID_CONFIG_PATH; } /** * Set the GROBID config yaml file path. */ public static void setGrobidConfigPath(final String pGrobidConfigPath) { if (StringUtils.isBlank(pGrobidConfigPath)) throw new GrobidPropertyException("Cannot set GROBID config file to null or empty."); File grobidConfigPath = new File(pGrobidConfigPath); // exception if config file does not exist if (!grobidConfigPath.exists()) { throw new GrobidPropertyException("Cannot read GROBID yaml config file, the file '" + pGrobidConfigPath + "' does not exist."); } try { GROBID_CONFIG_PATH = grobidConfigPath.getCanonicalFile(); } catch (IOException e) { throw new GrobidPropertyException("Cannot set grobid yaml config file path to the given one '" + pGrobidConfigPath + "', because it does not exist."); } } /** * Create a new object and search where to find the grobid-home folder. * * We check if the system property GrobidPropertyKeys.PROP_GROBID_HOME * is set. If not set, the method will search for a folder named * grobid-home in the current project. * * Finally from the found grobid-home, the yaml config file is loaded and * the native and data resource paths are initialized. 
*/ public GrobidProperties() { assignGrobidHomePath(); loadGrobidConfigPath(); setContextExecutionServer(false); try { ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); grobidConfig = mapper.readValue(GROBID_CONFIG_PATH, GrobidConfig.class); } catch (IOException exp) { throw new GrobidPropertyException("Cannot open GROBID config yaml file at location '" + GROBID_CONFIG_PATH.getAbsolutePath() + "'", exp); } catch (Exception exp) { throw new GrobidPropertyException("Cannot open GROBID config yaml file " + getGrobidConfigPath().getAbsolutePath(), exp); } //Map<String, String> configParametersViaEnvironment = getEnvironmentVariableOverrides(System.getenv()); //this.setEnvironmentConfigParameter(configParametersViaEnvironment); initializeTmpPath(); // TBD: tmp to be created loadPdfaltoPath(); createModelMap(); } /** * Create a map between model names and associated parameters */ private static void createModelMap() { for(ModelParameters modelParameter : grobidConfig.grobid.models) { if (modelMap == null) modelMap = new TreeMap<>(); modelMap.put(modelParameter.name, modelParameter); } } /** * Add a model with its parameter object in the model map */ public static void addModel(ModelParameters modelParameter) { if (modelMap == null) modelMap = new TreeMap<>(); modelMap.put(modelParameter.name, modelParameter); } /** * Create indicated tmp path if it does not exist */ private void initializeTmpPath() { File tmpDir = getTempPath(); if (!tmpDir.exists()) { if (!tmpDir.mkdirs()) { LOGGER.warn("tmp does not exist and unable to create tmp directory: " + tmpDir.getAbsolutePath()); } } } /** * Return the distinct values of all the engines that are specified in the the model map */ public static Set<GrobidCRFEngine> getDistinctModels() { Set<GrobidCRFEngine> distinctModels = new HashSet<>(); for (Map.Entry<String, ModelParameters> entry : modelMap.entrySet()) { ModelParameters modelParameter = 
entry.getValue(); if (modelParameter.engine == null) { // it should not happen normally continue; } GrobidCRFEngine localEngine = GrobidCRFEngine.get(modelParameter.engine); if (!distinctModels.contains(localEngine)) distinctModels.add(localEngine); } return distinctModels; } /** * Returns the current version of GROBID * * @return GROBID version */ public static String getVersion() { if (GROBID_VERSION == null) { synchronized (GrobidProperties.class) { if (GROBID_VERSION == null) { String grobidVersion = UNKNOWN_VERSION_STR; try (InputStream is = GrobidProperties.class.getResourceAsStream(GROBID_VERSION_FILE)) { grobidVersion = IOUtils.toString(is, "UTF-8"); } catch (IOException e) { LOGGER.error("Cannot read Grobid version from resources", e); } GROBID_VERSION = grobidVersion; } } } return GROBID_VERSION; } /** * Returns the temprorary path of grobid * * @return a directory for temp files */ public static File getTempPath() { if (grobidConfig.grobid.temp == null) return new File(System.getProperty("java.io.tmpdir")); else { if (!new File(grobidConfig.grobid.temp).isAbsolute()) { return new File(grobidHome.getPath(), grobidConfig.grobid.temp); } else { return new File(grobidConfig.grobid.temp); } } } public static void setNativeLibraryPath(final String nativeLibPath) { grobidConfig.grobid.nativelibrary = nativeLibPath; } /** * Returns the path to the native libraries as {@link File} object. * * @return folder that contains native libraries */ public static File getNativeLibraryPath() { return new File(grobidHome.getPath(), grobidConfig.grobid.nativelibrary); } /** * Returns the installation path of DeLFT if set, null otherwise. It is required for using * a Deep Learning sequence labelling engine. 
* * @return path to the folder that contains the local install of DeLFT */ public static String getDeLFTPath() { return grobidConfig.grobid.delft.install; } public static String getDeLFTFilePath() { String rawPath = grobidConfig.grobid.delft.install; File pathFile = new File(rawPath); if (!Files.exists(Paths.get(rawPath).toAbsolutePath())) { rawPath = "../" + rawPath; pathFile = new File(rawPath); } return pathFile.getAbsolutePath(); } public static String getGluttonUrl() { if (grobidConfig.grobid.consolidation.glutton.url == null || grobidConfig.grobid.consolidation.glutton.url.trim().length() == 0) return null; else return grobidConfig.grobid.consolidation.glutton.url; } public static void setGluttonUrl(final String theUrl) { grobidConfig.grobid.consolidation.glutton.url = theUrl; } /** * Returns the host for a proxy connection, given in the grobid config file. * * @return proxy host */ public static String getProxyHost() { if (grobidConfig.grobid.proxy.host == null || grobidConfig.grobid.proxy.host.trim().length() == 0) return null; else return grobidConfig.grobid.proxy.host; } /** * Sets the host a proxy connection, given in the config file. * * @param the proxy host to be used */ public static void setProxyHost(final String host) { grobidConfig.grobid.proxy.host = host; System.setProperty("http.proxyHost", host); System.setProperty("https.proxyHost", host); } /** * Returns the port for a proxy connection, given in the grobid config file. * * @return proxy port */ public static Integer getProxyPort() { return grobidConfig.grobid.proxy.port; } /** * Set the "mailto" parameter to be used in the crossref query and in User-Agent * header, as recommended by CrossRef REST API documentation. 
* * @param mailto email parameter to be used for requesting crossref */ public static void setCrossrefMailto(final String mailto) { grobidConfig.grobid.consolidation.crossref.mailto = mailto; } /** * Get the "mailto" parameter to be used in the crossref query and in User-Agent * header, as recommended by CrossRef REST API documentation. * * @return string of the email parameter to be used for requesting crossref */ public static String getCrossrefMailto() { if (grobidConfig.grobid.consolidation.crossref.mailto == null || grobidConfig.grobid.consolidation.crossref.mailto.trim().length() == 0) return null; else return grobidConfig.grobid.consolidation.crossref.mailto; } /** * Set the Crossref Metadata Plus authorization token to be used for Crossref * requests for the subscribers of this service. This token will ensure that said * requests get directed to a pool of machines that are reserved for "Plus" SLA users. * * @param token authorization token to be used for requesting crossref */ public static void setCrossrefToken(final String token) { grobidConfig.grobid.consolidation.crossref.token = token; } /** * Get the Crossref Metadata Plus authorization token to be used for Crossref * requests for the subscribers of this service. This token will ensure that said * requests get directed to a pool of machines that are reserved for "Plus" SLA users. * * @return authorization token to be used for requesting crossref */ public static String getCrossrefToken() { if (grobidConfig.grobid.consolidation.crossref.token == null || grobidConfig.grobid.consolidation.crossref.token.trim().length() == 0) return null; else return grobidConfig.grobid.consolidation.crossref.token; } /** * Sets the port for a proxy connection, given in the grobid config file. 
* * @param proxy port */ public static void setProxyPort(int port) { grobidConfig.grobid.proxy.port = port; System.setProperty("http.proxyPort", ""+port); System.setProperty("https.proxyPort", ""+port); } public static Integer getPdfaltoMemoryLimitMb() { return grobidConfig.grobid.pdf.pdfalto.memoryLimitMb; } public static Integer getPdfaltoTimeoutS() { return grobidConfig.grobid.pdf.pdfalto.timeoutSec; } public static Integer getPdfaltoTimeoutMs() { return grobidConfig.grobid.pdf.pdfalto.timeoutSec * 1000; } /*public static Integer getNBThreads() { Integer nbThreadsConfig = Integer.valueOf(grobidConfig.grobid.wapiti.nbThreads); if (nbThreadsConfig.intValue() == 0) { return Integer.valueOf(Runtime.getRuntime().availableProcessors()); } return nbThreadsConfig; }*/ /** * Returns the number of threads to be used when training with CRF Wapiti, given in the grobid config file. * * @return number of threads */ public static Integer getWapitiNbThreads() { Integer nbThreadsConfig = Integer.valueOf(grobidConfig.grobid.wapiti.nbThreads); if (nbThreadsConfig.intValue() == 0) { return Integer.valueOf(Runtime.getRuntime().availableProcessors()); } return nbThreadsConfig; } // PDF with more blocks will be skipped public static Integer getPdfBlocksMax() { return grobidConfig.grobid.pdf.blocksMax; } // PDF with more tokens will be skipped public static Integer getPdfTokensMax() { return grobidConfig.grobid.pdf.tokensMax; } /** * Sets the number of threads for training a Wapiti model, given in the grobid config file. 
* * @param nbThreads umber of threads */ /*public static void setNBThreads(int nbThreads) { grobidConfig.grobid.wapiti.nbThreads = nbThreads; }*/ public static void setWapitiNbThreads(int nbThreads) { grobidConfig.grobid.wapiti.nbThreads = nbThreads; } public static String getLanguageDetectorFactory() { String factoryClassName = grobidConfig.grobid.languageDetectorFactory; if (StringUtils.isBlank(factoryClassName)) { throw new GrobidPropertyException("Language detection is enabled but a factory class name is not provided"); } return factoryClassName; } /** * Sets if a language id shall be used, given in the grobid-property file. * * @param useLanguageId true, if a language id shall be used */ /*public static void setUseLanguageId(final String useLanguageId) { setPropertyValue(GrobidPropertyKeys.PROP_USE_LANG_ID, useLanguageId); }*/ public static String getSentenceDetectorFactory() { String factoryClassName = grobidConfig.grobid.sentenceDetectorFactory; if (StringUtils.isBlank(factoryClassName)) { throw new GrobidPropertyException("Sentence detection is enabled but a factory class name is not provided"); } return factoryClassName; } /** * Returns the path to the home folder of pdf to xml converter. */ public static void loadPdfaltoPath() { LOGGER.debug("loading pdfalto command path"); String pathName = grobidConfig.grobid.pdf.pdfalto.path; pathToPdfalto = new File(grobidHome.getPath(), pathName); if (!pathToPdfalto.exists()) { throw new GrobidPropertyException( "Path to pdfalto doesn't exists. " + "Please set the path to pdfalto in the config file"); } pathToPdfalto = new File(pathToPdfalto, Utilities.getOsNameAndArch()); LOGGER.debug("pdfalto executable home directory set to " + pathToPdfalto.getAbsolutePath()); } /** * Returns the path to the home folder of pdfalto program. 
* @return path to pdfalto program
     */
    public static File getPdfaltoPath() {
        return pathToPdfalto;
    }

    /**
     * Resolve the configured parameters for a model.
     *
     * @param modelName name of the model
     * @return the model's configuration, or null (with a debug log) when the
     *         model is not present in the configuration
     */
    private static ModelParameters getModelParameters(final String modelName) {
        ModelParameters param = modelMap.get(modelName);
        if (param == null) {
            LOGGER.debug("No configuration parameter defined for model " + modelName);
        }
        return param;
    }

    /**
     * Resolve the DeLFT-specific parameters for a model.
     *
     * @param modelName name of the model
     * @return the DeLFT configuration, or null (with a debug log) when the model
     *         or its DeLFT section is not present in the configuration
     */
    private static DelftModelParameters getDelftModelParameters(final String modelName) {
        ModelParameters param = getModelParameters(modelName);
        if (param == null) {
            return null;
        }
        if (param.delft == null) {
            LOGGER.debug("No configuration parameter defined for DeLFT engine for model " + modelName);
        }
        return param.delft;
    }

    private static String getGrobidCRFEngineName(final String modelName) {
        ModelParameters param = getModelParameters(modelName);
        return (param == null) ? null : param.engine;
    }

    /**
     * @param modelName name of the model
     * @return the sequence labeling engine configured for the model, or null
     *         when the model is not configured
     */
    public static GrobidCRFEngine getGrobidCRFEngine(final String modelName) {
        String engineName = getGrobidCRFEngineName(modelName);
        return (engineName == null) ? null : GrobidCRFEngine.get(engineName);
    }

    public static GrobidCRFEngine getGrobidCRFEngine(final GrobidModel model) {
        return getGrobidCRFEngine(model.getModelName());
    }

    /**
     * @param model the model
     * @return path to the serialized model file, or null when the model is not
     *         specified in the configuration
     */
    public static File getModelPath(final GrobidModel model) {
        if (modelMap.get(model.getModelName()) == null) {
            // model is not specified in the config, ignoring
            return null;
        }
        String extension = getGrobidCRFEngine(model).getExt();
        return new File(getGrobidHome(), FOLDER_NAME_MODELS + File.separator
            + model.getFolderName() + File.separator
            + FILE_NAME_MODEL + "." + extension);
    }

    /** @return root folder containing all the models */
    public static File getModelPath() {
        return new File(getGrobidHome(), FOLDER_NAME_MODELS);
    }

    /**
     * Locate a dataset file for a model under the given resources directory,
     * falling back to the relative "resources/" tree when it does not exist there.
     *
     * @param resourcesDir base resources directory to try first
     * @param model        the model
     * @param relativePath path below "dataset/&lt;model folder&gt;"
     * @return the first existing candidate, or the fallback path when neither exists
     */
    private static File getDatasetFile(final File resourcesDir, final GrobidModel model, final String relativePath) {
        File theFile = new File(resourcesDir, "dataset/" + model.getFolderName() + relativePath);
        if (!theFile.exists()) {
            theFile = new File("resources/dataset/" + model.getFolderName() + relativePath);
        }
        return theFile;
    }

    /**
     * @return path to the CRF++ template of the model, or null when the model is
     *         not specified in the configuration
     */
    public static File getTemplatePath(final File resourcesDir, final GrobidModel model) {
        if (modelMap.get(model.getModelName()) == null) {
            // model is not specified in the config, ignoring
            return null;
        }
        return getDatasetFile(resourcesDir, model, "/crfpp-templates/" + model.getTemplateName());
    }

    /** @return path to the evaluation dataset folder of the model */
    public static File getEvalCorpusPath(final File resourcesDir, final GrobidModel model) {
        return getDatasetFile(resourcesDir, model, "/evaluation/");
    }

    /** @return path to the training corpus folder of the model */
    public static File getCorpusPath(final File resourcesDir, final GrobidModel model) {
        return getDatasetFile(resourcesDir, model, "/corpus");
    }

    public static String getLexiconPath() {
        return new File(getGrobidHome(), "lexicon").getAbsolutePath();
    }

    public static File getLanguageDetectionResourcePath() {
        return new File(getGrobidHome(), "language-detection");
    }

    /**
     * Returns the maximum parallel connections allowed in the pool.
     *
     * @return the number of connections
     */
    public static int getMaxConcurrency() {
        return grobidConfig.grobid.concurrency;
    }

    /**
     * Returns maximum time to wait before timeout when the pool is full.
     *
     * @return time to wait in milliseconds.
     */
    public static int getPoolMaxWait() {
        return grobidConfig.grobid.poolMaxWait * 1000;
    }

    /**
     * Returns the consolidation service to be used, defaulting to "crossref"
     * when none is configured.
     *
     * @return the consolidation service to be used
     */
    public static GrobidConsolidationService getConsolidationService() {
        if (grobidConfig.grobid.consolidation.service == null) {
            grobidConfig.grobid.consolidation.service = "crossref";
        }
        return GrobidConsolidationService.get(grobidConfig.grobid.consolidation.service);
    }

    /**
     * Set which consolidation service to use.
     */
    public static void setConsolidationService(String service) {
        grobidConfig.grobid.consolidation.service = service;
    }

    /**
     * Returns if the execution context is stand alone or server.
     *
     * @return true when running as a server, false otherwise
     */
    public static boolean isContextExecutionServer() {
        return contextExecutionServer;
    }

    /**
     * Set if the execution context is stand alone or server.
     *
     * @param state true to set the context of execution to server, false else.
     */
    public static void setContextExecutionServer(boolean state) {
        contextExecutionServer = state;
    }

    public static String getPythonVirtualEnv() {
        return grobidConfig.grobid.delft.pythonVirtualEnv;
    }

    public static void setPythonVirtualEnv(String pythonVirtualEnv) {
        grobidConfig.grobid.delft.pythonVirtualEnv = pythonVirtualEnv;
    }

    /** @return the Wapiti feature window configured for the model, defaulting to 20 */
    public static int getWindow(final GrobidModel model) {
        ModelParameters parameters = modelMap.get(model.getModelName());
        return (parameters != null && parameters.wapiti != null) ? parameters.wapiti.window : 20;
    }

    /** @return the Wapiti stopping epsilon configured for the model, defaulting to 0.00001 */
    public static double getEpsilon(final GrobidModel model) {
        ModelParameters parameters = modelMap.get(model.getModelName());
        return (parameters != null && parameters.wapiti != null) ? parameters.wapiti.epsilon : 0.00001;
    }

    /** @return the Wapiti maximum number of training iterations, defaulting to 2000 */
    public static int getNbMaxIterations(final GrobidModel model) {
        ModelParameters parameters = modelMap.get(model.getModelName());
        return (parameters != null && parameters.wapiti != null) ? parameters.wapiti.nbMaxIterations : 2000;
    }

    /** @return whether ELMo embeddings are enabled for the model (false when not configured) */
    public static boolean useELMo(final String modelName) {
        DelftModelParameters delftParam = getDelftModelParameters(modelName);
        return (delftParam != null) && delftParam.useELMo;
    }

    /** @return the DeLFT architecture name for the model, or null when not configured */
    public static String getDelftArchitecture(final String modelName) {
        DelftModelParameters delftParam = getDelftModelParameters(modelName);
        return (delftParam == null) ? null : delftParam.architecture;
    }

    /** @return the DeLFT embeddings name for the model, or null when not configured */
    public static String getDelftEmbeddingsName(final String modelName) {
        DelftModelParameters delftParam = getDelftModelParameters(modelName);
        return (delftParam == null) ? null : delftParam.embeddings_name;
    }

    /**
     * @return the DeLFT transformer name for the model, or null when not configured.
     *         NOTE: the method name carries a historical typo ("Tranformer"); it is
     *         kept as-is for backward compatibility with existing callers.
     */
    public static String getDelftTranformer(final String modelName) {
        DelftModelParameters delftParam = getDelftModelParameters(modelName);
        return (delftParam == null) ? null : delftParam.transformer;
    }

    /**
     * Resolve the DeLFT training or runtime parameter set for a model.
     *
     * @param modelName name of the model
     * @param training  true for the training set, false for the runtime set
     * @return the parameter set, or null (with a debug log) when not configured
     */
    private static DelftModelParameterSet getDelftParameterSet(final String modelName, final boolean training) {
        DelftModelParameters delftParam = getDelftModelParameters(modelName);
        if (delftParam == null) {
            return null;
        }
        DelftModelParameterSet paramSet = training ? delftParam.training : delftParam.runtime;
        if (paramSet == null) {
            LOGGER.debug("No " + (training ? "training" : "runtime")
                + " configuration parameter defined for DeLFT engine for model " + modelName);
        }
        return paramSet;
    }

    /**
     * Return -1 if not set in the configuration and the default DeLFT value will be used in this case.
     */
    public static int getDelftTrainingMaxSequenceLength(final String modelName) {
        DelftModelParameterSet paramSet = getDelftParameterSet(modelName, true);
        return (paramSet == null) ? -1 : paramSet.max_sequence_length;
    }

    /**
     * Return -1 if not set in the configuration and the default DeLFT value will be used in this case.
     */
    public static int getDelftRuntimeMaxSequenceLength(final String modelName) {
        DelftModelParameterSet paramSet = getDelftParameterSet(modelName, false);
        return (paramSet == null) ? -1 : paramSet.max_sequence_length;
    }

    /**
     * Return -1 if not set in the configuration and the default DeLFT value will be used in this case.
     */
    public static int getDelftTrainingBatchSize(final String modelName) {
        DelftModelParameterSet paramSet = getDelftParameterSet(modelName, true);
        return (paramSet == null) ? -1 : paramSet.batch_size;
    }

    /**
     * Return -1 if not set in the configuration and the default DeLFT value will be used in this case.
     */
    public static int getDelftRuntimeBatchSize(final String modelName) {
        DelftModelParameterSet paramSet = getDelftParameterSet(modelName, false);
        return (paramSet == null) ? -1 : paramSet.batch_size;
    }

    public static String getDelftArchitecture(final GrobidModel model) {
        return getDelftArchitecture(model.getModelName());
    }
}
32,807
35.092409
144
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/ElementCounterItem.java
package org.grobid.core.utilities; public class ElementCounterItem<T> { private T item; private Integer cnt; //for Jackson public ElementCounterItem() { } public ElementCounterItem(T item, Integer cnt) { this.item = item; this.cnt = cnt; } public T getItem() { return item; } public java.lang.Integer getCnt() { return cnt; } }
410
15.44
52
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/ConsolidationCounters.java
package org.grobid.core.utilities; import org.grobid.core.engines.counters.*; /** * Counters for keeping track of consolidation activity and results * */ public class ConsolidationCounters { public static final Countable CONSOLIDATION = new Countable() { @Override public String getName() { return "CONSOLIDATION"; } }; public static final Countable CONSOLIDATION_SUCCESS = new Countable() { @Override public String getName() { return "CONSOLIDATION_SUCCESS"; } }; public static final Countable CONSOLIDATION_PER_DOI = new Countable() { @Override public String getName() { return "CONSOLIDATION_PER_DOI"; } }; public static final Countable CONSOLIDATION_PER_DOI_SUCCESS = new Countable() { @Override public String getName() { return "CONSOLIDATION_PER_DOI_SUCCESS"; } }; public static final Countable TOTAL_BIB_REF = new Countable() { @Override public String getName() { return "TOTAL_BIB_REF"; } }; }
1,126
27.175
83
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/Pair.java
package org.grobid.core.utilities; public class Pair<A, B> { public final A a; public final B b; public Pair(A a, B b) { this.a = a; this.b = b; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb .append("('") .append(a) .append("'; '") .append(b) .append("')"); return sb.toString(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Pair)) { return false; } Pair<?, ?> that = (Pair<?, ?>) o; return ((this.a == null) ? that.a == null : this.a.equals(that.a)) && ((this.b == null) ? that.b == null : this.b.equals(that.b)); } @Override public int hashCode() { return 7 * (a != null ? a.hashCode() : 11) + 13 * (b != null ? b.hashCode() : 3); } public A getA() { return a; } public B getB() { return b; } }
1,094
20.057692
89
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/TextUtilities.java
package org.grobid.core.utilities; import org.apache.commons.lang3.StringUtils; import org.grobid.core.analyzers.GrobidAnalyzer; import org.grobid.core.exceptions.GrobidException; import org.grobid.core.layout.LayoutToken; import org.grobid.core.lexicon.Lexicon; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.text.DecimalFormat; import java.text.NumberFormat; import java.util.*; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Class for holding static methods for text processing. * */ public class TextUtilities { public static final String punctuations = " •*,:;?.!)-−–\"“”‘’'`$]*\u2666\u2665\u2663\u2660\u00A0。、,・"; public static final String fullPunctuations = "(([ •*,:;?.!/))-−–‐«»„\"“”‘’'`$#@]*\u2666\u2665\u2663\u2660\u00A0。、,・"; public static final String restrictedPunctuations = ",:;?.!/-–«»„\"“”‘’'`*\u2666\u2665\u2663\u2660。、,・"; public static String delimiters = "\n\r\t\f\u00A0\u200C" + fullPunctuations; public static final String OR = "|"; public static final String NEW_LINE = "\n"; public static final String SPACE = " "; public static final String COMMA = ","; public static final String QUOTE = "'"; public static final String END_BRACKET = ")"; public static final String START_BRACKET = "("; public static final String SHARP = "#"; public static final String COLON = ":"; public static final String DOUBLE_QUOTE = "\""; public static final String ESC_DOUBLE_QUOTE = "&quot;"; public static final String LESS_THAN = "<"; public static final String ESC_LESS_THAN = "&lt;"; public static final String GREATER_THAN = ">"; public static final String ESC_GREATER_THAN = "&gt;"; public static final String AND = "&"; public static final String ESC_AND = "&amp;"; public static final String SLASH = "/"; // note: be careful of catastrophic backtracking here as a consequence of PDF noise! 
private static final String ORCIDRegex = "^\\s*(?:(?:https?://)?orcid.org/)?([0-9]{4})\\-?([0-9]{4})\\-?([0-9]{4})\\-?([0-9]{3}[\\dX])\\s*$"; static public final Pattern ORCIDPattern = Pattern.compile(ORCIDRegex); // the magical DOI regular expression... static public final Pattern DOIPattern = Pattern .compile("(10\\.\\d{4,5}\\/[\\S]+[^;,.\\s])"); // a regular expression for arXiv identifiers // see https://arxiv.org/help/arxiv_identifier and https://arxiv.org/help/arxiv_identifier_for_services static public final Pattern arXivPattern = Pattern .compile("(arXiv\\s?(\\.org)?\\s?\\:\\s?\\d{4}\\s?\\.\\s?\\d{4,5}(v\\d+)?)|(arXiv\\s?(\\.org)?\\s?\\:\\s?[ a-zA-Z\\-\\.]*\\s?/\\s?\\d{7}(v\\d+)?)"); // regular expression for PubMed identifiers, last group gives the PMID digits static public final Pattern pmidPattern = Pattern.compile("((PMID)|(Pub(\\s)?Med(\\s)?(ID)?))(\\s)?(\\:)?(\\s)*(\\d{1,8})"); // regular expression for PubMed Central identifiers (note: contrary to PMID, we include the prefix PMC here, see // https://www.ncbi.nlm.nih.gov/pmc/pmctopmid/ for instance), last group gives the PMC ID digits static public final Pattern pmcidPattern = Pattern .compile("((PMC\\s?(ID)?)|(Pub(\\s)?Med(\\s)?(Central)?(\\s)?(ID)?))(\\s)?(\\:)?(\\s)*(\\d{1,9})"); // a regular expression for identifying url pattern in text // TODO: maybe find a better regex (better == more robust, not more "standard") static public final Pattern urlPattern0 = Pattern .compile("(?i)(https?|ftp)\\s?:\\s?//\\s?[-A-Z0-9+&@#/%?=~_()|!:,.;]*[-A-Z0-9+&@#/%=~_()|]"); static public final Pattern urlPattern = Pattern .compile("(?i)(https?|ftp)\\s{0,2}:\\s{0,2}\\/\\/\\s{0,2}[-A-Z0-9+&@#\\/%?=~_()|!:.;]*[-A-Z0-9+&@#\\/%=~_()]"); // a regular expression for identifying email pattern in text // TODO: maybe find a better regex (better == more robust, not more "standard") static public final Pattern emailPattern = Pattern.compile("\\w+((\\.|-|_|,)\\w+)?\\s?((\\.|-|_|,)\\w+)?\\s?@\\s?\\w+(\\s?(\\.|-)\\s?\\w+)+"); // 
variant: \w+(\s?(\.|-|_|,)\w+)?(\s?(\.|-|_|,)\w+)?\s?@\s?\w+(\s?(\.|\-)\s?\w+)+ /** * Replace numbers in the string by a dummy character for string distance evaluations * * @param string the string to be processed. * @return Returns the string with numbers replaced by 'X'. */ public static String shadowNumbers(String string) { int i = 0; if (string == null) return string; String res = ""; while (i < string.length()) { char c = string.charAt(i); if (Character.isDigit(c)) res += 'X'; else res += c; i++; } return res; } private static int getLastPunctuationCharacter(String section) { int res = -1; for (int i = section.length() - 1; i >= 0; i--) { if (fullPunctuations.contains("" + section.charAt(i))) { res = i; } } return res; } /** @use LayoutTokensUtil.dehyphenize(List<LayoutToken> tokens) **/ @Deprecated public static List<LayoutToken> dehyphenize(List<LayoutToken> tokens) { return LayoutTokensUtil.dehyphenize(tokens); } /** @use LayoutTokenUtils.doesRequireDehypenisation(List<LayoutToken> tokens, int i)**/ @Deprecated protected static boolean doesRequireDehypenisation(List<LayoutToken> tokens, int i) { return LayoutTokensUtil.doesRequireDehypenisation(tokens, i); } public static String dehyphenize(String text) { GrobidAnalyzer analyser = GrobidAnalyzer.getInstance(); final List<LayoutToken> layoutTokens = analyser.tokenizeWithLayoutToken(text); return LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(layoutTokens)); } public static String getLastToken(String section) { String lastToken = section; int lastSpaceIndex = section.lastIndexOf(' '); //The last parenthesis cover the case 'this is a (special-one) case' // where the lastToken before the hypen should be 'special' and not '(special' /* int lastParenthesisIndex = section.lastIndexOf('('); if (lastParenthesisIndex > lastSpaceIndex) lastSpaceIndex = lastParenthesisIndex;*/ if (lastSpaceIndex != -1) { lastToken = section.substring(lastSpaceIndex + 1, section.length()); } else { lastToken = section.substring(0, 
section.length()); } return lastToken; } public static String getFirstToken(String section) { int firstSpaceIndex = section.indexOf(' '); if (firstSpaceIndex == 0) { return getFirstToken(section.substring(1, section.length())); } else if (firstSpaceIndex != -1) { return section.substring(0, firstSpaceIndex); } else { return section.substring(0, section.length()); } } /** * Text extracted from a PDF is usually hyphenized, which is not desirable. * This version supposes that the end of line are lost and than hyphenation * could appear everywhere. So a dictionary is used to control the recognition * of hyphen. * * @param text the string to be processed without preserved end of lines. * @return Returns the dehyphenized string. * <p> * Deprecated method, not needed anymore since the @newline are preserved thanks to the LayoutTokens * @Use LayoutTokensUtil.dehypenize() */ @Deprecated public static String dehyphenizeHard(String text) { if (text == null) return null; String res = ""; text.replaceAll("\n", SPACE); StringTokenizer st = new StringTokenizer(text, "-"); boolean hyphen = false; boolean failure = false; String lastToken = null; while (st.hasMoreTokens()) { String section = st.nextToken().trim(); if (hyphen) { // we get the first token StringTokenizer st2 = new StringTokenizer(section, " ,.);!"); if (st2.countTokens() > 0) { String firstToken = st2.nextToken(); // we check if the composed token is in the lexicon String hyphenToken = lastToken + firstToken; //System.out.println(hyphenToken); /*if (lex == null) featureFactory.loadLexicon();*/ Lexicon lex = Lexicon.getInstance(); if (lex.inDictionary(hyphenToken.toLowerCase()) & !(test_digit(hyphenToken))) { // if yes, it is hyphenization res += firstToken; section = section.substring(firstToken.length(), section.length()); } else { // if not res += "-"; failure = true; } } else { res += "-"; } hyphen = false; } // we get the last token hyphen = true; lastToken = getLastToken(section); if (failure) { res += section; 
failure = false; } else res += SPACE + section; } res = res.replace(" . ", ". "); res = res.replace(" ", SPACE); return res.trim(); } /** * Levenstein distance between two strings * * @param s the first string to be compared. * @param t the second string to be compared. * @return Returns the Levenshtein distance. */ public static int getLevenshteinDistance(String s, String t) { //if (s == null || t == null) { // throw new IllegalArgumentException("Strings must not be null"); //} int n = s.length(); // length of s int m = t.length(); // length of t if (n == 0) { return m; } else if (m == 0) { return n; } int p[] = new int[n + 1]; //'previous' cost array, horizontally int d[] = new int[n + 1]; // cost array, horizontally int _d[]; //placeholder to assist in swapping p and d // indexes into strings s and t int i; // iterates through s int j; // iterates through t char t_j; // jth character of t int cost; // cost for (i = 0; i <= n; i++) { p[i] = i; } for (j = 1; j <= m; j++) { t_j = t.charAt(j - 1); d[0] = j; for (i = 1; i <= n; i++) { cost = s.charAt(i - 1) == t_j ? 0 : 1; // minimum of cell to the left+1, to the top+1, diagonally left and up +cost d[i] = Math.min(Math.min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost); } // copy current distance counts to 'previous row' distance counts _d = p; p = d; d = _d; } // our last action in the above loop was to switch d and p, so p now // actually has the most recent cost counts return p[n]; } /** * Appending nb times the char c to the a StringBuffer... */ public final static void appendN(StringBuffer buffer, char c, int nb) { for (int i = 0; i < nb; i++) { buffer.append(c); } } /** * To replace accented characters in a unicode string by unaccented equivalents: * é -> e, ü -> ue, ß -> ss, etc. following the standard transcription conventions * * @param input the string to be processed. * @return Returns the string without accent. 
*/ public final static String removeAccents(String input) { if (input == null) return null; final StringBuffer output = new StringBuffer(); for (int i = 0; i < input.length(); i++) { switch (input.charAt(i)) { case '\u00C0': // À case '\u00C1': // Á case '\u00C2': //  case '\u00C3': // à case '\u00C5': // Ã… output.append("A"); break; case '\u00C4': // Ä case '\u00C6': // Æ output.append("AE"); break; case '\u00C7': // Ç output.append("C"); break; case '\u00C8': // È case '\u00C9': // É case '\u00CA': // Ê case '\u00CB': // Ë output.append("E"); break; case '\u00CC': // ÃŒ case '\u00CD': // Í case '\u00CE': // ÃŽ case '\u00CF': // Ï output.append("I"); break; case '\u00D0': // Ð output.append("D"); break; case '\u00D1': // Ñ output.append("N"); break; case '\u00D2': // Ã’ case '\u00D3': // Ó case '\u00D4': // Ô case '\u00D5': // Õ case '\u00D8': // Ø output.append("O"); break; case '\u00D6': // Ö case '\u0152': // Å’ output.append("OE"); break; case '\u00DE': // Þ output.append("TH"); break; case '\u00D9': // Ù case '\u00DA': // Ú case '\u00DB': // Û output.append("U"); break; case '\u00DC': // Ü output.append("UE"); break; case '\u00DD': // Ý case '\u0178': // Ÿ output.append("Y"); break; case '\u00E0': // à case '\u00E1': // á case '\u00E2': // â case '\u00E3': // ã case '\u00E5': // Ã¥ output.append("a"); break; case '\u00E4': // ä case '\u00E6': // æ output.append("ae"); break; case '\u00E7': // ç output.append("c"); break; case '\u00E8': // è case '\u00E9': // é case '\u00EA': // ê case '\u00EB': // ë output.append("e"); break; case '\u00EC': // ì case '\u00ED': // à case '\u00EE': // î case '\u00EF': // ï output.append("i"); break; case '\u00F0': // ð output.append("d"); break; case '\u00F1': // ñ output.append("n"); break; case '\u00F2': // ò case '\u00F3': // ó case '\u00F4': // ô case '\u00F5': // õ case '\u00F8': // ø output.append("o"); break; case '\u00F6': // ö case '\u0153': // Å“ output.append("oe"); break; case '\u00DF': // ß output.append("ss"); 
break; case '\u00FE': // þ output.append("th"); break; case '\u00F9': // ù case '\u00FA': // ú case '\u00FB': // û output.append("u"); break; case '\u00FC': // ü output.append("ue"); break; case '\u00FD': // ý case '\u00FF': // ÿ output.append("y"); break; default: output.append(input.charAt(i)); break; } } return output.toString(); } // ad hoc stopword list for the cleanField method public final static List<String> stopwords = Arrays.asList("the", "of", "and", "du", "de le", "de la", "des", "der", "an", "und"); /** * Remove useless punctuation at the end and beginning of a metadata field. * <p/> * Still experimental ! Use with care ! */ public final static String cleanField(String input0, boolean applyStopwordsFilter) { if (input0 == null) { return null; } if (input0.length() == 0) { return null; } String input = input0.replace(",,", ","); input = input.replace(", ,", ","); int n = input.length(); // characters at the end for (int i = input.length() - 1; i > 0; i--) { char c = input.charAt(i); if ((c == ',') || (c == ' ') || (c == '.') || (c == '-') || (c == '_') || (c == '/') || //(c == ')') || //(c == '(') || (c == ':')) { n = i; } else if (c == ';') { // we have to check if we have an html entity finishing if (i - 3 >= 0) { char c0 = input.charAt(i - 3); if (c0 == '&') { break; } } if (i - 4 >= 0) { char c0 = input.charAt(i - 4); if (c0 == '&') { break; } } if (i - 5 >= 0) { char c0 = input.charAt(i - 5); if (c0 == '&') { break; } } if (i - 6 >= 0) { char c0 = input.charAt(i - 6); if (c0 == '&') { break; } } n = i; } else break; } input = input.substring(0, n); // characters at the begining n = 0; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if ((c == ',') || (c == ' ') || (c == '.') || (c == ';') || (c == '-') || (c == '_') || //(c == ')') || //(c == '(') || (c == ':')) { n = i; } else break; } input = input.substring(n, input.length()).trim(); if ((input.endsWith(")")) && (input.startsWith("("))) { input = input.substring(1, 
input.length() - 1).trim(); } if ((input.length() > 12) && (input.endsWith("&quot;")) && (input.startsWith("&quot;"))) { input = input.substring(6, input.length() - 6).trim(); } if (applyStopwordsFilter) { boolean stop = false; while (!stop) { stop = true; for (String word : stopwords) { if (input.endsWith(SPACE + word)) { input = input.substring(0, input.length() - word.length()).trim(); stop = false; break; } } } } return input.trim(); } /** * Segment piece of text following a list of segmentation characters. * "hello, world." -> [ "hello", ",", "world", "." ] * * @param input the string to be processed. * @param input the characters creating a segment (typically space and punctuations). * @return Returns the string without accent. */ public final static List<String> segment(String input, String segments) { if (input == null) return null; ArrayList<String> result = new ArrayList<String>(); String token = null; String seg = " \n\t"; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); int ind = seg.indexOf(c); if (ind != -1) { if (token != null) { result.add(token); token = null; } } else { int ind2 = segments.indexOf(c); if (ind2 == -1) { if (token == null) token = "" + c; else token += c; } else { if (token != null) { result.add(token); token = null; } result.add("" + segments.charAt(ind2)); } } } if (token != null) result.add(token); return result; } /** * Encode a string to be displayed in HTML * <p/> * If fullHTML encode, then all unicode characters above 7 bits are converted into * HTML entitites */ public static String HTMLEncode(String string) { return HTMLEncode(string, false); } public static String HTMLEncode(String string, boolean fullHTML) { if (string == null) return null; if (string.length() == 0) return string; //string = string.replace("@BULLET", "•"); StringBuffer sb = new StringBuffer(string.length()); // true if last char was blank boolean lastWasBlankChar = false; int len = string.length(); char c; for (int i = 0; i < len; i++) 
{ c = string.charAt(i); if (c == ' ') { // blank gets extra work, // this solves the problem you get if you replace all // blanks with &nbsp;, if you do that you loss // word breaking if (lastWasBlankChar) { lastWasBlankChar = false; //sb.append("&nbsp;"); } else { lastWasBlankChar = true; sb.append(' '); } } else { lastWasBlankChar = false; // // HTML Special Chars if (c == '"') sb.append("&quot;"); else if (c == '\'') sb.append("&apos;"); else if (c == '&') { boolean skip = false; // we don't want to recode an existing hmlt entity if (string.length() > i + 3) { char c2 = string.charAt(i + 1); char c3 = string.charAt(i + 2); char c4 = string.charAt(i + 3); if (c2 == 'a') { if (c3 == 'm') { if (c4 == 'p') { if (string.length() > i + 4) { char c5 = string.charAt(i + 4); if (c5 == ';') { skip = true; } } } } } else if (c2 == 'q') { if (c3 == 'u') { if (c4 == 'o') { if (string.length() > i + 5) { char c5 = string.charAt(i + 4); char c6 = string.charAt(i + 5); if (c5 == 't') { if (c6 == ';') { skip = true; } } } } } } else if (c2 == 'l' || c2 == 'g') { if (c3 == 't') { if (c4 == ';') { skip = true; } } } } if (!skip) { sb.append("&amp;"); } else { sb.append("&"); } } else if (c == '<') sb.append("&lt;"); else if (c == '>') sb.append("&gt;"); /*else if (c == '\n') { // warning: this can be too much html! sb.append("&lt;br/&gt;"); }*/ else { int ci = 0xffff & c; if (ci < 160) { // nothing special only 7 Bit sb.append(c); } else { if (fullHTML) { // Not 7 Bit use the unicode system sb.append("&#"); sb.append(Integer.valueOf(ci).toString()); sb.append(';'); } else sb.append(c); } } } } return sb.toString(); } public static String normalizeRegex(String string) { string = string.replace("&", "\\\\&"); string = string.replace("&", "\\\\&"); string = string.replace("+", "\\\\+"); return string; } /* * To convert the InputStream to String we use the BufferedReader.readLine() * method. We iterate until the BufferedReader return null which means * there's no more data to read. 
Each line will appended to a StringBuilder * and returned as String. */ static public String convertStreamToString(InputStream is) { BufferedReader reader = new BufferedReader(new InputStreamReader(is)); StringBuilder sb = new StringBuilder(); String line = null; try { while ((line = reader.readLine()) != null) { sb.append(line + "\n"); } } catch (IOException e) { // e.printStackTrace(); throw new GrobidException("An exception occured while running Grobid.", e); } finally { try { is.close(); } catch (IOException e) { // e.printStackTrace(); throw new GrobidException("An exception occured while running Grobid.", e); } } return sb.toString(); } /** * Count the number of digit in a given string. * * @param text the string to be processed. * @return Returns the number of digit chracaters in the string... */ static public int countDigit(String text) { int count = 0; for (int i = 0; i < text.length(); i++) { char c = text.charAt(i); if (Character.isDigit(c)) count++; } return count; } /** * Map special ligature and characters coming from the pdf */ static public String clean(String token) { if (token == null) return null; if (token.length() == 0) return token; String res = ""; int i = 0; while (i < token.length()) { switch (token.charAt(i)) { // ligature case '\uFB00': { res += "ff"; break; } case '\uFB01': { res += "fi"; break; } case '\uFB02': { res += "fl"; break; } case '\uFB03': { res += "ffi"; break; } case '\uFB04': { res += "ffl"; break; } case '\uFB06': { res += "st"; break; } case '\uFB05': { res += "ft"; break; } case '\u00E6': { res += "ae"; break; } case '\u00C6': { res += "AE"; break; } case '\u0153': { res += "oe"; break; } case '\u0152': { res += "OE"; break; } // quote case '\u201C': { res += "\""; break; } case '\u201D': { res += "\""; break; } case '\u201E': { res += "\""; break; } case '\u201F': { res += "\""; break; } case '\u2019': { res += "'"; break; } case '\u2018': { res += "'"; break; } // bullet uniformity case '\u2022': { res += "•"; break; } 
case '\u2023': { res += "•"; break; } case '\u2043': { res += "•"; break; } case '\u204C': { res += "•"; break; } case '\u204D': { res += "•"; break; } case '\u2219': { res += "•"; break; } case '\u25C9': { res += "•"; break; } case '\u25D8': { res += "•"; break; } case '\u25E6': { res += "•"; break; } case '\u2619': { res += "•"; break; } case '\u2765': { res += "•"; break; } case '\u2767': { res += "•"; break; } case '\u29BE': { res += "•"; break; } case '\u29BF': { res += "•"; break; } // asterix case '\u2217': { res += " * "; break; } // typical author/affiliation markers case '\u2020': { res += SPACE + '\u2020'; break; } case '\u2021': { res += SPACE + '\u2021'; break; } case '\u00A7': { res += SPACE + '\u00A7'; break; } case '\u00B6': { res += SPACE + '\u00B6'; break; } case '\u204B': { res += SPACE + '\u204B'; break; } case '\u01C2': { res += SPACE + '\u01C2'; break; } // default default: { res += token.charAt(i); break; } } i++; } return res; } public static String formatTwoDecimals(double d) { NumberFormat nf = NumberFormat.getNumberInstance(Locale.US); DecimalFormat df = (DecimalFormat) nf; df.applyPattern("#.##"); return df.format(d); } public static String formatFourDecimals(double d) { NumberFormat nf = NumberFormat.getNumberInstance(Locale.US); DecimalFormat df = (DecimalFormat) nf; df.applyPattern("#.####"); return df.format(d); } public static boolean isAllUpperCase(String text) { for (int i = 0; i < text.length(); i++) { if (!Character.isUpperCase(text.charAt(i))) { return false; } } return true; } public static boolean isAllLowerCase(String text) { for (int i = 0; i < text.length(); i++) { if (!Character.isLowerCase(text.charAt(i))) { return false; } } return true; } public static List<String> generateEmailVariants(String firstName, String lastName) { // current heuristics: // "First Last" // "First L" // "F Last" // "First" // "Last" // "Last First" // "Last F" List<String> variants = new ArrayList<String>(); if (lastName != null) { 
variants.add(lastName); if (firstName != null) { variants.add(firstName + SPACE + lastName); variants.add(lastName + SPACE + firstName); if (firstName.length() > 1) { String firstInitial = firstName.substring(0, 1); variants.add(firstInitial + SPACE + lastName); variants.add(lastName + SPACE + firstInitial); } if (lastName.length() > 1) { String lastInitial = lastName.substring(0, 1); variants.add(firstName + SPACE + lastInitial); } } } else { if (firstName != null) { variants.add(firstName); } } return variants; } /** * This is a re-implementation of the capitalizeFully of Apache commons lang, because it appears not working * properly. * <p/> * Convert a string so that each word is made up of a titlecase character and then a series of lowercase * characters. Words are defined as token delimited by one of the character in delimiters or the begining * of the string. */ public static String capitalizeFully(String input, String delimiters) { if (input == null) { return null; } //input = input.toLowerCase(); String output = ""; boolean toUpper = true; for (int c = 0; c < input.length(); c++) { char ch = input.charAt(c); if (delimiters.indexOf(ch) != -1) { toUpper = true; output += ch; } else { if (toUpper == true) { output += Character.toUpperCase(ch); toUpper = false; } else { output += Character.toLowerCase(ch); } } } return output; } public static String wordShape(String word) { StringBuilder shape = new StringBuilder(); for (char c : word.toCharArray()) { if (Character.isLetter(c)) { if (Character.isUpperCase(c)) { shape.append("X"); } else { shape.append("x"); } } else if (Character.isDigit(c)) { shape.append("d"); } else { shape.append(c); } } StringBuilder finalShape = new StringBuilder().append(shape.charAt(0)); String suffix = ""; if (word.length() > 2) { suffix = shape.substring(shape.length() - 2); } else if (word.length() > 1) { suffix = shape.substring(shape.length() - 1); } StringBuilder middle = new StringBuilder(); if (shape.length() > 3) { char ch = 
shape.charAt(1); for (int i = 1; i < shape.length() - 2; i++) { middle.append(ch); while (ch == shape.charAt(i) && i < shape.length() - 2) { i++; } ch = shape.charAt(i); } if (ch != middle.charAt(middle.length() - 1)) { middle.append(ch); } } return finalShape.append(middle).append(suffix).toString(); } public static String wordShapeTrimmed(String word) { StringBuilder shape = new StringBuilder(); for (char c : word.toCharArray()) { if (Character.isLetter(c)) { if (Character.isUpperCase(c)) { shape.append("X"); } else { shape.append("x"); } } else if (Character.isDigit(c)) { shape.append("d"); } else { shape.append(c); } } StringBuilder middle = new StringBuilder(); char ch = shape.charAt(0); for (int i = 0; i < shape.length(); i++) { middle.append(ch); while (ch == shape.charAt(i) && i < shape.length() - 1) { i++; } ch = shape.charAt(i); } if (ch != middle.charAt(middle.length() - 1)) { middle.append(ch); } return middle.toString(); } /** * Give the punctuation profile of a line, i.e. the concatenation of all the punctuations * occuring in the line. * * @param line the string corresponding to a line * @return the punctuation profile as a string, empty string is no punctuation * @throws Exception */ public static String punctuationProfile(String line) { String profile = ""; if ((line == null) || (line.length() == 0)) { return profile; } for (int i = 0; i < line.length(); i++) { char c = line.charAt(i); if (c == ' ') { continue; } if (fullPunctuations.indexOf(c) != -1) profile += c; } return profile; } /** * Return the number of token in a line given an existing global tokenization and a current * start position of the line in this global tokenization. 
* * @param line the string corresponding to a line * @param currentLinePos position of the line in the tokenization * @param tokenization the global tokenization where the line appears * @return the punctuation profile as a string, empty string is no punctuation * @throws Exception */ public static int getNbTokens(String line, int currentLinePos, List<String> tokenization) throws Exception { if ((line == null) || (line.length() == 0)) return 0; String currentToken = tokenization.get(currentLinePos); while ((currentLinePos < tokenization.size()) && (currentToken.equals(" ") || currentToken.equals("\n"))) { currentLinePos++; currentToken = tokenization.get(currentLinePos); } if (!line.trim().startsWith(currentToken)) { System.out.println("out of sync. : " + currentToken); throw new IllegalArgumentException("line start does not match given tokenization start"); } int nbTokens = 0; int posMatch = 0; // current position in line for (int p = currentLinePos; p < tokenization.size(); p++) { currentToken = tokenization.get(p); posMatch = line.indexOf(currentToken, posMatch); if (posMatch == -1) break; nbTokens++; } return nbTokens; } /** * Ensure that special XML characters are correctly encoded. */ public static String trimEncodedCharaters(String string) { return string.replaceAll("&amp\\s+;", "&amp;"). replaceAll("&quot\\s+;|&amp;quot\\s*;", "&quot;"). replaceAll("&lt\\s+;|&amp;lt\\s*;", "&lt;"). replaceAll("&gt\\s+;|&amp;gt\\s*;", "&gt;"). 
replaceAll("&apos\\s+;|&amp;apos\\s*;", "&apos;"); } public static boolean filterLine(String line) { boolean filter = false; if ((line == null) || (line.length() == 0)) filter = true; else if (line.contains("@IMAGE") || line.contains("@PAGE")) { filter = true; } else if (line.contains(".pbm") || line.contains(".ppm") || line.contains(".svg") || line.contains(".jpg") || line.contains(".png")) { filter = true; } return filter; } /** * The equivalent of String.replaceAll() for StringBuilder */ public static StringBuilder replaceAll(StringBuilder sb, String regex, String replacement) { Pattern pattern = Pattern.compile(regex); Matcher m = pattern.matcher(sb); int start = 0; while (m.find(start)) { sb.replace(m.start(), m.end(), replacement); start = m.start() + replacement.length(); } return sb; } /** * Return the prefix of a string. */ public static String prefix(String s, int count) { if (s == null) { return null; } if (s.length() < count) { return s; } return s.substring(0, count); } /** * Return the suffix of a string. */ public static String suffix(String s, int count) { if (s == null) { return null; } if (s.length() < count) { return s; } return s.substring(s.length() - count); } public static String JSONEncode(String json) { // we assume all json string will be bounded by double quotes return json.replaceAll("\"", "\\\"").replaceAll("\n", "\\\n"); } public static String strrep(char c, int times) { StringBuilder builder = new StringBuilder(); for (int i = 0; i < times; i++) { builder.append(c); } return builder.toString(); } public static int getOccCount(String term, String string) { return StringUtils.countMatches(term, string); } /** * Test for the current string contains at least one digit. * * @param tok the string to be processed. 
* @return true if contains a digit */ public static boolean test_digit(String tok) { if (tok == null) return false; if (tok.length() == 0) return false; char a; for (int i = 0; i < tok.length(); i++) { a = tok.charAt(i); if (Character.isDigit(a)) return true; } return false; } /** * Useful for recognising an acronym candidate: check if a text is only * composed of upper case, dot and digit characters */ public static boolean isAllUpperCaseOrDigitOrDot(String text) { for (int i = 0; i < text.length(); i++) { final char charAt = text.charAt(i); if (!Character.isUpperCase(charAt) && !Character.isDigit(charAt) && charAt != '.') { return false; } } return true; } /** * Remove indicated leading and trailing characters from a string **/ public static String removeLeadingAndTrailingChars(String text, String leadingChars, String trailingChars) { text = StringUtils.stripStart(text, leadingChars); text = StringUtils.stripEnd(text, trailingChars); return text; } /** * Remove indicated leading and trailing characters from a string represented as a list of LayoutToken. * Indicated leading and trailing characters must be matching exactly the layout token text content. 
**/ public static List<LayoutToken> removeLeadingAndTrailingCharsLayoutTokens(List<LayoutToken> tokens, String leadingChars, String trailingChars) { if (tokens == null) return tokens; if (tokens.size() == 0) return tokens; int start = 0; for(int i=0; i<tokens.size(); i++) { LayoutToken token = tokens.get(i); if (token.getText() == null || token.getText().length() == 0) { start++; continue; } else if (token.getText().length() > 1) { break; } else if (leadingChars.contains(token.getText())) { start++; } else break; } int end = tokens.size(); for(int i=end; i>0; i--) { LayoutToken token = tokens.get(i-1); if (token.getText() == null || token.getText().length() == 0) { end--; continue; } else if (token.getText().length() > 1) { break; } else if (trailingChars.contains(token.getText())) { end--; } else break; } return tokens.subList(start, end); } }
46,407
32.874453
156
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/LevenshteinDistanceAligment.java
package org.grobid.core.utilities;

import java.util.*;

/**
 * Distance and aligment of strings based on Levenshtein edit distances using the
 * standard <b>Dynamic Programming</b> algorithm. Standard Levenshtein distance is
 * implemented, i.e. without transpose!
 * <p/>
 * The edit matrix is materialised lazily, one diagonal at a time, which makes the
 * computation O(|a| * D(a,b)) instead of O(|a| * |b|) in the typical case.
 * <p/>
 * Based on algorithm at
 * - http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
 * - http://www.csse.monash.edu.au/~lloyd/tildeFP/Haskell/1998/Edit01/
 * - Xuan Luo implementation http://xuanluo.bol.ucla.edu/Levenshtein.java
 */
public class LevenshteinDistanceAligment<E> {
    // 'a' is always the shorter of the two input sequences (see constructor);
    // 'twist' records whether the inputs were swapped so the alignment can be mirrored back
    private final E[] a, b;
    // diagonal of the edit matrix containing the bottom-right cell
    private Diagonal<E> diag;
    // final edit distance, filled in by compute()
    private int dist;
    private boolean twist = false;

    public LevenshteinDistanceAligment(E[] _a, E[] _b) {
        // keep the shorter sequence in 'a'; remember the swap in 'twist'
        if (_a.length < _b.length) {
            a = _a;
            b = _b;
            twist = false;
        } else {
            a = _b;
            b = _a;
            twist = true;
        }
        compute();
    }

    /** Edit operations reported in an alignment. */
    public enum Op {Start, Match, Insert, Delete, Substitute}

    /**
     * One diagonal of the (lazy) edit matrix.
     * A diagonal starts at a[0], b[abs(offset)]; the lower half of the matrix
     * uses negative offsets and is represented as the upper half of the transpose.
     */
    private static class Diagonal<E> {
        private final int offset;
        private final E[] a;             // left string
        private final E[] b;             // top string
        private Diagonal<E> prev;        // below-left diagonal
        private Diagonal<E> next = null; // above-right diagonal
        /* list of elements; is never empty after constructor finishes */
        private final List<Integer> elements = new ArrayList<Integer>();

        public Diagonal(E[] _a, E[] _b, Diagonal<E> diagBelow, int o) {
            assert Math.abs(o) <= _b.length;
            a = _a;
            b = _b;
            prev = diagBelow;
            offset = o;
            // first element of diagonal = |offset|
            elements.add(Math.abs(offset));
        }

        /**
         * returns below diagonal, creating it if necessary
         */
        public Diagonal<E> getBelow() {
            if (prev == null) {
                // only happens for main diagonal
                assert offset == 0;
                // lower half has a, b switched, so see themselves
                // as the upper half of the transpose
                prev = new Diagonal<E>(b, a, this, -1);
            }
            return prev;
        }

        /**
         * returns above diagonal, creating it if necessary
         */
        public Diagonal<E> getAbove() {
            if (next == null)
                next = new Diagonal<E>(a, b, this, offset >= 0 ? offset + 1 : offset - 1);
            return next;
        }

        /**
         * get entry to the left
         */
        public int getW(int i) {
            assert i >= 0 && (offset != 0 || i > 0);
            // if this is main diagonal, then left diagonal is 1 shorter
            return getBelow().get(offset == 0 ? i - 1 : i);
        }

        /**
         * get entry above
         */
        public int getN(int i) {
            assert i > 0;
            // above diagonal is 1 shorter
            return getAbove().get(i - 1);
        }

        /**
         * compute element j of this diagonal (memoised in 'elements')
         */
        public int get(int j) {
            assert j >= 0 && j <= b.length - Math.abs(offset) && j <= a.length;
            if (j < elements.size())
                return elements.get(j);
            int me = elements.get(elements.size() - 1);
            while (elements.size() <= j) {
                int nw = me;
                int i = elements.size();
                //   \  \  \
                //    \  \  \
                //     \ nw  n
                //      \  \
                //       w  me
                // according to dynamic programming algorithm,
                // if characters are equal, me = nw
                // otherwise, me = 1 + min3 (w, nw, n)
                if (a[i - 1].equals(b[Math.abs(offset) + i - 1]))
                    me = nw;
                else {
                    // see L. Allison, Lazy Dynamic-Programming can be Eager
                    // Inf. Proc. Letters 43(4) pp207-212, Sept' 1992
                    // computes min3 (w, nw, n)
                    // but does not always evaluate n
                    // this makes it O(|a|*D(a,b))
                    int w = getW(i);
                    if (w < nw) // if w < nw, then w <= n
                        me = 1 + w;
                    else {
                        int n = getN(i);
                        me = 1 + Math.min(nw, n);
                    }
                    // me = 1 + Math.min(w, Math.min(nw, n))
                    // would make it O(|a|*|b|)
                }
                elements.add(me);
            }
            return me;
        }

        /**
         * get the last operation used to get to a certain element
         */
        public Op getOp(int i) {
            if (i == 0) {
                // first cell of a diagonal: reached from the matrix origin,
                // or by pure insertions (upper half) / deletions (lower half)
                if (offset == 0)
                    return Op.Start;
                else if (offset > 0)
                    return Op.Insert;
                else
                    return Op.Delete;
            } else if ((offset + i - 1) >= 0) { // PL
                if (a[i - 1].equals(b[offset + i - 1]))
                    return Op.Match;
                else {
                    int me = get(i);
                    int w = getW(i);
                    int nw = get(i - 1);
                    if (me == 1 + w)
                        return offset >= 0 ? Op.Insert : Op.Delete;
                    else if (me == 1 + nw)
                        return Op.Substitute;
                    else
                        return offset >= 0 ? Op.Delete : Op.Insert;
                }
            } else {
                // offset + i - 1 < 0: cannot compare characters, decide from costs only
                int me = get(i);
                int w = getW(i);
                int nw = get(i - 1);
                if (me == 1 + w)
                    return offset >= 0 ? Op.Insert : Op.Delete;
                else if (me == 1 + nw)
                    return Op.Substitute;
                else
                    return offset >= 0 ? Op.Delete : Op.Insert;
            }
        }
    }

    /**
     * perform the Levenshtein distance computation of sequences a and b
     */
    private void compute() {
        // diagonal from the top-left element
        Diagonal<E> mainDiag = new Diagonal<E>(a, b, null, 0);
        // which is the diagonal containing the bottom R.H. elt?
        int lba = b.length - a.length;
        if (lba >= 0) {
            diag = mainDiag;
            for (int i = 0; i < lba; i++)
                diag = diag.getAbove();
        } else {
            // note: ~lba == -lba - 1 for negative lba
            diag = mainDiag.getBelow();
            for (int i = 0; i < ~lba; i++)
                diag = diag.getAbove();
        }
        dist = diag.get(Math.min(a.length, b.length));
    }

    /**
     * retrieves the Levenshtein distance
     */
    public int getDistance() {
        return dist;
    }

    /**
     * retrieves the Levenshtein distance alignment as an ordered list of edit operations
     */
    public List<Op> getAlignment() {
        Diagonal<E> diag = this.diag;
        // compute alignment by backtracking through structure
        LinkedList<Op> alignment = new LinkedList<Op>();
        int i = Math.min(a.length, b.length);
        LOOP:
        while (true) {
            // adds operations in reverse order
            if (diag == null)
                break LOOP; //PL
            Op op = diag.getOp(i);
            switch (op) {
                case Match:
                case Substitute:
                    i--;
                    break;
                case Insert:
                    if (diag.offset == 0) {
                        diag = diag.prev;
                        i--;
                    } else if (diag.offset >= 0) {
                        diag = diag.prev;
                    } else {
                        diag = diag.next;
                        i--;
                    }
                    break;
                case Delete:
                    if (diag.offset == 0) {
                        diag = diag.next;
                        i--;
                    } else if (diag.offset >= 0) {
                        diag = diag.next;
                        i--;
                    } else {
                        diag = diag.prev;
                    }
                    break;
                case Start:
                    break LOOP;
            }
            alignment.add(op);
        }
        Collections.reverse(alignment);
        if (twist) {
            // we take the symetric of the current result, so simply replace substitute by delete and resp.
            LinkedList<Op> alignment2 = new LinkedList<Op>();
            for (Op op : alignment) {
                if (op == Op.Delete)
                    alignment2.add(Op.Insert);
                else if (op == Op.Insert)
                    alignment2.add(Op.Delete);
                else
                    alignment2.add(op);
            }
            alignment = alignment2;
        }
        return alignment;
    }

    /** Convert a String into a boxed Character array usable with this class. */
    public static Character[] str2chararray(String x) {
        Character[] result = new Character[x.length()];
        for (int i = 0; i < x.length(); i++) {
            result[i] = x.charAt(i);
        }
        return result;
    }

    public static void main(String[] args) {
        assert args.length == 2;
        LevenshteinDistanceAligment<Character> foo =
            new LevenshteinDistanceAligment<Character>(str2chararray(args[0]), str2chararray(args[1]));
        System.out.println("Levenshtein distance = " + foo.getDistance());
        System.out.println("Alignment: " + foo.getAlignment());
    }
}
10,136
30.877358
113
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/OffsetPosition.java
package org.grobid.core.utilities;

import java.util.Objects;

/**
 * A half-open interval of offsets [start, end), ordered by (start, end).
 * Both bounds default to -1, meaning "unset".
 */
public class OffsetPosition implements Comparable<OffsetPosition> {
    public int start = -1;
    public int end = -1;

    public OffsetPosition() {
    }

    public OffsetPosition(int start, int end) {
        this.start = start;
        this.end = end;
    }

    /**
     * @return true if this position and pos share at least one offset,
     * treating end as exclusive (touching intervals do not overlap)
     */
    public boolean overlaps(OffsetPosition pos) {
        return !((end <= pos.start) || (start >= pos.end));
    }

    @Override
    public String toString() {
        return "" + start + "\t" + end;
    }

    @Override
    public int compareTo(OffsetPosition pos) {
        // natural ordering by start, ties broken by end — same semantics as the
        // previous hand-written comparison chain, via Integer.compare
        int cmp = Integer.compare(start, pos.start);
        if (cmp != 0)
            return cmp;
        return Integer.compare(end, pos.end);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        OffsetPosition that = (OffsetPosition) o;
        // plain field comparison replaces the commons-lang EqualsBuilder:
        // same contract, no third-party dependency
        return start == that.start && end == that.end;
    }

    @Override
    public int hashCode() {
        // Objects.hash replaces HashCodeBuilder; hash values differ from the previous
        // implementation but remain consistent with equals (hashCode is not persisted)
        return Objects.hash(start, end);
    }
}
1,482
22.919355
67
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/GrobidConfig.java
package org.grobid.core.utilities;

import java.util.List;

/**
 * This class is a bean for the YAML configuation of the GROBID instance.
 * Field names and default values map 1:1 to keys of the configuration file,
 * so they must not be renamed without changing the YAML accordingly.
 */
public class GrobidConfig {

    // root of the configuration: everything lives under the "grobid" YAML key
    public GrobidParameters grobid;

    public static class GrobidParameters {
        public String grobidHome = "grobid-home";
        public String temp = "./tmp";
        public String nativelibrary = "./lib";
        public PdfParameters pdf;
        public ConsolidationParameters consolidation;
        public HostParameters proxy;
        // fully-qualified class names of the factories to instantiate
        public String languageDetectorFactory;
        public String sentenceDetectorFactory;
        public int concurrency = 10;
        // NOTE(review): unit (seconds?) not visible from here — confirm against the pool code
        public int poolMaxWait = 1;
        public DelftParameters delft;
        public WapitiParameters wapiti;
        // per-model engine/parameter overrides
        public List<ModelParameters> models;
    }

    public static class PdfParameters {
        public PdfAltoParameters pdfalto;
        // hard limits on document size accepted for processing
        public int blocksMax = 100000;
        public int tokensMax = 1000000;
    }

    public static class PdfAltoParameters {
        // path to the pdfalto executable
        public String path;
        public int memoryLimitMb = 6096;
        public int timeoutSec = 60;
    }

    public static class ConsolidationParameters {
        // which consolidation service to use (e.g. "crossref" or "glutton")
        public String service;
        public HostParameters glutton;
        public CrossrefParameters crossref;
    }

    public static class CrossrefParameters {
        public String mailto;
        public String token;
    }

    /** Generic host/endpoint description (used for proxy and glutton). */
    public static class HostParameters {
        public String type;
        public String host;
        public int port;
        public String url;
    }

    public static class DelftParameters {
        /**
         * Generic parameters relative to the DeLFT engine
         */
        public String install;
        public String pythonVirtualEnv;
    }

    public static class WapitiParameters {
        /**
         * Generic parameters relative to the Wapiti engine
         */
        public int nbThreads = 0;
    }

    public static class WapitiModelParameters {
        /**
         * Parameters relative to a specific Wapiti model
         */
        public double epsilon = 0.00001;
        public int window = 20;
        public int nbMaxIterations = 2000;
    }

    public static class DelftModelParameters {
        /**
         * Parameters relative to a specific DeLFT model (train and runtime)
         */
        public String architecture;
        public boolean useELMo = false;
        public String embeddings_name = "glove-840B";
        public String transformer;
        public DelftModelParameterSet training;
        public DelftModelParameterSet runtime;
    }

    public static class DelftModelParameterSet {
        /**
         * Parameters relative to a specific DeLFT model and either training or runtime
         */
        // -1 means "not set, use the engine default"
        public int max_sequence_length = -1;
        public int batch_size = -1;
    }

    public static class ModelParameters {
        public String name; /* name of model */
        public String engine; /* value wapiti or delft */
        public WapitiModelParameters wapiti;
        public DelftModelParameters delft;
    }
}
3,136
26.278261
87
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/LayoutTokensUtil.java
package org.grobid.core.utilities;

import org.apache.commons.lang3.StringUtils;

import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.LayoutToken;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * Dealing with layout tokens
 */
public class LayoutTokensUtil {

    /** Maps a LayoutToken to its raw text content. */
    public static final Function<LayoutToken, String> TO_TEXT_FUNCTION = new Function<LayoutToken, String>() {
        @Override
        public String apply(LayoutToken layoutToken) {
            return layoutToken.t();
        }
    };

    /**
     * Mark each token followed by a "\n" token with newLineAfter=true and
     * normalize the "\n" tokens themselves into plain spaces (in place).
     *
     * @return the same (mutated) list
     */
    public static List<LayoutToken> enrichWithNewLineInfo(List<LayoutToken> toks) {
        PeekingIterator<LayoutToken> tokens = Iterators.peekingIterator(toks.iterator());
        while (tokens.hasNext()) {
            LayoutToken curToken = tokens.next();
            if (tokens.hasNext() && tokens.peek().getText().equals("\n")) {
                curToken.setNewLineAfter(true);
            }
            if (curToken.getText().equals("\n")) {
                curToken.setText(" ");
            }
        }
        return toks;
    }

    /** Replace line breaks by spaces and collapse whitespace runs. */
    public static String normalizeText(String text) {
        return StringUtils.normalizeSpace(text.replace("\n", " "));
    }

    /** Replace line breaks by spaces and collapse whitespace runs in the token text. */
    public static String normalizeText(List<LayoutToken> tokens) {
        return StringUtils.normalizeSpace(toText(tokens).replace("\n", " "));
    }

    /** Same as {@link #normalizeText(List)}, with end-of-line hyphens removed first. */
    public static String normalizeDehyphenizeText(List<LayoutToken> tokens) {
        return StringUtils.normalizeSpace(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(tokens)).replace("\n", " "));
    }

    /** Concatenate the raw text of all tokens, without separator. */
    public static String toText(List<LayoutToken> tokens) {
        return Joiner.on("").join(Iterables.transform(tokens, TO_TEXT_FUNCTION));
    }

    /** @return true when the token carries no usable coordinates */
    public static boolean noCoords(LayoutToken t) {
        return t.getPage() == -1 || t.getWidth() <= 0;
    }

    public static boolean spaceyToken(String tok) {
        // all space characters are normalised into simple space character
        return tok.equals(" ");
    }

    public static boolean newLineToken(String tok) {
        // all new line characters are normalised into simple \n character
        return tok.equals("\n");
    }

    /** @return true if one of the tokens has exactly the given text */
    public static boolean containsToken(List<LayoutToken> toks, String text) {
        for (LayoutToken t : toks) {
            if (text.equals(t.t())) {
                return true;
            }
        }
        return false;
    }

    /** @return index of the first token with exactly the given text, or -1 */
    public static int tokenPos(List<LayoutToken> toks, String text) {
        int cnt = 0;
        for (LayoutToken t : toks) {
            if (text.equals(t.t())) {
                return cnt;
            }
            cnt++;
        }
        return -1;
    }

    /** @return index of the first token fully matching the pattern, or -1 */
    public static int tokenPos(List<LayoutToken> toks, Pattern p) {
        int cnt = 0;
        for (LayoutToken t : toks) {
            if (p.matcher(t.t()).matches()) {
                return cnt;
            }
            cnt++;
        }
        return -1;
    }

    public static List<List<LayoutToken>> split(List<LayoutToken> toks, Pattern p, boolean preserveSeparator) {
        return split(toks, p, preserveSeparator, true);
    }

    /**
     * Split a token sequence on tokens fully matching the pattern.
     *
     * @param preserveSeparator when true the matching token is kept at the end of its segment
     * @param preserveLeftOvers when true a trailing non-empty segment without separator is kept
     */
    public static List<List<LayoutToken>> split(List<LayoutToken> toks, Pattern p, boolean preserveSeparator, boolean preserveLeftOvers) {
        List<List<LayoutToken>> split = new ArrayList<>();
        List<LayoutToken> curToks = new ArrayList<>();
        for (LayoutToken tok : toks) {
            if (p.matcher(tok.t()).matches()) {
                if (preserveSeparator) {
                    curToks.add(tok);
                }
                split.add(curToks);
                curToks = new ArrayList<>();
            } else {
                curToks.add(tok);
            }
        }
        if (preserveLeftOvers) {
            if (!curToks.isEmpty()) {
                split.add(curToks);
            }
        }
        return split;
    }

    /** @return true when two consecutive boxes are vertically further apart than distance */
    public static boolean tooFarAwayVertically(List<BoundingBox> boxes, double distance) {
        if (boxes == null) {
            return false;
        }
        for (int i = 0; i < boxes.size() - 1; i++) {
            if (boxes.get(i).verticalDistanceTo(boxes.get(i + 1)) > distance) {
                return true;
            }
        }
        return false;
    }

    /** Bounding boxes of the tokens, serialized as a ";"-separated string. */
    public static String getCoordsString(List<LayoutToken> toks) {
        List<BoundingBox> res = BoundingBoxCalculator.calculate(toks);
        return Joiner.on(";").join(res);
    }

    /** Single merged bounding box of the tokens, or null when none can be computed. */
    public static String getCoordsStringForOneBox(List<LayoutToken> toks) {
        BoundingBox res = BoundingBoxCalculator.calculateOneBox(toks, true);
        if (res == null) {
            return null;
        }
        return res.toString();
    }

    /**
     * Remove end-of-line hyphens: when a "-" token is judged to be a line-break
     * hyphenation (see {@link #doesRequireDehypenisation}), the hyphen and the
     * surrounding spaces/newlines are dropped so the word halves rejoin.
     */
    public static List<LayoutToken> dehyphenize(List<LayoutToken> tokens) {
        List<LayoutToken> output = new ArrayList<>();
        for (int i = 0; i < tokens.size(); i++) {
            LayoutToken currentToken = tokens.get(i);
            //the current token is dash (and is neither subscript nor superscript) checking what's around
            if (currentToken.getText().equals("-") && !(currentToken.isSubscript() || currentToken.isSuperscript())) {
                if (doesRequireDehypenisation(tokens, i)) {
                    //Cleanup eventual additional spaces before the hypen that have been already written to the output
                    int z = output.size() - 1;
                    while (z >= 0 && output.get(z).getText().equals(" ")) {
                        output.remove(z);
                        z--;
                    }
                    List<Integer> breakLines = new ArrayList<>();
                    List<Integer> spaces = new ArrayList<>();
                    int j = i + 1;
                    // bug fix: the bounds check must guard BOTH text comparisons; the previous
                    // form `j < size && A || B` evaluated tokens.get(j) with j == tokens.size()
                    // when the hyphen was followed by spaces/newlines up to the end of the list,
                    // throwing IndexOutOfBoundsException (compare the correctly parenthesized
                    // loop in doesRequireDehypenisation)
                    while (j < tokens.size()
                        && (tokens.get(j).getText().equals(" ") || tokens.get(j).getText().equals("\n"))) {
                        String tokenString = tokens.get(j).getText();
                        if (tokenString.equals("\n")) {
                            breakLines.add(j);
                        }
                        if (tokenString.equals(" ")) {
                            spaces.add(j);
                        }
                        j++;
                    }
                    // skip the whitespace that followed the removed hyphen
                    i += breakLines.size() + spaces.size();
                } else {
                    // genuine hyphen: keep it, but skip the newlines directly after it
                    output.add(currentToken);
                    List<Integer> breakLines = new ArrayList<>();
                    int j = i + 1;
                    while (j < tokens.size() && tokens.get(j).getText().equals("\n")) {
                        breakLines.add(j);
                        j++;
                    }
                    i += breakLines.size();
                }
            } else {
                output.add(currentToken);
            }
        }
        return output;
    }

    /**
     * Check if the current token (place i), or the hypen, needs to be removed or not.
     * <p>
     * It will check the tokens before and after. It will get to the next "non space" tokens and verify
     * that it's a plain word. If it's not it's keeping the hypen.
     * <p>
     * TODO: What to do in case of a punctuation is found?
     */
    protected static boolean doesRequireDehypenisation(List<LayoutToken> tokens, int i) {
        boolean forward = false;
        boolean backward = false;

        int j = i + 1;
        int breakLine = 0;
        int spacesAfter = 0;

        double coordinateY = tokens.get(i).getY();

        while (j < tokens.size() && (tokens.get(j).getText().equals(" ") || tokens.get(j).getText().equals("\n"))) {
            if (tokens.get(j).getText().equals("\n")) {
                breakLine++;
            } else if (tokens.get(j).getText().equals(" ")) {
                spacesAfter++;
            } else if (tokens.get(j).getY() > coordinateY) {
                // NOTE(review): unreachable — the loop condition restricts the token text
                // to " " or "\n", both handled by the branches above; kept for fidelity
                breakLine++;
            }
            j++;
        }

        if (breakLine == 0) {
            // check if there is a break-line using coordinates, if not, no dehypenisation
            if (j < tokens.size() && tokens.get(j).getY() == coordinateY) {
                return false;
            }
        }

        if (j < tokens.size()) {
            forward = StringUtils.isAllLowerCase(tokens.get(j).getText());
            if (forward) {
                //If nothing before the hypen, but it looks like a forward hypenisation, let's trust it
                if (i < 1) {
                    return forward;
                }

                //I check if the coordinates have changed, this means there is a newline
                if (tokens.get(j).getY() > coordinateY) {
                    return forward;
                }

                // Check backward
                int z = i - 1;
                while (z > 0 && (tokens.get(z).getText().equals(" ") || tokens.get(z).getText().equals("\n"))) {
                    z--;
                }

                if (StringUtils.isAlpha(tokens.get(z).getText())) {
                    if (tokens.get(z).getY() < coordinateY) {
                        backward = true;
                    } else if (coordinateY == -1 && breakLine > 0) {
                        backward = true;
                    }
                }
            }
        }

        return backward;
    }

    /** Tokens whose offset is >= startIncluded. */
    public static List<LayoutToken> subListByOffset(List<LayoutToken> token, int startIncluded) {
        return subListByOffset(token, startIncluded, Integer.MAX_VALUE);
    }

    /** Tokens whose offset falls in [startIncluded, endExcluded). */
    public static List<LayoutToken> subListByOffset(List<LayoutToken> token, int startIncluded, int endExcluded) {
        return token
            .stream()
            .filter(t -> t.getOffset() >= startIncluded && t.getOffset() < endExcluded)
            .collect(Collectors.toList());
    }

    /**
     * Build LayoutToken objects (with running character offsets and newLineAfter
     * flags) from an already-tokenized text.
     */
    public static List<LayoutToken> getLayoutTokensForTokenizedText(List<String> tokens) {
        List<LayoutToken> result = new ArrayList<>();
        int pos = 0;
        for (int i = 0; i < tokens.size(); i++) {
            String tok = tokens.get(i);
            LayoutToken layoutToken = new LayoutToken();
            layoutToken.setText(tok);
            layoutToken.setOffset(pos);
            result.add(layoutToken);
            pos += tok.length();
            if (i < tokens.size() - 1 && tokens.get(i + 1).equals("\n")) {
                layoutToken.setNewLineAfter(true);
            }
        }
        return result;
    }
}
11,841
34.035503
138
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/BoundingBoxCalculator.java
package org.grobid.core.utilities;

import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.LayoutToken;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Utilities to calculate bounding boxes from coordinates
 */
public class BoundingBoxCalculator {
    // maximum horizontal / vertical gap (in page units) for two token boxes
    // to be merged into a single box by calculate()
    private static final double EPS_X = 15;
    private static final double EPS_Y = 4;

    /** Single bounding box covering all tokens (page mismatches included). */
    public static BoundingBox calculateOneBox(Iterable<LayoutToken> tokens) {
        return calculateOneBox(tokens, false);
    }

    /**
     * Single bounding box covering all tokens.
     *
     * @param ignoreDifferentPageTokens when true, tokens on another page than the
     *                                  running box are skipped instead of merged
     * @return the merged box, or null when input is null or no token has coordinates
     */
    public static BoundingBox calculateOneBox(Iterable<LayoutToken> tokens, boolean ignoreDifferentPageTokens) {
        if (tokens == null) {
            return null;
        }
        BoundingBox b = null;
        for (LayoutToken t : tokens) {
            if (LayoutTokensUtil.noCoords(t)) {
                continue;
            }
            if (b == null) {
                b = BoundingBox.fromLayoutToken(t);
            } else if (ignoreDifferentPageTokens) {
                b = b.boundBoxExcludingAnotherPage(BoundingBox.fromLayoutToken(t));
            } else {
                b = b.boundBox(BoundingBox.fromLayoutToken(t));
            }
        }
        return b;
    }

    /**
     * One box per visual run of tokens: adjacent token boxes on the same line
     * (see {@link #near}) are merged, a new box is started otherwise.
     *
     * @return list of merged boxes, empty (never null) when no token has geometry
     */
    public static List<BoundingBox> calculate(List<LayoutToken> tokens) {
        List<BoundingBox> result = new ArrayList<>();
        if (tokens != null) {
            // drop degenerate tokens: zero width or height carries no usable geometry
            // (stream filter replaces the previous guava Iterables.filter + anonymous Predicate)
            tokens = tokens.stream()
                .filter(t -> !(Math.abs(t.getWidth()) <= Double.MIN_VALUE
                    || Math.abs(t.getHeight()) <= Double.MIN_VALUE))
                .collect(Collectors.toList());
        }

        if (tokens == null || tokens.isEmpty()) {
            return result;
        }

        BoundingBox lastBox = BoundingBox.fromLayoutToken(tokens.get(0));
        result.add(lastBox);
        for (int i = 1; i < tokens.size(); i++) {
            // degenerate tokens were already filtered out above
            BoundingBox b = BoundingBox.fromLayoutToken(tokens.get(i));
            if (near(lastBox, b)) {
                result.set(result.size() - 1, result.get(result.size() - 1).boundBox(b));
            } else {
                result.add(b);
            }
            lastBox = b;
        }
        return result;
    }

    //same page, Y is more or less the same, b2 follows b1 on X, and b2 close to the end of b1
    private static boolean near(BoundingBox b1, BoundingBox b2) {
        return b1.getPage() == b2.getPage()
            && Math.abs(b1.getY() - b2.getY()) < EPS_Y
            && Math.abs(b1.getY2() - b2.getY2()) < EPS_Y
            && b2.getX() - b1.getX2() < EPS_X
            && b2.getX() >= b1.getX();
    }
}
3,022
33.747126
140
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/LanguageUtilities.java
package org.grobid.core.utilities;

import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.lang.Language;
import org.grobid.core.lang.LanguageDetectorFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.InvocationTargetException;

/**
 * Class for using language guessers (singleton).
 */
public class LanguageUtilities {

    public static final Logger LOGGER = LoggerFactory
            .getLogger(LanguageUtilities.class);

    private static volatile LanguageUtilities instance = null;

    private LanguageDetectorFactory ldf = null;

    /** Lazily-initialized singleton accessor (double-checked locking on the volatile field). */
    public static LanguageUtilities getInstance() {
        if (instance == null) {
            synchronized (LanguageUtilities.class) {
                if (instance == null) {
                    LOGGER.debug("synchronized getNewInstance");
                    instance = new LanguageUtilities();
                }
            }
        }
        return instance;
    }

    private LanguageUtilities() {
        String className = GrobidProperties.getLanguageDetectorFactory();
        try {
            // Class.newInstance() is deprecated because it rethrows any checked
            // exception from the constructor undeclared; the getDeclaredConstructor()
            // route surfaces those as InvocationTargetException, handled below
            ldf = (LanguageDetectorFactory) Class.forName(className)
                    .getDeclaredConstructor().newInstance();
        } catch (ClassCastException e) {
            throw new GrobidException("Class " + className
                    + " must implement "
                    + LanguageDetectorFactory.class.getName(), e);
        } catch (ClassNotFoundException e) {
            throw new GrobidException(
                    "Class "
                            + className
                            + " was not found in the classpath. "
                            + "Make sure that it is provided correctly in the classpath", e);
        } catch (InstantiationException | NoSuchMethodException e) {
            throw new GrobidException("Class " + className
                    + " should have a default constructor", e);
        } catch (InvocationTargetException e) {
            // unwrap: the interesting failure is the one thrown by the constructor itself
            throw new GrobidException("Default constructor of class " + className
                    + " threw an exception", e.getCause());
        } catch (IllegalAccessException e) {
            throw new GrobidException(e);
        }
    }

    /**
     * Basic run for language identification.
     *
     * @param text text to classify
     * @return detected Language (code + confidence score), or null on failure
     */
    public Language runLanguageId(String text) {
        try {
            return ldf.getInstance().detect(text);
        } catch (Exception e) {
            LOGGER.warn("Cannot detect language. ", e);
            return null;
        }
    }

    /**
     * Less basic run for language identification, where a maximum length of text is used to
     * identify the language. The goal is to avoid wasting resources using a too long piece of
     * text, when normally only a small chunk is enough for a safe language prediction.
     *
     * @param text      text to classify
     * @param maxLength maximum length of text used to identify the language, in characters
     * @return detected Language (code + confidence score), or null on failure
     */
    public Language runLanguageId(String text, int maxLength) {
        try {
            int max = Math.min(text.length(), maxLength);
            return ldf.getInstance().detect(text.substring(0, max));
        } catch (Exception e) {
            LOGGER.warn("Cannot detect language. ", e);
            return null;
        }
    }
}
3,138
31.360825
93
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/SHA1.java
package org.grobid.core.utilities;

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Hash a string using sha1.
 */
public class SHA1 {

    /**
     * The class Logger.
     */
    private static final Logger LOGGER = LoggerFactory.getLogger(SHA1.class);

    /**
     * Error message.
     */
    private static final String ERROR_WHILE_EXECUTING_SHA1 = "Error while executing sha1:";

    /**
     * Return the hash value of argument using SHA1 algorithm.
     *
     * @param pArg the value to hash.
     * @return the hex-encoded hash, or an empty string if the algorithm is unavailable.
     */
    public static String getSHA1(String pArg) {
        String sha1 = "";
        try {
            MessageDigest crypt = MessageDigest.getInstance("SHA-1");
            crypt.reset();
            // StandardCharsets.UTF_8 cannot fail, unlike the String charset-name
            // overload which forced a dead UnsupportedEncodingException catch branch
            crypt.update(pArg.getBytes(StandardCharsets.UTF_8));
            sha1 = byteToHex(crypt.digest());
        } catch (NoSuchAlgorithmException exp) {
            LOGGER.error(ERROR_WHILE_EXECUTING_SHA1 + exp);
        }
        return sha1;
    }

    /**
     * Convert from byte to hexa (two lower-case hex characters per byte).
     *
     * @param hash the input in bytes.
     * @return String
     */
    protected static String byteToHex(final byte[] hash) {
        // StringBuilder replaces java.util.Formatter, which was never closed
        // (Formatter implements Closeable — resource leak)
        StringBuilder formatted = new StringBuilder(hash.length * 2);
        for (byte b : hash) {
            formatted.append(String.format("%02x", b));
        }
        return formatted.toString();
    }
}
1,414
22.196721
88
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/Consolidation.java
package org.grobid.core.utilities;

import com.rockymadden.stringmetric.similarity.RatcliffObershelpMetric;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.crossref.CrossrefClient;
import org.grobid.core.utilities.crossref.CrossrefRequestListener;
import org.grobid.core.utilities.crossref.WorkDeserializer;
import org.grobid.core.utilities.glutton.GluttonClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Option;

import java.util.*;

import static org.apache.commons.lang3.StringUtils.isNotEmpty;
import static org.grobid.core.data.BiblioItem.cleanDOI;

/**
 * Singleton class for managing the consolidation of extracted bibliographical
 * information against an external bibliographic service: either the CrossRef
 * REST API or biblio-glutton, selected via GrobidProperties.
 * When consolidation operations are realized, be sure to call the close() method
 * to ensure that all Executors are terminated.
 */
public class Consolidation {
    private static final Logger LOGGER = LoggerFactory.getLogger(Consolidation.class);

    // lazily-created singleton instance, see getInstance()
    private static volatile Consolidation instance;

    // client is either a GluttonClient or a CrossrefClient, chosen once in the constructor
    private CrossrefClient client = null;
    // converts a raw service response ("work") into a BiblioItem
    private WorkDeserializer workDeserializer = null;
    // optional counter manager for consolidation statistics; may stay null
    private CntManager cntManager = null;

    /**
     * The consolidation services supported, each with a short external name
     * usable in configuration ("crossref" / "glutton").
     */
    public enum GrobidConsolidationService {
        CROSSREF("crossref"), GLUTTON("glutton");

        private final String ext;

        GrobidConsolidationService(String ext) {
            this.ext = ext;
        }

        public String getExt() {
            return ext;
        }

        /**
         * Resolve a service from its case-insensitive enum name.
         *
         * @param name enum name, e.g. "crossref" or "GLUTTON"
         * @return the matching service
         * @throws IllegalArgumentException if name is null or unknown
         */
        public static GrobidConsolidationService get(String name) {
            if (name == null) {
                throw new IllegalArgumentException("Name of consolidation service must not be null");
            }
            String n = name.toLowerCase();
            for (GrobidConsolidationService e : values()) {
                if (e.name().toLowerCase().equals(n)) {
                    return e;
                }
            }
            throw new IllegalArgumentException("No consolidation service with name '" + name
                + "', possible values are: " + Arrays.toString(values()));
        }
    }

    /**
     * Access the singleton, creating it on first use.
     */
    public static Consolidation getInstance() {
        if (instance == null) {
            getNewInstance();
        }
        return instance;
    }

    /**
     * Creates a new instance.
     */
    private static synchronized void getNewInstance() {
        LOGGER.debug("Get new instance of Consolidation");
        instance = new Consolidation();
    }

    /**
     * Hidden constructor: selects the client implementation from the
     * configured consolidation service.
     */
    private Consolidation() {
        if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.GLUTTON)
            client = GluttonClient.getInstance();
        else
            client = CrossrefClient.getInstance();
        workDeserializer = new WorkDeserializer();
    }

    public void setCntManager(CntManager cntManager) {
        this.cntManager = cntManager;
    }

    public CntManager getCntManager() {
        return this.cntManager;
    }

    /**
     * After consolidation operations, this needs to be called to ensure that all
     * involved Executors are shut down immediately, otherwise non terminated threads
     * could prevent the JVM from exiting.
     * NOTE(review): the shutdown call is currently commented out, so this method
     * is a no-op — confirm whether client.close() should be re-enabled.
     */
    public void close() {
        //client.close();
    }

    /**
     * Try to consolidate one bibliographical object with crossref metadata lookup web services based on
     * core metadata.
     *
     * @param bib the bibliographical item to consolidate (read-only here)
     * @param rawCitation the raw reference string, if available
     * @param consolidateMode consolidation mode; when equal to 3 and no DOI is
     *        present, no query is built from partial metadata and null is returned
     * @return the first result accepted by post-validation, or null when no
     *         query could be built or no acceptable result was returned
     * @throws Exception on unexpected client failure
     */
    public BiblioItem consolidate(BiblioItem bib, String rawCitation, int consolidateMode) throws Exception {
        // filled from the asynchronous onSuccess callback below; client.finish()
        // presumably blocks until the request completes — TODO confirm in CrossrefClient
        final List<BiblioItem> results = new ArrayList<>();
        String theDOI = bib.getDOI();
        if (StringUtils.isNotBlank(theDOI)) {
            theDOI = cleanDOI(theDOI);
        }
        final String doi = theDOI;
        String aut = bib.getFirstAuthorSurname();
        String title = bib.getTitle();
        String journalTitle = bib.getJournal();
        String volume = bib.getVolume();
        if (StringUtils.isBlank(volume))
            volume = bib.getVolumeBlock();
        // derive the first page either from the explicit begin page or from a
        // "n--m" style page range
        String firstPage = null;
        String pageRange = bib.getPageRange();
        int beginPage = bib.getBeginPage();
        if (beginPage != -1) {
            firstPage = "" + beginPage;
        } else if (pageRange != null) {
            StringTokenizer st = new StringTokenizer(pageRange, "--");
            if (st.countTokens() == 2) {
                firstPage = st.nextToken();
            } else if (st.countTokens() == 1)
                firstPage = pageRange;
        }
        // prefer the normalized publication date over the raw year string
        String year = null;
        if ( bib.getNormalizedPublicationDate() != null ) {
            year = "" + bib.getNormalizedPublicationDate().getYear();
        }
        if (year == null)
            year = bib.getYear();
        if (cntManager != null)
            cntManager.i(ConsolidationCounters.CONSOLIDATION);
        long threadId = Thread.currentThread().getId();
        Map<String, String> arguments = null;
        if (StringUtils.isNotBlank(doi)) {
            // call based on the identified DOI
            arguments = new HashMap<String,String>();
            arguments.put("doi", doi);
        } else if (consolidateMode != 3) {
            // NOTE(review): each partial-metadata argument below is only added
            // when the configured service is NOT CrossRef, so for CrossRef this
            // branch builds no arguments at all — confirm this is intended
            if (StringUtils.isNotBlank(rawCitation)) {
                // call with full raw string
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("query.bibliographic", rawCitation);
                }
            }
            if (StringUtils.isNotBlank(aut)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("query.author", aut);
                }
            }
            if (StringUtils.isNotBlank(title)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("query.title", title);
                }
            }
            if (StringUtils.isNotBlank(journalTitle)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("query.container-title", journalTitle);
                }
            }
            if (StringUtils.isNotBlank(volume)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("volume", volume);
                }
            }
            if (StringUtils.isNotBlank(firstPage)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("firstPage", firstPage);
                }
            }
            if (StringUtils.isNotBlank(year)) {
                // publication year metadata, CrossRef has no year query field, they are supported by the query.bibliographic
                // field and filter
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("year", year);
                }
            }
        }

        // nothing usable to query with
        if (arguments == null || arguments.size() == 0) {
            return null;
        }

        if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.CROSSREF) {
            if (StringUtils.isBlank(doi) &&
                StringUtils.isBlank(rawCitation) &&
                (StringUtils.isBlank(aut) || StringUtils.isBlank(title)) ) {
                // there's not enough information for a crossref request, which might always return a result
                return null;
            }
        }

        if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.CROSSREF) {
            arguments.put("rows", "1"); // we just request the top-one result
        } else if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.GLUTTON) {
            // GROBID has already parsed the reference, so no need to redo this in glutton
            arguments.put("parseReference", "false");
        }

        final boolean doiQuery;
        try {
            //CrossrefRequestListener<BiblioItem> requestListener = new CrossrefRequestListener<BiblioItem>();
            if (cntManager != null) {
                cntManager.i(ConsolidationCounters.CONSOLIDATION);
            }
            // NOTE(review): doiQuery is only set to true when cntManager is non-null;
            // with a null cntManager a DOI-based CrossRef match falls through to
            // postValidation instead of being accepted directly — confirm intended
            if ( StringUtils.isNotBlank(doi) && (cntManager != null) ) {
                cntManager.i(ConsolidationCounters.CONSOLIDATION_PER_DOI);
                doiQuery = true;
            } else {
                doiQuery = false;
            }
            client.pushRequest("works", arguments, workDeserializer, threadId,
                new CrossrefRequestListener<BiblioItem>(0) {
                @Override
                public void onSuccess(List<BiblioItem> res) {
                    if ((res != null) && (res.size() > 0) ) {
                        // we need here to post-check that the found item corresponds
                        // correctly to the one requested in order to avoid false positive
                        for(BiblioItem oneRes : res) {
                            /* Glutton integrates its own post-validation, so we can skip post-validation in GROBID when it is used as
                               consolidation service. In case of crossref REST API, for single bib. ref. consolidation (this case comes only
                               for header extraction), having an extracted DOI matching is considered safe enough, and we don't require
                               further post-validation. For all the other case of matching with CrossRef, we require a post-validation. */
                            if ((GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.GLUTTON) ||
                                ( (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.CROSSREF) &&
                                   doiQuery ) ||
                                ( (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.CROSSREF) &&
                                   postValidation(bib, oneRes)) ) {
                                results.add(oneRes);
                                if (cntManager != null) {
                                    cntManager.i(ConsolidationCounters.CONSOLIDATION_SUCCESS);
                                    if (doiQuery)
                                        cntManager.i(ConsolidationCounters.CONSOLIDATION_PER_DOI_SUCCESS);
                                }
                                break;
                            }
                        }
                    }
                }

                @Override
                public void onError(int status, String message, Exception exception) {
                    LOGGER.info("Consolidation service returns error ("+status+") : "+message, exception);
                }
            });
        } catch(Exception e) {
            LOGGER.info("Consolidation error - ", e);
        }
        // wait for the pending request of this thread before reading results
        client.finish(threadId);
        if (results.size() == 0)
            return null;
        else
            return results.get(0);
    }

    /**
     * Try to consolidate a list of bibliographical objects in one operation with consolidation services.
     *
     * @param biblios list of bibliographical entries to consolidate
     * @return a map from the position of each entry in biblios to its consolidated
     *         BiblioItem, or null when no acceptable match was found for that entry;
     *         null is returned for an empty/null input list
     */
    public Map<Integer,BiblioItem> consolidate(List<BibDataSet> biblios) {
        if (CollectionUtils.isEmpty(biblios))
            return null;
        final Map<Integer,BiblioItem> results = new HashMap<Integer,BiblioItem>();
        // init the results
        int n = 0;
        for(n=0; n<biblios.size(); n++) {
            results.put(Integer.valueOf(n), null);
        }
        n = 0;
        long threadId = Thread.currentThread().getId();
        for(BibDataSet bibDataSet : biblios) {
            final BiblioItem theBiblio = bibDataSet.getResBib();

            if (cntManager != null)
                cntManager.i(ConsolidationCounters.TOTAL_BIB_REF);

            // first we get the exploitable metadata
            String doi = theBiblio.getDOI();
            if (StringUtils.isNotBlank(doi)) {
                doi = BiblioItem.cleanDOI(doi);
            }
            String aut = theBiblio.getFirstAuthorSurname();
            String title = theBiblio.getTitle();
            String journalTitle = theBiblio.getJournal();
            // and the row string
            String rawCitation = bibDataSet.getRawBib();
            Map<String, String> arguments = null;

            String volume = theBiblio.getVolume();
            if (StringUtils.isBlank(volume))
                volume = theBiblio.getVolumeBlock();
            // derive first page from explicit begin page or "n--m" page range
            String firstPage = null;
            String pageRange = theBiblio.getPageRange();
            int beginPage = theBiblio.getBeginPage();
            if (beginPage != -1) {
                firstPage = "" + beginPage;
            } else if (pageRange != null) {
                StringTokenizer st = new StringTokenizer(pageRange, "--");
                if (st.countTokens() == 2) {
                    firstPage = st.nextToken();
                } else if (st.countTokens() == 1)
                    firstPage = pageRange;
            }
            String year = null;
            if ( theBiblio.getNormalizedPublicationDate() != null ) {
                year = "" + theBiblio.getNormalizedPublicationDate().getYear();
            }
            if (year == null)
                year = theBiblio.getYear();

            /*if (aut != null) {
                aut = TextUtilities.removeAccents(aut);
            }
            if (title != null) {
                title = TextUtilities.removeAccents(title);
            }
            if (journalTitle != null) {
                journalTitle = TextUtilities.removeAccents(journalTitle);
            }*/

            if (StringUtils.isNotBlank(doi)) {
                // call based on the identified DOI
                arguments = new HashMap<String,String>();
                arguments.put("doi", doi);
            }
            if (StringUtils.isNotBlank(rawCitation)) {
                // call with full raw string; for CrossRef it is only sent when no DOI is available
                if (arguments == null)
                    arguments = new HashMap<String,String>();
                if ( (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) ||
                      StringUtils.isBlank(doi) )
                    arguments.put("query.bibliographic", rawCitation);
            }
            if (StringUtils.isNotBlank(title)) {
                // call based on partial metadata; for CrossRef only when neither raw citation nor DOI is available
                if (arguments == null)
                    arguments = new HashMap<String,String>();
                if ( (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) ||
                     (StringUtils.isBlank(rawCitation) && StringUtils.isBlank(doi)) )
                    arguments.put("query.title", title);
            }
            if (StringUtils.isNotBlank(aut)) {
                // call based on partial metadata; for CrossRef only when neither raw citation nor DOI is available
                if (arguments == null)
                    arguments = new HashMap<String,String>();
                if ( (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) ||
                     (StringUtils.isBlank(rawCitation) && StringUtils.isBlank(doi)) )
                    arguments.put("query.author", aut);
            }
            if (StringUtils.isNotBlank(journalTitle)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("query.container-title", journalTitle);
                }
            }
            if (StringUtils.isNotBlank(volume)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("volume", volume);
                }
            }
            if (StringUtils.isNotBlank(firstPage)) {
                // call based on partial metadata
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("firstPage", firstPage);
                }
            }
            if (StringUtils.isNotBlank(year)) {
                // publication year metadata, CrossRef has no year query field, they are supported by the query.bibliographic
                // field and filter
                if (GrobidProperties.getInstance().getConsolidationService() != GrobidConsolidationService.CROSSREF) {
                    if (arguments == null)
                        arguments = new HashMap<String,String>();
                    arguments.put("year", year);
                }
            }

            if (arguments == null || arguments.size() == 0) {
                // nothing to query with for this entry, keep its null result
                n++;
                continue;
            }

            if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.CROSSREF) {
                if (StringUtils.isBlank(doi) &&
                    StringUtils.isBlank(rawCitation) &&
                    (StringUtils.isBlank(aut) || StringUtils.isBlank(title)) ) {
                    // there's not enough information for a crossref request, which might always return a result
                    n++;
                    continue;
                }
            }

            if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.CROSSREF)
                arguments.put("rows", "1"); // we just request the top-one result
            else if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.GLUTTON) {
                // GROBID has already parsed the reference, so no need to redo this in glutton
                arguments.put("parseReference", "false");
            }

            final boolean doiQuery;
            try {
                //CrossrefRequestListener<BiblioItem> requestListener = new CrossrefRequestListener<BiblioItem>();
                if (cntManager != null) {
                    cntManager.i(ConsolidationCounters.CONSOLIDATION);
                }
                // NOTE(review): as in the single-item method, doiQuery stays false
                // when cntManager is null even for a DOI-based request — confirm intended
                if ( StringUtils.isNotBlank(doi) && (cntManager != null) ) {
                    cntManager.i(ConsolidationCounters.CONSOLIDATION_PER_DOI);
                    doiQuery = true;
                } else {
                    doiQuery = false;
                }
                // the listener rank (n) records which input entry this response belongs to
                client.<BiblioItem>pushRequest("works", arguments, workDeserializer, threadId,
                    new CrossrefRequestListener<BiblioItem>(n) {
                    @Override
                    public void onSuccess(List<BiblioItem> res) {
                        if ((res != null) && (res.size() > 0) ) {
                            // for CrossRef API we need here to post-validate if the found item corresponds
                            // to the one requested in order to avoid false positive
                            // Glutton has its own validation mechanisms
                            for(BiblioItem oneRes : res) {
                                if ((GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.GLUTTON) ||
                                    postValidation(theBiblio, oneRes)) {
                                    results.put(Integer.valueOf(getRank()), oneRes);
                                    if (cntManager != null) {
                                        cntManager.i(ConsolidationCounters.CONSOLIDATION_SUCCESS);
                                        if (doiQuery)
                                            cntManager.i(ConsolidationCounters.CONSOLIDATION_PER_DOI_SUCCESS);
                                    }
                                    break;
                                }
                            }
                        }
                    }

                    @Override
                    public void onError(int status, String message, Exception exception) {
                        LOGGER.info("Consolidation service returns error ("+status+") : "+message);
                    }
                });
            } catch(Exception e) {
                LOGGER.info("Consolidation error - ", e);
            }
            n++;
        }
        // wait for all pending requests pushed by this thread before returning
        client.finish(threadId);
        return results;
    }

    /**
     * The public CrossRef API is a search API, and thus returns
     * many false positives. It is necessary to validate return results
     * against the (incomplete) source bibliographic item to block
     * inconsistent results.
     *
     * @param source the (possibly incomplete) extracted item
     * @param result the candidate returned by the service
     * @return false when the first-author surnames are both present and too
     *         dissimilar (Ratcliff/Obershelp &lt; 0.8), true otherwise
     */
    private boolean postValidation(BiblioItem source, BiblioItem result) {
        boolean valid = true;

        // check main metadata available in source with fuzzy matching
        /*if (!StringUtils.isBlank(source.getTitle()) && !StringUtils.isBlank(source.getTitle())) {
            //System.out.println(source.getTitle() + " / " + result.getTitle() + " = " + ratcliffObershelpDistance(source.getTitle(), result.getTitle(), false));
            if (ratcliffObershelpDistance(source.getTitle(), result.getTitle(), false) < 0.8)
                return false;
        }*/

        if (!StringUtils.isBlank(source.getFirstAuthorSurname()) &&
            !StringUtils.isBlank(result.getFirstAuthorSurname())) {
            //System.out.println(source.getFirstAuthorSurname() + " / " + result.getFirstAuthorSurname() + " = " +
            //    ratcliffObershelpDistance(source.getFirstAuthorSurname(), result.getFirstAuthorSurname(), false));
            if (ratcliffObershelpDistance(source.getFirstAuthorSurname(),result.getFirstAuthorSurname(), false) < 0.8)
                return false;
        }

        /*if (!StringUtils.isBlank(source.getPublicationDate()) &&
            !StringUtils.isBlank(result.getPublicationDate())) {
            if (!source.getPublicationDate().equals(result.getPublicationDate()))
                valid = false;
        }*/

        return valid;
    }

    /**
     * Ratcliff/Obershelp string similarity (despite the method name, higher
     * means more similar: 1.0 identical, 0.0 for blank input).
     *
     * @param string1 first string
     * @param string2 second string
     * @param caseDependent when false, both strings are lower-cased first
     * @return similarity in [0,1]
     */
    private double ratcliffObershelpDistance(String string1, String string2, boolean caseDependent) {
        if ( StringUtils.isBlank(string1) || StringUtils.isBlank(string2) )
            return 0.0;
        Double similarity = 0.0;
        if (!caseDependent) {
            string1 = string1.toLowerCase();
            string2 = string2.toLowerCase();
        }
        if (string1.equals(string2)) {
            similarity = 1.0;
        }
        if ( isNotEmpty(string1) && isNotEmpty(string2) ) {
            Option<Object> similarityObject = RatcliffObershelpMetric.compare(string1, string2);
            if (similarityObject.isDefined()) {
                similarity = (Double) similarityObject.get();
            }
        }
        return similarity;
    }
}
24,795
42.731922
155
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/Triple.java
package org.grobid.core.utilities;

import com.google.common.base.Function;

import java.util.Objects;

/**
 * Simple generic 3-tuple holding three possibly-null values of independent types.
 * The tuple components themselves are immutable once constructed.
 */
public class Triple<A, B, C> {
    private final A a;
    private final B b;
    private final C c;

    /**
     * @param a first component (may be null)
     * @param b second component (may be null)
     * @param c third component (may be null)
     */
    public Triple(A a, B b, C c) {
        this.a = a;
        this.b = b;
        this.c = c;
    }

    // Fields are now declared with the parameterized Function type instead of the
    // raw type, so assignments are checked by the compiler; behavior is unchanged.
    public Function<Triple<A, B, C>, A> getAFunction = new Function<Triple<A, B, C>, A>() {
        @Override
        public A apply(Triple<A, B, C> input) {
            return input.getA();
        }
    };

    public Function<Triple<A, B, C>, B> getBFunction = new Function<Triple<A, B, C>, B>() {
        @Override
        public B apply(Triple<A, B, C> input) {
            return input.getB();
        }
    };

    public Function<Triple<A, B, C>, C> getCFunction = new Function<Triple<A, B, C>, C>() {
        @Override
        public C apply(Triple<A, B, C> input) {
            return input.getC();
        }
    };

    /**
     * @return the tuple rendered as {@code ('a'; 'b'; 'c')} (format unchanged)
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb
            .append("('")
            .append(a)
            .append("'; '")
            .append(b)
            .append("'; '")
            .append(c)
            .append("')");
        return sb.toString();
    }

    @Override
    public int hashCode() {
        // Objects.hash uses the same 31-based accumulation as the previous
        // hand-written loop, so existing hash values are preserved exactly.
        return Objects.hash(a, b, c);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof Triple)) {
            return false;
        }
        Triple<?, ?, ?> that = (Triple<?, ?, ?>) o;
        // Objects.equals gives the same null-safe semantics as the previous ternaries
        return Objects.equals(this.a, that.a)
            && Objects.equals(this.b, that.b)
            && Objects.equals(this.c, that.c);
    }

    public A getA() {
        return a;
    }

    public B getB() {
        return b;
    }

    public C getC() {
        return c;
    }
}
1,904
20.404494
71
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/PathUtil.java
package org.grobid.core.utilities;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.List;

/**
 * Helpers for locating files by extension, either directly in a directory or
 * recursively in a file tree.
 */
public class PathUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(PathUtil.class);

    /**
     * Return one (arbitrary) file in the given directory whose name ends with ext.
     *
     * @param root directory to list
     * @param ext  required file-name suffix (case-sensitive, as before)
     * @return the first matching file
     * @throws IllegalArgumentException when root cannot be listed or no file matches
     */
    public static File getOneFile(File root, final String ext) {
        File[] l = root.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(File dir, String name) {
                return name.endsWith(ext);
            }
        });

        if (l == null || l.length == 0) {
            throw new IllegalArgumentException("Cannot find files in " + root + " with extension " + ext);
        }
        return l[0];
    }

    /**
     * Recursively collect all regular files under root whose name ends with one
     * of the given extensions (compared case-insensitively).
     *
     * @param root       tree root
     * @param extensions extensions without the leading dot, e.g. "pdf"
     * @return list of matching paths (empty when none or on I/O error)
     */
    public static List<Path> getAllPaths(Path root, final String... extensions) {
        List<Path> l = new ArrayList<>();
        getAllPaths(l, root, extensions);
        return l;
    }

    /**
     * Same as {@link #getAllPaths(Path, String...)} but appends into a caller-supplied list.
     *
     * @param paths      output list, matching paths are appended
     * @param root       tree root
     * @param extensions extensions without the leading dot
     */
    public static void getAllPaths(final List<Path> paths, Path root, final String... extensions) {
        try {
            Files.walkFileTree(root.toAbsolutePath(), new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    if (Files.isRegularFile(file) && isSupportedFile(file, extensions)) {
                        paths.add(file);
                    }
                    return FileVisitResult.CONTINUE;
                }

                private boolean isSupportedFile(Path file, String[] extensions) {
                    for (String ext : extensions) {
                        // lower-case both sides: previously only the file name was
                        // lower-cased, so an upper-case extension argument never matched
                        if (file.toString().toLowerCase().endsWith("." + ext.toLowerCase())) {
                            return true;
                        }
                    }
                    return false;
                }
            });
        } catch (IOException e) {
            // previously swallowed silently with only a "// return;" comment;
            // the partial result already collected in 'paths' is still returned
            LOGGER.warn("Error while walking file tree under " + root, e);
        }
    }
}
2,174
32.461538
107
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/IOUtilities.java
package org.grobid.core.utilities;

import org.grobid.core.exceptions.GrobidResourceException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.util.List;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Date;

import static org.apache.commons.lang3.StringUtils.isEmpty;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.io.IOUtils;

/**
 * Utilities related to file and directory management.
 */
public class IOUtilities {

    private static final Logger LOGGER = LoggerFactory.getLogger(IOUtilities.class);

    /**
     * Creates a file and writes some string content in it.
     *
     * @param file The file to write in.
     * @param content the content to write
     * @throws IOException
     */
    public static void writeInFile(String file, String content)
        throws IOException {
        // try-with-resources: the writer is now closed even when write() throws
        try (BufferedWriter buffw = new BufferedWriter(new FileWriter(new File(file)))) {
            buffw.write(content);
        }
    }

    /**
     * Creates a file and writes a list of string in it separated by a given separator.
     *
     * @param file The file to write in.
     * @param content the list of string to write
     * @param sep separator to used for the list elements
     * @throws IOException
     */
    public static void writeListInFile(String file, List<String> content, String sep)
        throws IOException {
        try (BufferedWriter buffw = new BufferedWriter(new FileWriter(new File(file)))) {
            boolean start = true;
            for (String cont : content) {
                if (start) {
                    buffw.write(cont);
                    start = false;
                } else
                    buffw.write(sep + cont);
            }
        }
    }

    /**
     * Read a file and return the content.
     *
     * @param pPathToFile path to file to read.
     * @return String contained in the document.
     * @throws IOException
     */
    public static String readFile(String pPathToFile) throws IOException {
        FileInputStream inputStrem = new FileInputStream(new File(pPathToFile));
        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        try {
            byte buf[] = new byte[1024];
            int len;
            while ((len = inputStrem.read(buf)) > 0) {
                outStream.write(buf, 0, len);
            }
        } finally {
            // close even when read() throws (previously leaked on error)
            IOUtils.closeQuietly(inputStrem);
            IOUtils.closeQuietly(outStream);
        }
        // BUGFIX: the conversion used to happen inside the read loop, appending
        // the whole accumulated buffer on every iteration and therefore
        // duplicating content for any file larger than 1 KB; convert once here.
        return outStream.toString();
    }

    /**
     * Write an input stream in temp directory.
     *
     * @param inputStream stream to copy; it is closed before returning
     * @return the temporary file created, or null when writing failed
     */
    public static File writeInputFile(InputStream inputStream) {
        LOGGER.debug(">> set origin document for stateless service'...");

        File originFile = null;
        OutputStream out = null;
        try {
            originFile = newTempFile("origin", ".pdf");

            out = new FileOutputStream(originFile);

            byte buf[] = new byte[1024];
            int len;
            while ((len = inputStream.read(buf)) > 0) {
                out.write(buf, 0, len);
            }
        } catch (IOException e) {
            LOGGER.error(
                "An internal error occurs, while writing to disk (file to write '"
                    + originFile + "').", e);
            originFile = null;
        } finally {
            try {
                if (out != null) {
                    out.close();
                }
                inputStream.close();
            } catch (IOException e) {
                LOGGER.error("An internal error occurs, while writing to disk (file to write '"
                    + originFile + "').", e);
                originFile = null;
            }
        }
        return originFile;
    }

    /**
     * Creates a new not used temporary file and returns it.
     *
     * @param fileName  prefix of the temporary file name
     * @param extension suffix (including the dot, e.g. ".pdf")
     * @return the created file under GrobidProperties.getTempPath()
     * @throws GrobidResourceException when the file cannot be created
     */
    public static File newTempFile(String fileName, String extension) {
        try {
            return File.createTempFile(fileName, extension, GrobidProperties.getTempPath());
        } catch (IOException e) {
            // fixed "temprorary" typo in the thrown error message
            throw new GrobidResourceException(
                "Could not create temporary file, '" + fileName + "."
                    + extension + "' under path '" + GrobidProperties.getTempPath() + "'.", e);
        }
    }

    /**
     * From JDK 1.7, creates a new system temporary file and returns the file.
     *
     * @param extension file suffix
     * @return the created file in the system temp directory, prefixed "grobid"
     * @throws GrobidResourceException when the file cannot be created
     */
    public static File newSystemTempFile(String extension) {
        try {
            Path newFile = Files.createTempFile("grobid", extension);
            return newFile.toFile();
        } catch (IOException e) {
            // fixed "temprorary" typo in the thrown error message
            throw new GrobidResourceException(
                "Could not create temporary file, with extension '" + extension
                    + "' under path tmp system path.", e);
        }
    }

    /**
     * Delete a temporary file.
     */
    public static void removeTempFile(final File file) {
        try {
            // sanity cleaning
            deleteOldies(GrobidProperties.getTempPath(), 300);
            LOGGER.debug("Removing " + file.getAbsolutePath());
            file.delete();
        } catch (Exception exp) {
            LOGGER.error("Error while deleting the temporary file: ", exp);
        }
    }

    /**
     * Delete a system temporary file.
     */
    public static void removeSystemTempFile(final File file) {
        try {
            // sanity cleaning
            deleteSystemOldies(300);
            LOGGER.debug("Removing " + file.getAbsolutePath());
            file.delete();
        } catch (Exception exp) {
            LOGGER.error("Error while deleting the temporary file: ", exp);
        }
    }

    /**
     * Delete temporary directory. Note: File.delete() only succeeds on an
     * empty directory; failures are logged at debug level only.
     */
    public static void removeTempDirectory(final String path) {
        try {
            LOGGER.debug("Removing " + path);
            File theDirectory = new File(path);
            if (theDirectory.exists()) {
                theDirectory.delete();
            }
        } catch (Exception exp) {
            LOGGER.error("Error while deleting the temporary directory: ", exp);
        }
    }

    /**
     * Deletes all files and subdirectories under dir if they are older than a given
     * amount of seconds. Returns true if all deletions were successful. If a deletion
     * fails, the method stops attempting to delete and returns false.
     */
    public static boolean deleteOldies(File dir, int maxLifeInSeconds) {
        return deleteOldies(dir, maxLifeInSeconds, "", true);
    }

    /**
     * Recursive worker for {@link #deleteOldies(File, int)}.
     *
     * @param dir              file or directory to inspect
     * @param maxLifeInSeconds minimum age, in seconds, before deletion
     * @param prefix           only names starting with this prefix are considered
     *                         (empty prefix matches everything)
     * @param root             true for the initial call; the root directory itself
     *                         is never deleted
     * @return true when every attempted deletion succeeded
     */
    public static boolean deleteOldies(File dir, int maxLifeInSeconds, String prefix, boolean root) {
        Date currentDate = new Date();
        long currentDateMillisec = currentDate.getTime();
        boolean empty = true;
        boolean success = true;
        // use a long multiplication: "maxLifeInSeconds * 1000" overflowed int
        // for ages beyond ~24 days, producing a bogus threshold
        long threasholdMillisec = currentDateMillisec - (maxLifeInSeconds * 1000L);
        if (dir.isDirectory() && (StringUtils.isEmpty(prefix) || dir.getName().startsWith(prefix))) {
            File[] children = dir.listFiles();
            if (children != null) {
                for (int i = 0; i < children.length; i++) {
                    if ((StringUtils.isEmpty(prefix) || children[i].getName().startsWith(prefix))) {
                        long millisec = children[i].lastModified();
                        if (millisec < threasholdMillisec) {
                            success = deleteOldies(children[i], maxLifeInSeconds, prefix, false);
                            if (!success)
                                return false;
                        } else
                            empty = false;
                    }
                }
            }
        }
        // if the dir is a file or if the directory is empty and it is not the root dir, we delete it
        if (!root && (empty || (!dir.isDirectory()))) {
            if (StringUtils.isEmpty(prefix) || dir.getName().startsWith(prefix))
                success = dir.delete();
        }
        return success;
    }

    /**
     * Deletes all files and subdirectories under the system temporary folder if they are older than
     * a given amount of seconds. Returns true if all deletions were successful. If a deletion
     * fails, the method stops attempting to delete and returns false.
     * The grobid system temporary files and folders are all identified with a grobid prefix.
     */
    public static boolean deleteSystemOldies(int maxLifeInSeconds) {
        String defaultBaseDir = System.getProperty("java.io.tmpdir");
        return deleteOldies(new File(defaultBaseDir), maxLifeInSeconds, "grobid", true);
    }
}
8,725
34.327935
120
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/UnicodeUtil.java
package org.grobid.core.utilities;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Class for holding static methods for processing related to unicode.
 * All normalisation targets a representation that is safe as Wapiti feature
 * input: a single space for horizontal whitespace, '\n' for vertical
 * whitespace, and canonical forms for dashes, low lines, vertical lines,
 * bullets and parentheses.
 */
public class UnicodeUtil {

    // any Unicode dash punctuation (\p{Pd}) plus the minus sign U+2212, all normalised to '-'
    private static final Pattern DASH_PATTERN = Pattern.compile("[\\p{Pd}\\u2212]");

    // plain space and newline, the only whitespace characters left after normaliseText()
    private static final Pattern NORMALISE_REGEX_PATTERN = Pattern.compile("[ \n]");

    // As java \s doesn't support the Unicode white space property (\s matches
    // [ \t\n\x0B\f\r]), here are the 26 code points of the "official" stable
    // p{White_Space} unicode property
    public static String whitespace_chars = "[" 
        + "\\u0009" // CHARACTER TABULATION, \t
        + "\\u000A" // LINE FEED (LF), \n -> new line
        + "\\u000B" // LINE TABULATION, \v -> new line
        + "\\u000C" // FORM FEED (FF) -> break page
        + "\\u000D" // CARRIAGE RETURN (CR), \r
        + "\\u0020" // SPACE
        + "\\u0085" // NEXT LINE (NEL) -> new line
        + "\\u00A0" // NO-BREAK SPACE
        + "\\u1680" // OGHAM SPACE MARK
        + "\\u180E" // MONGOLIAN VOWEL SEPARATOR
        + "\\u2000" // EN QUAD
        + "\\u2001" // EM QUAD
        + "\\u2002" // EN SPACE
        + "\\u2003" // EM SPACE
        + "\\u2004" // THREE-PER-EM SPACE
        + "\\u2005" // FOUR-PER-EM SPACE
        + "\\u2006" // SIX-PER-EM SPACE
        + "\\u2007" // FIGURE SPACE
        + "\\u2008" // PUNCTUATION SPACE
        + "\\u2009" // THIN SPACE
        + "\\u200A" // HAIR SPACE
        + "\\u2028" // LINE SEPARATOR
        + "\\u2029" // PARAGRAPH SEPARATOR
        + "\\u202F" // NARROW NO-BREAK SPACE
        + "\\u205F" // MEDIUM MATHEMATICAL SPACE
        + "\\u3000" // IDEOGRAPHIC SPACE
        + "]";

    // a more restrictive selection of horizontal white space characters than the
    // Unicode p{White_Space} property (which includes new line and vertical spaces)
    // NOTE(review): this set still contains U+2028/U+2029 (line/paragraph
    // separators), and it is applied *before* the newline normalisation in
    // normaliseText(), so those two become a space rather than '\n' — behaviour
    // kept unchanged here, confirm it is intended
    public static String my_whitespace_chars = "[" 
        + "\\u0009" // CHARACTER TABULATION, \t
        + "\\u0020" // SPACE
        + "\\u00A0" // NO-BREAK SPACE
        + "\\u1680" // OGHAM SPACE MARK
        + "\\u180E" // MONGOLIAN VOWEL SEPARATOR
        + "\\u2000" // EN QUAD
        + "\\u2001" // EM QUAD
        + "\\u2002" // EN SPACE
        + "\\u2003" // EM SPACE
        + "\\u2004" // THREE-PER-EM SPACE
        + "\\u2005" // FOUR-PER-EM SPACE
        + "\\u2006" // SIX-PER-EM SPACE
        + "\\u2007" // FIGURE SPACE
        + "\\u2008" // PUNCTUATION SPACE
        + "\\u2009" // THIN SPACE
        + "\\u200A" // HAIR SPACE
        + "\\u2028" // LINE SEPARATOR
        + "\\u2029" // PARAGRAPH SEPARATOR
        + "\\u202F" // NARROW NO-BREAK SPACE
        + "\\u205F" // MEDIUM MATHEMATICAL SPACE
        + "\\u3000" // IDEOGRAPHIC SPACE
        + "]";
    private static final Pattern MY_WHITESPACE_PATTERN = Pattern.compile(my_whitespace_chars);

    // all the horizontal low lines
    public static String horizontal_low_lines_chars = "[" 
        + "\\u005F" // low Line
        + "\\u203F" // undertie
        + "\\u2040" // character tie
        + "\\u2054" // inverted undertie
        + "\\uFE4D" // dashed low line
        + "\\uFE4E" // centreline low line
        + "\\uFE4F" // wavy low line
        + "\\uFF3F" // fullwidth low line
        + "\\uFE33" // Presentation Form For Vertical Low Line
        + "\\uFE34" // Presentation Form For Vertical Wavy Low Line
        + "]";
    private static final Pattern HORIZONTAL_LOW_LINES_CHARS_PATTERN = Pattern.compile(horizontal_low_lines_chars);

    // all the vertical lines
    public static String vertical_lines_chars = "[" 
        + "\\u007C" // vertical line
        + "\\u01C0" // Latin Letter Dental
        + "\\u05C0" // Hebrew Punctuation Paseq
        + "\\u2223" // Divides
        + "\\u2758" // Light Vertical Bar
        + "]";
    private static final Pattern VERTICAL_LINES_CHARS_PATTERN = Pattern.compile(vertical_lines_chars);

    // all new lines / "vertical" white spaces
    public static String new_line_chars = "[" 
        + "\\u000C" // form feed, \f - normally a page break
        + "\\u000A" // line feed, \n
        + "\\u000D" // carriage return, \r
        + "\\u000B" // line tabulation, \v - concretely it's a new line
        + "\\u0085" // next line (NEL)
        + "\\u2029" // PARAGRAPH SEPARATOR, \p{Zp}
        + "\\u2028" // LINE SEPARATOR, \p{Zl}
        + "]";
    private static final Pattern NEW_LINE_CHARS_PATTERN = Pattern.compile(new_line_chars);

    // all bullets
    public static String bullet_chars = "[" 
        + "\\u2022" // bullet
        + "\\u2023" // triangular bullet
        + "\\u25E6" // white bullet
        + "\\u2043" // hyphen bullet
        + "\\u204C" // black leftwards bullet
        + "\\u204D" // black rightwards bullet
        + "\\u2219" // bullet operator (use in math stuff)
        + "\\u25D8" // inverse bullet
        + "\\u29BE" // circled white bullet
        + "\\u29BF" // circled bullet
        + "\\u23FA" // black circle for record
        + "\\u25CF" // black circle
        + "\\u26AB" // medium black circle
        + "\\u2B24" // black large circle
        + "\\u00B7" // middle dot
        + "]";
    private static final Pattern BULLET_CHARS_PATTERN = Pattern.compile(bullet_chars);

    // opening parenthesis
    public static String open_parenthesis = "[" 
        + "\\u0028" // left parenthesis
        + "\\uFF08" // large opening parenthesis with space before
        + "\\u27EE" // mathematical left flattened parenthesis
        + "\\u2985" // left white parenthesis
        + "\\u2768" // medium left parenthesis ornament
        + "\\u276A" // medium flattened left parenthesis ornament
        + "\\u27EC" // mathematical left white tortoise shell bracket, called bracket but totally looks like parenthesis
        + "]";
    private static final Pattern OPEN_PARENTHESIS_PATTERN = Pattern.compile(open_parenthesis);

    // closing parenthesis
    public static String close_parenthesis = "[" 
        + "\\u0029" // right parenthesis
        + "\\uFF09" // large closing parenthesis with space after
        + "\\u27EF" // mathematical right flattened parenthesis
        + "\\u2986" // right white parenthesis
        + "\\u2769" // medium right parenthesis ornament
        + "\\u276B" // medium flattened right parenthesis ornament
        + "\\u27ED" // mathematical right white tortoise shell bracket, called bracket but totally looks like parenthesis
        + "]";
    private static final Pattern CLOSE_PARENTHESIS_PATTERN = Pattern.compile(close_parenthesis);

    /**
     * Normalise the space, EOL and punctuation unicode characters.
     *
     * In particular all the characters which are treated as space in
     * C++ (http://en.cppreference.com/w/cpp/string/byte/isspace)
     * will be replaced by the punctuation space character U+2008
     * so that the token can be used to generate a robust feature vector
     * legible as Wapiti input.
     *
     * @param text to be normalised
     * @return normalised string, legible for Wapiti feature generation,
     *         or null if text is null
     */
    public static String normaliseText(String text) {
        if (text == null)
            return null;

        // see https://docs.oracle.com/javase/8/docs/api/java/lang/Character.html
        // for Unicode character properties supported by Java

        // normalise all horizontal space separator characters to a single plain space
        text = MY_WHITESPACE_PATTERN.matcher(text).replaceAll(" ");

        // normalise all EOL - special handling of "\r\n" as one single newline
        text = text.replace("\r\n", "\n");
        text = NEW_LINE_CHARS_PATTERN.matcher(text).replaceAll("\n");

        // normalize dash via the unicode dash punctuation property
        text = DASH_PATTERN.matcher(text).replaceAll("-");

        // normalize horizontal low lines
        text = HORIZONTAL_LOW_LINES_CHARS_PATTERN.matcher(text).replaceAll("_");

        // normalize vertical lines
        text = VERTICAL_LINES_CHARS_PATTERN.matcher(text).replaceAll("|");

        // bullet normalisation
        text = BULLET_CHARS_PATTERN.matcher(text).replaceAll("•");

        // opening parenthesis normalisation
        text = OPEN_PARENTHESIS_PATTERN.matcher(text).replaceAll("(");

        // closing parenthesis normalisation
        text = CLOSE_PARENTHESIS_PATTERN.matcher(text).replaceAll(")");

        return text;
    }

    /**
     * Unicode normalisation of the token text.
     * Works as {@link org.grobid.core.utilities.UnicodeUtil#normaliseText(java.lang.String)},
     * but in addition also removes the remaining spaces and newlines.
     *
     * @param text to be normalised
     * @return normalised string without spaces, legible for Wapiti feature
     *         generation, or null if text is null
     */
    public static String normaliseTextAndRemoveSpaces(String text) {
        // fix: previously a null input was passed through normaliseText(null),
        // which returns null, and then dereferenced by matcher() -> NPE;
        // now null is propagated exactly like in normaliseText()
        if (text == null)
            return null;
        // parano sanitising
        return NORMALISE_REGEX_PATTERN.matcher(normaliseText(text)).replaceAll("");
    }
}
10,798
49.227907
157
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/XQueryProcessor.java
package org.grobid.core.utilities;

import net.sf.saxon.Configuration;
import net.sf.saxon.om.DocumentInfo;
import net.sf.saxon.om.SequenceIterator;
import net.sf.saxon.query.DynamicQueryContext;
import net.sf.saxon.query.StaticQueryContext;
import net.sf.saxon.query.XQueryExpression;
import net.sf.saxon.trans.XPathException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.xml.sax.InputSource;

import javax.xml.transform.sax.SAXSource;
import javax.xml.transform.stream.StreamResult;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.util.Properties;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
 * Runs XQuery queries against a single XML document held in memory.
 */
public class XQueryProcessor {

    // compile-time (static) and run-time (dynamic) Saxon contexts, both bound
    // to the same Configuration and to the parsed input document
    private final StaticQueryContext staticContext;
    private final DynamicQueryContext dynamicContext;

    /**
     * Loads the text of a packaged query stored under /xq/ on the classpath.
     *
     * @param name file name of the query resource
     * @return the query text decoded as UTF-8
     */
    public static String getQueryFromResources(String name) throws IOException {
        InputStream queryStream = XQueryProcessor.class.getResourceAsStream("/xq/" + name);
        return IOUtils.toString(queryStream, UTF_8);
    }

    /**
     * Builds a processor from an XML file read as UTF-8.
     */
    public XQueryProcessor(File xmFile) throws XPathException, IOException {
        this(FileUtils.readFileToString(xmFile, UTF_8));
    }

    /**
     * Builds a processor from raw XML content: the document is parsed once and
     * registered as the context item for every subsequent query.
     */
    public XQueryProcessor(String xmlContent) throws XPathException {
        Configuration configuration = new Configuration();
        staticContext = new StaticQueryContext(configuration);
        dynamicContext = new DynamicQueryContext(configuration);

        InputStream stream = new ByteArrayInputStream(xmlContent.getBytes(UTF_8));
        SAXSource saxSource = new SAXSource(new InputSource(stream));
        DocumentInfo document = staticContext.buildDocument(saxSource);
        dynamicContext.setContextItem(document);
    }

    /**
     * Compiles and evaluates the given XQuery expression against the loaded
     * document.
     *
     * @param query XQuery source text
     * @return an iterator over the result sequence
     */
    public SequenceIterator getSequenceIterator(String query) throws XPathException {
        final XQueryExpression expression = staticContext.compileQuery(query);
        // NOTE(review): the expression is first run with its serialized output
        // discarded, then evaluated again through iterator(); this duplicates
        // the evaluation but mirrors the original behaviour and is kept as-is
        expression.run(dynamicContext, new StreamResult(new StringWriter()), new Properties());
        return expression.iterator(dynamicContext);
    }
}
2,063
31.761905
96
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/SentenceUtilities.java
package org.grobid.core.utilities;

import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.lang.SentenceDetectorFactory;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.LayoutToken;

import java.util.*;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Class for using sentence segmentation (singleton). The actual sentence segmentation
 * implementation is specified in the Grobid configuration. See org.grobid.core.lang.impl.*
 * for the available implementations.
 */
public class SentenceUtilities {
    public static final Logger LOGGER = LoggerFactory.getLogger(SentenceUtilities.class);

    // lazily-initialized singleton; volatile for safe publication with the
    // double-checked locking in getInstance()
    private static volatile SentenceUtilities instance = null;

    // concrete sentence detector factory, instantiated by reflection from the
    // class name found in the Grobid configuration
    private SentenceDetectorFactory sdf = null;

    /**
     * Returns the shared instance, creating it on first use (double-checked locking).
     */
    public static SentenceUtilities getInstance() {
        if (instance == null) {
            synchronized (SentenceUtilities.class) {
                if (instance == null) {
                    LOGGER.debug("synchronized getNewInstance");
                    instance = new SentenceUtilities();
                }
            }
        }
        return instance;
    }

    /**
     * Loads the SentenceDetectorFactory implementation named in the Grobid
     * configuration. Each reflection failure mode is mapped to a GrobidException
     * with a specific message.
     */
    private SentenceUtilities() {
        String className = GrobidProperties.getSentenceDetectorFactory();
        try {
            sdf = (SentenceDetectorFactory) Class.forName(className).newInstance();
        } catch (ClassCastException e) {
            throw new GrobidException("Class " + className
                    + " must implement "
                    + SentenceDetectorFactory.class.getName(), e);
        } catch (ClassNotFoundException e) {
            // NOTE(review): the runtime message has grammar typos ("were not
            // found", "correctly is"); left untouched in this doc-only pass
            throw new GrobidException(
                    "Class "
                            + className
                            + " were not found in the classpath. "
                            + "Make sure that it is provided correctly is in the classpath", e);
        } catch (InstantiationException e) {
            throw new GrobidException("Class " + className
                    + " should have a default constructor", e);
        } catch (IllegalAccessException e) {
            throw new GrobidException(e);
        }
    }

    /**
     * Basic run for sentence identification, return the offset positions of the
     * identified sentences.
     *
     * @param text text to segment into sentences
     * @return list of offset positions for the identified sentences, relative to
     *         the input text, or null if text is null or detection fails
     */
    public List<OffsetPosition> runSentenceDetection(String text) {
        if (text == null)
            return null;
        try {
            return sdf.getInstance().detect(text);
        } catch (Exception e) {
            LOGGER.warn("Cannot detect sentences. ", e);
            return null;
        }
    }

    /**
     * Basic run for sentence identification with a specified language to be
     * considered when segmenting.
     *
     * @param text text to segment into sentences
     * @param lang specified language to be used when segmenting text
     * @return list of offset positions for the identified sentences, relative to
     *         the input text, or null if text is null or detection fails
     */
    public List<OffsetPosition> runSentenceDetection(String text, Language lang) {
        if (text == null)
            return null;
        try {
            return sdf.getInstance().detect(text, lang);
        } catch (Exception e) {
            LOGGER.warn("Cannot detect sentences. ", e);
            return null;
        }
    }

    /**
     * Run for sentence identification with some forbidden span constraints: no
     * sentence boundary may fall inside a forbidden span (typically a reference
     * marker — we don't want a sentence end/start in the middle of it).
     *
     * @param text      text to segment into sentences
     * @param forbidden list of offset positions where sentence boundaries are forbidden
     * @return list of offset positions for the identified sentences, relative to the input text
     */
    public List<OffsetPosition> runSentenceDetection(String text, List<OffsetPosition> forbidden) {
        return runSentenceDetection(text, forbidden, null, null);
    }

    /**
     * Run for sentence identification with forbidden span constraints, as
     * {@link #runSentenceDetection(String, List)}. The original LayoutToken
     * objects are provided in addition, which allows applying heuristics based
     * on document layout and font features (currently: re-attaching superscript
     * numerical reference markers that trail the sentence-final punctuation,
     * e.g. "…were not predictive of outcome. 32").
     *
     * @param text             text to segment into sentences
     * @param forbidden        list of offset positions where sentence boundaries are forbidden
     * @param textLayoutTokens list of LayoutToken objects from which the text has been
     *                         created; if null, the input is treated as pure text
     *                         (e.g. not from a PDF)
     * @param lang             specified language to be used when segmenting text
     * @return list of offset positions for the identified sentences, relative to
     *         the input text, or null if text is null or detection fails
     */
    public List<OffsetPosition> runSentenceDetection(String text, List<OffsetPosition> forbidden, List<LayoutToken> textLayoutTokens, Language lang) {
        if (text == null)
            return null;
        try {
            List<OffsetPosition> sentencePositions = sdf.getInstance().detect(text, lang);

            // to be sure, we sort the forbidden positions
            if (forbidden == null)
                return sentencePositions;
            Collections.sort(forbidden);

            // cancel sentence boundaries within the forbidden spans: while a
            // candidate sentence ends inside a forbidden span, it is merged with
            // the following sentence (note: position objects are mutated in place)
            List<OffsetPosition> finalSentencePositions = new ArrayList<>();
            int forbiddenIndex = 0;
            for(int j=0; j < sentencePositions.size(); j++) {
                OffsetPosition position = sentencePositions.get(j);
                for(int i=forbiddenIndex; i < forbidden.size(); i++) {
                    OffsetPosition forbiddenPos = forbidden.get(i);
                    if (forbiddenPos.end < position.end)
                        continue;
                    if (forbiddenPos.start > position.end)
                        break;
                    while ( (forbiddenPos.start < position.end && position.end < forbiddenPos.end) ) {
                        if (j+1 < sentencePositions.size()) {
                            // absorb the next sentence and remember where to resume the forbidden scan
                            position.end = sentencePositions.get(j+1).end;
                            j++;
                            forbiddenIndex = i;
                        } else
                            break;
                    }
                }
                finalSentencePositions.add(position);
            }

            // as a heuristics for all implementations, because they clearly all fail for this case, we
            // attach to the right sentence the numerical bibliographical reference markers expressed
            // in superscript just *after* the final sentence punctuation, e.g.
            // "Laboratory tests at the time of injury were not predictive of outcome. 32"
            // or
            // "CSF-1 has been linked to tumor growth and progression in breast cancer, 5,6 and has been
            // shown to effectively reduce the number of tumor-associated macrophages in different tumor
            // types. 4,5"
            if (finalSentencePositions.size() == 0) {
                // this should normally not happen, but it happens (depending on sentence splitter,
                // usually the text is just a punctuation)
                // in this case we consider the current text as a unique sentence as fall back
                finalSentencePositions.add(new OffsetPosition(0, text.length()));
            }

            if (textLayoutTokens == null || textLayoutTokens.size() == 0)
                return finalSentencePositions;

            // search position inside the current sentence chunk
            int pos = 0;
            // init sentence index
            int currentSentenceIndex = 0;
            String sentenceChunk = text.substring(finalSentencePositions.get(currentSentenceIndex).start, finalSentencePositions.get(currentSentenceIndex).end);
            // true once at least one token has been located in the current sentence
            boolean moved = false;

            // iterate on layout tokens in sync with sentences
            for(int i=0; i<textLayoutTokens.size(); i++) {
                LayoutToken token = textLayoutTokens.get(i);
                if (token.getText() == null || token.getText().length() == 0)
                    continue;

                if (this.toSkipToken(token.getText()))
                    continue;

                int newPos = sentenceChunk.indexOf(token.getText(), pos);
                if (newPos != -1) {
                    // token found in the current sentence: advance
                    pos = newPos;
                    moved = true;
                } else {
                    // before moving to the next sentence, we check if a ref marker in superscript just follows
                    int pushedEnd = 0;   // total length to append to the current sentence
                    int buffer = 0;      // pending skippable characters (spaces etc.) between markers
                    int j = i;
                    for(; j<textLayoutTokens.size(); j++) {
                        LayoutToken nextToken = textLayoutTokens.get(j);
                        if (nextToken.getText() == null || nextToken.getText().length() == 0)
                            continue;

                        // we don't look beyond an end of line (to prevent from numbered list/notes)
                        if (nextToken.getText().equals("\n"))
                            break;

                        // we don't look beyond the text length
                        if (finalSentencePositions.get(currentSentenceIndex).end + nextToken.getText().length() + buffer >= text.length())
                            break;

                        if (this.toSkipTokenNoHyphen(nextToken.getText())) {
                            buffer += nextToken.getText().length();
                            continue;
                        }

                        if (this.isValidSuperScriptNumericalReferenceMarker(nextToken)) {
                            pushedEnd += buffer + nextToken.getText().length();
                            buffer = 0;
                        } else
                            break;
                    }

                    if (pushedEnd > 0) {
                        // extend the current sentence over the trailing marker(s)
                        OffsetPosition newPosition = finalSentencePositions.get(currentSentenceIndex);
                        newPosition.end += pushedEnd+1;
                        finalSentencePositions.set(currentSentenceIndex, newPosition);
                        // push also the beginning of the next sentence
                        if (currentSentenceIndex+1 < finalSentencePositions.size()) {
                            OffsetPosition newNextPosition = finalSentencePositions.get(currentSentenceIndex+1);
                            // it could be that the extra added ref marker was entirely the next sentence,
                            // which should be then removed
                            if (newNextPosition.start + pushedEnd + buffer >= newNextPosition.end) {
                                finalSentencePositions.remove(currentSentenceIndex+1);
                            } else {
                                newNextPosition.start += pushedEnd + buffer;
                                finalSentencePositions.set(currentSentenceIndex+1, newNextPosition);
                            }
                        }
                        pushedEnd = 0;
                        buffer = 0;
                        // resume the outer scan after the consumed marker tokens
                        i = j-1;
                    }

                    if (moved) {
                        // token not found: move to the next sentence chunk
                        currentSentenceIndex++;
                        if (currentSentenceIndex >= finalSentencePositions.size())
                            break;
                        sentenceChunk = text.substring(finalSentencePositions.get(currentSentenceIndex).start, finalSentencePositions.get(currentSentenceIndex).end);
                        moved = false;
                    }
                    pos = 0;
                }

                if (currentSentenceIndex >= finalSentencePositions.size())
                    break;
            }

            // other heuristics/post-corrections based on layout/style features of the tokens could be added
            // here, for instance non-breakable italic or bold chunks, or adding sentence split based on
            // spacing/indent

            return finalSentencePositions;
        } catch (Exception e) {
            LOGGER.warn("Cannot detect sentences. ", e);
            return null;
        }
    }

    /**
     * Return true if the token should be skipped when considering sentence content.
     */
    public static boolean toSkipToken(String tok) {
        // the hyphen is considered to be skipped to cover the case of word hyphenation
        if (tok.equals("-") || tok.equals(" ") || tok.equals("\n") || tok.equals("\t"))
            return true;
        else
            return false;
    }

    // same as toSkipToken but without the hyphen: used while scanning for trailing
    // reference markers, where a hyphen is a meaningful marker character
    private static boolean toSkipTokenNoHyphen(String tok) {
        if (tok.equals(" ") || tok.equals("\n") || tok.equals("\t"))
            return true;
        else
            return false;
    }

    /**
     * Return true if the token is a valid numerical reference marker
     * ([0-9,()\-\]\[) rendered in superscript.
     */
    private static boolean isValidSuperScriptNumericalReferenceMarker(LayoutToken token) {
        String tok = token.getText();
        if (tok == null) {
            // should never be the case, but we can just skip the token
            return true;
        }
        if (token.isSuperscript() && token.getText().matches("[0-9,\\-\\(\\)\\[\\]]+")) {
            //System.out.println("isValidSuperScriptNumericalReferenceMarker: " + token.getText() + " -> true");
            return true;
        } else {
            //System.out.println("isValidSuperScriptNumericalReferenceMarker: " + token.getText() + " -> false");
            return false;
        }
    }
}
14,524
43.555215
150
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/KeyGen.java
package org.grobid.core.utilities; /** * Generate a random key. * */ public class KeyGen { /** * Minimum length for a decent key */ public static final int MIN_LENGTH = 10; /** * The random number generator. */ protected static java.util.Random r = new java.util.Random(); /** * Set of characters that is valid. Must be printable, memorable, and "won't * break HTML" (i.e., not ' <', '>', '&', '=', ...). or break shell commands * (i.e., not ' <', '>', '$', '!', ...). I, L and O are good to leave out, * as are numeric zero and one. */ protected static final char[] goodChar = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '2', '3', '4', '5', '6', '7', '8', '9'}; /** * Generate a Password object with a random password. * @return a generated key */ public static String getKey() { StringBuilder sb = new StringBuilder(); for (int i = 0; i < MIN_LENGTH; i++) { sb.append(goodChar[r.nextInt(goodChar.length)]); } return sb.toString(); } }
1,320
29.72093
80
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/Utilities.java
package org.grobid.core.utilities;

import org.apache.commons.collections4.CollectionUtils;

import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Method;
import java.net.URL;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang3.StringUtils;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.layout.LayoutToken;

/**
 * Some utilities methods that I don't know where to put.
 */
public class Utilities {

    /**
     * Deletes all files and subdirectories under dir. Returns true if all
     * deletions were successful. If a deletion fails, the method stops
     * attempting to delete and returns false.
     */
    public static boolean deleteDir(File dir) {
        if (dir.isDirectory()) {
            //String[] children = dir.list();
            // NOTE(review): listFiles() returns null on I/O error or if dir
            // vanished, which would NPE on children.length — confirm callers
            // guarantee a readable directory
            File[] children = dir.listFiles();
            for (int i = 0; i < children.length; i++) {
                boolean success = deleteDir(children[i]);
                if (!success) {
                    return false;
                }
            }
        }
        // the directory is now empty so delete it
        return dir.delete();
    }

    /**
     * Downloads the resource at urlmsg and stores it as a file under
     * path/name, returning the concatenated destination path.
     * NOTE(review): in/out streams are not closed when an exception occurs
     * mid-copy (no finally/try-with-resources), so a failure leaks both
     * handles.
     */
    public static String uploadFile(String urlmsg, String path, String name) {
        try {
            System.out.println("Sending: " + urlmsg);
            URL url = new URL(urlmsg);

            File outFile = new File(path, name);
            FileOutputStream out = new FileOutputStream(outFile);

            // Serve the file
            InputStream in = url.openStream();

            byte[] buf = new byte[4 * 1024]; // 4K buffer
            int bytesRead;
            while ((bytesRead = in.read(buf)) != -1) {
                out.write(buf, 0, bytesRead);
            }
            out.close();
            in.close();
            return path + name;
        } catch (Exception e) {
            throw new GrobidException("An exception occured while running Grobid.", e);
        }
    }

    // characters stripped from several metadata fields in cleanZFNMetadata;
    // note that indexOf(c) checks membership against every character of this
    // literal, so '(', '[', ',', ';', ']' and ')' are all stripped
    public static String punctuationsSub = "([,;])";

    /**
     * Special cleaning for ZFN extracted data in a BiblioItem.
     * Field by field: strips punctuation characters, trims whitespace and
     * removes well-known boilerplate prefixes ("Aus dem"/"Aus der").
     */
    @Deprecated
    public static BiblioItem cleanZFNMetadata(BiblioItem item) {
        // general cleaning: remove brackets, parenthesis, etc.

        // date: keep only characters not listed in TextUtilities.fullPunctuations
        // (presumably a string of punctuation characters — confirm against TextUtilities)
        if (item.getPublicationDate() != null) {
            // NOTE(review): per-character += string building is O(n^2); harmless
            // for short fields, left as-is in this doc-only pass
            String new_date = "";
            for (int i = 0; i < item.getPublicationDate().length(); i++) {
                char c = item.getPublicationDate().charAt(i);
                if (TextUtilities.fullPunctuations.indexOf(c) == -1)
                    new_date += c;
            }
            item.setPublicationDate(new_date.trim());
        }

        // affiliation: drop the German "From the ..." boilerplate prefix
        String affiliation = item.getAffiliation();
        if (affiliation != null) {
            if (affiliation.startsWith("Aus dem"))
                affiliation = affiliation.replace("Aus dem", "");
            if (affiliation.startsWith("Aus der"))
                affiliation = affiliation.replace("Aus der", "");
            affiliation = affiliation.trim();
            item.setAffiliation(affiliation);
        }

        // journal: strip punctuationsSub characters, fix spacing, drop a leading ',' or '.'
        String journal = item.getJournal();
        if (journal != null) {
            String new_journal = "";
            for (int i = 0; i < journal.length(); i++) {
                char c = journal.charAt(i);
                if (punctuationsSub.indexOf(c) == -1)
                    new_journal += c;
            }
            journal = new_journal.trim();
            journal = journal.replace(" .", ".");
            // NOTE(review): '|' is the non-short-circuit OR — both startsWith
            // calls always run; functionally equivalent to '||' here
            if ((journal.startsWith(",")) | (journal.startsWith("."))) {
                journal = journal.substring(1, journal.length()).trim();
            }
            item.setJournal(journal);
        }

        // page block: strip punctuationsSub characters
        String pageRange = item.getPageRange();
        if (pageRange != null) {
            String new_pageRange = "";
            for (int i = 0; i < pageRange.length(); i++) {
                char c = pageRange.charAt(i);
                if (punctuationsSub.indexOf(c) == -1)
                    new_pageRange += c;
            }
            pageRange = new_pageRange.trim();
            item.setPageRange(pageRange);
        }

        // note: strip punctuationsSub characters, collapse dot runs, drop a leading ',' or '.'
        String note = item.getNote();
        if (note != null) {
            String new_note = "";
            for (int i = 0; i < note.length(); i++) {
                char c = note.charAt(i);
                if (punctuationsSub.indexOf(c) == -1)
                    new_note += c;
            }
            note = new_note.trim();
            note = note.replace(" .", ".");
            note = note.replace("...", ".");
            note = note.replace("..", ".");
            if ((note.startsWith(",")) | (note.startsWith("."))) {
                note = note.substring(1, note.length()).trim();
            }
            //note = note.replace("@BULLET", " • ");
            item.setNote(note);
        }

        // submission: same punctuation stripping (continues beyond this chunk)
        String submission = item.getSubmission();
        if (submission != null) {
            String new_submission = "";
            for (int i = 0; i < 
submission.length(); i++) { char c = submission.charAt(i); if (punctuationsSub.indexOf(c) == -1) new_submission += c; } submission = new_submission.trim(); submission = submission.replace(" .", "."); submission = submission.replace("...", "."); submission = submission.replace("..", "."); if ((submission.startsWith(",")) | (submission.startsWith("."))) { submission = submission.substring(1, submission.length()) .trim(); } //submission = submission.replace("@BULLET", " • "); item.setSubmission(submission); } // dedication String dedication = item.getDedication(); if (dedication != null) { String new_dedication = ""; for (int i = 0; i < dedication.length(); i++) { char c = dedication.charAt(i); if (punctuationsSub.indexOf(c) == -1) new_dedication += c; } dedication = new_dedication.trim(); dedication = dedication.replace(" .", "."); dedication = dedication.replace("...", "."); dedication = dedication.replace("..", "."); if ((dedication.startsWith(",")) | (dedication.startsWith("."))) { dedication = dedication.substring(1, dedication.length()) .trim(); } //dedication = dedication.replace("@BULLET", " • "); item.setDedication(dedication); } // title String title = item.getTitle(); if (title != null) { if (title.endsWith("'")) { title = title.substring(0, title.length() - 1).trim(); } //title = title.replace("@BULLET", " • "); item.setTitle(title); } // English title String english_title = item.getEnglishTitle(); if (english_title != null) { if (english_title.endsWith("'")) { english_title = english_title.substring(0, english_title.length() - 1).trim(); } //english_title = english_title.replace("@BULLET", " • "); item.setEnglishTitle(english_title); } // abstract String abstract_ = item.getAbstract(); if (abstract_ != null) { if (abstract_.startsWith(") ")) { abstract_ = abstract_.substring(1, abstract_.length()).trim(); } //abstract_ = abstract_.replace("@BULLET", " • "); item.setAbstract(abstract_); } // address String address = item.getAddress(); if (address != null) 
{ address.replace("\t", " "); address = address.trim(); if ((address.startsWith(",")) | (address.startsWith("("))) { address = address.substring(1, address.length()).trim(); } if (address.endsWith(")")) { address = address.substring(0, address.length() - 1).trim(); } item.setAddress(address); } // email String email = item.getEmail(); if (email != null) { if (email.startsWith("E-mail :")) { email = email.replace("E-mail :", "").trim(); item.setEmail(email); } } // authors String authors = item.getAuthors(); if (authors != null) { authors = authors.replace("0. ", "O. "); item.setAuthors(authors); } // keywords String keyword = item.getKeyword(); if (keyword != null) { if (keyword.startsWith(":")) { keyword = keyword.substring(1, keyword.length()).trim(); item.setKeyword(keyword); } } return item; } /** * Return the name of directory to use given the os and the architecture.<br> * Possibles returned values should match one of the following:<br> * win-32<br> * lin-32<br> * lin-64<br> * mac-64<br> * * @return name of the directory corresponding to the os name and * architecture. */ public static String getOsNameAndArch() { String osPart = System.getProperty("os.name").replace(" ", "") .toLowerCase().substring(0, 3); if (StringUtils.equals(osPart, "mac")) { if (StringUtils.equals(System.getProperty("os.arch"), "aarch64")){ osPart = osPart+"_arm"; } } String archPart = System.getProperty("sun.arch.data.model"); return String.format("%s-%s", osPart, archPart); } /** * Convert a string to boolean. * * @param value * the value to convert * @return true if the string value is "true", false is it equals to * "false". <br> * If the value does not correspond to one of these 2 values, return * false. */ public static boolean stringToBoolean(String value) { boolean res = false; if (StringUtils.isNotBlank(value) && Boolean.toString(true).equalsIgnoreCase(value.trim())) { res = true; } return res; } /** * Call a java method using the method name given in string. 
* * @param obj * Class in which the method is. * @param args * the arguments of the method. * @param methodName * the name of the method. * @return result of the called method. * @throws Exception */ @SuppressWarnings("rawtypes") public static Object launchMethod(Object obj, Object[] args, String methodName) throws Exception { Class[] paramTypes = null; if (args != null) { paramTypes = new Class[args.length]; for (int i = 0; i < args.length; ++i) { paramTypes[i] = args[i].getClass(); } } if(StringUtils.isEmpty(methodName)) { throw new GrobidException("Missing method in command line. To specify with -exe [methodName]. " + methodName); } return getMethod(obj, paramTypes, methodName).invoke(obj, args); } /** * Call a java method using the method name given in string. * * @param obj * Class in which the method is. * @param args * the arguments of the method. * @param paramTypes * types of the arguments. * @param methodName * the name of the method. * @return result of the called method. * @throws Exception */ @SuppressWarnings("rawtypes") public static Object launchMethod(Object obj, Object[] args, Class[] paramTypes, String methodName) throws Exception { return getMethod(obj, paramTypes, methodName).invoke(obj, args); } /** * Get the method given in string in input corresponding to the given * arguments. * * @param obj * Class in which the method is. * @param paramTypes * types of the arguments. * @param methodName * the name of the method. * @return Methood * * @throws NoSuchMethodException */ @SuppressWarnings("rawtypes") public static Method getMethod(Object obj, Class[] paramTypes, String methodName) throws NoSuchMethodException { Method method = obj.getClass().getMethod(methodName, paramTypes); return method; } /** * Format a date in string using pFormat. * * @param pDate * the date to parse. * @param pFormat * the format to use following SimpleDateFormat patterns. * * @return the formatted date. 
*/ public static String dateToString(Date pDate, String pFormat){ SimpleDateFormat dateFormat = new SimpleDateFormat(pFormat); return dateFormat.format(pDate); } public static boolean doubleEquals(double d1, double d2) { return Math.abs(d1 - d2) <= Double.MIN_VALUE; } public static boolean doubleEquals(double d1, double d2, double epsilon) { return Math.abs(d1 - d2) <= epsilon; } /** * Merge the offset positions of two lists, merging overlapping positions * into a spanning one. * * @param positions1 * the first offset position list to be merged * @param positions2 * the second offset position list to be merged * * @return the merged list of (merged) offset positions */ public static List<OffsetPosition> mergePositions(List<OffsetPosition> positions1, List<OffsetPosition> positions2) { if (CollectionUtils.isEmpty(positions1)) return positions2; if (CollectionUtils.isEmpty(positions2)) return positions1; Collections.sort(positions1); Collections.sort(positions2); List<OffsetPosition> result = new ArrayList<OffsetPosition>(); for(OffsetPosition pos : positions1) { result.add(pos); } for(OffsetPosition pos : positions2) { if (!result.contains(pos)) result.add(pos); } Collections.sort(result); List<OffsetPosition> finalResult = new ArrayList<OffsetPosition>(); OffsetPosition prevPos = null; for(OffsetPosition pos : result) { if (prevPos == null) { finalResult.add(pos); prevPos = pos; } else { if ( (pos.start >= prevPos.start) && (pos.end <= prevPos.end) ) { // nothing to do } else if (prevPos.end >= pos.start) { prevPos.end = pos.end; } else { prevPos = pos; finalResult.add(pos); } } } return finalResult; } /** * This version uses general LayoutToken offsets relative to the complete document. 
* It supposes that the stringPosition have been identified on the complete document string */ public static List<OffsetPosition> convertStringOffsetToTokenOffsetOld( List<OffsetPosition> stringPosition, List<LayoutToken> tokens) { List<OffsetPosition> result = new ArrayList<OffsetPosition>(); int indexToken = 0; OffsetPosition currentPosition = null; LayoutToken token = null; for(OffsetPosition pos : stringPosition) { while(indexToken < tokens.size()) { token = tokens.get(indexToken); if (token.getOffset() >= pos.start) { // we have a start currentPosition = new OffsetPosition(indexToken, indexToken); // we need an end boolean found = false; while(indexToken < tokens.size()) { token = tokens.get(indexToken); if (token.getOffset()+token.getText().length() >= pos.end) { // we have an end currentPosition.end = indexToken; result.add(currentPosition); found = true; break; } indexToken++; } if (found) { indexToken++; break; } else { currentPosition.end = indexToken-1; result.add(currentPosition); } } indexToken++; } } return result; } /** * This version uses actual LayoutToken offsets relative to the tokens present in argment only. * It supposes that the stringPosition have been identified on the provided tokens only, and not * restricted to the complete document. 
*/ public static List<OffsetPosition> convertStringOffsetToTokenOffset( List<OffsetPosition> stringPosition, List<LayoutToken> tokens) { List<OffsetPosition> result = new ArrayList<OffsetPosition>(); int indexText = 0; int indexToken = 0; OffsetPosition currentPosition = null; LayoutToken token = null; for(OffsetPosition pos : stringPosition) { while(indexToken < tokens.size()) { token = tokens.get(indexToken); if (token.getText() == null) { indexToken++; continue; } if (indexText >= pos.start) { // we have a start currentPosition = new OffsetPosition(indexToken, indexToken); // we need an end boolean found = false; while(indexToken < tokens.size()) { token = tokens.get(indexToken); if (token.getText() == null) { indexToken++; continue; } if (indexText+token.getText().length() >= pos.end) { // we have an end currentPosition.end = indexToken; result.add(currentPosition); found = true; break; } indexToken++; indexText += token.getText().length(); } if (found) { indexToken++; indexText += token.getText().length(); break; } else { currentPosition.end = indexToken-1; result.add(currentPosition); } } indexToken++; indexText += token.getText().length(); } } return result; } }
17,189
29.31746
116
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/EnvironmentVariableValues.java
package org.grobid.core.utilities; import java.util.HashMap; import java.util.Map; public class EnvironmentVariableValues { private final Map<String, String> configParameters = new HashMap<>(); public EnvironmentVariableValues(String matcher) { this(System.getenv(), matcher); } public EnvironmentVariableValues(Map<String, String> environmentVariablesMap, String matcher) { for (Map.Entry<String, String> entry: environmentVariablesMap.entrySet()) { if (!entry.getKey().matches(matcher)) { continue; } this.configParameters.put(entry.getKey(), entry.getValue()); } } public Map<String, String> getConfigParameters() { return this.configParameters; } }
770
28.653846
99
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/ElementCounter.java
package org.grobid.core.utilities; import com.google.common.base.Function; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import java.io.Serializable; import java.util.Collections; import java.util.Comparator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; public class ElementCounter<T> implements Serializable { private static final long serialVersionUID = 7859247056683063608L; private Map<T, Integer> cnts = new LinkedHashMap<>(); public ElementCounter() { } public ElementCounter(Map<T, Integer> cnts) { this.cnts = cnts; } public void i(T obj) { i(obj, 1); } public void i(T obj, int val) { if (cnts.containsKey(obj)) { cnts.put(obj, cnts.get(obj) + val); } else { cnts.put(obj, val); } } public int cnt(T obj) { if (cnts.containsKey(obj)) { return cnts.get(obj); } else { return 0; } } public Map<T, Integer> getCnts() { return cnts; } //Jackson public void setCountItems(List<ElementCounterItem<T>> items) { for (ElementCounterItem<T> i : items) { cnts.put(i.getItem(), i.getCnt()); } } public List<Map.Entry<T, Integer>> getSortedCounts() { List<Map.Entry<T, Integer>> list = Lists.newArrayList(cnts.entrySet()); Collections.sort(list, new Comparator<Map.Entry<T, Integer>>() { @Override public int compare(Map.Entry<T, Integer> o1, Map.Entry<T, Integer> o2) { return -o1.getValue().compareTo(o2.getValue()); } }); return list; } public int size() { return cnts.size(); } public List<ElementCounterItem<T>> getCountItems() { return Lists.newArrayList(Iterables.transform(getCnts().entrySet(), new Function<Map.Entry<T, Integer>, ElementCounterItem<T>>() { @Override public ElementCounterItem<T> apply(Map.Entry<T, Integer> input) { return new ElementCounterItem<T>(input.getKey(), input.getValue()); } })); } @Override public String toString() { return "ElementCounter{" + "cnts=" + cnts + '}'; } }
2,344
25.055556
138
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/Test.java
package org.grobid.core.utilities.crossref;

import java.util.List;
import java.util.Map;
import java.util.HashMap;

import org.grobid.core.data.BiblioItem;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.GrobidProperties;

/**
 * Test class with 100 requests (works/doi): a manual smoke-test harness for
 * the Crossref client, not a unit test. The first 90 DOIs are resolved
 * asynchronously through listener callbacks; the last 10 synchronously by
 * blocking on the listener with a timeout.
 */
public class Test {

	// 100 sample DOIs used as the request workload
	public static String[] DOIs = {
		"10.1086/107043", "10.1086/102351", "10.1086/100853", "10.1086/105172", "10.1086/106972",
		"10.1086/106203", "10.1086/106965", "10.1086/112074", "10.1086/112157", "10.1086/104710",
		"10.1086/105874", "10.1086/511976", "10.1086/522786", "10.1086/523598", "10.1086/522794",
		"10.1086/522302", "10.1086/522782", "10.1086/521647", "10.1086/523597", "10.1086/522886",
		"10.1086/522053", "10.1086/522334", "10.1086/523596", "10.1086/521358", "10.1086/522054",
		"10.1086/521819", "10.1086/522627", "10.1086/522626", "10.1086/522962", "10.1086/522831",
		"10.1086/522783", "10.1086/522793", "10.1086/521817", "10.1086/523631", "10.1086/521821",
		"10.1086/522945", "10.1086/521927", "10.1086/522888", "10.1086/522787", "10.1086/518558",
		"10.1086/522369", "10.1086/522963", "10.1086/521985", "10.1086/522784", "10.1086/522229",
		"10.1086/522795", "10.1086/521926", "10.1086/521645", "10.1086/522205", "10.1086/521649",
		"10.1086/522628", "10.1086/522943", "10.1086/521925", "10.1086/521984", "10.1086/522112",
		"10.1086/521651", "10.1086/112571", "10.1086/112836", "10.1086/112324", "10.1086/112352",
		"10.1086/112337", "10.1086/112331", "10.1086/112325", "10.1086/112366", "10.1086/112354",
		"10.1086/112370", "10.1086/112332", "10.1086/112326", "10.1086/521552", "10.1086/520921",
		"10.1086/521432", "10.1086/521022", "10.1086/521434", "10.1086/521356", "10.1086/520958",
		"10.1086/520878", "10.1086/521396", "10.1086/519974", "10.1086/521397", "10.1086/521392",
		"10.1086/520807", "10.1086/521707", "10.1086/521652", "10.1086/521556", "10.1086/521394",
		"10.1086/521343", "10.1086/521823", "10.1086/521341", "10.1086/521554", "10.1086/521429",
		"10.1086/519975", "10.1086/519379", "10.1086/521148", "10.1086/520500", "10.1086/520813",
		"10.1086/521815", "10.1086/521555", "10.1086/521703", "10.1086/521430", "10.1086/520641"
	};

	public static void main( String[] args ) {
		// initialize native libraries and GROBID configuration before any request
		LibraryLoader.load();
		GrobidProperties.getInstance();

		CrossrefClient client = CrossrefClient.getInstance();
		// deserializes the Crossref JSON "work" payload into BiblioItem objects
		WorkDeserializer workDeserializer = new WorkDeserializer();

		// all requests are registered against the current thread so that
		// CrossrefClient.finish(threadId) could wait for them if needed
		long threadId = Thread.currentThread().getId();

		try {
			for (int i=0 ; i<DOIs.length ; i++) {

				String doi = DOIs[i];
				final int id = i;

				Map<String, String> arguments = new HashMap<String,String>();
				arguments.put("doi", doi);

				// ASYNCHRONOUS TEST (90 first requests): results arrive on the
				// listener callbacks, main loop does not block
				if (i < 90) {
					client.<BiblioItem>pushRequest("works", arguments, workDeserializer, threadId,
						new CrossrefRequestListener<BiblioItem>() {

						@Override
						public void onSuccess(List<BiblioItem> results) {
							System.out.println("Success request "+id);
							for (BiblioItem biblio : results) {
								System.out.println(" -> res title: "+biblio.getTitle());
							}
						}

						@Override
						public void onError(int status, String message, Exception exception) {
							System.out.println("ERROR ("+status+") : "+message);
						}
					});
				}

				// SYNCHRONOUS TEST (10 last requests): block on the listener
				// monitor until it is notified or the timeout elapses
				else {
					CrossrefRequestListener<BiblioItem> requestListener = new CrossrefRequestListener<BiblioItem>();

					client.<BiblioItem>pushRequest("works", arguments, workDeserializer, threadId, requestListener);

					synchronized (requestListener) {
						try {
							requestListener.wait(6000); // timeout after 6 seconds
						} catch (InterruptedException e) {
							e.printStackTrace();
						}
					}

					CrossrefRequestListener.Response<BiblioItem> response = requestListener.getResponse();

					if (response == null)
						System.out.println("ERROR : No response ! Maybe timeout.");

					else if (response.hasError() || !response.hasResults())
						System.out.println("ERROR ("+response.status+") : "+response.errorMessage);

					else { // success
						System.out.println("Success request "+id);
						for (BiblioItem biblio : response.results) {
							System.out.println(" -> res title: "+biblio.getTitle());
						}
					}
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
4,810
24.188482
130
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/CrossrefClient.java
package org.grobid.core.utilities.crossref;

import java.io.Closeable;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.*;

import org.apache.commons.lang3.concurrent.TimedSemaphore;
import org.apache.http.client.ClientProtocolException;
import org.grobid.core.utilities.crossref.CrossrefRequestListener.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Request pool to get data from api.crossref.org without exceeding limits
 * supporting multi-thread.
 *
 * Note: the provided interval for the query rate returned by CrossRef appeared to be not reliable,
 * so we have to use the rate limit (X-Rate-Limit-Interval) as a global parallel query limit, without
 * interval consideration.
 * See https://github.com/kermitt2/grobid/pull/725
 */
public class CrossrefClient implements Closeable {
	public static final Logger logger = LoggerFactory.getLogger(CrossrefClient.class);

	// lazily-created singleton instance (volatile for safe publication)
	protected static volatile CrossrefClient instance;

	// pool actually running the CrossrefRequestTask instances
	protected volatile ExecutorService executorService;

	// maximum number of simultaneously active requests, updated dynamically
	// from the rate-limit headers returned by the Crossref API
	protected int max_pool_size = 1;

	// when true, setLimits() is re-applied from each successful response
	protected static boolean limitAuto = true;

	// this list is used to maintain a list of Futures that were submitted,
	// that we can use to check if the requests are completed;
	// keyed by the java thread id of the caller that pushed the requests
	//private List<Future<?>> futures = new ArrayList<Future<?>>();
	protected volatile Map<Long, List<Future<?>>> futures = new HashMap<>();

	/**
	 * Returns the shared client, creating it on first use.
	 * NOTE(review): the null check is not synchronized, so two threads racing
	 * here may each trigger getNewInstance() — the synchronized factory then
	 * makes the last one win.
	 */
	public static CrossrefClient getInstance() {
        if (instance == null) {
			getNewInstance();
		}
        return instance;
    }

	/**
	 * Creates a new instance.
	 */
	private static synchronized void getNewInstance() {
		logger.debug("Get new instance of CrossrefClient");
		instance = new CrossrefClient();
	}

	/**
	 * Hidden constructor
	 */
	protected CrossrefClient() {
		// note: by default timeout with newCachedThreadPool is set to 60s, which might be too much for crossref usage,
		// hanging grobid significantly, so we might want to use rather a custom instance of ThreadPoolExecutor and set
		// the timeout differently
		// worker threads are daemons so they never prevent JVM shutdown
		this.executorService = Executors.newCachedThreadPool(r -> {
			Thread t = Executors.defaultThreadFactory().newThread(r);
			t.setDaemon(true);
			return t;
		});
		this.futures = new HashMap<>();
		setLimits(1, 1000); // conservative defaults until the API tells us otherwise
	}

	/** Debug-level logging helper, prefixing the message with the request. */
	public static void printLog(CrossrefRequest<?> request, String message) {
		logger.debug((request != null ? request+": " : "")+message);
		//System.out.println((request != null ? request+": " : "")+message);
	}

	/**
	 * Sets the concurrency limit. The interval argument is kept for API
	 * compatibility but is ignored (see class comment).
	 */
	public void setLimits(int iterations, int interval) {
		this.setMax_pool_size(iterations);
		// interval is not usable anymore, we need to wait termination of threads independently from any time interval
	}

	/**
	 * Applies limits reported by a Crossref response, if auto-limiting is on.
	 */
	public void updateLimits(int iterations, int interval) {
		if (this.limitAuto) {
			//printLog(null, "Updating limits... " + iterations + " / " + interval);
			this.setLimits(iterations, interval); // note: interval not used anymore
		}
	}

	/**
	 * Push a request in pool to be executed as soon as possible, then wait a response through the listener.
	 * API Documentation : https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md
	 *
	 * @param request  the prepared Crossref request
	 * @param listener callback notified with the response (may be null)
	 * @param threadId java id of the calling thread, used to group futures for finish()
	 */
	public <T extends Object> void pushRequest(CrossrefRequest<T> request, CrossrefRequestListener<T> listener,
		long threadId) throws URISyntaxException, ClientProtocolException, IOException {
		if (listener != null)
			request.addListener(listener);
		synchronized(this) {
			// we limit the number of active threads to the crossref api dynamic limit returned in the response header
			// busy-wait (10µs steps) until a pool slot frees up
			while(((ThreadPoolExecutor)executorService).getActiveCount() >= this.getMax_pool_size()) {
				try {
					TimeUnit.MICROSECONDS.sleep(10);
				} catch (InterruptedException e) {
					e.printStackTrace();
				}
			}
			Future<?> f = executorService.submit(new CrossrefRequestTask<T>(this, request));
			// remember the future under the caller's thread id so finish(threadId) can join it
			List<Future<?>> localFutures = this.futures.get(Long.valueOf(threadId));
			if (localFutures == null)
				localFutures = new ArrayList<Future<?>>();
			localFutures.add(f);
			this.futures.put(threadId, localFutures);
			logger.debug("add request to thread " + threadId +
				"active threads count is now " + ((ThreadPoolExecutor) executorService).getActiveCount() );
			//System.out.println("add request to thread " + threadId + " / current total for the thread: " + localFutures.size());
		}
	}

	/**
	 * Push a request in pool to be executed soon as possible, then wait a response through the listener.
	 * @see <a href="https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md">Crossref API Documentation</a>
	 *
	 * @param params query parameters, can be null, ex: ?query.title=[title]&amp;query.author=[author]
	 * @param deserializer json response deserializer, ex: WorkDeserializer to convert Work to BiblioItem
	 * @param threadId the java identifier of the thread providing the request (e.g. via Thread.currentThread().getId())
	 * @param listener catch response from request
	 */
	public <T extends Object> void pushRequest(String model, Map<String, String> params,
		CrossrefDeserializer<T> deserializer, long threadId, CrossrefRequestListener<T> listener)
		throws URISyntaxException, ClientProtocolException, IOException {
		CrossrefRequest<T> request = new CrossrefRequest<T>(model, params, deserializer);
		synchronized(this) {
			this.<T>pushRequest(request, listener, threadId);
		}
	}

	/**
	 * Wait for all request from a specific thread to be completed
	 */
	public void finish(long threadId) {
		synchronized(this.futures) {
			try {
				List<Future<?>> threadFutures = this.futures.get(Long.valueOf(threadId));
				if (threadFutures != null) {
					//System.out.println("thread: " + threadId + " / waiting for " + threadFutures.size() + " requests to finish...")
					for(Future<?> future : threadFutures) {
						future.get(); // get will block until the future is done
					}
					this.futures.remove(threadId);
				}
			} catch (InterruptedException ie) {
				// Preserve interrupt status
				Thread.currentThread().interrupt();
			} catch (ExecutionException ee) {
				logger.error("CrossRef request execution fails");
			}
		}
	}

	public int getMax_pool_size() {
		return max_pool_size;
	}

	public void setMax_pool_size(int max_pool_size) {
		this.max_pool_size = max_pool_size;
	}

	@Override
	public void close() throws IOException {
		executorService.shutdown();
	}
}
6,566
36.3125
158
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/CrossrefDeserializer.java
package org.grobid.core.utilities.crossref;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.http.client.ClientProtocolException;

import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

/**
 * Abstract deserializer to parse json response from crossref.
 * Normalize results to a list of java objects even if only one result is given.
 * As example: WorkDeserializer.
 */
public abstract class CrossrefDeserializer<T extends Object> extends JsonDeserializer<List<T>> {

	// mapper configured (via the module below) to route List deserialization
	// through this class
	protected ObjectMapper mapper;
	protected SimpleModule module;

	public CrossrefDeserializer() {
		mapper = new ObjectMapper();
		module = new SimpleModule();
		// register ourselves as the deserializer for List so that parse()
		// triggers deserialize() below
		module.addDeserializer(List.class, this);
		mapper.registerModule(module);
	}

	/**
	 * Describe how to deserialize one json item from response
	 */
	protected abstract T deserializeOneItem(JsonNode item);

	/**
	 * Parse a json String, usually the response body. Give back a list of java objects.
	 */
	@SuppressWarnings("unchecked")
	public List<T> parse(String body) throws JsonParseException, JsonMappingException, IOException {
		return (List<T>)mapper.readValue(body, List.class);
	}

	/**
	 * Normalize results to get always an object list even if you fetch only one object.
	 *
	 * Handles three response shapes:
	 *  - no "message" wrapper (biblio-glutton style): the whole tree is one item;
	 *  - "message" without "items": a single Crossref object;
	 *  - "message" with an "items" array: the usual Crossref list response.
	 */
	protected ArrayNode normalizeResults(JsonParser parser) throws IOException {
		JsonNode treeNode = parser.readValueAsTree();
		ArrayNode results = null;

		JsonNode messageNode = treeNode.get("message");

		if (messageNode == null || !messageNode.isObject()) {
			//throw new ClientProtocolException("No message found in json result.");
			// glutton
			results = mapper.createArrayNode();
			results.add(treeNode);
		} else {
			ObjectNode message = (ObjectNode)messageNode;

			JsonNode itemsNode = message.get("items");

			if (itemsNode == null || !itemsNode.isArray()) {
				// single-object response: wrap it in a one-element array
				results = mapper.createArrayNode();
				results.add(message);
			} else
				results = (ArrayNode)itemsNode;
		}

		return results;
	}

	@Override
	public List<T> deserialize(JsonParser parser, DeserializationContext context)
			throws IOException, JsonProcessingException {
		ArrayList<T> res = new ArrayList<T>();

		// normalize first, then delegate each item to the concrete subclass
		ArrayNode items = normalizeResults(parser);

		Iterator<JsonNode> it = items.elements();
		while (it.hasNext()) {
			JsonNode item = it.next();
			T one = deserializeOneItem(item);
			res.add(one);
		}

		return res;
	}
}
3,004
28.460784
124
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/CrossrefRequestTask.java
package org.grobid.core.utilities.crossref; import java.util.List; /** * Task to execute its request at the right time. * */ public class CrossrefRequestTask<T extends Object> extends CrossrefRequestListener<T> implements Runnable { protected CrossrefClient client; protected CrossrefRequest<T> request; public CrossrefRequestTask(CrossrefClient client, CrossrefRequest<T> request) { this.client = client; this.request = request; CrossrefClient.printLog(request, "New request in the pool"); } @Override public void run() { try { CrossrefClient.printLog(request, ".. executing"); request.addListener(this); request.execute(); } catch (Exception e) { Response<T> message = new Response<T>(); message.setException(e, request.toString()); request.notifyListeners(message); } } @Override public void onResponse(Response<T> response) { if (!response.hasError()) client.updateLimits(response.limitIterations, response.interval); } @Override public void onSuccess(List<T> results) {} @Override public void onError(int status, String message, Exception exception) {} }
1,140
21.82
107
java
grobid
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/CrossrefRequest.java
package org.grobid.core.utilities.crossref; import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; import org.apache.http.client.config.CookieSpecs; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.ResponseHandler; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.DefaultProxyRoutePlanner; import org.apache.http.util.EntityUtils; import org.grobid.core.utilities.GrobidProperties; import java.io.IOException; import java.util.ArrayList; import java.util.Map; import java.util.Map.Entry; import java.util.Observable; /** * GET crossref request * @see <a href="https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md">Crossref API Documentation</a> * */ public class CrossrefRequest<T extends Object> extends Observable { protected static final String BASE_URL = "https://api.crossref.org"; /** * Model key in crossref, ex: "works", "journals".. 
* @see <a href="https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md">Crossref API Documentation</a> */ public String model; /** * Model identifier in crossref, can be null, ex: doi for a work * @see <a href="https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md">Crossref API Documentation</a> */ //public String id; /** * Query parameters, cannot be null, ex: ?query.bibliographic=[title]&query.author=[author] * @see <a href="https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md">Crossref API Documentation</a> */ public Map<String, String> params; /** * JSON response deserializer, ex: WorkDeserializer to convert Work to BiblioItem */ protected CrossrefDeserializer<T> deserializer; protected ArrayList<CrossrefRequestListener<T>> listeners; public CrossrefRequest(String model, Map<String, String> params, CrossrefDeserializer<T> deserializer) { this.model = model; //this.id = id; this.params = params; this.deserializer = deserializer; this.listeners = new ArrayList<CrossrefRequestListener<T>>(); } /** * Add listener to catch response when request is executed. 
*/ public void addListener(CrossrefRequestListener<T> listener) { this.listeners.add(listener); } /** * Notify all connected listeners */ protected void notifyListeners(CrossrefRequestListener.Response<T> message) { for (CrossrefRequestListener<T> listener : listeners) listener.notify(message); } /** * Execute request, handle response by sending to listeners a CrossrefRequestListener.Response */ public void execute() { if (params == null) { // this should not happen CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>(); message.setException(new Exception("Empty list of parameter, cannot build request to the consolidation service"), this.toString()); notifyListeners(message); return; } CloseableHttpClient httpclient = null; RequestConfig requestConfig = RequestConfig.custom() .setCookieSpec(CookieSpecs.STANDARD) .build(); if (GrobidProperties.getProxyHost() != null) { HttpHost proxy = new HttpHost(GrobidProperties.getProxyHost(), GrobidProperties.getProxyPort()); DefaultProxyRoutePlanner routePlanner = new DefaultProxyRoutePlanner(proxy); httpclient = HttpClients.custom() .setDefaultRequestConfig(requestConfig) .setRoutePlanner(routePlanner) .build(); } else { httpclient = HttpClients.custom() .setDefaultRequestConfig(requestConfig) .build(); } try { URIBuilder uriBuilder = new URIBuilder(BASE_URL); String path = model; if (params.get("query.title") != null) { params.put("query.bibliographic", params.get("query.title")); params.remove("query.title"); } if (params.get("DOI") != null || params.get("doi") != null) { String doi = params.get("DOI"); if (doi == null) doi = params.get("doi"); //uriBuilder.setParameter("doi", doi); path += "/"+doi; uriBuilder.setPath(path); } else { uriBuilder.setPath(path); for (Entry<String, String> cursor : params.entrySet()) if (!cursor.getKey().equals("doi") && !cursor.getKey().equals("DOI") && !cursor.getKey().equals("firstPage") && !cursor.getKey().equals("volume")) 
uriBuilder.setParameter(cursor.getKey(), cursor.getValue()); } // "mailto" parameter to be used in the crossref query and in User-Agent // header, as recommended by CrossRef REST API documentation, e.g. &[email protected] if (GrobidProperties.getCrossrefMailto() != null) { uriBuilder.setParameter("mailto", GrobidProperties.getCrossrefMailto()); } // set recommended User-Agent header HttpGet httpget = new HttpGet(uriBuilder.build()); if (GrobidProperties.getCrossrefMailto() != null) { httpget.setHeader("User-Agent", "GROBID/0.6.1 (https://github.com/kermitt2/grobid; mailto:" + GrobidProperties.getCrossrefMailto() + ")"); } else { httpget.setHeader("User-Agent", "GROBID/0.6.1 (https://github.com/kermitt2/grobid)"); } // set the authorization token for the Metadata Plus service if available if (GrobidProperties.getCrossrefToken() != null) { httpget.setHeader("Crossref-Plus-API-Token", "Bearer " + GrobidProperties.getCrossrefToken()); } ResponseHandler<Void> responseHandler = new ResponseHandler<Void>() { @Override public Void handleResponse(HttpResponse response) throws ClientProtocolException, IOException { CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>(); message.status = response.getStatusLine().getStatusCode(); // note: header field names are case insensitive Header limitIntervalHeader = response.getFirstHeader("X-Rate-Limit-Interval"); Header limitLimitHeader = response.getFirstHeader("X-Rate-Limit-Limit"); if (limitIntervalHeader != null && limitLimitHeader != null) { message.setTimeLimit(limitIntervalHeader.getValue(), limitLimitHeader.getValue()); } if (message.status < 200 || message.status >= 300) { message.errorMessage = response.getStatusLine().getReasonPhrase(); notifyListeners(message); } HttpEntity entity = response.getEntity(); if (entity != null) { String body = EntityUtils.toString(entity); if (body != null && body.equals("Resource not found.")) { // this used to be a json object too in the past I think 
message.results = null; } else message.results = deserializer.parse(body); } notifyListeners(message); return null; } }; httpclient.execute(httpget, responseHandler); } catch (Exception e) { CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>(); message.setException(e, this.toString()); notifyListeners(message); } finally { try { httpclient.close(); } catch (IOException e) { CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>(); message.setException(e, this.toString()); notifyListeners(message); } } } public String toString() { String str = " ("; if (params != null) { for (Entry<String, String> cursor : params.entrySet()) str += ","+cursor.getKey()+"="+cursor.getValue(); } str += ")"; return str; } }
8,235
35.604444
143
java