grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/CrossrefRequestListener.java
package org.grobid.core.utilities.crossref;
import org.joda.time.Duration;
import java.util.List;
/**
* Listener to catch response from a CrossrefRequest.
*
*/
public class CrossrefRequestListener<T extends Object> {
public CrossrefRequestListener() {
}
public CrossrefRequestListener(int rank) {
this.rank = rank;
}
public static class Response<T> {
public int status = -1;
public List<T> results = null;
public int interval;
public int limitIterations;
public long time;
public String errorMessage;
public Exception errorException;
public Response() {
this.status = -1;
this.results = null;
this.interval = 0;
this.limitIterations = 1;
this.time = System.currentTimeMillis();
this.errorMessage = null;
this.errorException = null;
}
public void setTimeLimit(String limitInterval, String limitLimit) {
this.interval = (int)Duration.parse("PT"+limitInterval.toUpperCase()).getMillis();
this.limitIterations = Integer.parseInt(limitLimit);
}
/*public void setException(Exception e, CrossrefRequest<T> request) {
errorException = e;
errorMessage = e.getClass().getName()+" thrown during request execution : "+request.toString()+"\n"+e.getMessage();
}*/
public void setException(Exception e, String requestString) {
errorException = e;
errorMessage = e.getClass().getName()+" thrown during request execution : "+requestString+"\n"+e.getMessage();
}
public int getOneStepTime() {
return interval/limitIterations;
}
public String toString() {
return "Response (status:" + status + " timeLimit:" + interval + "/" + limitIterations + ", results:" + (results == null ? 0 : results.size()) + ")";
}
public boolean hasError() {
return (errorMessage != null) || (errorException != null);
}
public boolean hasResults() {
return (results != null) && (results.size() > 0);
}
}
/**
* Called when the request has been executed and any response is received
*/
public void onResponse(Response<T> response) {}
/**
* Called when the request succeeds and the response format is as expected
*/
public void onSuccess(List<T> results) {}
/**
* Called when the request results in an error
*/
public void onError(int status, String message, Exception exception) {}
public void notify(Response<T> response) {
onResponse(response);
if (response == null) {
System.out.println("Response is null");
} else {
if (response.results != null && response.results.size() > 0)
onSuccess(response.results);
if (response.hasError())
onError(response.status, response.errorMessage, response.errorException);
}
currentResponse = response;
synchronized (this) {
this.notifyAll();
}
}
protected Response<T> currentResponse = null;
/**
* Get the response once the listener has been notified, useful for synchronous calls
*/
public Response<T> getResponse() {
return currentResponse;
}
private int rank = -1;
/**
* Rank associated with the listener, used to identify the corresponding response
*/
public int getRank() {
return rank;
}
}
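A minimal sketch (not part of GROBID; the helper name awaitResponse is hypothetical) of the synchronous usage mentioned above: the caller blocks on the listener's monitor until notify(...) fires notifyAll(), then reads the response.

static <T> CrossrefRequestListener.Response<T> awaitResponse(CrossrefRequestListener<T> listener)
        throws InterruptedException {
    synchronized (listener) {
        // notify(response) sets currentResponse before calling notifyAll()
        while (listener.getResponse() == null) {
            listener.wait();
        }
    }
    return listener.getResponse();
}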
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/crossref/WorkDeserializer.java
package org.grobid.core.utilities.crossref;
import java.util.Iterator;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Person;
import org.grobid.core.data.Date;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
/**
* Convert a JSON Work model - from a glutton or crossref response - to a BiblioItem
* understandable by GROBID.
*
*/
public class WorkDeserializer extends CrossrefDeserializer<BiblioItem> {
@Override
protected BiblioItem deserializeOneItem(JsonNode item) {
BiblioItem biblio = null;
String type = null; // the crossref type of the item, see http://api.crossref.org/types
if (item.isObject()) {
biblio = new BiblioItem();
//System.out.println(item.toString());
// guard against records without a DOI
JsonNode doiNode = item.get("DOI");
if (doiNode != null && !doiNode.isMissingNode())
biblio.setDOI(doiNode.asText());
// the following are usually provided by biblio-glutton which index augmented/aggregated
// metadata
JsonNode pmidNode = item.get("pmid");
if (pmidNode != null && (!pmidNode.isMissingNode()) ) {
String pmid = pmidNode.asText();
biblio.setPMID(pmid);
}
JsonNode pmcidNode = item.get("pmcid");
if (pmcidNode != null && (!pmcidNode.isMissingNode()) ) {
String pmcid = pmcidNode.asText();
biblio.setPMCID(pmcid);
}
JsonNode piiNode = item.get("pii");
if (piiNode != null && (!piiNode.isMissingNode()) ) {
String pii = piiNode.asText();
biblio.setPII(pii);
}
JsonNode arkNode = item.get("ark");
if (arkNode != null && (!arkNode.isMissingNode()) ) {
String ark = arkNode.asText();
biblio.setArk(ark);
}
JsonNode istexNode = item.get("istexId");
if (istexNode != null && (!istexNode.isMissingNode()) ) {
String istexId = istexNode.asText();
biblio.setIstexId(istexId);
}
// the open access url - if available, from the glorious UnpayWall dataset provided
// by biblio-glutton
JsonNode oaLinkNode = item.get("oaLink");
if (oaLinkNode != null && (!oaLinkNode.isMissingNode()) ) {
String oaLink = oaLinkNode.asText();
biblio.setOAURL(oaLink);
}
// all the following is now pure crossref metadata
JsonNode typeNode = item.get("type");
if (typeNode != null && (!typeNode.isMissingNode()) ) {
type = typeNode.asText();
}
JsonNode titlesNode = item.get("title");
if (titlesNode != null && (!titlesNode.isMissingNode()) &&
titlesNode.isArray() && (((ArrayNode)titlesNode).size() > 0))
biblio.setTitle(((ArrayNode)titlesNode).get(0).asText());
JsonNode authorsNode = item.get("author");
if (authorsNode != null && (!authorsNode.isMissingNode()) &&
authorsNode.isArray() && (((ArrayNode)authorsNode).size() > 0)) {
Iterator<JsonNode> authorIt = ((ArrayNode)authorsNode).elements();
while (authorIt.hasNext()) {
JsonNode authorNode = authorIt.next();
Person person = new Person();
if (authorNode.get("given") != null && !authorNode.get("given").isMissingNode()) {
person.setFirstName(authorNode.get("given").asText());
person.normalizeCrossRefFirstName();
}
if (authorNode.get("family") != null && !authorNode.get("family").isMissingNode()) {
person.setLastName(authorNode.get("family").asText());
}
if (authorNode.get("ORCID") != null && !authorNode.get("ORCID").isMissingNode()) {
person.setORCID(authorNode.get("ORCID").asText());
}
// for cases like JM Smith and for case normalisation
person.normalizeName();
biblio.addFullAuthor(person);
}
}
JsonNode publisherNode = item.get("publisher");
if (publisherNode != null && (!publisherNode.isMissingNode()))
biblio.setPublisher(publisherNode.asText());
JsonNode pageNode = item.get("page");
if (pageNode != null && (!pageNode.isMissingNode()) )
biblio.setPageRange(pageNode.asText());
JsonNode volumeNode = item.get("volume");
if (volumeNode != null && (!volumeNode.isMissingNode()))
biblio.setVolumeBlock(volumeNode.asText(), false);
JsonNode issueNode = item.get("issue");
if (issueNode != null && (!issueNode.isMissingNode()))
biblio.setIssue(issueNode.asText());
JsonNode containerTitlesNode = item.get("container-title");
if (containerTitlesNode != null && (!containerTitlesNode.isMissingNode()) &&
containerTitlesNode.isArray() && (((ArrayNode)containerTitlesNode).size() > 0)) {
// container title depends on the type of object
// if journal
if ( (type != null) && (type.equals("journal-article")) )
biblio.setJournal(((ArrayNode)containerTitlesNode).get(0).asText());
// if book chapter or proceedings article
if ( (type != null) && (type.equals("book-section") ||
type.equals("proceedings-article") || type.equals("book-chapter")) )
biblio.setBookTitle(((ArrayNode)containerTitlesNode).get(0).asText());
}
JsonNode shortContainerTitlesNode = item.get("short-container-title");
if (shortContainerTitlesNode != null && (!shortContainerTitlesNode.isMissingNode()) &&
shortContainerTitlesNode.isArray() && (((ArrayNode)shortContainerTitlesNode).size() > 0)) {
// container title depends on the type of object
// if journal
if ( (type != null) && (type.equals("journal-article")) )
biblio.setJournalAbbrev(((ArrayNode)shortContainerTitlesNode).get(0).asText());
}
JsonNode issnTypeNode = item.get("issn-type");
if (issnTypeNode != null && (!issnTypeNode.isMissingNode()) &&
issnTypeNode.isArray() && (((ArrayNode)issnTypeNode).size() > 0)) {
Iterator<JsonNode> issnIt = ((ArrayNode)issnTypeNode).elements();
while (issnIt.hasNext()) {
JsonNode issnNode = issnIt.next();
JsonNode theTypeNode = issnNode.get("type");
JsonNode valueNode = issnNode.get("value");
if (theTypeNode != null && (!theTypeNode.isMissingNode()) &&
valueNode != null && (!valueNode.isMissingNode()) ) {
String theType = theTypeNode.asText();
if (theType.equals("print")) {
biblio.setISSN(valueNode.asText());
} else if (theType.equals("electronic")) {
biblio.setISSNe(valueNode.asText());
}
}
}
}
JsonNode publishPrintNode = item.get("issued");
if (publishPrintNode == null || publishPrintNode.isMissingNode()) {
publishPrintNode = item.get("published-online");
}
if (publishPrintNode == null || publishPrintNode.isMissingNode()) {
publishPrintNode = item.get("published-print");
}
if (publishPrintNode != null && (!publishPrintNode.isMissingNode())) {
JsonNode datePartNode = publishPrintNode.get("date-parts");
if (datePartNode != null && (!datePartNode.isMissingNode()) &&
datePartNode.isArray() && (((ArrayNode)datePartNode).size() > 0)) {
JsonNode firstDatePartNode = ((ArrayNode)datePartNode).get(0);
if (firstDatePartNode != null && (!firstDatePartNode.isMissingNode()) &&
firstDatePartNode.isArray() && (((ArrayNode)firstDatePartNode).size() > 0)) {
// format is [year, month, day], last two optional
String year = ((ArrayNode)firstDatePartNode).get(0).asText();
String month = null;
String day = null;
if (((ArrayNode)firstDatePartNode).size() > 1) {
month = ((ArrayNode)firstDatePartNode).get(1).asText();
if (((ArrayNode)firstDatePartNode).size() > 2) {
day = ((ArrayNode)firstDatePartNode).get(2).asText();
}
}
Date date = new Date();
date.setYearString(year);
int yearInt = -1;
try {
yearInt = Integer.parseInt(year);
} catch(Exception e) {
// log something
}
if (yearInt != -1)
date.setYear(yearInt);
if (month != null) {
date.setMonthString(month);
int monthInt = -1;
try {
monthInt = Integer.parseInt(month);
} catch(Exception e) {
// log something
}
if (monthInt != -1)
date.setMonth(monthInt);
}
if (day != null) {
date.setDayString(day);
int dayInt = -1;
try {
dayInt = Integer.parseInt(day);
} catch(Exception e) {
// log something
}
if (dayInt != -1)
date.setDay(dayInt);
}
biblio.setNormalizedPublicationDate(date);
}
}
}
//System.out.println(biblio.toTEI(0));
}
return biblio;
}
}
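For reference, the "issued" parsing above follows the CrossRef date-parts convention: a nested array [[year, month, day]] with month and day optional. A standalone sketch (the class DatePartsDemo and the sample values are made up for illustration):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class DatePartsDemo {
    public static void main(String[] args) throws Exception {
        // made-up sample of the CrossRef "issued" field
        String json = "{\"issued\": {\"date-parts\": [[2015, 3, 14]]}}";
        JsonNode item = new ObjectMapper().readTree(json);
        JsonNode firstDatePart = item.get("issued").get("date-parts").get(0);
        System.out.println("year: " + firstDatePart.get(0).asText());       // 2015
        if (firstDatePart.size() > 1)
            System.out.println("month: " + firstDatePart.get(1).asText()); // 3
        if (firstDatePart.size() > 2)
            System.out.println("day: " + firstDatePart.get(2).asText());   // 14
    }
}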
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/glutton/GluttonClient.java
package org.grobid.core.utilities.glutton;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.http.client.ClientProtocolException;
import org.grobid.core.utilities.crossref.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Client to the Glutton bibliographical service
*
*/
public class GluttonClient extends CrossrefClient {
public static final Logger LOGGER = LoggerFactory.getLogger(GluttonClient.class);
private static volatile GluttonClient instance;
//private volatile ExecutorService executorService;
//private static boolean limitAuto = true;
//private volatile TimedSemaphore timedSemaphore;
// this list is used to maintain a list of Futures that were submitted,
// that we can use to check if the requests are completed
//private volatile Map<Long, List<Future<?>>> futures = new HashMap<>();
public static GluttonClient getInstance() {
if (instance == null) {
getNewInstance();
}
return instance;
}
/**
* Creates a new instance.
*/
private static synchronized void getNewInstance() {
LOGGER.debug("Get new instance of GluttonClient");
instance = new GluttonClient();
}
/**
* Hidden constructor
*/
private GluttonClient() {
super();
/*this.executorService = Executors.newCachedThreadPool(r -> {
Thread t = Executors.defaultThreadFactory().newThread(r);
t.setDaemon(true);
return t;
});
this.timedSemaphore = null;
this.futures = new HashMap<>();*/
int nThreads = Runtime.getRuntime().availableProcessors();
//int nThreads = (int) Math.ceil((double)Runtime.getRuntime().availableProcessors() / 2);
LOGGER.debug("nThreads: " + nThreads);
this.executorService = Executors.newFixedThreadPool(nThreads*2);
//setLimits(20, 1000); // default calls per second
}
/*public void setLimits(int iterations, int interval) {
if ((this.timedSemaphore == null)
|| (this.timedSemaphore.getLimit() != iterations)
|| (this.timedSemaphore.getPeriod() != interval)) {
// custom executor to prevent stopping JVM from exiting
this.timedSemaphore = new TimedSemaphore(new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread t = Executors.defaultThreadFactory().newThread(r);
t.setDaemon(true);
return t;
}
}), interval, TimeUnit.MILLISECONDS, iterations);
}
}*/
/*public synchronized void checkLimits() throws InterruptedException {
if (this.limitAuto) {
synchronized(this.timedSemaphore) {
printLog(null, "timedSemaphore acquire... current total: " + this.timedSemaphore.getAcquireCount() +
", still available: " + this.timedSemaphore.getAvailablePermits() );
this.timedSemaphore.acquire();
}
}
}*/
public static void printLog(GluttonRequest<?> request, String message) {
LOGGER.debug((request != null ? request+": " : "")+message);
//System.out.println((request != null ? request+": " : "")+message);
}
/**
* Push a request into the pool to be executed as soon as possible; the response is then delivered through the listener.
*/
public <T extends Object> void pushRequest(GluttonRequest<T> request, CrossrefRequestListener<T> listener,
long threadId) throws URISyntaxException, ClientProtocolException, IOException {
if (listener != null)
request.addListener(listener);
synchronized(this) {
Future<?> f = executorService.submit(new GluttonRequestTask<T>(this, request));
List<Future<?>> localFutures = this.futures.get(Long.valueOf(threadId));
if (localFutures == null)
localFutures = new ArrayList<Future<?>>();
localFutures.add(f);
this.futures.put(Long.valueOf(threadId), localFutures);
//System.out.println("add request to thread " + threadId + " / current total for the thread: " + localFutures.size());
}
}
/**
* Push a request into the pool to be executed as soon as possible; the response is then delivered through the listener.
*
* @param params query parameters, can be null, ex: ?query.title=[title]&query.author=[author]
* @param deserializer json response deserializer, ex: WorkDeserializer to convert Work to BiblioItem
* @param threadId the java identifier of the thread providing the request (e.g. via Thread.currentThread().getId())
* @param listener catch response from request
*/
@Override
public <T extends Object> void pushRequest(String model, Map<String, String> params, CrossrefDeserializer<T> deserializer,
long threadId, CrossrefRequestListener<T> listener) throws URISyntaxException, ClientProtocolException, IOException {
GluttonRequest<T> request = new GluttonRequest<T>(model, params, deserializer);
synchronized(this) {
this.<T>pushRequest(request, listener, threadId);
}
}
/**
* Wait for all request from a specific thread to be completed
*/
/*public void finish(long threadId) {
synchronized(this.futures) {
try {
List<Future<?>> threadFutures = this.futures.get(new Long(threadId));
if (threadFutures != null) {
//System.out.println("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< thread: " + threadId + " / waiting for " + threadFutures.size() + " requests to finish...");
for(Future<?> future : threadFutures) {
future.get();
// get will block until the future is done
}
this.futures.remove(threadId);
}
} catch (InterruptedException ie) {
// Preserve interrupt status
Thread.currentThread().interrupt();
} catch (ExecutionException ee) {
logger.error("Glutton request execution fails");
}
}
}*/
/*@Override
public void close() throws IOException {
timedSemaphore.shutdown();
}*/
}
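A hedged usage sketch of the client above: the method lookupSketch, the model name "works", and the parameter values are assumptions for illustration; the keys follow the CrossRef-style names that GluttonRequest.mapFromCrossref() translates, and checked exceptions are simply propagated.

// Needs: java.util.HashMap, java.util.Map, java.util.List, plus the classes above.
static void lookupSketch() throws Exception {
    Map<String, String> params = new HashMap<>();
    params.put("query.title", "Protein folding"); // mapped to "atitle" by GluttonRequest
    params.put("query.author", "Smith");          // mapped to "firstAuthor"
    GluttonClient.getInstance().pushRequest("works", params, new WorkDeserializer(),
            Thread.currentThread().getId(),
            new CrossrefRequestListener<BiblioItem>() {
                @Override
                public void onSuccess(List<BiblioItem> results) {
                    // consume the matched bibliographical records
                }
                @Override
                public void onError(int status, String message, Exception exception) {
                    // handle the failed lookup
                }
            });
}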
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/glutton/GluttonRequestTask.java
package org.grobid.core.utilities.glutton;
import java.util.List;
import org.grobid.core.utilities.crossref.CrossrefRequestListener;
import org.grobid.core.utilities.crossref.CrossrefRequestListener.Response;
/**
* Task to execute its request at the right time.
*
*/
public class GluttonRequestTask<T extends Object> extends CrossrefRequestListener<T> implements Runnable {
protected GluttonClient client;
protected GluttonRequest<T> request;
public GluttonRequestTask(GluttonClient client, GluttonRequest<T> request) {
this.client = client;
this.request = request;
GluttonClient.printLog(request, "New request in the pool");
}
@Override
public void run() {
try {
//client.checkLimits();
GluttonClient.printLog(request, ".. executing");
request.addListener(this);
request.execute();
} catch (Exception e) {
Response<T> message = new Response<T>();
message.setException(e, request.toString());
request.notifyListeners(message);
}
}
@Override
public void onResponse(Response<T> response) {
/*if (!response.hasError())
client.updateLimits(response.limitIterations, response.interval);*/
}
@Override
public void onSuccess(List<T> results) {}
@Override
public void onError(int status, String message, Exception exception) {}
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/glutton/GluttonRequest.java
package org.grobid.core.utilities.glutton;
import org.grobid.core.utilities.GrobidProperties;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Observable;
import java.util.concurrent.TimeUnit;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.ResponseHandler;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.http.HttpHost;
import org.apache.http.conn.params.*;
import org.apache.http.impl.conn.*;
import org.grobid.core.utilities.crossref.CrossrefRequestListener;
import org.grobid.core.utilities.crossref.CrossrefRequestListener.Response;
import org.grobid.core.utilities.crossref.CrossrefDeserializer;
import org.grobid.core.utilities.crossref.CrossrefRequest;
import org.grobid.core.exceptions.GrobidResourceException;
import org.apache.commons.io.IOUtils;
import java.net.URL;
import java.io.*;
/**
* Glutton request
*
*/
public class GluttonRequest<T extends Object> extends Observable {
protected String BASE_PATH = "/service/lookup";
protected static final List<String> identifiers = Arrays.asList("doi", "DOI", "pmid", "PMID", "pmcid", "PMCID", "pmc", "PMC");
/**
* Query parameters, cannot be null, ex: ?atitle=[title]&firstAuthor=[first_author_lastname]
* Identifiers are also delivered as parameters, under the name of the identifier
*/
public Map<String, String> params;
/**
* JSON response deserializer, ex: WorkDeserializer to convert metadata to BiblioItem, it's similar
* to CrossRef, but possibly enriched with some additional metadata (e.g. PubMed)
*/
protected CrossrefDeserializer<T> deserializer;
protected ArrayList<CrossrefRequestListener<T>> listeners;
public GluttonRequest(String model, Map<String, String> params, CrossrefDeserializer<T> deserializer) {
this.params = params;
this.deserializer = deserializer;
this.listeners = new ArrayList<CrossrefRequestListener<T>>();
}
/**
* Add listener to catch response when request is executed.
*/
public void addListener(CrossrefRequestListener<T> listener) {
this.listeners.add(listener);
}
/**
* Notify all connected listeners
*/
protected void notifyListeners(CrossrefRequestListener.Response<T> message) {
for (CrossrefRequestListener<T> listener : listeners)
listener.notify(message);
}
/**
* Execute request, handle response by sending to listeners a CrossrefRequestListener.Response
*/
public void execute() {
if (params == null) {
// this should not happen
CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>();
message.setException(new Exception("Empty list of parameter, cannot build request to glutton service"), this.toString());
notifyListeners(message);
return;
}
CloseableHttpClient httpclient = null;
if (GrobidProperties.getProxyHost() != null) {
HttpHost proxy = new HttpHost(GrobidProperties.getProxyHost(), GrobidProperties.getProxyPort());
DefaultProxyRoutePlanner routePlanner = new DefaultProxyRoutePlanner(proxy);
httpclient = HttpClients.custom()
.setRoutePlanner(routePlanner)
.build();
} else {
httpclient = HttpClients.createDefault();
}
try {
String url = GrobidProperties.getInstance().getGluttonUrl();
if (url == null) {
throw new Exception("Invalid url for glutton service");
}
URIBuilder uriBuilder = new URIBuilder(url + BASE_PATH);
// check if we have a strong identifier directly supported by Glutton: DOI, PMID, PMCID
// more probably in the future
if (params.get("DOI") != null || params.get("doi") != null) {
String doi = params.get("DOI");
if (doi == null)
doi = params.get("doi");
uriBuilder.setParameter("doi", doi);
}
if (params.get("PMID") != null || params.get("pmid") != null) {
String pmid = params.get("PMID");
if (pmid == null)
pmid = params.get("pmid");
uriBuilder.setParameter("pmid", pmid);
}
if (params.get("PMCID") != null || params.get("pmcid") != null || params.get("pmc") != null || params.get("PMC") != null) {
String pmcid = params.get("PMCID");
if (pmcid == null)
pmcid = params.get("pmcid");
if (pmcid == null)
pmcid = params.get("PMC");
if (pmcid == null)
pmcid = params.get("pmc");
uriBuilder.setParameter("pmc", pmcid);
}
for (Entry<String, String> cursor : params.entrySet()) {
if (!identifiers.contains(cursor.getKey()))
uriBuilder.setParameter(mapFromCrossref(cursor.getKey()), cursor.getValue());
}
//System.out.println(uriBuilder.toString());
HttpGet httpget = new HttpGet(uriBuilder.build());
ResponseHandler<Void> responseHandler = response -> {
Response<T> message = new Response<T>();
message.status = response.getStatusLine().getStatusCode();
/*Header limitIntervalHeader = response.getFirstHeader("X-Rate-Limit-Interval");
Header limitLimitHeader = response.getFirstHeader("X-Rate-Limit-Limit");
if (limitIntervalHeader != null && limitLimitHeader != null)
message.setTimeLimit(limitIntervalHeader.getValue(), limitLimitHeader.getValue());
*/
if (message.status == 503) {
throw new GrobidResourceException();
} else if (message.status < 200 || message.status >= 300) {
message.errorMessage = response.getStatusLine().getReasonPhrase();
} else {
HttpEntity entity = response.getEntity();
if (entity != null) {
String body = EntityUtils.toString(entity);
message.results = deserializer.parse(body);
}
}
notifyListeners(message);
return null;
};
httpclient.execute(httpget, responseHandler);
} catch (GrobidResourceException gre) {
try {
httpclient.close();
} catch (IOException e) {
// to log
}
try {
TimeUnit.SECONDS.sleep(1);
} catch (InterruptedException ie) {
// to log
}
execute();
} catch (Exception e) {
CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>();
message.setException(e, this.toString());
notifyListeners(message);
} finally {
try {
httpclient.close();
} catch (IOException e) {
CrossrefRequestListener.Response<T> message = new CrossrefRequestListener.Response<T>();
message.setException(e, this.toString());
notifyListeners(message);
}
}
}
/**
* Mapping CrossRef API field arguments to the ones of glutton, to ensure compatibility
*/
private String mapFromCrossref(String field) {
if (field.equals("query.bibliographic"))
return "biblio";
if (field.equals("query.title")) {
return "atitle";
}
if (field.equals("query.author")) {
return "firstAuthor";
}
if (field.equals("query.container-title")) {
return "jtitle";
}
return field;
}
public String toString() {
String str = "";
str += " (";
if (params != null) {
for (Entry<String, String> cursor : params.entrySet())
str += ","+cursor.getKey()+"="+cursor.getValue();
}
str += ")";
return str;
}
}
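As an illustration of the URI construction in execute(), a standalone sketch (the class GluttonUrlDemo, the host, and the values are made up; the real base URL comes from GrobidProperties.getGluttonUrl()):

import org.apache.http.client.utils.URIBuilder;

public class GluttonUrlDemo {
    public static void main(String[] args) throws Exception {
        URIBuilder uriBuilder = new URIBuilder("http://localhost:8080/service/lookup");
        uriBuilder.setParameter("atitle", "Protein folding"); // mapped from query.title
        uriBuilder.setParameter("firstAuthor", "Smith");      // mapped from query.author
        // e.g. http://localhost:8080/service/lookup?atitle=Protein+folding&firstAuthor=Smith
        System.out.println(uriBuilder.build());
    }
}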
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/matching/ReferenceMarkerMatcher.java
package org.grobid.core.utilities.matching;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.util.Version;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.Pair;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.engines.counters.ReferenceMarkerMatcherCounters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.HashSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Matching reference markers to extracted citations
*/
public class ReferenceMarkerMatcher {
private static final Logger LOGGER = LoggerFactory.getLogger(ReferenceMarkerMatcher.class);
public static final Pattern YEAR_PATTERN = Pattern.compile("[12][0-9]{3}[a-d]?");
public static final Pattern YEAR_PATTERN_WITH_LOOK_AROUND = Pattern.compile("(?<!\\d)[12][0-9]{3}(?!\\d)[a-d]?");
//public static final Pattern AUTHOR_NAME_PATTERN = Pattern.compile("[A-Z][A-Za-z]+");
public static final Pattern AUTHOR_NAME_PATTERN = Pattern.compile("[A-Z][\\p{L}]+");
//public static final Pattern NUMBERED_CITATION_PATTERN = Pattern.compile(" *[\\(\\[]? *(?:\\d+[-–]\\d+,|\\d+, *)*[ ]*(?:\\d+[-–]\\d+|\\d+)[\\)\\]]? *");
public static final Pattern NUMBERED_CITATION_PATTERN = Pattern.compile("[\\(\\[]?\\s*(?:\\d+[-−–]\\d+,|\\d+,[ ]*)*[ ]*(?:\\d+[-–]\\d+|\\d+)\\s*[\\)\\]]?");
public static final Pattern AUTHOR_SEPARATOR_PATTERN = Pattern.compile(";");
public static final ClassicAnalyzer ANALYZER = new ClassicAnalyzer(Version.LUCENE_45);
public static final int MAX_RANGE = 20;
public static final Pattern NUMBERED_CITATIONS_SPLIT_PATTERN = Pattern.compile("[,;]");
public static final Pattern AND_WORD_PATTERN = Pattern.compile("(and)|&");
public static final Pattern DASH_PATTERN = Pattern.compile("[–−-]");
public class MatchResult {
private String text;
private List<LayoutToken> tokens;
private BibDataSet bibDataSet;
public MatchResult(String text, List<LayoutToken> tokens, BibDataSet bibDataSet) {
this.text = text;
this.tokens = tokens;
this.bibDataSet = bibDataSet;
}
public String getText() {
return text;
}
public List<LayoutToken> getTokens() {
return tokens;
}
public BibDataSet getBibDataSet() {
return bibDataSet;
}
}
public static final Function<String, Object> IDENTITY = new Function<String, Object>() {
@Override
public Object apply(String s) {
return s;
}
};
private final LuceneIndexMatcher<BibDataSet, String> authorMatcher;
private final LuceneIndexMatcher<BibDataSet, String> labelMatcher;
private CntManager cntManager;
private Set<String> allLabels = null;
private Set<String> allFirstAuthors = null;
public ReferenceMarkerMatcher(List<BibDataSet> bds, CntManager cntManager)
throws EntityMatcherException {
allLabels = new HashSet<String>();
allFirstAuthors = new HashSet<String>();
if ( (bds != null) && (bds.size() > 0) ) {
for(BibDataSet bibDataSet : bds) {
allLabels.add(bibDataSet.getRefSymbol());
//System.out.println(bibDataSet.getRefSymbol());
String authorString = bibDataSet.getResBib().getFirstAuthorSurname();
if ((authorString != null) && (authorString.length() > 0))
allFirstAuthors.add(authorString);
}
}
this.cntManager = cntManager;
authorMatcher = new LuceneIndexMatcher<>(
new Function<BibDataSet, Object>() {
@Override
public Object apply(BibDataSet bibDataSet) {
String authorString = bibDataSet.getResBib().getAuthors() + " et al";
if (bibDataSet.getResBib().getPublicationDate() != null) {
authorString += " " + bibDataSet.getResBib().getPublicationDate();
}
// System.out.println("Indexing: " + authorString);
return authorString;
}
},
IDENTITY
);
authorMatcher.setMustMatchPercentage(1.0);
if (bds != null)
authorMatcher.load(bds);
labelMatcher = new LuceneIndexMatcher<>(
new Function<BibDataSet, Object>() {
@Override
public Object apply(BibDataSet bibDataSet) {
return bibDataSet.getRefSymbol();
}
},
IDENTITY
);
labelMatcher.setMustMatchPercentage(1.0);
if (bds != null)
labelMatcher.load(bds);
}
public List<MatchResult> match(List<LayoutToken> refTokens) throws EntityMatcherException {
cntManager.i(ReferenceMarkerMatcherCounters.INPUT_REF_STRINGS_CNT);
String text = LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(LayoutTokensUtil.enrichWithNewLineInfo(refTokens)));
if (isAuthorCitationStyle(text)) {
cntManager.i(ReferenceMarkerMatcherCounters.STYLE_AUTHORS);
//System.out.println("STYLE_AUTHORS: " + text);
return matchAuthorCitation(text, refTokens);
} else if (isNumberedCitationReference(text)) {
cntManager.i(ReferenceMarkerMatcherCounters.STYLE_NUMBERED);
//System.out.println("STYLE_NUMBERED: " + text);
return matchNumberedCitation(text, refTokens);
} else {
cntManager.i(ReferenceMarkerMatcherCounters.STYLE_OTHER);
//System.out.println("STYLE_OTHER: " + text);
// LOGGER.info("Other style: " + text);
return Collections.singletonList(new MatchResult(text, refTokens, null));
}
}
/*public boolean isAuthorCitationStyle(String text) {
return ( YEAR_PATTERN.matcher(text.trim()).find() ||
NUMBERED_CITATION_PATTERN.matcher(text.trim()).find() )
&& AUTHOR_NAME_PATTERN.matcher(text.trim()).find();
}*/
public boolean isAuthorCitationStyle(String text) {
return YEAR_PATTERN.matcher(text.trim()).find() && AUTHOR_NAME_PATTERN.matcher(text.trim()).find();
}
// relaxed number matching
/*public static boolean isNumberedCitationReference(String t) {
return NUMBERED_CITATION_PATTERN.matcher(t.trim()).find();
}*/
// number matching for number alone or in combination with author for cases "Naze et al. [5]"
public boolean isNumberedCitationReference(String t) {
return NUMBERED_CITATION_PATTERN.matcher(t.trim()).matches() ||
( NUMBERED_CITATION_PATTERN.matcher(t.trim()).find() && AUTHOR_NAME_PATTERN.matcher(t.trim()).find() );
}
// string number matching
/*public static boolean isNumberedCitationReference(String t) {
return NUMBERED_CITATION_PATTERN.matcher(t.trim()).matches();
}*/
private List<MatchResult> matchNumberedCitation(String input, List<LayoutToken> refTokens) throws EntityMatcherException {
List<Pair<String, List<LayoutToken>>> labels = getNumberedLabels(refTokens, true);
List<MatchResult> results = new ArrayList<>();
for (Pair<String, List<LayoutToken>> label : labels) {
String text = label.a;
List<LayoutToken> labelToks = label.b;
List<BibDataSet> matches = labelMatcher.match(text);
if (matches.size() == 1) {
cntManager.i(ReferenceMarkerMatcherCounters.MATCHED_REF_MARKERS);
// System.out.println("MATCHED: " + text + "\n" + matches.get(0).getRefSymbol() + "\n" + matches.get(0).getRawBib());
// System.out.println("-----------");
results.add(new MatchResult(text, labelToks, matches.get(0)));
} else {
cntManager.i(ReferenceMarkerMatcherCounters.UNMATCHED_REF_MARKERS);
if (matches.size() != 0) {
cntManager.i(ReferenceMarkerMatcherCounters.MANY_CANDIDATES);
// LOGGER.info("MANY CANDIDATES: " + input + "\n" + text + "\n");
for (BibDataSet bds : matches) {
// LOGGER.info(" " + bds.getRawBib());
}
// LOGGER.info("----------");
} else {
cntManager.i(ReferenceMarkerMatcherCounters.NO_CANDIDATES);
// LOGGER.info("NO CANDIDATES: " + text + "\n" + text);
// LOGGER.info("++++++++++++");
}
results.add(new MatchResult(text, labelToks, null));
}
}
return results;
}
public static List<Pair<String, List<LayoutToken>>> getNumberedLabels(List<LayoutToken> layoutTokens, boolean addWrappingSymbol) {
List<List<LayoutToken>> split = LayoutTokensUtil.split(layoutTokens, NUMBERED_CITATIONS_SPLIT_PATTERN, true);
List<Pair<String, List<LayoutToken>>> res = new ArrayList<>();
// detect whether labels should be re-wrapped as [ ] or ( ), depending on the original callout, e.g. (1 - 2) vs [3-5]
Pair<Character, Character> wrappingSymbols = getWrappingSymbols(split.get(0));
for (List<LayoutToken> s : split) {
int minusPos = LayoutTokensUtil.tokenPos(s, DASH_PATTERN);
if (minusPos < 0) {
res.add(new Pair<>(LayoutTokensUtil.toText(s), s));
} else {
try {
LayoutToken minusTok = s.get(minusPos);
List<LayoutToken> leftNumberToks = s.subList(0, minusPos);
List<LayoutToken> rightNumberToks = s.subList(minusPos + 1, s.size());
Integer a;
Integer b;
a = Integer.valueOf(LuceneUtil.tokenizeString(ANALYZER, LayoutTokensUtil.toText(leftNumberToks)).get(0), 10);
b = Integer.valueOf(LuceneUtil.tokenizeString(ANALYZER, LayoutTokensUtil.toText(rightNumberToks)).get(0), 10);
if (a < b && b - a < MAX_RANGE) {
for (int i = a; i <= b; i++) {
List<LayoutToken> tokPtr;
if (i == a) {
tokPtr = leftNumberToks;
} else if (i == b) {
tokPtr = rightNumberToks;
} else {
tokPtr = Collections.singletonList(minusTok);
}
if (addWrappingSymbol)
res.add(new Pair<>(wrappingSymbols.a + String.valueOf(i) + wrappingSymbols.b, tokPtr));
else
res.add(new Pair<>(String.valueOf(i), tokPtr));
}
}
} catch (Exception e) {
LOGGER.debug("Cannot parse citation reference range: " + s);
}
}
}
return res;
}
private static Pair<Character, Character> getWrappingSymbols(List<LayoutToken> layoutTokens) {
for (LayoutToken t : layoutTokens) {
if (LayoutTokensUtil.spaceyToken(t.t()) || LayoutTokensUtil.newLineToken(t.t())) {
continue;
}
if (t.t().equals("(")) {
return new Pair<>('(', ')');
} else {
return new Pair<>('[', ']');
}
}
return new Pair<>('[', ']');
}
private List<MatchResult> matchAuthorCitation(String text, List<LayoutToken> refTokens) throws EntityMatcherException {
List<Pair<String, List<LayoutToken>>> split = splitAuthors(refTokens);
List<MatchResult> results = new ArrayList<>();
for (Pair<String, List<LayoutToken>> si : split) {
String c = si.a;
List<LayoutToken> splitItem = si.b;
List<BibDataSet> matches = authorMatcher.match(c);
if (matches.size() == 1) {
cntManager.i(ReferenceMarkerMatcherCounters.MATCHED_REF_MARKERS);
//System.out.println("MATCHED: " + text + "\n" + c + "\n" + matches.get(0).getRawBib());
results.add(new MatchResult(c, splitItem, matches.get(0)));
} else {
if (matches.size() != 0) {
cntManager.i(ReferenceMarkerMatcherCounters.MANY_CANDIDATES);
List<BibDataSet> filtered = postFilterMatches(c, matches);
if (filtered.size() == 1) {
results.add(new MatchResult(c, splitItem, filtered.get(0)));
cntManager.i(ReferenceMarkerMatcherCounters.MATCHED_REF_MARKERS);
cntManager.i(ReferenceMarkerMatcherCounters.MATCHED_REF_MARKERS_AFTER_POST_FILTERING);
} else {
cntManager.i(ReferenceMarkerMatcherCounters.UNMATCHED_REF_MARKERS);
results.add(new MatchResult(c, splitItem, null));
if (filtered.size() == 0) {
cntManager.i(ReferenceMarkerMatcherCounters.NO_CANDIDATES_AFTER_POST_FILTERING);
} else {
cntManager.i(ReferenceMarkerMatcherCounters.MANY_CANDIDATES_AFTER_POST_FILTERING);
//LOGGER.info("SEVERAL MATCHED REF CANDIDATES: " + text + "\n-----\n" + c + "\n");
/*for (BibDataSet bds : matches) {
LOGGER.info("+++++");
LOGGER.info(" " + bds.getRawBib());
}*/
}
}
} else {
results.add(new MatchResult(c, splitItem, null));
cntManager.i(ReferenceMarkerMatcherCounters.NO_CANDIDATES);
//LOGGER.info("NO MATCHED REF CANDIDATES: " + text + "\n" + c);
//LOGGER.info("++++++++++++");
}
}
}
return results;
}
// splitting into individual citation references strings like in:
// Kuwajima et al., 1985; Creighton, 1990; Ptitsyn et al., 1990;
private static List<Pair<String, List<LayoutToken>>> splitAuthors(List<LayoutToken> toks) {
List<List<LayoutToken>> split = LayoutTokensUtil.split(toks, AUTHOR_SEPARATOR_PATTERN, true);
List<Pair<String, List<LayoutToken>>> result = new ArrayList<>();
for (List<LayoutToken> splitTokens : split) {
//cases like: Khechinashvili et al. (1973) and Privalov (1979)
String text = LayoutTokensUtil.toText(splitTokens);
int matchCount = matchCount(text, YEAR_PATTERN_WITH_LOOK_AROUND);
if (matchCount == 2 && text.contains(" and ")) {
for (List<LayoutToken> ys : LayoutTokensUtil.split(splitTokens, AND_WORD_PATTERN, true)) {
result.add(new Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(ys)), ys));
}
} else if (matchCount > 1) {
List<List<LayoutToken>> yearSplit = LayoutTokensUtil.split(splitTokens, YEAR_PATTERN, true, false);
List<List<LayoutToken>> yearSplitWithLeftOver = LayoutTokensUtil.split(splitTokens, YEAR_PATTERN, true, true);
// do we have a leftover to be added?
List<LayoutToken> leftover = null;
if (yearSplit.size() < yearSplitWithLeftOver.size()) {
leftover = yearSplitWithLeftOver.get(yearSplitWithLeftOver.size()-1);
}
if (yearSplit.isEmpty()) {
result.add(new Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(splitTokens)), splitTokens));
} else {
if (matchCount(splitTokens, AUTHOR_NAME_PATTERN) == 1) {
// cases like Grafton et al. 1995, 1998;
// the idea is that we produce as many labels as there are years.
//E.g. "Grafton et al. 1995, 1998;" will become two pairs:
// 1) ("Grafton et al. 1995", tokens_of("Grafton et al. 1995"))
// 2) ("Grafton et al. 1998", tokens_of("1998"))
// this method allows marking the two citations in a non-overlapping manner
List<LayoutToken> firstYearSplitItem;
firstYearSplitItem = yearSplit.get(0);
result.add(new Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(firstYearSplitItem)), firstYearSplitItem));
List<LayoutToken> excludedYearToks = firstYearSplitItem.subList(0, firstYearSplitItem.size() - 1);
String authorName = LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(excludedYearToks));
for (int i = 1; i < yearSplit.size(); i++) {
List<LayoutToken> toksI = yearSplit.get(i);
if (i == yearSplit.size()-1 && leftover != null) {
List<LayoutToken> lastSegmentTokens = toksI.subList(toksI.size() - 1, toksI.size());
lastSegmentTokens.addAll(leftover);
result.add(new Pair<>(authorName + " " + LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(toksI)) + LayoutTokensUtil.toText(leftover),
lastSegmentTokens));
} else {
result.add(new Pair<>(authorName + " " + LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(toksI)),
toksI.subList(toksI.size() - 1, toksI.size())));
}
}
} else {
// case when two authors still appear
for(int k=0; k<yearSplit.size(); k++) {
List<LayoutToken> item = yearSplit.get(k);
if (k == yearSplit.size()-1 && leftover != null) {
List<LayoutToken> lastSegmentTokens = item;
lastSegmentTokens.addAll(leftover);
result.add(new Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(lastSegmentTokens)), lastSegmentTokens));
} else
result.add(new Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(item)), item));
}
}
}
} else {
result.add(new Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(splitTokens)), splitTokens));
}
}
return result;
}
private static int matchCount(String s, Pattern p) {
Matcher m = p.matcher(s);
int cnt = 0;
while (m.find()) {
cnt++;
}
return cnt;
}
private static int matchCount(List<LayoutToken> toks, Pattern p) {
return matchCount(LayoutTokensUtil.toText(toks), p);
}
//if we match more than 1 citation based on name, then we leave only those citations that have author name first
private List<BibDataSet> postFilterMatches(String c, List<BibDataSet> matches) {
if (c.toLowerCase().contains("et al") || c.toLowerCase().contains(" and ")) {
String[] sp = c.trim().split(" ");
//callouts often include parentheses as seen in https://grobid.readthedocs.io/en/latest/training/fulltext/
final String author = sp[0].replaceAll("[\\(\\[]", "").toLowerCase();
ArrayList<BibDataSet> bibDataSets = Lists.newArrayList(Iterables.filter(matches, new Predicate<BibDataSet>() {
@Override
public boolean apply(BibDataSet bibDataSet) {
// first author last name formatted raw bib
return bibDataSet.getRawBib().trim().toLowerCase().startsWith(author);
}
}));
if (bibDataSets.size() == 1) {
return bibDataSets;
}
bibDataSets = Lists.newArrayList(Iterables.filter(matches, new Predicate<BibDataSet>() {
@Override
public boolean apply(BibDataSet bibDataSet) {
BiblioItem resBib = bibDataSet.getResBib();
if (resBib == null)
return false;
String firstAuthorLastName = resBib.getFirstAuthorSurname();
if (firstAuthorLastName == null)
return false;
firstAuthorLastName = firstAuthorLastName.toLowerCase();
// first author forename last name formatted raw bib
return firstAuthorLastName.equals(author);
}
}));
if (bibDataSets.size() <= 1) {
return bibDataSets;
}
//cases like c = "Smith et al, 2015" and Bds = <"Smith, Hoffmann, 2015", "Smith, 2015"> -- should prefer first one
return Lists.newArrayList(Iterables.filter(bibDataSets, new Predicate<BibDataSet>() {
@Override
public boolean apply(BibDataSet bibDataSet) {
return (bibDataSet.getResBib().getFullAuthors() != null && bibDataSet.getResBib().getFullAuthors().size() > 1);
}
}));
} else {
//cases like c = "Smith, 2015" and Bds = <"Smith, Hoffmann, 2015", "Smith, 2015"> -- should prefer second one
return Lists.newArrayList(Iterables.filter(matches, new Predicate<BibDataSet>() {
@Override
public boolean apply(BibDataSet bibDataSet) {
return bibDataSet.getResBib().getFullAuthors() != null && bibDataSet.getResBib().getFullAuthors().size() == 1;
}
}));
}
}
/**
* Return true if the text is a known label from the bibliographical reference list
*/
public boolean isKnownLabel(String text) {
if ((allLabels != null) && (allLabels.contains(text.trim())))
return true;
else
return false;
}
/**
* Return true if the text is a known first author from the bibliographical reference list
*/
public boolean isKnownFirstAuthor(String text) {
if ( (allFirstAuthors != null) && (allFirstAuthors.contains(text.trim())) )
return true;
else
return false;
}
}
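A standalone sketch (the class CitationStyleDemo and the sample strings are made up) exercising the two public citation-style patterns above:

public class CitationStyleDemo {
    public static void main(String[] args) {
        String authorStyle = "Kuwajima et al., 1985; Creighton, 1990";
        String numberedStyle = "[3-5, 7]";
        // author-year style: a four-digit year plus a capitalized name
        System.out.println(ReferenceMarkerMatcher.YEAR_PATTERN.matcher(authorStyle).find());        // true
        System.out.println(ReferenceMarkerMatcher.AUTHOR_NAME_PATTERN.matcher(authorStyle).find()); // true
        // numbered style: bracketed list of numbers and ranges
        System.out.println(ReferenceMarkerMatcher.NUMBERED_CITATION_PATTERN.matcher(numberedStyle).matches()); // true
    }
}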
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/matching/LuceneIndexMatcher.java
package org.grobid.core.utilities.matching;
import com.google.common.base.Function;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class LuceneIndexMatcher<T, V> implements Closeable {
private Analyzer analyzer = new ClassicAnalyzer(Version.LUCENE_45);
private static final String ID_LUCENE_FIELD_NAME = "idField";
public static final String INDEXED_LUCENE_FIELD_NAME = "indexedField";
private final Function<T, Object> indexedFieldSelector;
private Function<V, Object> searchedFieldSelector;
private IndexSearcher searcher = null;
private Map<Integer, T> cache = new HashMap<Integer, T>();
private boolean debug = false;
// -- settings
private double mustMatchPercentage = 0.9;
private int maxResults = 10;
// -- settings
public LuceneIndexMatcher(Function<T, Object> indexedFieldSelector, Function<V, Object> searchedFieldSelector) {
this.indexedFieldSelector = indexedFieldSelector;
this.searchedFieldSelector = searchedFieldSelector;
}
public void load(Iterable<T> entities) throws EntityMatcherException {
close();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = null;
cache.clear();
int idCounter = 0;
try {
writer = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_45, analyzer));
for (T entity : entities) {
Document doc = new Document();
Object indexedFieldObj = getIndexedObject(entity);
if (indexedFieldObj == null) {
continue;
}
cache.put(idCounter, entity);
doc.add(new Field(ID_LUCENE_FIELD_NAME, String.valueOf(idCounter), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field(INDEXED_LUCENE_FIELD_NAME, indexedFieldObj.toString(), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
if (debug) {
System.out.println("Doc added: " + doc);
}
idCounter++;
}
writer.commit();
writer.close();
} catch (IOException e) {
directory.close();
throw new EntityMatcherException("Cannot build a lucene index: " + e.getMessage(), e);
} finally {
try {
if (writer != null) {
writer.close();
}
} catch (IOException ignored) {
//no op
}
}
try {
searcher = new IndexSearcher(DirectoryReader.open(directory));
} catch (IOException e) {
throw new EntityMatcherException("Cannot open a lucene index searcher: " + e.getMessage(), e);
}
}
public List<T> match(V entity) throws EntityMatcherException {
try {
Query query = createLuceneQuery(getSearchedObject(entity));
if (query == null) {
return Collections.emptyList();
}
TopDocs topDocs = searcher.search(query, maxResults);
List<T> result = new ArrayList<T>();
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
Document doc = searcher.doc(scoreDoc.doc);
Integer id = Integer.valueOf(doc.get(ID_LUCENE_FIELD_NAME));
result.add(cache.get(id));
}
return result;
} catch (IOException e) {
try {
searcher.getIndexReader().close();
} catch (IOException ignored) {
}
// try {
//
// searcher.close();
// } catch (IOException ignored) {
// }
throw new EntityMatcherException("Error searching lucene Index: " + e.getMessage(), e);
}
}
private Object getSearchedObject(V entity) throws EntityMatcherException {
return searchedFieldSelector.apply(entity);
}
private Object getIndexedObject(T entity) throws EntityMatcherException {
return indexedFieldSelector.apply(entity);
}
private Query createLuceneQuery(Object indexedObj) {
if (indexedObj == null) {
return null;
}
BooleanQuery query = new BooleanQuery();
// final Term term = new Term(INDEXED_LUCENE_FIELD_NAME);
List<String> luceneTokens = LuceneUtil.tokenizeString(analyzer, indexedObj.toString());
for (String luceneToken : luceneTokens) {
TermQuery termQuery = new TermQuery(new Term(INDEXED_LUCENE_FIELD_NAME, luceneToken));
query.add(termQuery, BooleanClause.Occur.SHOULD);
}
query.setMinimumNumberShouldMatch((int) (luceneTokens.size() * mustMatchPercentage));
if (debug) {
System.out.println(query);
}
return query;
}
public LuceneIndexMatcher<T, V> setMustMatchPercentage(double mustMatchPercentage) {
this.mustMatchPercentage = mustMatchPercentage;
return this;
}
public LuceneIndexMatcher<T, V> setMaxResults(int maxResults) {
this.maxResults = maxResults;
return this;
}
public LuceneIndexMatcher<T, V> setAnalyzer(Analyzer analyzer) {
this.analyzer = analyzer;
return this;
}
public void setDebug(boolean debug) {
this.debug = debug;
}
@Override
public void close() {
if (searcher != null) {
try {
searcher.getIndexReader().close();
} catch (IOException e) {
//no op
}
}
}
}
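A minimal usage sketch (the method matcherSketch and the sample data are made up) of this matcher: index two strings by identity, then fuzzily match a query; this is roughly how ReferenceMarkerMatcher wires it up for authors and labels.

// Needs java.util.Arrays and java.util.List.
static List<String> matcherSketch() throws EntityMatcherException {
    LuceneIndexMatcher<String, String> matcher =
            new LuceneIndexMatcher<String, String>(s -> s, s -> s); // identity selectors
    matcher.setMustMatchPercentage(0.8);
    matcher.load(Arrays.asList("Smith et al 2015", "Jones 2014"));
    List<String> hits = matcher.match("Smith 2015"); // expected: ["Smith et al 2015"]
    matcher.close();
    return hits;
}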
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/matching/EntityMatcherException.java
package org.grobid.core.utilities.matching;
public class EntityMatcherException extends Exception {
private static final long serialVersionUID = 6080563488720903757L;
public EntityMatcherException() {
super();
}
public EntityMatcherException(String message) {
super(message);
}
public EntityMatcherException(String message, Throwable cause) {
super(message, cause);
}
public EntityMatcherException(Throwable cause) {
super(cause);
}
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/matching/LuceneUtil.java
package org.grobid.core.utilities.matching;
import com.google.common.base.Joiner;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.Version;
import org.grobid.core.utilities.Pair;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
public class LuceneUtil {
private LuceneUtil() {
}
/**
* @return a StandardAnalyzer (note: this constructor uses Lucene's default English stop-word set)
*/
public static StandardAnalyzer createStandardAnalyzer() {
return new StandardAnalyzer(Version.LUCENE_45);
}
public static String normalizeString(final Analyzer analyzer, final String in) {
final List<String> tokens = tokenizeString(analyzer, in);
return Joiner.on(' ').join(tokens);
}
public static String normalizeTokens(final Analyzer analyzer, final List<String> tokens) {
return Joiner.on(' ').join(tokens);
}
/**
* Convert a Reader to a List of Tokens.
*
* @param analyzer the Analyzer to use
* @param reader the reader to feed to the Analyzer
* @return a List of tokens
* @throws java.io.IOException lucene exceptions
*/
private static List<String> readerToTokens(final Analyzer analyzer,
final Reader reader) throws IOException {
final List<String> coll = new ArrayList<String>();
final TokenStream ts = analyzer.tokenStream("", reader);
ts.reset();
final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
while (ts.incrementToken()) {
final String val = String.valueOf(termAtt.buffer(), 0, termAtt.length());
coll.add(val);
}
ts.end();
ts.close();
return coll;
}
public static List<String> tokenizeString(final Analyzer analyzer, final String in) {
final Reader r = new StringReader(in);
try {
return readerToTokens(analyzer, r);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static List<Pair<String, String>> tokenizeWithTokenTypes(final Analyzer analyzer, final String in) {
final Reader r = new StringReader(in);
final List<Pair<String, String>> coll = new ArrayList<Pair<String, String>>();
try {
final TokenStream ts = analyzer.tokenStream("", r);
ts.reset();
final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
final TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
while (ts.incrementToken()) {
final String val = String.valueOf(termAtt.buffer(), 0, termAtt.length());
final String type = typeAtt.type();
coll.add(new Pair<String, String>(val, type));
}
ts.end();
ts.close();
} catch (IOException e) {
throw new RuntimeException("Error during tokenization", e);
}
return coll;
}
}
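For illustration (the input string is made up), tokenizeString() with the ClassicAnalyzer used elsewhere in this package behaves roughly like:

// Sketch: ClassicAnalyzer lower-cases and strips punctuation.
Analyzer analyzer = new ClassicAnalyzer(Version.LUCENE_45);
List<String> tokens = LuceneUtil.tokenizeString(analyzer, "Smith et al. (2015)");
System.out.println(tokens); // e.g. [smith, et, al, 2015]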
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/CntManager.java
package org.grobid.core.utilities.counters;
import org.grobid.core.engines.counters.Countable;
import java.io.Serializable;
import java.util.Map;
public interface CntManager extends Serializable {
void i(Countable e);
void i(Countable e, long val);
void i(String group, String name);
void i(String group, String name, long val);
long cnt(Countable e);
long cnt(String group, String name);
Counter getCounter(Countable e);
Counter getCounter(String group, String name);
Map<String, Long> getCounters(Class<? extends Countable> enumClass);
Map<String, Long> getCounters(String group);
Map<String, Map<String, Long>> getAllCounters();
Map<String, Long> flattenAllCounters(String separator);
void addMetric(String name, CntsMetric cntsMetric);
void removeMetric(String name);
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/CntManagerRepresentation.java
package org.grobid.core.utilities.counters;
import java.io.IOException;
public interface CntManagerRepresentation {
String getRepresentation(CntManager cntManager) throws IOException;
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/Counter.java
package org.grobid.core.utilities.counters;
import java.io.Serializable;
public interface Counter extends Serializable {
void i();
void i(long val);
long cnt();
void set(long val);
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/CntManagerSaver.java
package org.grobid.core.utilities.counters;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
public interface CntManagerSaver {
CntManager deserialize(InputStream is) throws IOException;
void serialize(CntManager cntManager, OutputStream os) throws IOException;
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/CntsMetric.java
package org.grobid.core.utilities.counters;
public interface CntsMetric {
String getMetricString(CntManager cntManager);
}
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/GrobidTimer.java
package org.grobid.core.utilities.counters;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
/**
* GrobidTimer is a timer that can memorize multiple stop times.<br>
*
* Example of use: <code>
* GrobidTimer timer= new GrobidTimer();
* timer.start();
* timer.stop("Time1");
* timer.stop("Time2");
* System.out.println("The elapsed time between start and Time1 is "+timer.getElapsedTimeFromStart("Time1"));
* System.out.println("The elapsed time between Time1 and Time2 is "+timer.getElapsedTime("Time1","Time2"));
* </code>
*
*
*/
public class GrobidTimer {
/**
* The tag under which the start time is saved.
*/
public static final String START = "START";
/**
* The tag under which the stop time is saved.
*/
public static final String STOP = "STOP";
/**
* Date format.
*/
private static final String MIN_SEC_MILI = "s 'seconds' S 'milliseconds'";
/**
* Map containing all the tagged times.
*/
private Map<String, Long> times;
/**
* Contains the time when the timer count has been paused.
*/
private Long timePauseStarted;
/**
* Constructor.
*/
public GrobidTimer() {
this(false);
}
/**
* Constructor.
*
* @param pStartNow
* if true the timer will be started from now, else the start
* method will have to be called.
*/
public GrobidTimer(final boolean pStartNow) {
times = new HashMap<String, Long>();
if (pStartNow) {
start();
}
}
/**
* Start the timer.
*/
public void start() {
times.put(START, System.currentTimeMillis());
}
/**
* Store the current time with the name pTag.
*
* @param pTag
* the name under which the current time will be saved.
*/
public void stop(final String pTag) {
times.put(pTag, System.currentTimeMillis());
}
/**
* Compute the time elapsed between the start of the timer and the stop
* time.
*
* @param pTag
* the tag of the stop time.
* @return the time elapsed from start to stop.
*/
public Long getElapsedTimeFromStart(final String pTag) {
return times.get(pTag) - times.get(START);
}
/**
* Compute the time elapsed between pTagStart and pTagStop.
*
* @param pTagStart
* the tag of the start time.
* @param pTagStop
* the tag of the stop time.
* @return the time elapsed from start to stop.
*/
public Long getElapsedTime(final String pTagStart, final String pTagStop) {
return times.get(pTagStop) - times.get(pTagStart);
}
/**
* Get the time saved in the timer under the given tag. <br>
* The start tag is {@link #START}.
*
* @return the time corresponding to the tag.
*/
public Long getTime(final String pTag) {
return times.get(pTag);
}
/**
* Compute the time elapsed between the start of the timer and the stop
* time.<br>
* Return the time formatted: {@link #MIN_SEC_MILI}.
*
* @param pTag
* the tag of the stop time.
* @return the time elapsed from start to stop.
*/
public String getElapsedTimeFromStartFormated(final String pTag) {
return formatTime(getElapsedTimeFromStart(pTag));
}
/**
* Compute the time elapsed between pTagStart and pTagStop. <br>
* Return the time formatted: {@link #MIN_SEC_MILI}.
*
* @param pTagStart
* the tag of the start time.
* @param pTagStop
* the tag of the stop time.
* @return the time elapsed from start to stop.
*/
public String getElapsedTimeFormated(final String pTagStart, final String pTagStop) {
return formatTime(getElapsedTime(pTagStart, pTagStop));
}
/**
* Get the time saved in the timer under the given tag. <br>
* The start tag is {@link #START}. <br>
* Return the time formatted: {@link #MIN_SEC_MILI}.
*
* @return the time corresponding to the tag.
*/
public String getTimeFormated(final String pTag) {
return formatTime(getTime(pTag));
}
/**
 * Pause the time count. To resume the timer: {@link #restartTimer()}.
*/
public void pauseTimer() {
timePauseStarted = System.currentTimeMillis();
}
/**
* Restart the timer when it has been paused by {@link #pauseTimer()}.
*/
public void restartTimer() {
times.put(START, times.get(START) + (System.currentTimeMillis() - timePauseStarted));
}
/**
* Return the complete Map of all stored times.
*
* @return Map<String,Long>
*/
public Map<String, Long> getAllSavedTimes() {
return times;
}
/**
* Format a time from Long to String using the following format:
* {@link #MIN_SEC_MILI}.
*
* @param pTime
* the time to format.
* @return formatted time.
*/
public static String formatTime(final Long pTime) {
return org.grobid.core.utilities.Utilities.dateToString(new Date(pTime), MIN_SEC_MILI);
}
}
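/*
 * Illustrative usage sketch, not part of the original file: pauseTimer() and
 * restartTimer() exclude a span of time from the measurement by shifting the
 * START time forward. The tag name "Parsing" is an arbitrary example.
 */
class GrobidTimerPauseExample {
    public static void main(String[] args) throws InterruptedException {
        GrobidTimer timer = new GrobidTimer(true);
        timer.pauseTimer();
        Thread.sleep(50); // this span is not counted
        timer.restartTimer();
        timer.stop("Parsing");
        System.out.println(timer.getElapsedTimeFromStartFormated("Parsing"));
    }
}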
| 4,685 | 23.154639 | 109 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/CntManagerFactory.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.utilities.counters.CntManager;
public class CntManagerFactory {
public static CntManager getCntManager() {
return new CntManagerImpl();
}
public static CntManager getNoOpCntManager() {
return new NoOpCntManagerImpl();
}
}
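/*
 * Illustrative usage sketch, not part of the original file: obtain a real
 * counter manager from the factory and increment a counter under an arbitrary
 * group/name pair ("pdf"/"pages" is a made-up example).
 */
class CntManagerFactoryExample {
    public static void main(String[] args) {
        CntManager cnt = CntManagerFactory.getCntManager();
        cnt.i("pdf", "pages");
        cnt.i("pdf", "pages", 2);
        System.out.println(cnt.cnt("pdf", "pages")); // prints 3
    }
}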
| 329 | 22.571429 | 53 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/CntManagerImpl.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.engines.counters.Countable;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.counters.CntsMetric;
import org.grobid.core.utilities.counters.Counter;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
class CntManagerImpl implements CntManager {
private static final long serialVersionUID = 2305126306757162275L;
private ConcurrentMap<String, ConcurrentMap<String, Counter>> classCounters = new ConcurrentHashMap<>();
private ConcurrentMap<String, ConcurrentMap<String, Counter>> strCnts = new ConcurrentHashMap<>();
transient private ConcurrentMap<String, CntsMetric> metrics = null;
private void checkGroupName(String groupName) {
if (classCounters.containsKey(groupName)) {
throw new IllegalStateException("Group name " + groupName + " coincides with the enum type counter name");
}
}
private void checkClass(String class1) {
if (strCnts.containsKey(class1)) {
throw new IllegalStateException("Enum class name " + class1 + " coincides with the string type counter name");
}
}
@Override
public void i(Countable e) {
i(e, 1);
}
@Override
public void i(Countable e, long val) {
final String groupName = getCounterEnclosingName(e);
checkClass(groupName);
classCounters.putIfAbsent(groupName, new ConcurrentHashMap<String, Counter>());
ConcurrentMap<String, Counter> cntMap = classCounters.get(groupName);
cntMap.putIfAbsent(e.getName(), new CounterImpl());
Counter cnt = cntMap.get(e.getName());
cnt.i(val);
}
@Override
public void i(String group, String name) {
i(group, name, 1);
}
@Override
public void i(String group, String name, long val) {
checkGroupName(group);
strCnts.putIfAbsent(group, new ConcurrentHashMap<String, Counter>());
ConcurrentMap<String, Counter> cntMap = strCnts.get(group);
cntMap.putIfAbsent(name, new CounterImpl());
Counter cnt = cntMap.get(name);
cnt.i(val);
}
@Override
public long cnt(Countable e) {
Map<String, Counter> cntMap = classCounters.get(getCounterEnclosingName(e));
if (cntMap == null) {
return 0;
}
Counter cnt = cntMap.get(e.getName());
return cnt == null ? 0 : cnt.cnt();
}
@Override
public long cnt(String group, String name) {
Map<String, Counter> cntMap = strCnts.get(group);
if (cntMap == null) {
return 0;
}
Counter cnt = cntMap.get(name);
return cnt == null ? 0 : cnt.cnt();
}
@Override
    public Counter getCounter(Countable e) {
        // use the enclosing class name as the group key, consistently with i(Countable),
        // instead of mixing e.getName() and e.getClass().getName() (which could NPE)
        final String groupName = getCounterEnclosingName(e);
        checkClass(groupName);
        classCounters.putIfAbsent(groupName, new ConcurrentHashMap<String, Counter>());
        ConcurrentMap<String, Counter> cntMap = classCounters.get(groupName);
        cntMap.putIfAbsent(e.getName(), new CounterImpl());
        return cntMap.get(e.getName());
    }
@Override
public Counter getCounter(String group, String name) {
checkGroupName(group);
strCnts.putIfAbsent(group, new ConcurrentHashMap<String, Counter>());
ConcurrentMap<String, Counter> cntMap = strCnts.get(group);
cntMap.putIfAbsent(name, new CounterImpl());
return cntMap.get(name);
}
@Override
public Map<String, Long> getCounters(Class<? extends Countable> countableClass) {
        Map<String, Long> toReturn = new ConcurrentHashMap<>();
        final ConcurrentMap<String, Counter> stringCounterConcurrentMap = classCounters.get(countableClass.getName());
        // guard against classes for which no counter was ever incremented
        if (stringCounterConcurrentMap != null) {
            for (Map.Entry<String, Counter> e : stringCounterConcurrentMap.entrySet()) {
                toReturn.put(e.getKey(), e.getValue().cnt());
            }
        }
return toReturn;
}
@Override
public Map<String, Long> getCounters(String group) {
Map<String, Long> toReturn = new ConcurrentHashMap<>();
if (strCnts.containsKey(group)) {
for (Map.Entry<String, Counter> e : strCnts.get(group).entrySet()) {
toReturn.put(e.getKey(), e.getValue().cnt());
}
}
return toReturn;
}
@SuppressWarnings({"unchecked"})
@Override
public Map<String, Map<String, Long>> getAllCounters() {
Map<String, Map<String, Long>> map = new ConcurrentHashMap<>();
for (String e : classCounters.keySet()) {
try {
map.put(e, getCounters((Class<? extends Countable>) Class.forName(e)));
} catch (ClassNotFoundException e1) {
throw new IllegalStateException(e1);
}
}
for (String e : strCnts.keySet()) {
map.put(e, getCounters(e));
}
return map;
}
@Override
public Map<String, Long> flattenAllCounters(String separator) {
Map<String, Long> map = new HashMap<>();
for (Map.Entry<String, Map<String, Long>> group : getAllCounters().entrySet()) {
for (Map.Entry<String, Long> e : group.getValue().entrySet()) {
map.put(group.getKey() + separator + e.getKey(), e.getValue());
}
}
return map;
}
@Override
public synchronized void addMetric(String name, CntsMetric cntsMetric) {
if (metrics == null) {
metrics = new ConcurrentHashMap<>();
}
metrics.put(name, cntsMetric);
}
@Override
public synchronized void removeMetric(String name) {
if (metrics == null) {
metrics = new ConcurrentHashMap<>();
}
metrics.remove(name);
}
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder(1000);
for (Map.Entry<String, Map<String, Long>> m : getAllCounters().entrySet()) {
sb.append("\n************************************************************************************\n").
append("COUNTER: ").append(m.getKey()).append("\n************************************************************************************").
append("\n------------------------------------------------------------------------------------\n");
int maxLength = 0;
for (Map.Entry<String, Long> cs : m.getValue().entrySet()) {
if (maxLength < cs.getKey().length()) {
maxLength = cs.getKey().length();
}
}
for (Map.Entry<String, Long> cs : m.getValue().entrySet()) {
sb.append(" ").append(cs.getKey()).append(": ").append(new String(new char[maxLength - cs.getKey().length()]).replace('\0', ' '))
.append(cs.getValue()).append("\n");
}
sb.append("====================================================================================\n");
}
if (metrics != null && !metrics.isEmpty()) {
sb.append("\n++++++++++++++++++++++++++++++ METRICS +++++++++++++++++++++++++++++++++++++++++++++\n");
for (Map.Entry<String, CntsMetric> e : metrics.entrySet()) {
sb.append(e.getKey()).append(": ").append(e.getValue().getMetricString(this)).append("\n");
}
}
sb.append("====================================================================================\n");
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CntManagerImpl that = (CntManagerImpl) o;
return !(classCounters != null ? !classCounters.equals(that.classCounters) : that.classCounters != null)
&& !(strCnts != null ? !strCnts.equals(that.strCnts) : that.strCnts != null);
}
@Override
public int hashCode() {
int result = classCounters != null ? classCounters.hashCode() : 0;
result = 31 * result + (strCnts != null ? strCnts.hashCode() : 0);
return result;
}
protected String getCounterEnclosingName(Countable e) {
if (e.getClass() != null && e.getClass().getEnclosingClass() != null) {
return e.getClass().getEnclosingClass().getName();
} else {
return e.getClass().getName();
}
}
}
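/*
 * Illustrative sketch, not part of the original file: string-keyed counters
 * are grouped, and flattenAllCounters() joins group and name with the given
 * separator. The group/name values below are made-up examples.
 */
class CntManagerImplExample {
    public static void main(String[] args) {
        CntManager manager = new CntManagerImpl();
        manager.i("citations", "matched");
        manager.i("citations", "matched", 2);
        manager.i("citations", "failed");
        // prints something like {citations.matched=3, citations.failed=1}
        System.out.println(manager.flattenAllCounters("."));
    }
}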
| 8,545 | 35.365957 | 156 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/CounterImpl.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.utilities.counters.Counter;
import java.util.concurrent.atomic.AtomicLong;
class CounterImpl implements Counter {
private static final long serialVersionUID = 4764636620333386314L;
    // the AtomicLong reference is never reassigned, so final is sufficient
    private final AtomicLong cnt = new AtomicLong(0);
public CounterImpl() {
}
public CounterImpl(long cnt) {
this.cnt.set(cnt);
}
@Override
public void i() {
cnt.incrementAndGet();
}
@Override
public void i(long val) {
cnt.addAndGet(val);
}
@Override
public long cnt() {
return cnt.longValue();
}
@Override
public void set(long val) {
cnt.set(val);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CounterImpl counter = (CounterImpl) o;
return cnt.get() == counter.cnt.get();
}
@Override
public int hashCode() {
return cnt.hashCode();
}
}
| 1,064 | 19.480769 | 70 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/CntManagerSaverImpl.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.counters.CntManagerSaver;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
public class CntManagerSaverImpl implements CntManagerSaver {
public static final SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
static {
df.setTimeZone(TimeZone.getTimeZone("UTC"));
}
@Override
public CntManager deserialize(InputStream is) throws IOException {
ObjectInputStream in = new ObjectInputStream(is);
try {
return (CntManager) in.readObject();
} catch (ClassNotFoundException e) {
throw new IllegalStateException("Cannot deserialize counter because: " + e.getMessage(), e);
}
}
@Override
public void serialize(CntManager cntManager, OutputStream os) throws IOException {
        ObjectOutput out = new ObjectOutputStream(os);
        out.writeObject(cntManager);
        // flush so buffered object data actually reaches the underlying stream
        out.flush();
    }
}
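/*
 * Illustrative round-trip sketch, not part of the original file; the file
 * name "counters.bin" is a made-up example. Serialization uses plain Java
 * object streams, so the same classes must be on the classpath when reading.
 */
class CntManagerSaverExample {
    public static void main(String[] args) throws IOException {
        CntManagerSaver saver = new CntManagerSaverImpl();
        CntManager manager = CntManagerFactory.getCntManager();
        manager.i("group", "name");
        try (OutputStream os = new java.io.FileOutputStream("counters.bin")) {
            saver.serialize(manager, os);
        }
        try (InputStream is = new java.io.FileInputStream("counters.bin")) {
            CntManager restored = saver.deserialize(is);
            System.out.println(restored.cnt("group", "name")); // prints 1
        }
    }
}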
| 1,217 | 30.230769 | 104 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/NoOpCounterImpl.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.utilities.counters.Counter;
class NoOpCounterImpl implements Counter {
private static final long serialVersionUID = -6891249458789932892L;
@Override
public void i() {
}
@Override
public void i(long val) {
}
@Override
public long cnt() {
return 0;
}
@Override
public void set(long val) {
}
}
| 431 | 15 | 71 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/NoOpCntManagerImpl.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.engines.counters.Countable;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.counters.CntsMetric;
import org.grobid.core.utilities.counters.Counter;
import java.util.Map;
class NoOpCntManagerImpl implements CntManager {
@Override
public void i(Countable e) {
}
@Override
public void i(Countable e, long val) {
}
@Override
public void i(String group, String name) {
}
@Override
public void i(String group, String name, long val) {
}
@Override
public long cnt(Countable e) {
return 0;
}
@Override
public long cnt(String group, String name) {
return 0;
}
@Override
public Counter getCounter(Countable e) {
return null;
}
@Override
public Counter getCounter(String group, String name) {
return null;
}
@Override
public Map<String, Long> getCounters(Class<? extends Countable> enumClass) {
return null;
}
@Override
public Map<String, Long> getCounters(String group) {
return null;
}
@Override
public Map<String, Map<String, Long>> getAllCounters() {
return null;
}
@Override
public Map<String, Long> flattenAllCounters(String separator) {
return null;
}
@Override
public void addMetric(String name, CntsMetric cntsMetric) {
}
@Override
public void removeMetric(String name) {
}
}
| 1,539 | 18.012346 | 80 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/utilities/counters/impl/CntManagerGrepRepresentation.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.counters.CntManagerRepresentation;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.TimeZone;
public class CntManagerGrepRepresentation implements CntManagerRepresentation {
public static final SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
static {
df.setTimeZone(TimeZone.getTimeZone("UTC"));
}
@Override
public String getRepresentation(CntManager cntManager) {
StringBuilder sb = new StringBuilder();
synchronized (df) {
sb.append("|").append(df.format(new Date())).append('\n');
}
for (Map.Entry<String, Map<String, Long>> m : cntManager.getAllCounters().entrySet()) {
sb.append('=').append(m.getKey()).append('\n');
for (Map.Entry<String, Long> cs : m.getValue().entrySet()) {
sb.append(m.getKey()).append("+").append(cs.getKey()).append(":").append(cs.getValue()).append('\n');
}
sb.append('\n');
}
return sb.toString();
}
}
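/*
 * Illustrative sketch, not part of the original file: each counter is emitted
 * on its own line as "group+name:value", so a single counter is easy to
 * extract with grep. The group/name values are made-up examples; the output
 * would look like:
 *   |2024-01-01T00:00:00Z
 *   =citations
 *   citations+matched:3
 */
class CntManagerGrepRepresentationExample {
    public static void main(String[] args) {
        CntManager manager = CntManagerFactory.getCntManager();
        manager.i("citations", "matched", 3);
        System.out.println(new CntManagerGrepRepresentation().getRepresentation(manager));
    }
}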
| 1,202 | 33.371429 | 117 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/DocumentPointer.java
|
package org.grobid.core.document;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
/**
* Class representing a pointer within a PDF document, basically a block index and then a token index within a block (not global token index)
*/
public class DocumentPointer implements Comparable<DocumentPointer>{
public static final DocumentPointer START_DOCUMENT_POINTER = new DocumentPointer(0, 0 , 0);
private final int blockPtr;
private final int tokenBlockPos;
private final int tokenDocPos;
public DocumentPointer(int blockPtr, int tokenDocPos, int tokenBlockPos) {
Preconditions.checkArgument(tokenDocPos >= tokenBlockPos);
Preconditions.checkArgument(tokenBlockPos >= 0);
this.tokenDocPos = tokenDocPos;
this.tokenBlockPos = tokenBlockPos;
this.blockPtr = blockPtr;
}
public DocumentPointer(Document doc, int blockIndex, int tokenDocPos) {
this(blockIndex, tokenDocPos, tokenDocPos - doc.getBlocks().get(blockIndex).getStartToken());
}
@Override
public int compareTo(DocumentPointer o) {
return Ints.compare(tokenDocPos, o.tokenDocPos);
}
public int getBlockPtr() {
return blockPtr;
}
public int getTokenBlockPos() {
return tokenBlockPos;
}
public int getTokenDocPos() {
return tokenDocPos;
}
@Override
public String toString() {
return "DocPtr(Block No: " + blockPtr + "; Token position in block: " + tokenBlockPos + "; position of token in doc: " + tokenDocPos + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DocumentPointer that = (DocumentPointer) o;
if (blockPtr != that.blockPtr) return false;
if (tokenBlockPos != that.tokenBlockPos) return false;
if (tokenDocPos != that.tokenDocPos) return false;
return true;
}
@Override
public int hashCode() {
int result = blockPtr;
result = 31 * result + tokenBlockPos;
result = 31 * result + tokenDocPos;
return result;
}
}
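/*
 * Illustrative sketch, not part of the original file: pointers are ordered by
 * their global token position only, regardless of block index. The numeric
 * values below are arbitrary.
 */
class DocumentPointerOrderingExample {
    public static void main(String[] args) {
        DocumentPointer p1 = new DocumentPointer(0, 5, 5);
        DocumentPointer p2 = new DocumentPointer(3, 42, 7);
        System.out.println(p1.compareTo(p2) < 0); // prints true
    }
}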
| 2,194 | 29.068493 | 147 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/PatentDocument.java
|
package org.grobid.core.document;
import org.grobid.core.layout.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Class holding additional information for patent documents.
*
*/
public class PatentDocument extends Document {
private int beginBlockPAReport = -1;
static public Pattern searchReport =
Pattern.compile("((international|interna(\\s)+Η(\\s)+onal)(\\s)+(search)(\\s)+(report))|" +
"((internationaler)(\\s)+(recherchenberich))|" +
"(I(\\s)+N(\\s)+T(\\s)+E(\\s)+R(\\s)+N(\\s)+A(\\s)+T(\\s)+I(\\s)+O(\\s)+N(\\s)+A(\\s)+L(\\s)+S(\\s)+E(\\s)+A(\\s)+R(\\s)+C(\\s)+H)",
Pattern.CASE_INSENSITIVE | Pattern.MULTILINE);
static public Pattern FamilyMembers =
Pattern.compile("(patent)(\\s)+(famil(v|y))(\\s)+(members)?",
Pattern.CASE_INSENSITIVE | Pattern.MULTILINE);
public PatentDocument(DocumentSource documentSource) {
super(documentSource);
}
public int getBeginBlockPAReport() {
return beginBlockPAReport;
}
public void setBeginBlockPAReport(int begin) {
beginBlockPAReport = begin;
}
/**
* Return all blocks corresponding to the prior art report of a WO patent publication
*/
public String getWOPriorArtBlocks() {
System.out.println("getWOPriorArtBlocks");
StringBuilder accumulated = new StringBuilder();
int i = 0;
boolean PAReport = false;
boolean newPage = false;
if (getBlocks() != null) {
for (Block block : getBlocks()) {
String content = block.getText();
if (content != null) {
content = content.trim();
//System.out.println(content);
if (newPage & (!PAReport)) {
//System.out.println("new page");
Matcher m = PatentDocument.searchReport.matcher(content);
if (m.find()) {
PAReport = true;
beginBlockPAReport = i;
}
}
/*if (PAReport) {
Matcher m = FamilyMembers.matcher(content);
if (m.find()) {
PAReport = false;
}
}*/
newPage = content.startsWith("@PAGE");
if (PAReport)
accumulated.append(content).append("\n");
}
i++;
}
}
System.out.println(accumulated.toString());
return accumulated.toString();
}
}
| 2,761 | 32.277108 | 152 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/DocumentPiece.java
|
package org.grobid.core.document;
public class DocumentPiece implements Comparable<DocumentPiece>{
    // immutable endpoints of the piece, exposed via getters
private final DocumentPointer a;
private final DocumentPointer b;
public DocumentPiece(DocumentPointer a, DocumentPointer b) {
if (a.compareTo(b) > 0) {
throw new IllegalArgumentException("Invalid document piece: " + a + "-" + b);
}
this.a = a;
this.b = b;
}
public DocumentPointer getLeft() {
return a;
}
public DocumentPointer getRight() {
return b;
}
@Override
public String toString() {
return "(" + a + " - " + b + ")";
}
@Override
public int compareTo(DocumentPiece o) {
return a.compareTo(o.a);
}
}
| 795 | 22.411765 | 89 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/TEICounters.java
|
package org.grobid.core.document;
/**
* General debugging counters
*/
public enum TEICounters {
CITATION_FIGURE_REF_MARKER_MISSED_SUBSTITUTION, TEI_POSITION_REF_MARKERS_OFFSET_TOO_LARGE, TEI_POSITION_REF_MARKERS_TOK_NOT_FOUND, CITATION_FIGURE_REF_MARKER_SUBSTITUTED
}
| 275 | 29.666667 | 173 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/BasicStructureBuilder.java
|
package org.grobid.core.document;
import com.google.common.collect.Iterables;
import com.google.common.collect.SortedSetMultimap;
import com.google.common.collect.TreeMultimap;
import org.apache.commons.lang3.tuple.Pair;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.layout.Block;
import org.grobid.core.layout.Cluster;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.TextUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.SortedSet;
import java.util.StringTokenizer;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Class for building basic structures in a document item.
*
*/
public class BasicStructureBuilder {
private static final Logger LOGGER = LoggerFactory.getLogger(BasicStructureBuilder.class);
// note: these regular expressions will disappear as a new CRF model is now covering
// the overall document segmentation
/*static public Pattern introduction =
Pattern.compile("^\\b*(Introduction?|Einleitung|INTRODUCTION|Acknowledge?ments?|Acknowledge?ment?|Background?|Content?|Contents?|Motivations?|1\\.\\sPROBLEMS?|1\\.(\\n)?\\sIntroduction?|1\\.\\sINTRODUCTION|I\\.(\\s)+Introduction|1\\.\\sProblems?|I\\.\\sEinleitung?|1\\.\\sEinleitung?|1\\sEinleitung?|1\\sIntroduction?)",
Pattern.CASE_INSENSITIVE);
static public Pattern introductionStrict =
Pattern.compile("^\\b*(1\\.\\sPROBLEMS?|1\\.(\\n)?\\sIntroduction?|1\\.(\\n)?\\sContent?|1\\.\\sINTRODUCTION|I\\.(\\s)+Introduction|1\\.\\sProblems?|I\\.\\sEinleitung?|1\\.\\sEinleitung?|1\\sEinleitung?|1\\sIntroduction?)",
Pattern.CASE_INSENSITIVE);
static public Pattern abstract_ = Pattern.compile("^\\b*\\.?(abstract?|résumé?|summary?|zusammenfassung?)",
Pattern.CASE_INSENSITIVE);*/
static public Pattern headerNumbering1 = Pattern.compile("^(\\d+)\\.?\\s");
static public Pattern headerNumbering2 = Pattern.compile("^((\\d+)\\.)+(\\d+)\\s");
static public Pattern headerNumbering3 = Pattern.compile("^((\\d+)\\.)+\\s");
static public Pattern headerNumbering4 = Pattern.compile("^([A-Z](I|V|X)*(\\.(\\d)*)*\\s)");
private static Pattern startNum = Pattern.compile("^(\\d)+\\s");
private static Pattern endNum = Pattern.compile("\\s(\\d)+$");
/**
* Cluster the blocks following the font, style and size aspects
*
* -> not used at this stage, but could be an interesting feature in the full text model in the future
*
* @param b integer
* @param doc a document
*/
private static void addBlockToCluster(Integer b, Document doc) {
// get block features
Block block = doc.getBlocks().get(b);
String font = block.getFont();
boolean bold = block.getBold();
boolean italic = block.getItalic();
double fontSize = block.getFontSize();
boolean found = false;
if (font == null) {
font = "unknown";
}
//System.out.println(font + " " + bold + " " + italic + " " + fontSize );
if (doc.getClusters() == null) {
doc.setClusters(new ArrayList<Cluster>());
} else {
for (Cluster cluster : doc.getClusters()) {
String font2 = cluster.getFont();
if (font2 == null)
font2 = "unknown";
                if (font.equals(font2) &&
                        (bold == cluster.getBold()) &&
                        (italic == cluster.getItalic()) &&
                        (fontSize == cluster.getFontSize())) {
cluster.addBlock2(b);
found = true;
}
}
}
if (!found) {
Cluster cluster = new Cluster();
cluster.setFont(font);
cluster.setBold(bold);
cluster.setItalic(italic);
cluster.setFontSize(fontSize);
cluster.addBlock2(b);
doc.getClusters().add(cluster);
}
}
static public Document generalResultSegmentation(Document doc, String labeledResult, List<LayoutToken> documentTokens) {
List<Pair<String, String>> labeledTokens = GenericTaggerUtils.getTokensAndLabels(labeledResult);
SortedSetMultimap<String, DocumentPiece> labeledBlocks = TreeMultimap.create();
doc.setLabeledBlocks(labeledBlocks);
List<Block> docBlocks = doc.getBlocks();
int indexLine = 0;
int blockIndex = 0;
int p = 0; // position in the labeled result
int currentLineEndPos = 0; // position in the global doc. tokenization of the last
// token of the current line
int currentLineStartPos = 0; // position in the global doc.
// tokenization of the first token of the current line
String line = null;
//DocumentPointer pointerA = DocumentPointer.START_DOCUMENT_POINTER;
// the default first block might not contain tokens but only bitmap - in this case we move
// to the first block containing some LayoutToken objects
while (docBlocks.get(blockIndex).getTokens() == null ||
docBlocks.get(blockIndex).getNbTokens() == 0
//TODO: make things right
// || docBlocks.get(blockIndex).getStartToken() == -1
) {
blockIndex++;
}
DocumentPointer pointerA = new DocumentPointer(doc, blockIndex, docBlocks.get(blockIndex).getStartToken());
DocumentPointer currentPointer = null;
DocumentPointer lastPointer = null;
String curLabel;
String curPlainLabel = null;
String lastPlainLabel = null;
int lastTokenInd = -1;
for (int i = docBlocks.size() - 1; i >=0; i--) {
int endToken = docBlocks.get(i).getEndToken();
if (endToken != -1) {
lastTokenInd = endToken;
break;
}
}
// we do this concatenation trick so that we don't have to process stuff after the main loop
// no copying of lists happens because of this, so it's ok to concatenate
String ignoredLabel = "@IGNORED_LABEL@";
for (Pair<String, String> labeledTokenPair :
Iterables.concat(labeledTokens,
Collections.singleton(Pair.of("IgnoredToken", ignoredLabel)))) {
if (labeledTokenPair == null) {
p++;
continue;
}
// as we process the document segmentation line by line, we don't use the usual
// tokenization to rebuild the text flow, but we get each line again from the
// text stored in the document blocks (similarly as when generating the features)
line = null;
while( (line == null) && (blockIndex < docBlocks.size()) ) {
Block block = docBlocks.get(blockIndex);
List<LayoutToken> tokens = block.getTokens();
String localText = block.getText();
if ( (tokens == null) || (localText == null) || (localText.trim().length() == 0) ) {
blockIndex++;
indexLine = 0;
if (blockIndex < docBlocks.size()) {
block = docBlocks.get(blockIndex);
currentLineStartPos = block.getStartToken();
}
continue;
}
String[] lines = localText.split("[\\n\\r]");
if ( (lines.length == 0) || (indexLine >= lines.length)) {
blockIndex++;
indexLine = 0;
if (blockIndex < docBlocks.size()) {
block = docBlocks.get(blockIndex);
currentLineStartPos = block.getStartToken();
}
continue;
}
else {
line = lines[indexLine];
indexLine++;
if ( (line.trim().length() == 0) || (TextUtilities.filterLine(line)) ) {
line = null;
continue;
}
if (currentLineStartPos > lastTokenInd)
continue;
                    // adjust the start token position in documentTokens to this non-trivial line
// first skip possible space characters and tabs at the beginning of the line
while( (documentTokens.get(currentLineStartPos).t().equals(" ") ||
documentTokens.get(currentLineStartPos).t().equals("\t") )
&& (currentLineStartPos != lastTokenInd)) {
currentLineStartPos++;
}
if (!labeledTokenPair.getLeft().startsWith(documentTokens.get(currentLineStartPos).getText())) {
while(currentLineStartPos < block.getEndToken()) {
if (documentTokens.get(currentLineStartPos).t().equals("\n")
|| documentTokens.get(currentLineStartPos).t().equals("\r")) {
// move to the start of the next line, but ignore space characters and tabs
currentLineStartPos++;
while( (documentTokens.get(currentLineStartPos).t().equals(" ") ||
documentTokens.get(currentLineStartPos).t().equals("\t") )
&& (currentLineStartPos != lastTokenInd)) {
currentLineStartPos++;
}
if ((currentLineStartPos != lastTokenInd) &&
labeledTokenPair.getLeft().startsWith(documentTokens.get(currentLineStartPos).getText())) {
break;
}
}
currentLineStartPos++;
}
}
// what is then the position of the last token of this line?
currentLineEndPos = currentLineStartPos;
while(currentLineEndPos < block.getEndToken()) {
if (documentTokens.get(currentLineEndPos).t().equals("\n")
|| documentTokens.get(currentLineEndPos).t().equals("\r")) {
currentLineEndPos--;
break;
}
currentLineEndPos++;
}
}
}
curLabel = labeledTokenPair.getRight();
curPlainLabel = GenericTaggerUtils.getPlainLabel(curLabel);
/*System.out.println("-------------------------------");
System.out.println("block: " + blockIndex);
System.out.println("line: " + line);
System.out.println("token: " + labeledTokenPair.a);
System.out.println("curPlainLabel: " + curPlainLabel);
System.out.println("lastPlainLabel: " + lastPlainLabel);
if ((currentLineStartPos < lastTokenInd) && (currentLineStartPos != -1))
System.out.println("currentLineStartPos: " + currentLineStartPos +
" (" + documentTokens.get(currentLineStartPos) + ")");
if ((currentLineEndPos < lastTokenInd) && (currentLineEndPos != -1))
System.out.println("currentLineEndPos: " + currentLineEndPos +
" (" + documentTokens.get(currentLineEndPos) + ")");*/
if (blockIndex == docBlocks.size()) {
break;
}
currentPointer = new DocumentPointer(doc, blockIndex, currentLineEndPos);
// either a new entity starts or a new beginning of the same type of entity
if ((!curPlainLabel.equals(lastPlainLabel)) && (lastPlainLabel != null)) {
if ( (pointerA.getTokenDocPos() <= lastPointer.getTokenDocPos()) &&
(pointerA.getTokenDocPos() != -1) ) {
labeledBlocks.put(lastPlainLabel, new DocumentPiece(pointerA, lastPointer));
}
pointerA = new DocumentPointer(doc, blockIndex, currentLineStartPos);
//System.out.println("add segment for: " + lastPlainLabel + ", until " + (currentLineStartPos-2));
}
//updating stuff for next iteration
lastPlainLabel = curPlainLabel;
lastPointer = currentPointer;
currentLineStartPos = currentLineEndPos+2; // one shift for the EOL, one for the next line
p++;
}
if (blockIndex == docBlocks.size()) {
// the last labelled piece has still to be added
if ((!curPlainLabel.equals(lastPlainLabel)) && (lastPlainLabel != null)) {
if ( (pointerA.getTokenDocPos() <= lastPointer.getTokenDocPos()) &&
(pointerA.getTokenDocPos() != -1) ) {
labeledBlocks.put(lastPlainLabel, new DocumentPiece(pointerA, lastPointer));
//System.out.println("add segment for: " + lastPlainLabel + ", until " + (currentLineStartPos-2));
}
}
}
return doc;
}
/**
* Set the main segments of the document based on the full text parsing results
*
* @param doc a document
* @param labeledResult string
* @param tokenizations tokens
* @return a document
*/
static public Document resultSegmentation(Document doc, String labeledResult, List<String> tokenizations) {
if (doc == null) {
throw new NullPointerException("Document is null");
}
if (doc.getBlocks() == null) {
throw new NullPointerException("Blocks of the documents are null");
}
//System.out.println(tokenizations.toString());
// int i = 0;
// boolean first = true;
List<Integer> blockHeaders = new ArrayList<Integer>();
List<Integer> blockFooters = new ArrayList<Integer>();
List<Integer> blockDocumentHeaders = new ArrayList<Integer>();
List<Integer> blockSectionTitles = new ArrayList<Integer>();
SortedSet<DocumentPiece> blockReferences = new TreeSet<DocumentPiece>();
doc.setBibDataSets(new ArrayList<BibDataSet>());
// StringTokenizer st = new StringTokenizer(labeledResult, "\n");
String[] lines = labeledResult.split("\n");
String currentTag = null;
String s2 = null;
String lastTag = null;
String lastPlainTag = null;
int p = 0; // index in the results' tokenization (st)
int blockIndex = 0;
BibDataSet bib = null;
DocumentPointer pointerA = null;
// DocumentPointer pointerB = null;
DocumentPointer currentPointer;
DocumentPointer lastPointer = null;
for (String line : lines) {
// while (st.hasMoreTokens()) {
for (; blockIndex < doc.getBlocks().size() - 1; blockIndex++) {
// int startTok = doc.getBlocks().get(blockIndex).getStartToken();
int endTok = doc.getBlocks().get(blockIndex).getEndToken();
if (endTok >= p) {
break;
}
}
ArrayList<String> localFeatures = new ArrayList<String>();
boolean addSpace = false;
// String tok = st.nextToken().trim();
line = line.trim();
StringTokenizer stt = new StringTokenizer(line, "\t");
int j = 0;
boolean newLine = false;
int ll = stt.countTokens();
while (stt.hasMoreTokens()) {
String s = stt.nextToken().trim();
if (j == 0) {
s2 = s;
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p);
if (tokOriginal.equals(" ")
| tokOriginal.equals("\n")
| tokOriginal.equals("\r")
| tokOriginal.equals("\t")) {
addSpace = true;
p++;
} else if (tokOriginal.equals("")) {
p++;
} else //if (tokOriginal.equals(s))
{
strop = true;
}
}
} else if (j == ll - 1) {
currentTag = s; // current tag
} else {
if (s.equals("LINESTART")) {
newLine = true;
}
localFeatures.add(s);
}
j++;
}
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastPlainTag = lastTag.substring(2, lastTag.length());
} else {
lastPlainTag = lastTag;
}
}
String currentPlainTag = null;
if (currentTag != null) {
if (currentTag.startsWith("I-")) {
currentPlainTag = currentTag.substring(2, currentTag.length());
} else {
currentPlainTag = currentTag;
}
}
currentPointer = new DocumentPointer(doc, blockIndex, p);
if (lastPlainTag != null && !currentPlainTag.equals(lastPlainTag) && lastPlainTag.equals("<references>")) {
blockReferences.add(new DocumentPiece(pointerA, lastPointer));
pointerA = currentPointer;
}
if (currentPlainTag.equals("<header>")) {
if (!blockDocumentHeaders.contains(blockIndex)) {
blockDocumentHeaders.add(blockIndex);
//System.out.println("add block header: " + blockIndexInteger.intValue());
}
} else if (currentPlainTag.equals("<references>")) {// if (!blockReferences.contains(blockIndex)) {
// blockReferences.add(blockIndex);
// //System.out.println("add block reference: " + blockIndexInteger.intValue());
// }
if (currentTag.equals("I-<references>")) {
pointerA = new DocumentPointer(doc, blockIndex, p);
if (bib != null) {
if (bib.getRawBib() != null) {
doc.getBibDataSets().add(bib);
bib = new BibDataSet();
}
} else {
bib = new BibDataSet();
}
bib.setRawBib(s2);
} else {
if (addSpace) {
if (bib == null) {
bib = new BibDataSet();
bib.setRawBib(" " + s2);
} else {
bib.setRawBib(bib.getRawBib() + " " + s2);
}
} else {
if (bib == null) {
bib = new BibDataSet();
bib.setRawBib(s2);
} else {
bib.setRawBib(bib.getRawBib() + s2);
}
}
}
// case "<reference_marker>":
// if (!blockReferences.contains(blockIndex)) {
// blockReferences.add(blockIndex);
// //System.out.println("add block reference: " + blockIndexInteger.intValue());
// }
//
// if (currentTag.equals("I-<reference_marker>")) {
// if (bib != null) {
// if (bib.getRefSymbol() != null) {
// doc.getBibDataSets().add(bib);
// bib = new BibDataSet();
// }
// } else {
// bib = new BibDataSet();
// }
// bib.setRefSymbol(s2);
// } else {
// if (addSpace) {
// if (bib == null) {
// bib = new BibDataSet();
// bib.setRefSymbol(s2);
// } else {
// bib.setRefSymbol(bib.getRefSymbol() + " " + s2);
// }
// } else {
// if (bib == null) {
// bib = new BibDataSet();
// bib.setRefSymbol(s2);
// } else {
// bib.setRefSymbol(bib.getRefSymbol() + s2);
// }
// }
// }
// break;
} else if (currentPlainTag.equals("<page_footnote>")) {
if (!blockFooters.contains(blockIndex)) {
blockFooters.add(blockIndex);
//System.out.println("add block foot note: " + blockIndexInteger.intValue());
}
} else if (currentPlainTag.equals("<page_header>")) {
if (!blockHeaders.contains(blockIndex)) {
blockHeaders.add(blockIndex);
//System.out.println("add block page header: " + blockIndexInteger.intValue());
}
} else if (currentPlainTag.equals("<section>")) {
if (!blockSectionTitles.contains(blockIndex)) {
blockSectionTitles.add(blockIndex);
//System.out.println("add block page header: " + blockIndexInteger.intValue());
}
}
lastTag = currentTag;
p++;
lastPointer = currentPointer;
}
if (bib != null) {
doc.getBibDataSets().add(bib);
}
if (!lastPointer.equals(pointerA)) {
if (lastPlainTag.equals("<references>")) {
blockReferences.add(new DocumentPiece(pointerA, lastPointer));
}
}
/*doc.setBlockHeaders(blockHeaders);
doc.setBlockFooters(blockFooters);
doc.setBlockDocumentHeaders(blockDocumentHeaders);
doc.setBlockReferences(blockReferences);
doc.setBlockSectionTitles(blockSectionTitles);*/
return doc;
}
}
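/*
 * Illustrative sketch, not part of the original file: the public header
 * numbering patterns match at the start of a section title line. The sample
 * titles are made-up examples.
 */
class HeaderNumberingExample {
    public static void main(String[] args) {
        System.out.println(BasicStructureBuilder.headerNumbering1.matcher("3. Results ").find()); // true
        System.out.println(BasicStructureBuilder.headerNumbering2.matcher("2.1.4 Data ").find()); // true
        System.out.println(BasicStructureBuilder.headerNumbering4.matcher("IV. Method").find());  // true
    }
}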
| 21,556 | 39.218284 | 332 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/Document.java
|
package org.grobid.core.document;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Predicate;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.google.common.collect.SortedSetMultimap;
import org.apache.commons.io.IOUtils;
import org.grobid.core.analyzers.Analyzer;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.data.*;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.engines.counters.FigureCounters;
import org.grobid.core.engines.counters.TableRejectionCounters;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidExceptionStatus;
import org.grobid.core.features.FeatureFactory;
import org.grobid.core.layout.Block;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.Cluster;
import org.grobid.core.layout.GraphicObject;
import org.grobid.core.layout.GraphicObjectType;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.layout.PDFAnnotation;
import org.grobid.core.layout.Page;
import org.grobid.core.layout.VectorGraphicBoxCalculator;
import org.grobid.core.sax.*;
import org.grobid.core.utilities.BoundingBoxCalculator;
import org.grobid.core.utilities.ElementCounter;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.Pair;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.Utilities;
import org.grobid.core.utilities.matching.EntityMatcherException;
import org.grobid.core.utilities.matching.ReferenceMarkerMatcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.Serializable;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
/**
* Class for representing, processing and exchanging a document item.
*
*/
public class Document implements Serializable {
public static final long serialVersionUID = 1L;
protected static final Logger LOGGER = LoggerFactory.getLogger(Document.class);
public static final int MAX_FIG_BOX_DISTANCE = 70;
protected transient final DocumentSource documentSource;
protected String pathXML = null; // XML representation of the current PDF file
protected String lang = null;
// layout structure of the document
protected transient List<Page> pages = null;
protected transient List<Cluster> clusters = null;
protected transient List<Block> blocks = null;
protected List<Integer> blockDocumentHeaders = null;
protected transient FeatureFactory featureFactory = null;
// map of labels (e.g. <reference> or <footnote>) to document pieces
protected transient SortedSetMultimap<String, DocumentPiece> labeledBlocks;
// original tokenization and tokens - in order to recreate the original
// strings and spacing
protected List<LayoutToken> tokenizations = null;
// list of bibliographical references with context
protected transient Map<String, BibDataSet> teiIdToBibDataSets = null;
protected transient List<BibDataSet> bibDataSets = null;
// header of the document - if extracted and processed
protected transient BiblioItem resHeader = null;
    // full text as structured TEI - if extracted and processed
protected String tei;
protected transient ReferenceMarkerMatcher referenceMarkerMatcher;
public void setImages(List<GraphicObject> images) {
this.images = images;
}
// list of bitmaps and vector graphics of the document
protected transient List<GraphicObject> images = null;
// list of PDF annotations as present in the PDF source file
protected transient List<PDFAnnotation> pdfAnnotations = null;
// the document outline (or bookmark) embedded in the PDF, if present
protected transient DocumentNode outlineRoot = null;
protected transient Metadata metadata = null;
protected transient Multimap<Integer, GraphicObject> imagesPerPage = LinkedListMultimap.create();
// some statistics regarding the document - useful for generating the features
protected double maxCharacterDensity = 0.0;
protected double minCharacterDensity = 0.0;
protected double maxBlockSpacing = 0.0;
protected double minBlockSpacing = 0.0;
protected int documentLenghtChar = -1; // length here is expressed as number of characters
// not used
protected int beginBody = -1;
protected int beginReferences = -1;
protected boolean titleMatchNum = false; // true if the section titles of the document are numbered
protected transient List<Figure> figures;
protected transient Predicate<GraphicObject> validGraphicObjectPredicate;
protected int m;
protected transient List<Table> tables;
protected transient List<Equation> equations;
// the analyzer/tokenizer used for processing this document
protected transient Analyzer analyzer = GrobidAnalyzer.getInstance();
// map of sequence of LayoutTokens for the fulltext model labels
//Map<String, List<LayoutTokenization>> labeledTokenSequences = null;
protected double byteSize = 0;
public Document(DocumentSource documentSource) {
this.documentSource = documentSource;
setPathXML(documentSource.getXmlFile());
this.byteSize = documentSource.getByteSize();
}
protected Document() {
this.documentSource = null;
}
public static Document createFromText(String text) {
Document doc = new Document();
doc.fromText(text);
if (text != null) {
try {
final byte[] utf8Bytes = text.getBytes("UTF-8");
doc.byteSize = utf8Bytes.length;
} catch(Exception e) {
LOGGER.warn("Could not set the original text document size in bytes for UTF-8 encoding");
}
}
return doc;
}
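    /*
     * Illustrative usage sketch, not part of the original file: building a
     * Document directly from a text string bypasses PDF processing entirely;
     * the whole text becomes a single block on a single page.
     *
     *   Document doc = Document.createFromText("This is a small test.");
     *   List<LayoutToken> tokens = doc.getTokenizations();
     */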
public void setLanguage(String l) {
lang = l;
}
public String getLanguage() {
return lang;
}
public BiblioItem getResHeader() {
return resHeader;
}
public List<Block> getBlocks() {
return blocks;
}
public List<BibDataSet> getBibDataSets() {
return bibDataSets;
}
public void addBlock(Block b) {
if (blocks == null)
blocks = new ArrayList<Block>();
blocks.add(b);
}
public List<GraphicObject> getImages() {
return images;
}
public List<PDFAnnotation> getPDFAnnotations() {
return pdfAnnotations;
}
public Metadata getMetadata() {
return metadata;
}
/**
* Set the path to the XML file generated by xml2pdf
*/
protected void setPathXML(File pathXML) {
this.pathXML = pathXML.getAbsolutePath();
}
public List<LayoutToken> getTokenizations() {
return tokenizations;
}
public int getDocumentLenghtChar() {
return documentLenghtChar;
}
public double getMaxCharacterDensity() {
return maxCharacterDensity;
}
public double getMinCharacterDensity() {
return minCharacterDensity;
}
public double getMaxBlockSpacing() {
return maxBlockSpacing;
}
public double getMinBlockSpacing() {
return minBlockSpacing;
}
public void setAnalyzer(Analyzer analyzer) {
this.analyzer = analyzer;
}
public Analyzer getAnalyzer() {
return this.analyzer;
}
public List<LayoutToken> fromText(final String text) {
        List<String> toks = null;
        try {
            toks = GrobidAnalyzer.getInstance().tokenize(text);
        } catch (Exception e) {
            LOGGER.error("Tokenization failed for " + text, e);
        }
        if (toks == null) {
            // avoid an NPE downstream if the tokenizer failed
            toks = Collections.emptyList();
        }
        tokenizations = toks.stream().map(LayoutToken::new).collect(Collectors.toList());
blocks = new ArrayList<>();
Block b = new Block();
for (LayoutToken lt : tokenizations) {
b.addToken(lt);
}
Page p = new Page(1);
b.setPage(p);
//b.setText(text);
pages = new ArrayList<>();
pages.add(p);
blocks.add(b);
p.addBlock(b);
b.setStartToken(0);
b.setEndToken(toks.size() - 1);
images = new ArrayList<>();
return tokenizations;
}
/**
* See https://github.com/kermitt2/grobid/pull/475
* Ignore invalid unicode characters
*
* @author Daniel Ecer
*/
protected static void parseInputStream(InputStream in, SAXParser saxParser, DefaultHandler handler)
throws SAXException, IOException {
CharsetDecoder utf8Decoder = Charset.forName("UTF-8").newDecoder();
utf8Decoder.onMalformedInput(CodingErrorAction.IGNORE);
utf8Decoder.onUnmappableCharacter(CodingErrorAction.IGNORE);
saxParser.parse(new InputSource(new InputStreamReader(in, utf8Decoder)), handler);
}
protected static void parseInputStream(InputStream in, SAXParserFactory saxParserFactory, DefaultHandler handler)
throws SAXException, IOException, ParserConfigurationException {
parseInputStream(in, saxParserFactory.newSAXParser(), handler);
}
/**
* Parser PDFALTO output representation and get the tokenized form of the document.
*
* @return list of features
*/
public List<LayoutToken> addTokenizedDocument(GrobidAnalysisConfig config) {
        // The XML generated by pdfalto might contain invalid UTF characters due to the "garbage-in" of the PDF,
// which will result in a "fatal" parsing failure (the joy of XML!). The solution could be to prevent
// having those characters in the input XML by cleaning it first
images = new ArrayList<>();
PDFALTOSaxHandler parser = new PDFALTOSaxHandler(this, images);
// we set possibly the particular analyzer to be used for tokenization of the PDF elements
if (config.getAnalyzer() != null)
parser.setAnalyzer(config.getAnalyzer());
pdfAnnotations = new ArrayList<PDFAnnotation>();
PDFALTOAnnotationSaxHandler parserAnnot = new PDFALTOAnnotationSaxHandler(this, pdfAnnotations);
PDFALTOOutlineSaxHandler parserOutline = new PDFALTOOutlineSaxHandler(this);
PDFMetadataSaxHandler parserMetadata = new PDFMetadataSaxHandler(this);
// get a SAX parser factory
SAXParserFactory spf = SAXParserFactory.newInstance();
tokenizations = null;
File file = new File(pathXML);
File fileAnnot = new File(pathXML+"_annot.xml");
File fileOutline = new File(pathXML+"_outline.xml");
File fileMetadata = new File(pathXML+"_metadata.xml");
FileInputStream in = null;
try {
// parsing of the pdfalto file
in = new FileInputStream(file);
// in = new XMLFilterFileInputStream(file); // -> to filter invalid XML characters
// get a new instance of parser
parseInputStream(in, spf, parser);
tokenizations = parser.getTokenization();
if (in != null) {
try {
in.close();
} catch (IOException e) {
LOGGER.error("Cannot close input stream", e);
}
}
} catch (GrobidException e) {
throw e;
} catch (Exception e) {
throw new GrobidException("Cannot parse file: " + file, e, GrobidExceptionStatus.PARSING_ERROR);
} finally {
IOUtils.closeQuietly(in);
}
if (fileAnnot.exists()) {
try {
// parsing of the annotation XML file (for annotations in the PDf)
in = new FileInputStream(fileAnnot);
SAXParser p = spf.newSAXParser();
p.parse(in, parserAnnot);
} catch (GrobidException e) {
throw e;
} catch (Exception e) {
LOGGER.error("Cannot parse file: " + fileAnnot, e, GrobidExceptionStatus.PARSING_ERROR);
} finally {
IOUtils.closeQuietly(in);
}
}
if (fileOutline.exists()) {
try {
// parsing of the outline XML file (for PDF bookmark)
in = new FileInputStream(fileOutline);
SAXParser p = spf.newSAXParser();
p.parse(in, parserOutline);
outlineRoot = parserOutline.getRootNode();
} catch (GrobidException e) {
throw e;
} catch (Exception e) {
LOGGER.error("Cannot parse file: " + fileOutline, e, GrobidExceptionStatus.PARSING_ERROR);
} finally {
IOUtils.closeQuietly(in);
}
}
if (fileMetadata.exists()) {
try {
                // parsing of the metadata XML file (PDF document metadata)
in = new FileInputStream(fileMetadata);
SAXParser p = spf.newSAXParser();
p.parse(in, parserMetadata);
metadata = parserMetadata.getMetadata();
} catch (GrobidException e) {
throw e;
} catch (Exception e) {
LOGGER.error("Cannot parse file: " + fileMetadata, e, GrobidExceptionStatus.PARSING_ERROR);
} finally {
IOUtils.closeQuietly(in);
}
}
if (getBlocks() == null) {
throw new GrobidException("PDF parsing resulted in empty content", GrobidExceptionStatus.NO_BLOCKS);
}
// calculating main area
calculatePageMainAreas();
// calculating boxes for pages
if (config.isProcessVectorGraphics()) {
try {
for (GraphicObject o : VectorGraphicBoxCalculator.calculate(this).values()) {
images.add(o);
}
} catch (Exception e) {
throw new GrobidException("Cannot process vector graphics: " + file, e, GrobidExceptionStatus.PARSING_ERROR);
}
}
// cache images per page
for (GraphicObject go : images) {
// filtering out small figures that are likely to be logos and stuff
if (go.getType() == GraphicObjectType.BITMAP && !isValidBitmapGraphicObject(go)) {
continue;
}
imagesPerPage.put(go.getPage(), go);
}
HashSet<Integer> keys = new HashSet<>(imagesPerPage.keySet());
for (Integer pageNum : keys) {
Collection<GraphicObject> elements = imagesPerPage.get(pageNum);
if (elements.size() > 100) {
imagesPerPage.removeAll(pageNum);
Engine.getCntManager().i(FigureCounters.TOO_MANY_FIGURES_PER_PAGE);
} else {
ArrayList<GraphicObject> res = glueImagesIfNecessary(pageNum, Lists.newArrayList(elements));
if (res != null) {
imagesPerPage.removeAll(pageNum);
imagesPerPage.putAll(pageNum, res);
}
}
}
// we filter out possible line numbering for review works
// filterLineNumber();
return tokenizations;
}
private void calculatePageMainAreas() {
ElementCounter<Integer> leftEven = new ElementCounter<>();
ElementCounter<Integer> rightEven = new ElementCounter<>();
ElementCounter<Integer> leftOdd = new ElementCounter<>();
ElementCounter<Integer> rightOdd = new ElementCounter<>();
ElementCounter<Integer> top = new ElementCounter<>();
ElementCounter<Integer> bottom = new ElementCounter<>();
for (Block b : blocks) {
BoundingBox box = BoundingBoxCalculator.calculateOneBox(b.getTokens());
if (box != null) {
b.setBoundingBox(box);
}
            // small blocks can indicate page numbers, journal header info, etc.; no need to include them
if (b.getX() == 0 || b.getHeight() < 20 || b.getWidth() < 20 || b.getHeight() * b.getWidth() < 3000) {
continue;
}
if (b.getPageNumber() % 2 == 0) {
leftEven.i((int) b.getX());
rightEven.i((int) (b.getX() + b.getWidth()));
} else {
leftOdd.i((int) b.getX());
rightOdd.i((int) (b.getX() + b.getWidth()));
}
top.i((int) b.getY());
bottom.i((int) (b.getY() + b.getHeight()));
}
if (!leftEven.getCnts().isEmpty() && !leftOdd.getCnts().isEmpty()) {
int pageEvenX = 0;
int pageEvenWidth = 0;
if (pages.size() > 1) {
pageEvenX = getCoordItem(leftEven, true);
// +1 due to rounding
pageEvenWidth = getCoordItem(rightEven, false) - pageEvenX + 1;
}
int pageOddX = getCoordItem(leftOdd, true);
// +1 due to rounding
int pageOddWidth = getCoordItem(rightOdd, false) - pageOddX + 1;
int pageY = getCoordItem(top, true);
int pageHeight = getCoordItem(bottom, false) - pageY + 1;
for (Page page : pages) {
if (page.isEven()) {
page.setMainArea(BoundingBox.fromPointAndDimensions(page.getNumber(),
pageEvenX, pageY, pageEvenWidth, pageHeight));
} else {
page.setMainArea(BoundingBox.fromPointAndDimensions(page.getNumber(),
pageOddX, pageY, pageOddWidth, pageHeight));
}
}
} else {
for (Page page : pages) {
page.setMainArea(BoundingBox.fromPointAndDimensions(page.getNumber(),
0, 0, page.getWidth(), page.getHeight()));
}
}
}
protected ArrayList<GraphicObject> glueImagesIfNecessary(Integer pageNum, List<GraphicObject> graphicObjects) {
List<Pair<Integer, Integer>> toGlue = new ArrayList<>();
// List<GraphicObject> cur = new ArrayList<>();
// List<GraphicObject> graphicObjects = new ArrayList<>(objs);
int start = 0, end = 0;
for (int i = 1; i < graphicObjects.size(); i++) {
GraphicObject prev = graphicObjects.get(i - 1);
GraphicObject cur = graphicObjects.get(i);
if (prev.getType() != GraphicObjectType.BITMAP || cur.getType() != GraphicObjectType.BITMAP) {
if (start != end) {
toGlue.add(new Pair<>(start, end + 1));
}
start = i;
end = start;
continue;
}
if (Utilities.doubleEquals(prev.getBoundingBox().getWidth(), cur.getBoundingBox().getWidth(), 0.0001)
&& Utilities.doubleEquals(prev.getBoundingBox().getY2(), cur.getBoundingBox().getY(), 0.0001)
) {
end++;
} else {
if (start != end) {
toGlue.add(new Pair<>(start, end + 1));
}
start = i;
end = start;
}
}
if (start != end) {
toGlue.add(new Pair<>(start, end + 1));
}
if (toGlue.isEmpty()) {
return null;
}
for (Pair<Integer, Integer> p : toGlue) {
BoundingBox box = graphicObjects.get(p.a).getBoundingBox();
for (int i = p.a + 1; i < p.b; i++) {
box = box.boundBox(graphicObjects.get(i).getBoundingBox());
}
graphicObjects.set(p.a, new GraphicObject(box, GraphicObjectType.VECTOR_BOX));
for (int i = p.a + 1; i < p.b; i++) {
graphicObjects.set(i, null);
}
}
validGraphicObjectPredicate = new Predicate<GraphicObject>() {
@Override
public boolean apply(GraphicObject graphicObject) {
return graphicObject != null && isValidBitmapGraphicObject(graphicObject);
}
};
return Lists.newArrayList(Iterables.filter(graphicObjects, validGraphicObjectPredicate));
}
protected static int getCoordItem(ElementCounter<Integer> cnt, boolean getMin) {
List<Map.Entry<Integer, Integer>> counts = cnt.getSortedCounts();
int max = counts.get(0).getValue();
int res = counts.get(0).getKey();
for (Map.Entry<Integer, Integer> e : counts) {
/*if (e.getValue() < max * 0.7) {
break;
}*/
if (getMin) {
if (e.getKey() < res) {
res = e.getKey();
}
} else {
if (e.getKey() > res) {
res = e.getKey();
}
}
}
return res;
}
/**
* Return all blocks without markers.
* <p/>
     * Ignore the first toIgnore1 blocks and the blocks from index toIgnore2 (inclusive) onwards.
*/
public String getAllBlocksClean(int toIgnore1, int toIgnore2) {
StringBuilder accumulated = new StringBuilder();
if (toIgnore2 == -1)
toIgnore2 = blocks.size() + 1;
int i = 0;
if (blocks != null) {
for (Block block : blocks) {
if ((i >= toIgnore1) && (i < toIgnore2)) {
accumulated.append(block.getText()).append("\n");
}
i++;
}
}
return accumulated.toString();
}
/*
     * Try to match a DOI in the first pages, independently from any preliminary
     * segmentation. This can be useful for improving the chance to find a DOI
     * in headers or footnotes.
*/
public List<String> getDOIMatches() {
List<String> results = new ArrayList<String>();
List<Page> pages = getPages();
int p = 0;
for (Page page : pages) {
if ((page.getBlocks() != null) && (page.getBlocks().size() > 0)) {
for (int blockIndex = 0; blockIndex < page.getBlocks().size(); blockIndex++) {
Block block = page.getBlocks().get(blockIndex);
String localText = block.getText();
if ((localText != null) && (localText.length() > 0)) {
localText = localText.trim();
Matcher DOIMatcher = TextUtilities.DOIPattern.matcher(localText);
while (DOIMatcher.find()) {
String theDOI = DOIMatcher.group();
if (!results.contains(theDOI)) {
results.add(theDOI);
}
}
}
}
}
if (p > 1)
break;
p++;
}
return results;
}
public String getTei() {
return tei;
}
public void setTei(String tei) {
this.tei = tei;
}
public List<Integer> getBlockDocumentHeaders() {
return blockDocumentHeaders;
}
public DocumentNode getOutlineRoot() {
return outlineRoot;
}
public void setOutlineRoot(DocumentNode outlineRoot) {
this.outlineRoot = outlineRoot;
}
public boolean isTitleMatchNum() {
return titleMatchNum;
}
public void setTitleMatchNum(boolean titleMatchNum) {
this.titleMatchNum = titleMatchNum;
}
public List<Page> getPages() {
return pages;
}
// starting from 1
public Page getPage(int num) {
return pages.get(num - 1);
}
public List<Cluster> getClusters() {
return clusters;
}
public void setBlockDocumentHeaders(List<Integer> blockDocumentHeaders) {
this.blockDocumentHeaders = blockDocumentHeaders;
}
public void setClusters(List<Cluster> clusters) {
this.clusters = clusters;
}
public void setPages(List<Page> pages) {
this.pages = pages;
}
public void addPage(Page page) {
if (pages == null)
pages = new ArrayList<Page>();
pages.add(page);
}
public void setBibDataSets(List<BibDataSet> bibDataSets) {
this.bibDataSets = bibDataSets;
// some cleaning of the labels
if (this.bibDataSets != null) {
for (BibDataSet bds : this.bibDataSets) {
String marker = bds.getRefSymbol();
if (marker != null) {
//marker = marker.replace(".", "");
//marker = marker.replace(" ", "");
marker = marker.replaceAll("[\\.\\[\\]()\\-\\s]", "");
bds.setRefSymbol(marker);
}
}
}
int cnt = 0;
for (BibDataSet bds : bibDataSets) {
bds.getResBib().setOrdinal(cnt++);
}
}
public synchronized ReferenceMarkerMatcher getReferenceMarkerMatcher() throws EntityMatcherException {
if (referenceMarkerMatcher == null) {
if (this.bibDataSets != null)
referenceMarkerMatcher = new ReferenceMarkerMatcher(this.bibDataSets, Engine.getCntManager());
}
return referenceMarkerMatcher;
}
// when calling this method, the tei ids already should be in BibDataSets.BiblioItem
public void calculateTeiIdToBibDataSets() {
if (bibDataSets == null) {
return;
}
teiIdToBibDataSets = new HashMap<String, BibDataSet>(bibDataSets.size());
for (BibDataSet bds : bibDataSets) {
if (bds.getResBib() != null && bds.getResBib().getTeiId() != null) {
teiIdToBibDataSets.put(bds.getResBib().getTeiId(), bds);
}
}
}
public SortedSetMultimap<String, DocumentPiece> getLabeledBlocks() {
return labeledBlocks;
}
public void setLabeledBlocks(SortedSetMultimap<String, DocumentPiece> labeledBlocks) {
this.labeledBlocks = labeledBlocks;
}
// helper
public List<LayoutToken> getDocumentPieceTokenization(DocumentPiece dp) {
return tokenizations.subList(dp.getLeft().getTokenDocPos(), dp.getRight().getTokenDocPos() + 1);
}
public String getDocumentPieceText(DocumentPiece dp) {
return Joiner.on("").join(getDocumentPieceTokenization(dp));
}
public String getDocumentPieceText(SortedSet<DocumentPiece> dps) {
return Joiner.on("\n").join(Iterables.transform(dps, new Function<DocumentPiece, Object>() {
@Override
public String apply(DocumentPiece documentPiece) {
return getDocumentPieceText(documentPiece);
}
}));
}
/**
* Get the document part corresponding to a particular segment type
*/
public SortedSet<DocumentPiece> getDocumentPart(TaggingLabel segmentationLabel) {
if (labeledBlocks == null) {
LOGGER.debug("labeledBlocks is null");
return null;
}
if (segmentationLabel.getLabel() == null) {
System.out.println("segmentationLabel.getLabel() is null");
}
return labeledBlocks.get(segmentationLabel.getLabel());
}
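    // Usage sketch (illustrative only, assuming the document has already been segmented):
    //   SortedSet<DocumentPiece> refParts = doc.getDocumentPart(SegmentationLabels.REFERENCES);
    //   if (refParts != null) {
    //       String referencesText = doc.getDocumentPieceText(refParts);
    //   }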
public String getDocumentPartText(TaggingLabel segmentationLabel) {
SortedSet<DocumentPiece> pieces = getDocumentPart(segmentationLabel);
if (pieces == null) {
return null;
} else {
return getDocumentPieceText(getDocumentPart(segmentationLabel));
}
}
/**
* Give the list of LayoutToken corresponding to some document parts and
* a global document tokenization.
*/
public static List<LayoutToken> getTokenizationParts(SortedSet<DocumentPiece> documentParts,
List<LayoutToken> tokenizations) {
if (documentParts == null)
return null;
List<LayoutToken> tokenizationParts = new ArrayList<>();
for (DocumentPiece docPiece : documentParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
int tokens = dp1.getTokenDocPos();
int tokene = dp2.getTokenDocPos();
for (int i = tokens; i < tokene; i++) {
tokenizationParts.add(tokenizations.get(i));
}
}
return tokenizationParts;
}
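    // Note: unlike getDocumentPieceTokenization(), which includes the token at the right
    // pointer (it uses tokenDocPos + 1 as the exclusive sublist bound), the loop above uses
    // the right token position as an exclusive bound, so the last token of each piece is
    // not included here.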
public BibDataSet getBibDataSetByTeiId(String teiId) {
return teiIdToBibDataSets.get(teiId);
}
protected static double MIN_DISTANCE = 100.0;
/**
     * Return the list of graphic objects vertically close to (within MIN_DISTANCE of) the given block.
*/
public static List<GraphicObject> getConnectedGraphics(Block block, Document doc) {
List<GraphicObject> images = null;
for (GraphicObject image : doc.getImages()) {
if (block.getPageNumber() != image.getPage())
continue;
if (((Math.abs((image.getY() + image.getHeight()) - block.getY()) < MIN_DISTANCE) ||
(Math.abs(image.getY() - (block.getY() + block.getHeight())) < MIN_DISTANCE)) //||
//( (Math.abs((image.x+image.getWidth()) - block.getX()) < MIN_DISTANCE) ||
// (Math.abs(image.x - (block.getX()+block.getWidth())) < MIN_DISTANCE) )
) {
                // the image is at a distance of less than MIN_DISTANCE from the top or bottom
                // border of the block (the horizontal checks are currently commented out)
if (images == null)
images = new ArrayList<GraphicObject>();
images.add(image);
}
}
return images;
}
    // post-process tables: drop caption/content tokens that are spatially too far from the
    // table area (e.g. footer noise) and invalidate tables whose description is not consecutive
public void postProcessTables() {
for (Table table : tables) {
if (!table.firstCheck()) {
continue;
}
// cleaning up tokens
List<LayoutToken> fullDescResult = new ArrayList<>();
BoundingBox curBox = BoundingBox.fromLayoutToken(table.getFullDescriptionTokens().get(0));
int distanceThreshold = 200;
for (LayoutToken fdt : table.getFullDescriptionTokens()) {
BoundingBox b = BoundingBox.fromLayoutToken(fdt);
if (b.getX() < 0) {
fullDescResult.add(fdt);
continue;
}
if (b.distanceTo(curBox) > distanceThreshold) {
Engine.getCntManager().i(TableRejectionCounters.HEADER_NOT_CONSECUTIVE);
table.setGoodTable(false);
break;
} else {
curBox = curBox.boundBox(b);
fullDescResult.add(fdt);
}
}
table.getFullDescriptionTokens().clear();
table.getFullDescriptionTokens().addAll(fullDescResult);
List<LayoutToken> contentResult = new ArrayList<>();
curBox = BoundingBox.fromLayoutToken(table.getContentTokens().get(0));
for (LayoutToken fdt : table.getContentTokens()) {
BoundingBox b = BoundingBox.fromLayoutToken(fdt);
if (b.getX() < 0) {
contentResult.add(fdt);
continue;
}
if (b.distanceTo(curBox) > distanceThreshold) {
break;
} else {
curBox = curBox.boundBox(b);
contentResult.add(fdt);
}
}
table.getContentTokens().clear();
table.getContentTokens().addAll(contentResult);
table.secondCheck();
}
}
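    // High-level flow of the method below: (1) recompute the caption tokens of each detected
    // figure, page by page; (2) match captions to bitmap and vector graphics on the same page
    // by bounding-box distance, with dedicated handling when vector boxes are present;
    // (3) for pages without any detected figure, promote isolated, non-overlapping graphics
    // to standalone figures.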
public void assignGraphicObjectsToFigures() {
Multimap<Integer, Figure> figureMap = HashMultimap.create();
for (Figure f : figures) {
figureMap.put(f.getPage(), f);
}
for (Integer pageNum : figureMap.keySet()) {
List<Figure> pageFigures = new ArrayList<>();
for (Figure f : figureMap.get(pageNum)) {
List<LayoutToken> realCaptionTokens = getFigureLayoutTokens(f);
if (realCaptionTokens != null && !realCaptionTokens.isEmpty()) {
f.setLayoutTokens(realCaptionTokens);
f.setTextArea(BoundingBoxCalculator.calculate(realCaptionTokens));
f.setCaption(new StringBuilder(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(realCaptionTokens))));
f.setCaptionLayoutTokens(realCaptionTokens);
pageFigures.add(f);
}
}
if (pageFigures.isEmpty()) {
continue;
}
List<GraphicObject> it = Lists.newArrayList(Iterables.filter(imagesPerPage.get(pageNum), Figure.GRAPHIC_OBJECT_PREDICATE));
            // filter out images that, for some reason, fall outside of the page's main area
it = it.stream().filter(go -> {
BoundingBox mainArea = getPage(go.getBoundingBox().getPage()).getMainArea();
return mainArea.intersect(go.getBoundingBox());
}).collect(Collectors.toList());
List<GraphicObject> vectorBoxGraphicObjects =
Lists.newArrayList(Iterables.filter(imagesPerPage.get(pageNum), Figure.VECTOR_BOX_GRAPHIC_OBJECT_PREDICATE));
            // case where the figure caption is covered almost precisely by the vector graphics box -- filter those out, they are captured by the caption anyway
vectorBoxGraphicObjects = vectorBoxGraphicObjects.stream().filter(go -> {
for (Figure f : pageFigures) {
BoundingBox intersection = BoundingBoxCalculator.calculateOneBox(f.getLayoutTokens(), true).boundingBoxIntersection(go.getBoundingBox());
if(intersection != null && intersection.area() / go.getBoundingBox().area() > 0.5) {
return false;
}
}
return true;
}).collect(Collectors.toList());
List<GraphicObject> graphicObjects = new ArrayList<>();
l:
for (GraphicObject bgo : it) {
for (GraphicObject vgo : vectorBoxGraphicObjects) {
if (bgo.getBoundingBox().intersect(vgo.getBoundingBox())) {
continue l;
}
}
graphicObjects.add(bgo);
}
graphicObjects.addAll(vectorBoxGraphicObjects);
            // easy case when we don't have any vector boxes -- it is easier to correlate figure captions with bitmap images
if (vectorBoxGraphicObjects.isEmpty()) {
for (Figure figure : pageFigures) {
List<LayoutToken> tokens = figure.getLayoutTokens();
final BoundingBox figureBox =
BoundingBoxCalculator.calculateOneBox(tokens, true);
double minDist = MAX_FIG_BOX_DISTANCE * 100;
GraphicObject bestGo = null;
if (figureBox != null) {
for (GraphicObject go : graphicObjects) {
                        // skip graphics that were already assigned or whose bounding box contains the whole caption area
if (go.isUsed() || go.getBoundingBox().contains(figureBox)) {
continue;
}
if (!isValidBitmapGraphicObject(go)) {
continue;
}
double dist = figureBox.distanceTo(go.getBoundingBox());
if (dist > MAX_FIG_BOX_DISTANCE) {
continue;
}
if (dist < minDist) {
minDist = dist;
bestGo = go;
}
}
}
if (bestGo != null) {
bestGo.setUsed(true);
figure.setGraphicObjects(Lists.newArrayList(bestGo));
Engine.getCntManager().i("FigureCounters", "ASSIGNED_GRAPHICS_TO_FIGURES");
}
}
} else {
if (pageFigures.size() != graphicObjects.size()) {
Engine.getCntManager().i(FigureCounters.SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS);
continue;
}
for (Figure figure : pageFigures) {
List<LayoutToken> tokens = figure.getLayoutTokens();
final BoundingBox figureBox =
BoundingBoxCalculator.calculateOneBox(tokens, true);
double minDist = MAX_FIG_BOX_DISTANCE * 100;
GraphicObject bestGo = null;
if (figureBox != null) {
for (GraphicObject go : graphicObjects) {
if (go.isUsed()) {
continue;
}
BoundingBox goBox = go.getBoundingBox();
if (!getPage(goBox.getPage()).getMainArea().contains(goBox) && go.getWidth() * go.getHeight() < 10000) {
continue;
}
if (go.getType() == GraphicObjectType.BITMAP && !isValidBitmapGraphicObject(go)) {
continue;
}
double dist = figureBox.distanceTo(goBox);
if (dist > MAX_FIG_BOX_DISTANCE) {
continue;
}
if (dist < minDist) {
minDist = dist;
bestGo = go;
}
}
}
if (bestGo != null) {
bestGo.setUsed(true);
// when vector box overlaps the caption, we need to cut that piece from vector graphics
if (bestGo.getType() == GraphicObjectType.VECTOR_BOX) {
recalculateVectorBoxCoords(figure, bestGo);
}
figure.setGraphicObjects(Lists.newArrayList(bestGo));
Engine.getCntManager().i("FigureCounters", "ASSIGNED_GRAPHICS_TO_FIGURES");
}
}
}
}
// special case, when we didn't detect figures, but there is a nice figure on this page
int maxPage = pages.size();
for (int pageNum = 1; pageNum <= maxPage; pageNum++) {
if (!figureMap.containsKey(pageNum)) {
ArrayList<GraphicObject> it = Lists.newArrayList(Iterables.filter(imagesPerPage.get(pageNum), Figure.GRAPHIC_OBJECT_PREDICATE));
List<GraphicObject> vectorBoxGraphicObjects =
Lists.newArrayList(Iterables.filter(imagesPerPage.get(pageNum), Figure.VECTOR_BOX_GRAPHIC_OBJECT_PREDICATE));
List<GraphicObject> graphicObjects = new ArrayList<>();
l:
for (GraphicObject bgo : it) {
// intersecting with vector graphics is dangerous, so better skip than have a false positive
for (GraphicObject vgo : vectorBoxGraphicObjects) {
if (bgo.getBoundingBox().intersect(vgo.getBoundingBox())) {
continue l;
}
}
                    // if graphic objects intersect each other, it's most likely a composition and we cannot take just one
for (GraphicObject bgo2 : it) {
if (bgo2 != bgo && bgo.getBoundingBox().intersect(bgo2.getBoundingBox())) {
continue l;
}
}
graphicObjects.add(bgo);
}
graphicObjects.addAll(vectorBoxGraphicObjects);
if (graphicObjects.size() == it.size()) {
for (GraphicObject o : graphicObjects) {
if (badStandaloneFigure(o)) {
Engine.getCntManager().i(FigureCounters.SKIPPED_BAD_STANDALONE_FIGURES);
continue;
}
Figure f = new Figure();
f.setPage(pageNum);
f.setGraphicObjects(Collections.singletonList(o));
figures.add(f);
Engine.getCntManager().i("FigureCounters", "STANDALONE_FIGURES");
LOGGER.debug("Standalone figure on page: " + pageNum);
}
}
}
}
}
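    // Heuristic filter for standalone figures used below: reject graphics that are too small
    // (bounding box area below 50000 units) or that cover more than 60% of the page's main
    // area (typically a full-page background rather than an actual figure).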
private boolean badStandaloneFigure(GraphicObject o) {
if (o.getBoundingBox().area() < 50000) {
Engine.getCntManager().i(FigureCounters.SKIPPED_SMALL_STANDALONE_FIGURES);
return true;
}
if (o.getBoundingBox().area() / pages.get(o.getPage() - 1).getMainArea().area() > 0.6) {
Engine.getCntManager().i(FigureCounters.SKIPPED_BIG_STANDALONE_FIGURES);
return true;
}
return false;
}
protected boolean isValidBitmapGraphicObject(GraphicObject go) {
if (go.getWidth() * go.getHeight() < 1000) {
return false;
}
if (go.getWidth() < 50) {
return false;
}
if (go.getHeight() < 50) {
return false;
}
BoundingBox mainArea = getPage(go.getBoundingBox().getPage()).getMainArea();
if (!mainArea.contains(go.getBoundingBox()) && go.getWidth() * go.getHeight() < 10000) {
return false;
}
return true;
}
    // graphic boxes can overlap captions; in that case the caption area must be cut out of the vector box
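    // Illustrative example: if the caption strip lies along the bottom edge of the vector box,
    // the "bottom" candidate (the area above the caption minus a small margin) is usually the
    // largest of the four candidate sub-boxes and becomes the new graphic bounding box.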
protected void recalculateVectorBoxCoords(Figure f, GraphicObject g) {
//TODO: make it robust - now super simplistic
BoundingBox captionBox = BoundingBoxCalculator.calculateOneBox(f.getLayoutTokens(), true);
BoundingBox originalGoBox = g.getBoundingBox();
if (captionBox.intersect(originalGoBox)) {
int p = originalGoBox.getPage();
double cx1 = captionBox.getX();
double cx2 = captionBox.getX2();
double cy1 = captionBox.getY();
double cy2 = captionBox.getY2();
double fx1 = originalGoBox.getX();
double fx2 = originalGoBox.getX2();
double fy1 = originalGoBox.getY();
double fy2 = originalGoBox.getY2();
            double m = 5.0; // margin kept between the caption box and the cut graphic area
BoundingBox bestBox = null;
try {
//if caption is on the bottom
BoundingBox bottomArea = BoundingBox.fromTwoPoints(p, fx1, fy1, fx2, cy1 - m);
bestBox = bottomArea;
} catch (Exception e) {
// no op
}
try {
// caption is on the right
BoundingBox rightArea = BoundingBox.fromTwoPoints(p, fx1, fy1, cx1 - m, fy2);
if (bestBox == null || rightArea.area() > bestBox.area()) {
bestBox = rightArea;
}
} catch (Exception e) {
//no op
}
try {
BoundingBox topArea = BoundingBox.fromTwoPoints(p, fx1, cy2 + m, fx2, fy2);
if (bestBox == null || topArea.area() > bestBox.area()) {
bestBox = topArea;
}
} catch (Exception e) {
//no op
}
try {
BoundingBox leftArea = BoundingBox.fromTwoPoints(p, cx2 + m, fy1, fx2, fy2);
if (bestBox == null || leftArea.area() > bestBox.area()) {
bestBox = leftArea;
}
} catch (Exception e) {
//no op
}
if (bestBox != null && bestBox.area() > 600) {
g.setBoundingBox(bestBox);
}
}
// if (captionBox.intersect(originalGoBox)) {
// if (originalGoBox.getY() < captionBox.getY() - 5) {
// g.setBoundingBox(BoundingBox.fromTwoPoints(p, originalGoBox.getX(), originalGoBox.getY(), originalGoBox.getX2(), captionBox.getY() - 5));
// }
// }
}
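    // Caption detection heuristic used below: scan the figure's block pointers for the first
    // block whose text starts with a caption keyword ("fig", "abb", "scheme", "photo", etc.),
    // then keep appending the following blocks as long as each one lies within 15 units of
    // the previous block's bounding box.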
protected List<LayoutToken> getFigureLayoutTokens(Figure f) {
List<LayoutToken> result = new ArrayList<>();
Iterator<Integer> it = f.getBlockPtrs().iterator();
while (it.hasNext()) {
Integer blockPtr = it.next();
Block figBlock = getBlocks().get(blockPtr);
String norm = LayoutTokensUtil.toText(figBlock.getTokens()).trim().toLowerCase();
if (norm.startsWith("fig") || norm.startsWith("abb") || norm.startsWith("scheme") || norm.startsWith("photo")
|| norm.startsWith("gambar") || norm.startsWith("quadro")
|| norm.startsWith("wykres")
|| norm.startsWith("fuente")
) {
result.addAll(figBlock.getTokens());
while (it.hasNext()) {
BoundingBox prevBlock = BoundingBox.fromPointAndDimensions(figBlock.getPageNumber(), figBlock.getX(), figBlock.getY(), figBlock.getWidth(), figBlock.getHeight());
blockPtr = it.next();
Block b = getBlocks().get(blockPtr);
if (BoundingBox.fromPointAndDimensions(b.getPageNumber(), b.getX(), b.getY(), b.getWidth(), b.getHeight()).distanceTo(prevBlock) < 15) {
result.addAll(b.getTokens());
figBlock = b;
} else {
break;
}
}
break;
} else {
                // LOGGER.info("BAD_FIGURE_LABEL: " + norm);
}
}
return result;
}
public void setConnectedGraphics2(Figure figure) {
        //TODO: improve - cluster figures on the page (taking all images and captions into account)
List<LayoutToken> tokens = figure.getLayoutTokens();
figure.setTextArea(BoundingBoxCalculator.calculate(tokens));
// if (LayoutTokensUtil.tooFarAwayVertically(figure.getTextArea(), 100)) {
// return;
// }
final BoundingBox figureBox =
BoundingBoxCalculator.calculateOneBox(tokens, true);
double minDist = MAX_FIG_BOX_DISTANCE * 100;
GraphicObject bestGo = null;
if (figureBox != null) {
for (GraphicObject go : imagesPerPage.get(figure.getPage())) {
if (go.getType() != GraphicObjectType.BITMAP || go.isUsed()) {
continue;
}
BoundingBox goBox =
BoundingBox.fromPointAndDimensions(go.getPage(), go.getX(), go.getY(),
go.getWidth(), go.getHeight());
if (!getPage(goBox.getPage()).getMainArea().contains(goBox)) {
continue;
}
double dist = figureBox.distanceTo(goBox);
if (dist > MAX_FIG_BOX_DISTANCE) {
continue;
}
if (dist < minDist) {
minDist = dist;
bestGo = go;
}
}
}
if (bestGo != null) {
bestGo.setUsed(true);
figure.setGraphicObjects(Lists.newArrayList(bestGo));
}
}
// public static void setConnectedGraphics(Figure figure,
// List<LayoutToken> tokenizations,
// Document doc) {
// try {
// List<GraphicObject> localImages = null;
// // set the intial figure area based on its layout tokens
// LayoutToken startToken = figure.getStartToken();
// LayoutToken endToken = figure.getEndToken();
// int start = figure.getStart();
// int end = figure.getEnd();
//
// double maxRight = 0.0; // right border of the figure
// double maxLeft = 10000.0; // left border of the figure
// double maxUp = 10000.0; // upper border of the figure
// double maxDown = 0.0; // bottom border of the figure
// for (int i = start; i <= end; i++) {
// LayoutToken current = tokenizations.get(i);
// if ((figure.getPage() == -1) && (current.getPage() != -1))
// figure.setPage(current.getPage());
// if ((current.x >= 0.0) && (current.x < maxLeft))
// maxLeft = current.x;
// if ((current.y >= 0.0) && (current.y < maxUp))
// maxUp = current.y;
// if ((current.x >= 0.0) && (current.x + current.width > maxRight))
// maxRight = current.x + current.width;
// if ((current.y >= 0.0) && (current.y + current.height > maxDown))
// maxDown = current.y + current.height;
// }
//
// figure.setX(maxLeft);
// figure.setY(maxUp);
// figure.setWidth(maxRight - maxLeft);
// figure.setHeight(maxDown - maxUp);
//
// // attach connected graphics based on estimated figure area
// for (GraphicObject image : doc.getImages()) {
// if (image.getType() == GraphicObjectType.VECTOR)
// continue;
// if (figure.getPage() != image.getPage())
// continue;
////System.out.println(image.toString());
// if (((Math.abs((image.getY() + image.getHeight()) - figure.getY()) < MIN_DISTANCE) ||
// (Math.abs(image.getY() - (figure.getY() + figure.getHeight())) < MIN_DISTANCE)) //||
// //( (Math.abs((image.x+image.width) - figure.getX()) < MIN_DISTANCE) ||
// //(Math.abs(image.x - (figure.getX()+figure.getWidth())) < MIN_DISTANCE) )
// ) {
// // the image is at a distance of at least MIN_DISTANCE from one border
// // of the block on the vertical/horizontal axis
// if (localImages == null)
// localImages = new ArrayList<GraphicObject>();
// localImages.add(image);
// }
// }
//
// // re-evaluate figure area with connected graphics
// if (localImages != null) {
// for (GraphicObject image : localImages) {
// if (image.getX() < maxLeft)
// maxLeft = image.getX();
// if (image.getY() < maxUp)
// maxUp = image.getY();
// if (image.getX() + image.getWidth() > maxRight)
// maxRight = image.getX() + image.getWidth();
// if (image.getY() + image.getHeight() > maxDown)
// maxDown = image.getY() + image.getHeight();
// }
// }
//
// figure.setGraphicObjects(localImages);
// } catch (Exception e) {
// e.printStackTrace();
// }
// }
public void produceStatistics() {
        // document length
        // we accumulate the number of layout tokens per block (note: despite the name of
        // the field documentLenghtChar, tokens are counted here, not characters)
for (Block block : blocks) {
List<LayoutToken> tokens = block.getTokens();
if (tokens == null)
continue;
documentLenghtChar += tokens.size();
}
// block spacing
maxBlockSpacing = 0.0;
minBlockSpacing = 10000.0;
Double previousBlockBottom = 0.0;
for (Page page : pages) {
int pageLength = 0;
if ((page.getBlocks() != null) && (page.getBlocks().size() > 0)) {
for (int blockIndex = 0; blockIndex < page.getBlocks().size(); blockIndex++) {
Block block = page.getBlocks().get(blockIndex);
if ((blockIndex != 0) && (previousBlockBottom > 0.0)) {
double spacing = block.getY() - previousBlockBottom;
if ((spacing > 0.0) && (spacing < page.getHeight())) {
                            // update min and max independently: an "else if" here would
                            // prevent minBlockSpacing from ever being set while spacings
                            // keep increasing
                            if (spacing > maxBlockSpacing)
                                maxBlockSpacing = spacing;
                            if (spacing < minBlockSpacing)
                                minBlockSpacing = spacing;
}
}
previousBlockBottom = block.getY() + block.getHeight();
if (block.getTokens() != null)
pageLength += block.getTokens().size();
}
}
page.setPageLengthChar(pageLength);
}
// character density is given by the number of characters in the block divided by the block's surface
maxCharacterDensity = 0.0;
minCharacterDensity = 1000000.0;
for (Block block : blocks) {
if ((block.getHeight() == 0.0) || (block.getWidth() == 0.0))
continue;
String text = block.getText();
if ((text != null) && (!text.contains("@PAGE")) && (!text.contains("@IMAGE"))) {
double surface = block.getWidth() * block.getHeight();
/*System.out.println("block.width: " + block.width);
System.out.println("block.height: " + block.height);
System.out.println("surface: " + surface);
System.out.println("text length: " + text.length());
System.out.println("text: " + text + "\n");*/
double density = ((double) text.length()) / surface;
if (density < minCharacterDensity)
minCharacterDensity = density;
if (density > maxCharacterDensity)
maxCharacterDensity = density;
}
}
/*System.out.println("documentLenghtChar: " + documentLenghtChar);
System.out.println("maxBlockSpacing: " + maxBlockSpacing);
System.out.println("minBlockSpacing: " + minBlockSpacing);
System.out.println("maxCharacterDensity: " + maxCharacterDensity);
System.out.println("minCharacterDensity: " + minCharacterDensity);*/
}
public DocumentSource getDocumentSource() {
return documentSource;
}
public void setFigures(List<Figure> figures) {
this.figures = figures;
}
public List<Figure> getFigures() {
return figures;
}
public void setTables(List<Table> tables) {
this.tables = tables;
}
public List<Table> getTables() {
return tables;
}
public void setEquations(List<Equation> equations) {
this.equations = equations;
}
public List<Equation> getEquations() {
return equations;
}
public void setResHeader(BiblioItem resHeader) {
this.resHeader = resHeader;
}
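    // The two helpers below return the layout tokens overlapping the character offset window
    // [offsetBegin, offsetEnd]. Illustrative example (hypothetical offsets): with the tokens
    // "The" (offset 0) and "cat" (offset 4), getTokens(tokens, 4, 6) returns just "cat".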
static public List<LayoutToken> getTokens(List<LayoutToken> tokenizations, int offsetBegin, int offsetEnd) {
return getTokensFrom(tokenizations, offsetBegin, offsetEnd, 0);
}
static public List<LayoutToken> getTokensFrom(List<LayoutToken> tokenizations,
int offsetBegin,
int offsetEnd,
int startTokenIndex) {
List<LayoutToken> result = new ArrayList<LayoutToken>();
for (int p = startTokenIndex; p < tokenizations.size(); p++) {
LayoutToken currentToken = tokenizations.get(p);
if ((currentToken == null) || (currentToken.getText() == null))
continue;
if (currentToken.getOffset() + currentToken.getText().length() < offsetBegin)
continue;
if (currentToken.getOffset() > offsetEnd)
return result;
result.add(currentToken);
}
return result;
}
/**
* Initialize the mapping between sequences of LayoutToken and
* fulltext model labels.
* @param labeledResult labeled sequence as produced by the CRF model
* @param tokenization List of LayoutToken for the body parts
*/
/*public void generalFullTextResultMapping(String labeledResult, List<LayoutToken> tokenizations) {
if (labeledTokenSequences == null)
labeledTokenSequences = new TreeMap<String, List<LayoutTokenization>>();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, labeledResult, tokenizations);
List<TaggingTokenCluster> clusters = clusteror.cluster();
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
List<LayoutToken> clusterTokens = cluster.concatTokens();
List<LayoutTokenization> theList = labeledTokenSequences.get(clusterLabel.toString());
if (theList == null)
theList = new ArrayList<LayoutTokenization>();
LayoutTokenization newTokenization = new LayoutTokenization(clusterTokens);
theList.add(newTokenization);
labeledTokenSequences.put(clusterLabel.getLabel(), theList);
}
}*/
public double getByteSize() {
return byteSize;
}
public void setByteSize(double size) {
byteSize = size;
}
public String getMD5() {
if (documentSource != null)
return documentSource.getMD5();
else
return null;
}
}
| 59,349 | 36.70648 | 182 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/TEIFormatter.java
|
package org.grobid.core.document;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.StringUtils;
import nu.xom.Attribute;
import nu.xom.Element;
import nu.xom.Node;
import nu.xom.Text;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.*;
import org.grobid.core.data.Date;
import org.grobid.core.document.xml.XmlBuilderUtils;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.FullTextParser;
import org.grobid.core.engines.label.SegmentationLabels;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.*;
import org.grobid.core.utilities.SentenceUtilities;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.*;
import org.grobid.core.utilities.matching.EntityMatcherException;
import org.grobid.core.utilities.matching.ReferenceMarkerMatcher;
import org.grobid.core.engines.citations.CalloutAnalyzer.MarkerType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.grobid.core.document.xml.XmlBuilderUtils.teiElement;
import static org.grobid.core.document.xml.XmlBuilderUtils.addXmlId;
import static org.grobid.core.document.xml.XmlBuilderUtils.textNode;
/**
* Class for generating a TEI representation of a document.
*
*/
@SuppressWarnings("StringConcatenationInsideStringBuilderAppend")
public class TEIFormatter {
private static final Logger LOGGER = LoggerFactory.getLogger(TEIFormatter.class);
private Document doc = null;
private FullTextParser fullTextParser = null;
public static final Set<TaggingLabel> MARKER_LABELS = Sets.newHashSet(
TaggingLabels.CITATION_MARKER,
TaggingLabels.FIGURE_MARKER,
TaggingLabels.TABLE_MARKER,
TaggingLabels.EQUATION_MARKER);
// possible association to Grobid customised TEI schemas: DTD, XML schema, RelaxNG or compact RelaxNG
// DEFAULT means no schema association in the generated XML documents
public enum SchemaDeclaration {
DEFAULT, DTD, XSD, RNG, RNC
}
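    // Usage sketch (illustrative): passing SchemaDeclaration.DTD to toTEIHeader() emits a
    // <!DOCTYPE TEI SYSTEM "..."> declaration, XSD emits an xsi:schemaLocation attribute on
    // the <TEI> root, RNG emits an <?xml-model?> processing instruction, and DEFAULT (as well
    // as RNC, which is currently not handled separately) emits no schema association.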
private Boolean inParagraph = false;
private ArrayList<String> elements = null;
// static variable for the position of italic and bold features in the CRF model
private static final int ITALIC_POS = 16;
private static final int BOLD_POS = 15;
private static Pattern numberRef = Pattern.compile("(\\[|\\()\\d+\\w?(\\)|\\])");
private static Pattern numberRefCompact =
Pattern.compile("(\\[|\\()((\\d)+(\\w)?(\\-\\d+\\w?)?,\\s?)+(\\d+\\w?)(\\-\\d+\\w?)?(\\)|\\])");
private static Pattern numberRefCompact2 = Pattern.compile("(\\[|\\()(\\d+)(-|‒|–|—|―|\u2013)(\\d+)(\\)|\\])");
private static Pattern startNum = Pattern.compile("^(\\d+\\.?\\s)(.*)");
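    // Illustrative matches for the patterns above: numberRef matches "[12]" or "(3a)";
    // numberRefCompact matches comma-separated lists such as "[1, 3-5, 7]"; numberRefCompact2
    // matches simple ranges such as "[2-5]" (with various dash characters); startNum captures
    // a leading note number, e.g. "3. Some text" yields groups "3. " and "Some text".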
private static final String SCHEMA_XSD_LOCATION = "https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/xsd/Grobid.xsd";
private static final String SCHEMA_DTD_LOCATION = "https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/dtd/Grobid.dtd";
private static final String SCHEMA_RNG_LOCATION = "https://raw.githubusercontent.com/kermitt2/grobid/master/grobid-home/schemas/rng/Grobid.rng";
public TEIFormatter(Document document, FullTextParser fullTextParser) {
this.doc = document;
this.fullTextParser = fullTextParser;
}
public StringBuilder toTEIHeader(BiblioItem biblio,
String defaultPublicationStatement,
List<BibDataSet> bds,
List<MarkerType> markerTypes,
GrobidAnalysisConfig config) {
return toTEIHeader(biblio, SchemaDeclaration.XSD, defaultPublicationStatement, bds, markerTypes, config);
}
public StringBuilder toTEIHeader(BiblioItem biblio,
SchemaDeclaration schemaDeclaration,
String defaultPublicationStatement,
List<BibDataSet> bds,
List<MarkerType> markerTypes,
GrobidAnalysisConfig config) {
StringBuilder tei = new StringBuilder();
tei.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
if (config.isWithXslStylesheet()) {
tei.append("<?xml-stylesheet type=\"text/xsl\" href=\"../jsp/xmlverbatimwrapper.xsl\"?> \n");
}
if (schemaDeclaration == SchemaDeclaration.DTD) {
tei.append("<!DOCTYPE TEI SYSTEM \"" + SCHEMA_DTD_LOCATION + "\">\n");
} else if (schemaDeclaration == SchemaDeclaration.XSD) {
// XML schema
tei.append("<TEI xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\" \n" +
"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" \n" +
"xsi:schemaLocation=\"http://www.tei-c.org/ns/1.0 " +
SCHEMA_XSD_LOCATION +
"\"\n xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n");
// "\n xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">\n");
} else if (schemaDeclaration == SchemaDeclaration.RNG) {
// standard RelaxNG
tei.append("<?xml-model href=\"" + SCHEMA_RNG_LOCATION +
"\" schematypens=\"http://relaxng.org/ns/structure/1.0\"?>\n");
}
        // for any schema declaration other than XSD, the <TEI> root element is opened here without schema association
if (schemaDeclaration != SchemaDeclaration.XSD) {
tei.append("<TEI xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\">\n");
}
if (doc.getLanguage() != null) {
tei.append("\t<teiHeader xml:lang=\"" + doc.getLanguage() + "\">");
} else {
tei.append("\t<teiHeader>");
}
tei.append("\n\t\t<fileDesc>\n\t\t\t<titleStmt>\n\t\t\t\t<title level=\"a\" type=\"main\"");
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">");
if (biblio == null) {
// if the biblio object is null, we simply create an empty one
biblio = new BiblioItem();
}
if (biblio.getTitle() != null) {
tei.append(TextUtilities.HTMLEncode(biblio.getTitle()));
}
tei.append("</title>\n\t\t\t</titleStmt>\n");
if ((biblio.getPublisher() != null) ||
(biblio.getPublicationDate() != null) ||
(biblio.getNormalizedPublicationDate() != null)) {
tei.append("\t\t\t<publicationStmt>\n");
if (biblio.getPublisher() != null) {
// publisher and date under <publicationStmt> for better TEI conformance
tei.append("\t\t\t\t<publisher>" + TextUtilities.HTMLEncode(biblio.getPublisher()) +
"</publisher>\n");
tei.append("\t\t\t\t<availability status=\"unknown\">");
tei.append("<p>Copyright ");
//if (biblio.getPublicationDate() != null)
tei.append(TextUtilities.HTMLEncode(biblio.getPublisher()) + "</p>\n");
tei.append("\t\t\t\t</availability>\n");
} else {
// a dummy publicationStmt is still necessary according to TEI
tei.append("\t\t\t\t<publisher/>\n");
if (defaultPublicationStatement == null) {
tei.append("\t\t\t\t<availability status=\"unknown\"><licence/></availability>");
} else {
tei.append("\t\t\t\t<availability status=\"unknown\"><p>" +
TextUtilities.HTMLEncode(defaultPublicationStatement) + "</p></availability>");
}
tei.append("\n");
}
if (biblio.getNormalizedPublicationDate() != null) {
Date date = biblio.getNormalizedPublicationDate();
String when = Date.toISOString(date);
if (StringUtils.isNotBlank(when)) {
tei.append("\t\t\t\t<date type=\"published\" when=\"");
tei.append(when).append("\">");
} else {
tei.append("\t\t\t\t<date>");
}
if (biblio.getPublicationDate() != null) {
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate()));
} else {
tei.append(when);
}
tei.append("</date>\n");
} else if ((biblio.getYear() != null) && (biblio.getYear().length() > 0)) {
String when = "";
if (biblio.getYear().length() == 1)
when += "000" + biblio.getYear();
else if (biblio.getYear().length() == 2)
when += "00" + biblio.getYear();
else if (biblio.getYear().length() == 3)
when += "0" + biblio.getYear();
else if (biblio.getYear().length() == 4)
when += biblio.getYear();
if ((biblio.getMonth() != null) && (biblio.getMonth().length() > 0)) {
if (biblio.getMonth().length() == 1)
when += "-0" + biblio.getMonth();
else
when += "-" + biblio.getMonth();
if ((biblio.getDay() != null) && (biblio.getDay().length() > 0)) {
if (biblio.getDay().length() == 1)
when += "-0" + biblio.getDay();
else
when += "-" + biblio.getDay();
}
}
tei.append("\t\t\t\t<date type=\"published\" when=\"");
tei.append(when + "\">");
if (biblio.getPublicationDate() != null) {
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate()));
} else {
tei.append(when);
}
tei.append("</date>\n");
} else if (biblio.getE_Year() != null) {
String when = "";
if (biblio.getE_Year().length() == 1)
when += "000" + biblio.getE_Year();
else if (biblio.getE_Year().length() == 2)
when += "00" + biblio.getE_Year();
else if (biblio.getE_Year().length() == 3)
when += "0" + biblio.getE_Year();
else if (biblio.getE_Year().length() == 4)
when += biblio.getE_Year();
if (biblio.getE_Month() != null) {
if (biblio.getE_Month().length() == 1)
when += "-0" + biblio.getE_Month();
else
when += "-" + biblio.getE_Month();
if (biblio.getE_Day() != null) {
if (biblio.getE_Day().length() == 1)
when += "-0" + biblio.getE_Day();
else
when += "-" + biblio.getE_Day();
}
}
tei.append("\t\t\t\t<date type=\"ePublished\" when=\"");
tei.append(when + "\">");
if (biblio.getPublicationDate() != null) {
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate()));
} else {
tei.append(when);
}
tei.append("</date>\n");
} else if (biblio.getPublicationDate() != null) {
tei.append("\t\t\t\t<date type=\"published\">");
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate())
+ "</date>");
}
tei.append("\t\t\t</publicationStmt>\n");
} else {
tei.append("\t\t\t<publicationStmt>\n");
tei.append("\t\t\t\t<publisher/>\n");
tei.append("\t\t\t\t<availability status=\"unknown\"><licence/></availability>\n");
tei.append("\t\t\t</publicationStmt>\n");
}
tei.append("\t\t\t<sourceDesc>\n\t\t\t\t<biblStruct>\n\t\t\t\t\t<analytic>\n");
// authors + affiliation
//biblio.createAuthorSet();
//biblio.attachEmails();
//biblio.attachAffiliations();
tei.append(biblio.toTEIAuthorBlock(6, config));
// title
String title = biblio.getTitle();
String language = biblio.getLanguage();
String english_title = biblio.getEnglishTitle();
if (title != null) {
tei.append("\t\t\t\t\t\t<title");
/*if ( (bookTitle == null) & (journal == null) )
tei.append(" level=\"m\"");
else */
tei.append(" level=\"a\" type=\"main\"");
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
// here check the language ?
if (english_title == null)
tei.append(">" + TextUtilities.HTMLEncode(title) + "</title>\n");
else
tei.append(" xml:lang=\"" + language + "\">" + TextUtilities.HTMLEncode(title) + "</title>\n");
}
boolean hasEnglishTitle = false;
boolean generateIDs = config.isGenerateTeiIds();
if (english_title != null) {
// here do check the language!
LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
Language resLang = languageUtilities.runLanguageId(english_title);
if (resLang != null) {
String resL = resLang.getLang();
if (resL.equals(Language.EN)) {
hasEnglishTitle = true;
tei.append("\t\t\t\t\t\t<title");
//if ( (bookTitle == null) & (journal == null) )
// tei.append(" level=\"m\"");
//else
tei.append(" level=\"a\"");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(" xml:lang=\"en\">")
.append(TextUtilities.HTMLEncode(english_title)).append("</title>\n");
}
}
            // if it's not in English, it will still be written at the end, as a <note type="title">
}
tei.append("\t\t\t\t\t</analytic>\n");
if ((biblio.getJournal() != null) ||
(biblio.getJournalAbbrev() != null) ||
(biblio.getISSN() != null) ||
(biblio.getISSNe() != null) ||
(biblio.getPublisher() != null) ||
(biblio.getPublicationDate() != null) ||
(biblio.getVolumeBlock() != null) ||
(biblio.getItem() == BiblioItem.Periodical) ||
(biblio.getItem() == BiblioItem.InProceedings) ||
(biblio.getItem() == BiblioItem.Proceedings) ||
(biblio.getItem() == BiblioItem.InBook) ||
(biblio.getItem() == BiblioItem.Book) ||
(biblio.getItem() == BiblioItem.Serie) ||
(biblio.getItem() == BiblioItem.InCollection)) {
tei.append("\t\t\t\t\t<monogr");
tei.append(">\n");
if (biblio.getJournal() != null) {
tei.append("\t\t\t\t\t\t<title level=\"j\" type=\"main\"");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">" + TextUtilities.HTMLEncode(biblio.getJournal()) + "</title>\n");
} else if (biblio.getBookTitle() != null) {
tei.append("\t\t\t\t\t\t<title level=\"m\"");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">" + TextUtilities.HTMLEncode(biblio.getBookTitle()) + "</title>\n");
}
if (biblio.getJournalAbbrev() != null) {
tei.append("\t\t\t\t\t\t<title level=\"j\" type=\"abbrev\">" +
TextUtilities.HTMLEncode(biblio.getJournalAbbrev()) + "</title>\n");
}
if (biblio.getISSN() != null) {
tei.append("\t\t\t\t\t\t<idno type=\"ISSN\">" +
TextUtilities.HTMLEncode(biblio.getISSN()) + "</idno>\n");
}
if (biblio.getISSNe() != null) {
if (!biblio.getISSNe().equals(biblio.getISSN()))
tei.append("\t\t\t\t\t\t<idno type=\"eISSN\">" +
TextUtilities.HTMLEncode(biblio.getISSNe()) + "</idno>\n");
}
// if (biblio.getEvent() != null) {
// // TODO:
// }
// in case the booktitle corresponds to a proceedings, we can try to indicate the meeting title
String meeting = biblio.getBookTitle();
boolean meetLoc = false;
if (biblio.getEvent() != null)
meeting = biblio.getEvent();
else if (meeting != null) {
meeting = meeting.trim();
for (String prefix : BiblioItem.confPrefixes) {
if (meeting.startsWith(prefix)) {
meeting = meeting.replace(prefix, "");
meeting = meeting.trim();
tei.append("\t\t\t\t\t\t<meeting>" + TextUtilities.HTMLEncode(meeting));
if ((biblio.getLocation() != null) || (biblio.getTown() != null) ||
(biblio.getCountry() != null)) {
tei.append(" <address>");
if (biblio.getTown() != null) {
tei.append("<settlement>" + TextUtilities.HTMLEncode(biblio.getTown()) + "</settlement>");
}
if (biblio.getCountry() != null) {
tei.append("<country>" + TextUtilities.HTMLEncode(biblio.getCountry()) + "</country>");
}
if ((biblio.getLocation() != null) && (biblio.getTown() == null) &&
(biblio.getCountry() == null)) {
tei.append("<addrLine>" + TextUtilities.HTMLEncode(biblio.getLocation()) + "</addrLine>");
}
tei.append("</address>\n");
meetLoc = true;
}
tei.append("\t\t\t\t\t\t</meeting>\n");
break;
}
}
}
if (((biblio.getLocation() != null) || (biblio.getTown() != null) ||
(biblio.getCountry() != null))
&& (!meetLoc)) {
tei.append("\t\t\t\t\t\t<meeting>");
tei.append(" <address>");
if (biblio.getTown() != null) {
tei.append(" <settlement>" + TextUtilities.HTMLEncode(biblio.getTown()) + "</settlement>");
}
if (biblio.getCountry() != null) {
tei.append(" <country>" + TextUtilities.HTMLEncode(biblio.getCountry()) + "</country>");
}
if ((biblio.getLocation() != null) && (biblio.getTown() == null)
&& (biblio.getCountry() == null)) {
tei.append("<addrLine>" + TextUtilities.HTMLEncode(biblio.getLocation()) + "</addrLine>");
}
tei.append("</address>\n");
tei.append("\t\t\t\t\t\t</meeting>\n");
}
String pageRange = biblio.getPageRange();
            if ((biblio.getVolumeBlock() != null) || (biblio.getPublicationDate() != null) ||
                    (biblio.getNormalizedPublicationDate() != null) ||
                    (pageRange != null) || (biblio.getIssue() != null) ||
                    (biblio.getBeginPage() != -1) ||
                    (biblio.getPublisher() != null)) {
tei.append("\t\t\t\t\t\t<imprint>\n");
if (biblio.getPublisher() != null) {
tei.append("\t\t\t\t\t\t\t<publisher>" + TextUtilities.HTMLEncode(biblio.getPublisher())
+ "</publisher>\n");
}
if (biblio.getVolumeBlock() != null) {
String vol = biblio.getVolumeBlock();
vol = vol.replace(" ", "").trim();
tei.append("\t\t\t\t\t\t\t<biblScope unit=\"volume\">" +
TextUtilities.HTMLEncode(vol) + "</biblScope>\n");
}
if (biblio.getIssue() != null) {
tei.append("\t\t\t\t\t\t\t<biblScope unit=\"issue\">"
+ TextUtilities.HTMLEncode(biblio.getIssue()) + "</biblScope>\n");
}
if (pageRange != null) {
StringTokenizer st = new StringTokenizer(pageRange, "--");
if (st.countTokens() == 2) {
tei.append("\t\t\t\t\t\t\t<biblScope unit=\"page\"");
tei.append(" from=\"" + TextUtilities.HTMLEncode(st.nextToken()) + "\"");
tei.append(" to=\"" + TextUtilities.HTMLEncode(st.nextToken()) + "\"/>\n");
//tei.append(">" + TextUtilities.HTMLEncode(pageRange) + "</biblScope>\n");
} else {
tei.append("\t\t\t\t\t\t\t<biblScope unit=\"page\">" + TextUtilities.HTMLEncode(pageRange)
+ "</biblScope>\n");
}
} else if (biblio.getBeginPage() != -1) {
if (biblio.getEndPage() != -1) {
tei.append("\t\t\t\t\t\t\t<biblScope unit=\"page\"");
tei.append(" from=\"" + biblio.getBeginPage() + "\"");
tei.append(" to=\"" + biblio.getEndPage() + "\"/>\n");
} else {
tei.append("\t\t\t\t\t\t\t<biblScope unit=\"page\"");
tei.append(" from=\"" + biblio.getBeginPage() + "\"/>\n");
}
}
if (biblio.getNormalizedPublicationDate() != null) {
Date date = biblio.getNormalizedPublicationDate();
String when = Date.toISOString(date);
if (StringUtils.isNotBlank(when)) {
if (biblio.getPublicationDate() != null) {
tei.append("\t\t\t\t\t\t\t<date type=\"published\" when=\"");
tei.append(when + "\">");
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate())
+ "</date>\n");
} else {
tei.append("\t\t\t\t\t\t\t<date type=\"published\" when=\"");
tei.append(when + "\" />\n");
}
} else {
if (biblio.getPublicationDate() != null) {
tei.append("\t\t\t\t\t\t\t<date type=\"published\">");
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate())
+ "</date>\n");
}
}
} else if (biblio.getYear() != null) {
String when = "";
if (biblio.getYear().length() == 1)
when += "000" + biblio.getYear();
else if (biblio.getYear().length() == 2)
when += "00" + biblio.getYear();
else if (biblio.getYear().length() == 3)
when += "0" + biblio.getYear();
else if (biblio.getYear().length() == 4)
when += biblio.getYear();
if (biblio.getMonth() != null) {
if (biblio.getMonth().length() == 1)
when += "-0" + biblio.getMonth();
else
when += "-" + biblio.getMonth();
if (biblio.getDay() != null) {
if (biblio.getDay().length() == 1)
when += "-0" + biblio.getDay();
else
when += "-" + biblio.getDay();
}
}
if (biblio.getPublicationDate() != null) {
tei.append("\t\t\t\t\t\t\t<date type=\"published\" when=\"");
tei.append(when + "\">");
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate())
+ "</date>\n");
} else {
tei.append("\t\t\t\t\t\t\t<date type=\"published\" when=\"");
tei.append(when + "\" />\n");
}
} else if (biblio.getE_Year() != null) {
String when = "";
if (biblio.getE_Year().length() == 1)
when += "000" + biblio.getE_Year();
else if (biblio.getE_Year().length() == 2)
when += "00" + biblio.getE_Year();
else if (biblio.getE_Year().length() == 3)
when += "0" + biblio.getE_Year();
else if (biblio.getE_Year().length() == 4)
when += biblio.getE_Year();
if (biblio.getE_Month() != null) {
if (biblio.getE_Month().length() == 1)
when += "-0" + biblio.getE_Month();
else
when += "-" + biblio.getE_Month();
if (biblio.getE_Day() != null) {
if (biblio.getE_Day().length() == 1)
when += "-0" + biblio.getE_Day();
else
when += "-" + biblio.getE_Day();
}
}
tei.append("\t\t\t\t\t\t\t<date type=\"ePublished\" when=\"");
tei.append(when + "\" />\n");
} else if (biblio.getPublicationDate() != null) {
tei.append("\t\t\t\t\t\t\t<date type=\"published\">");
tei.append(TextUtilities.HTMLEncode(biblio.getPublicationDate())
+ "</date>\n");
}
// Fix for issue #31
tei.append("\t\t\t\t\t\t</imprint>\n");
}
tei.append("\t\t\t\t\t</monogr>\n");
} else {
tei.append("\t\t\t\t\t<monogr>\n");
tei.append("\t\t\t\t\t\t<imprint>\n");
tei.append("\t\t\t\t\t\t\t<date/>\n");
tei.append("\t\t\t\t\t\t</imprint>\n");
tei.append("\t\t\t\t\t</monogr>\n");
}
if (!StringUtils.isEmpty(doc.getMD5())) {
tei.append("\t\t\t\t\t<idno type=\"MD5\">" + doc.getMD5() + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getDOI())) {
String theDOI = TextUtilities.HTMLEncode(biblio.getDOI());
if (theDOI.endsWith(".xml")) {
theDOI = theDOI.replace(".xml", "");
}
tei.append("\t\t\t\t\t<idno type=\"DOI\">" + TextUtilities.HTMLEncode(theDOI) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getArXivId())) {
tei.append("\t\t\t\t\t<idno type=\"arXiv\">" + TextUtilities.HTMLEncode(biblio.getArXivId()) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getPMID())) {
tei.append("\t\t\t\t\t<idno type=\"PMID\">" + TextUtilities.HTMLEncode(biblio.getPMID()) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getPMCID())) {
tei.append("\t\t\t\t\t<idno type=\"PMCID\">" + TextUtilities.HTMLEncode(biblio.getPMCID()) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getPII())) {
tei.append("\t\t\t\t\t<idno type=\"PII\">" + TextUtilities.HTMLEncode(biblio.getPII()) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getArk())) {
tei.append("\t\t\t\t\t<idno type=\"ark\">" + TextUtilities.HTMLEncode(biblio.getArk()) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getIstexId())) {
tei.append("\t\t\t\t\t<idno type=\"istexId\">" + TextUtilities.HTMLEncode(biblio.getIstexId()) + "</idno>\n");
}
if (!StringUtils.isEmpty(biblio.getOAURL())) {
tei.append("\t\t\t\t\t<ptr type=\"open-access\" target=\"").append(TextUtilities.HTMLEncode(biblio.getOAURL())).append("\" />\n");
}
if (biblio.getSubmission() != null) {
tei.append("\t\t\t\t\t<note type=\"submission\">" +
TextUtilities.HTMLEncode(biblio.getSubmission()) + "</note>\n");
}
if (biblio.getDedication() != null) {
tei.append("\t\t\t\t\t<note type=\"dedication\">" + TextUtilities.HTMLEncode(biblio.getDedication())
+ "</note>\n");
}
        if ((english_title != null) && (!hasEnglishTitle)) {
tei.append("\t\t\t\t\t<note type=\"title\"");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">" + TextUtilities.HTMLEncode(english_title) + "</note>\n");
}
if (biblio.getNote() != null) {
tei.append("\t\t\t\t\t<note");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">" + TextUtilities.HTMLEncode(biblio.getNote()) + "</note>\n");
}
tei.append("\t\t\t\t</biblStruct>\n");
if (biblio.getURL() != null) {
tei.append("\t\t\t\t<ref target=\"" + biblio.getURL() + "\" />\n");
}
tei.append("\t\t\t</sourceDesc>\n");
tei.append("\t\t</fileDesc>\n");
// encodingDesc gives info about the producer of the file
tei.append("\t\t<encodingDesc>\n");
tei.append("\t\t\t<appInfo>\n");
TimeZone tz = TimeZone.getTimeZone("UTC");
DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mmZ");
df.setTimeZone(tz);
String dateISOString = df.format(new java.util.Date());
tei.append("\t\t\t\t<application version=\"" + GrobidProperties.getVersion() +
"\" ident=\"GROBID\" when=\"" + dateISOString + "\">\n");
tei.append("\t\t\t\t\t<desc>GROBID - A machine learning software for extracting information from scholarly documents</desc>\n");
tei.append("\t\t\t\t\t<ref target=\"https://github.com/kermitt2/grobid\"/>\n");
tei.append("\t\t\t\t</application>\n");
tei.append("\t\t\t</appInfo>\n");
tei.append("\t\t</encodingDesc>\n");
boolean textClassWritten = false;
tei.append("\t\t<profileDesc>\n");
        // keywords here! Normally the keyword field has been preprocessed:
        // if the segmentation into individual keywords worked, the first conditional
        // statement is used - otherwise the whole keyword field is output as a single element
if ((biblio.getKeywords() != null) && (biblio.getKeywords().size() > 0)) {
textClassWritten = true;
tei.append("\t\t\t<textClass>\n");
tei.append("\t\t\t\t<keywords>\n");
List<Keyword> keywords = biblio.getKeywords();
int pos = 0;
for (Keyword keyw : keywords) {
if ((keyw.getKeyword() == null) || (keyw.getKeyword().length() == 0))
continue;
String res = keyw.getKeyword().trim();
if (res.startsWith(":")) {
res = res.substring(1);
}
if (pos == (keywords.size() - 1)) {
if (res.endsWith(".")) {
res = res.substring(0, res.length() - 1);
}
}
tei.append("\t\t\t\t\t<term");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">" + TextUtilities.HTMLEncode(res) + "</term>\n");
pos++;
}
tei.append("\t\t\t\t</keywords>\n");
} else if (biblio.getKeyword() != null) {
String keywords = biblio.getKeyword();
textClassWritten = true;
tei.append("\t\t\t<textClass>\n");
tei.append("\t\t\t\t<keywords");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">");
tei.append(TextUtilities.HTMLEncode(biblio.getKeyword())).append("</keywords>\n");
}
if (biblio.getCategories() != null) {
if (!textClassWritten) {
textClassWritten = true;
tei.append("\t\t\t<textClass>\n");
}
List<String> categories = biblio.getCategories();
tei.append("\t\t\t\t<keywords>");
for (String category : categories) {
tei.append("\t\t\t\t\t<term");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">" + TextUtilities.HTMLEncode(category.trim()) + "</term>\n");
}
tei.append("\t\t\t\t</keywords>\n");
}
if (textClassWritten)
tei.append("\t\t\t</textClass>\n");
String abstractText = biblio.getAbstract();
Language resLang = null;
if (abstractText != null) {
LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
resLang = languageUtilities.runLanguageId(abstractText);
}
if (resLang != null) {
String resL = resLang.getLang();
if (!resL.equals(doc.getLanguage())) {
tei.append("\t\t\t<abstract xml:lang=\"").append(resL).append("\">\n");
} else {
tei.append("\t\t\t<abstract>\n");
}
} else if ((abstractText == null) || (abstractText.length() == 0)) {
tei.append("\t\t\t<abstract/>\n");
} else {
tei.append("\t\t\t<abstract>\n");
}
if ((abstractText != null) && (abstractText.length() != 0)) {
if ( (biblio.getLabeledAbstract() != null) && (biblio.getLabeledAbstract().length() > 0) ) {
// we have available structured abstract, which can be serialized as a full text "piece"
StringBuilder buffer = new StringBuilder();
try {
buffer = toTEITextPiece(buffer,
biblio.getLabeledAbstract(),
biblio,
bds,
false,
new LayoutTokenization(biblio.getLayoutTokens(TaggingLabels.HEADER_ABSTRACT)),
null,
null,
null,
null,
markerTypes,
doc,
config); // no figure, no table, no equation
} catch(Exception e) {
throw new GrobidException("An exception occurred while serializing TEI.", e);
}
tei.append(buffer.toString());
} else {
tei.append("\t\t\t\t<p");
if (generateIDs) {
String divID = KeyGen.getKey().substring(0, 7);
tei.append(" xml:id=\"_" + divID + "\"");
}
tei.append(">").append(TextUtilities.HTMLEncode(abstractText)).append("</p>");
}
tei.append("\n\t\t\t</abstract>\n");
}
tei.append("\t\t</profileDesc>\n");
        if ((biblio.getA_Year() != null) ||
                (biblio.getS_Year() != null) ||
                (biblio.getSubmissionDate() != null) ||
                (biblio.getNormalizedSubmissionDate() != null)
        ) {
tei.append("\t\t<revisionDesc>\n");
}
// submission and other review dates here !
if (biblio.getA_Year() != null) {
String when = biblio.getA_Year();
if (biblio.getA_Month() != null) {
when += "-" + biblio.getA_Month();
if (biblio.getA_Day() != null) {
when += "-" + biblio.getA_Day();
}
}
tei.append("\t\t\t\t<date type=\"accepted\" when=\"");
tei.append(when).append("\" />\n");
}
if (biblio.getNormalizedSubmissionDate() != null) {
Date date = biblio.getNormalizedSubmissionDate();
int year = date.getYear();
int month = date.getMonth();
int day = date.getDay();
String when = "" + year;
if (month != -1) {
when += "-" + month;
if (day != -1) {
when += "-" + day;
}
}
tei.append("\t\t\t\t<date type=\"submission\" when=\"");
tei.append(when).append("\" />\n");
} else if (biblio.getS_Year() != null) {
String when = biblio.getS_Year();
if (biblio.getS_Month() != null) {
when += "-" + biblio.getS_Month();
if (biblio.getS_Day() != null) {
when += "-" + biblio.getS_Day();
}
}
tei.append("\t\t\t\t<date type=\"submission\" when=\"");
tei.append(when).append("\" />\n");
} else if (biblio.getSubmissionDate() != null) {
tei.append("\t\t\t<date type=\"submission\">")
.append(TextUtilities.HTMLEncode(biblio.getSubmissionDate())).append("</date>\n");
/*tei.append("\t\t\t<change when=\"");
tei.append(TextUtilities.HTMLEncode(biblio.getSubmissionDate()));
tei.append("\">Submitted</change>\n");
*/
}
        // getNormalizedSubmissionDate() is included so that <revisionDesc> is closed
        // whenever it was opened above
        if ((biblio.getA_Year() != null) ||
                (biblio.getS_Year() != null) ||
                (biblio.getSubmissionDate() != null) ||
                (biblio.getNormalizedSubmissionDate() != null)
        ) {
tei.append("\t\t</revisionDesc>\n");
}
tei.append("\t</teiHeader>\n");
        // output page dimensions, in case coordinates are also provided for some structures
try {
tei = toTEIPages(tei, doc, config);
} catch(Exception e) {
LOGGER.warn("Problem when serializing page size", e);
}
if (doc.getLanguage() != null) {
tei.append("\t<text xml:lang=\"").append(doc.getLanguage()).append("\">\n");
} else {
tei.append("\t<text>\n");
}
return tei;
}
/**
* TEI formatting of the body where only basic logical document structures are present.
* This TEI format avoids most of the risks of ill-formed TEI due to structure recognition
* errors and frequent PDF noises.
     * It is suited to fully automatic processing and simple exploitation of the document
     * structures, such as structured indexing and search.
*/
public StringBuilder toTEIBody(StringBuilder buffer,
String result,
BiblioItem biblio,
List<BibDataSet> bds,
LayoutTokenization layoutTokenization,
List<Figure> figures,
List<Table> tables,
List<Equation> equations,
List<MarkerType> markerTypes,
Document doc,
GrobidAnalysisConfig config) throws Exception {
if ((result == null) || (layoutTokenization == null) || (layoutTokenization.getTokenization() == null)) {
buffer.append("\t\t<body/>\n");
return buffer;
}
buffer.append("\t\t<body>\n");
List<Note> notes = getTeiNotes(doc);
buffer = toTEITextPiece(buffer, result, biblio, bds, true,
layoutTokenization, figures, tables, equations, notes, markerTypes, doc, config);
// notes are still in the body
buffer = toTEINote(buffer, notes, doc, markerTypes, config);
buffer.append("\t\t</body>\n");
return buffer;
}
protected List<Note> getTeiNotes(Document doc) {
// There are two types of structured notes currently supported, foot notes and margin notes.
// We consider that head notes are always only presentation matter and are never references
// in a text body.
SortedSet<DocumentPiece> documentNoteParts = doc.getDocumentPart(SegmentationLabels.FOOTNOTE);
List<Note> notes = getTeiNotes(doc, documentNoteParts, Note.NoteType.FOOT);
documentNoteParts = doc.getDocumentPart(SegmentationLabels.MARGINNOTE);
notes.addAll(getTeiNotes(doc, documentNoteParts, Note.NoteType.MARGIN));
return notes;
}
protected List<Note> getTeiNotes(Document doc, SortedSet<DocumentPiece> documentNoteParts, Note.NoteType noteType) {
List<Note> notes = new ArrayList<>();
if (documentNoteParts == null) {
return notes;
}
List<String> allNotes = new ArrayList<>();
for (DocumentPiece docPiece : documentNoteParts) {
List<LayoutToken> noteTokens = doc.getDocumentPieceTokenization(docPiece);
if (CollectionUtils.isEmpty(noteTokens)) {
continue;
}
String footText = doc.getDocumentPieceText(docPiece);
footText = TextUtilities.dehyphenize(footText);
footText = footText.replace("\n", " ");
//footText = footText.replace(" ", " ").trim();
if (footText.length() < 6)
continue;
if (allNotes.contains(footText)) {
// basically we have here the "recurrent" headnote/footnote for each page,
// no need to add them several times (in the future we could even use them
// differently combined with the header)
continue;
}
allNotes.add(footText);
List<Note> localNotes = makeNotes(noteTokens, footText, noteType, notes.size());
if (localNotes != null)
notes.addAll(localNotes);
}
return notes;
}
protected List<Note> makeNotes(List<LayoutToken> noteTokens, String footText, Note.NoteType noteType, int startIndex) {
if (footText == null)
return null;
List<Note> notes = new ArrayList<>();
Matcher ma = startNum.matcher(footText);
int currentNumber = -1;
// this string represents the possible characters after a note number (usually nothing or a dot)
String sugarText = null;
if (ma.find()) {
String groupStr = ma.group(1);
footText = ma.group(2);
try {
if (groupStr.indexOf(".") != -1)
sugarText = ".";
String groupStrNormalized = groupStr.replace(".", "");
groupStrNormalized = groupStrNormalized.trim();
currentNumber = Integer.parseInt(groupStrNormalized);
// remove this number from the layout tokens of the note
if (currentNumber != -1) {
String toConsume = groupStr;
int start = 0;
for(LayoutToken token : noteTokens) {
if (StringUtils.isEmpty(token.getText())) {
continue;
}
if (toConsume.startsWith(token.getText())) {
start++;
toConsume = toConsume.substring(token.getText().length());
} else
break;
if (toConsume.length() == 0)
break;
}
if (start != 0)
noteTokens = noteTokens.subList(start, noteTokens.size());
}
} catch (NumberFormatException e) {
currentNumber = -1;
}
}
Note localNote = null;
if (currentNumber == -1)
localNote = new Note(null, noteTokens, footText, noteType);
else
localNote = new Note(""+currentNumber, noteTokens, footText, noteType);
notes.add(localNote);
// add possible subsequent notes concatenated in the same note sequence (this is a common error,
// which is addressed here by heuristics, it may not be necessary in the future with a better
// segmentation model using more foot notes training data)
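        // Illustrative example (hypothetical footnote text): "1. See Smith 2000. 2. Data available."
        // is split into note n=1 ("See Smith 2000.") and note n=2 ("Data available."), because the
        // label " 2." (the next number followed by the same sugar character ".") occurs in the text.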
if (currentNumber != -1) {
String nextLabel = " " + (currentNumber+1);
            // sugar characters after the note number must be consistent with the previous ones to avoid false matches
if (sugarText != null)
nextLabel += sugarText;
int ind = footText.indexOf(nextLabel);
if (ind != -1) {
// optionally we could restrict here to superscript numbers
// review local note
localNote.setText(footText.substring(0, ind));
int pos = 0;
List<LayoutToken> previousNoteTokens = new ArrayList<>();
List<LayoutToken> nextNoteTokens = new ArrayList<>();
for(LayoutToken localToken : noteTokens) {
if (localToken.getText() == null || localToken.getText().length() == 0)
continue;
pos += localToken.getText().length();
if (pos <= ind+1) {
previousNoteTokens.add(localToken);
} else {
nextNoteTokens.add(localToken);
}
}
localNote.setTokens(previousNoteTokens);
String nextFootText = footText.substring(ind+1, footText.length());
// process the concatenated note
if (nextNoteTokens.size() >0 && nextFootText.length()>0) {
List<Note> nextNotes = makeNotes(nextNoteTokens, nextFootText, noteType, notes.size());
if (nextNotes != null && nextNotes.size()>0)
notes.addAll(nextNotes);
}
}
}
for(int noteIndex=0; noteIndex<notes.size(); noteIndex++) {
Note oneNote = notes.get(noteIndex);
oneNote.setIdentifier(oneNote.getNoteTypeName() + "_" + (noteIndex+startIndex));
}
return notes;
}
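// Illustrative sketch (hypothetical input, for documentation only): a footnote area
// reading "1. First remark. 2. Second remark." is first parsed as note "1" with
// sugar character "."; the label " 2." is then found inside its text, so makeNotes
// recursively splits off a second Note labeled "2", yielding identifiers such as
// "foot_0" and "foot_1".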
private StringBuilder toTEINote(StringBuilder tei,
List<Note> notes,
Document doc,
List<MarkerType> markerTypes,
GrobidAnalysisConfig config) throws Exception {
// pattern is <note n="1" place="foot" xml:id="foot_1">
// or
// pattern is <note n="1" place="margin" xml:id="margin_1">
// if no note label is found, there is no @n attribute, but we still generate a random xml:id (not currently used)
for (Note note : notes) {
Element desc = XmlBuilderUtils.teiElement("note");
desc.addAttribute(new Attribute("place", note.getNoteTypeName()));
if (note.getLabel() != null) {
desc.addAttribute(new Attribute("n", note.getLabel()));
}
addXmlId(desc, note.getIdentifier());
// this is a paragraph element for storing text content of the note, which is
// better practice than just putting the text under the <note> element
Element pNote = XmlBuilderUtils.teiElement("p");
if (config.isGenerateTeiIds()) {
String pID = KeyGen.getKey().substring(0, 7);
addXmlId(pNote, "_" + pID);
}
// for labelling bibliographical references in notes
List<LayoutToken> noteTokens = note.getTokens();
String coords = null;
if (config.isGenerateTeiCoordinates("note")) {
coords = LayoutTokensUtil.getCoordsString(noteTokens);
}
if (coords != null) {
desc.addAttribute(new Attribute("coords", coords));
}
org.apache.commons.lang3.tuple.Pair<String, List<LayoutToken>> noteProcess =
fullTextParser.processShort(noteTokens, doc);
String labeledNote = noteProcess.getLeft();
List<LayoutToken> noteLayoutTokens = noteProcess.getRight();
if ( (labeledNote != null) && (labeledNote.length() > 0) ) {
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, labeledNote, noteLayoutTokens);
List<TaggingTokenCluster> clusters = clusteror.cluster();
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
String clusterContent = LayoutTokensUtil.normalizeDehyphenizeText(cluster.concatTokens());
if (clusterLabel.equals(TaggingLabels.CITATION_MARKER)) {
try {
List<Node> refNodes = this.markReferencesTEILuceneBased(
cluster.concatTokens(),
doc.getReferenceMarkerMatcher(),
config.isGenerateTeiCoordinates("ref"),
false);
if (refNodes != null) {
for (Node n : refNodes) {
pNote.appendChild(n);
}
}
} catch(Exception e) {
LOGGER.warn("Problem when serializing TEI fragment for note", e);
}
} else {
pNote.appendChild(textNode(clusterContent));
}
}
} else {
// fall back to the layout tokens when no note text was set, then normalize
String noteText = note.getText();
if (noteText == null) {
noteText = LayoutTokensUtil.toText(note.getTokens());
}
noteText = noteText.replace("\u00A0", " ").trim();
pNote.appendChild(LayoutTokensUtil.normalizeText(noteText));
}
if (config.isWithSentenceSegmentation()) {
segmentIntoSentences(pNote, noteTokens, config, doc.getLanguage());
}
desc.appendChild(pNote);
tei.append("\t\t\t");
tei.append(desc.toXML());
tei.append("\n");
}
return tei;
}
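// Example of the TEI fragment produced by toTEINote for a footnote (values are
// illustrative; identifiers, labels and coordinates depend on the actual document):
// <note place="foot" n="1" xml:id="foot_0">
//     <p>See the extended discussion in <ref type="bibr" target="#b12">[13]</ref>.</p>
// </note>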
public StringBuilder processTEIDivSection(String xmlType,
String indentation,
String text,
List<LayoutToken> tokens,
List<BibDataSet> biblioData,
GrobidAnalysisConfig config) throws Exception {
StringBuilder outputTei = new StringBuilder();
if ((StringUtils.isBlank(text)) || (tokens == null)) {
return outputTei;
}
outputTei.append("\n").append(indentation).append("<div type=\"").append(xmlType).append("\">\n");
StringBuilder contentBuffer = new StringBuilder();
contentBuffer = toTEITextPiece(contentBuffer, text, null, biblioData, false,
new LayoutTokenization(tokens), null, null, null,
null, null, doc, config);
String result = contentBuffer.toString();
String[] resultAsArray = result.split("\n");
/*buffer2 = toTEITextPiece(buffer2, reseAcknowledgement, null, bds, false,
new LayoutTokenization(tokenizationsAcknowledgement), null, null, null,
null, null, doc, config);
String acknowResult = buffer2.toString();
String[] acknowResultLines = acknowResult.split("\n");*/
boolean extraDiv = false;
if (resultAsArray.length != 0) {
for (int i = 0; i < resultAsArray.length; i++) {
if (resultAsArray[i].trim().length() == 0)
continue;
outputTei.append(TextUtilities.dehyphenize(resultAsArray[i])).append("\n");
}
}
outputTei.append(indentation).append("</div>\n\n");
return outputTei;
}
public StringBuilder toTEIAnnex(StringBuilder buffer,
String result,
BiblioItem biblio,
List<BibDataSet> bds,
List<LayoutToken> tokenizations,
List<MarkerType> markerTypes,
Document doc,
GrobidAnalysisConfig config) throws Exception {
if ((result == null) || (tokenizations == null)) {
return buffer;
}
buffer.append("\t\t\t<div type=\"annex\">\n");
buffer = toTEITextPiece(buffer, result, biblio, bds, true,
new LayoutTokenization(tokenizations), null, null, null, null,
markerTypes, doc, config);
buffer.append("\t\t\t</div>\n");
return buffer;
}
public StringBuilder toTEITextPiece(StringBuilder buffer,
String result,
BiblioItem biblio,
List<BibDataSet> bds,
boolean keepUnsolvedCallout,
LayoutTokenization layoutTokenization,
List<Figure> figures,
List<Table> tables,
List<Equation> equations,
List<Note> notes,
List<MarkerType> markerTypes,
Document doc,
GrobidAnalysisConfig config) throws Exception {
TaggingLabel lastClusterLabel = null;
int startPosition = buffer.length();
//boolean figureBlock = false; // indicate that a figure or table sequence was met
// used for reconnecting a paragraph that was cut by a figure/table
List<LayoutToken> tokenizations = layoutTokenization.getTokenization();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, result, tokenizations);
String tokenLabel = null;
List<TaggingTokenCluster> clusters = clusteror.cluster();
List<Element> divResults = new ArrayList<>();
Element curDiv = teiElement("div");
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
addXmlId(curDiv, "_" + divID);
}
divResults.add(curDiv);
Element curParagraph = null;
List<LayoutToken> curParagraphTokens = null;
Element curList = null;
int equationIndex = 0; // current equation index position
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
if (clusterLabel.equals(TaggingLabels.SECTION)) {
String clusterContent = LayoutTokensUtil.normalizeDehyphenizeText(cluster.concatTokens());
curDiv = teiElement("div");
Element head = teiElement("head");
// section numbers
org.grobid.core.utilities.Pair<String, String> numb = getSectionNumber(clusterContent);
if (numb != null) {
head.addAttribute(new Attribute("n", numb.b));
head.appendChild(numb.a);
} else {
head.appendChild(clusterContent);
}
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
addXmlId(head, "_" + divID);
}
if (config.isGenerateTeiCoordinates("head") ) {
String coords = LayoutTokensUtil.getCoordsString(cluster.concatTokens());
if (coords != null) {
head.addAttribute(new Attribute("coords", coords));
}
}
curDiv.appendChild(head);
divResults.add(curDiv);
} else if (clusterLabel.equals(TaggingLabels.EQUATION) ||
clusterLabel.equals(TaggingLabels.EQUATION_LABEL)) {
// get starting position of the cluster
int start = -1;
if ( (cluster.concatTokens() != null) && (cluster.concatTokens().size() > 0) ) {
start = cluster.concatTokens().get(0).getOffset();
}
// get the corresponding equation
if (start != -1) {
Equation theEquation = null;
if (equations != null) {
for(int i=0; i<equations.size(); i++) {
if (i < equationIndex)
continue;
Equation equation = equations.get(i);
if (equation.getStart() == start) {
theEquation = equation;
equationIndex = i;
break;
}
}
if (theEquation != null) {
Element element = theEquation.toTEIElement(config);
if (element != null)
curDiv.appendChild(element);
}
}
}
} else if (clusterLabel.equals(TaggingLabels.ITEM)) {
String clusterContent = LayoutTokensUtil.normalizeText(cluster.concatTokens());
//curDiv.appendChild(teiElement("item", clusterContent));
Element itemNode = teiElement("item", clusterContent);
if (!MARKER_LABELS.contains(lastClusterLabel) && (lastClusterLabel != TaggingLabels.ITEM)) {
curList = teiElement("list");
curDiv.appendChild(curList);
}
if (curList != null) {
curList.appendChild(itemNode);
}
} else if (clusterLabel.equals(TaggingLabels.OTHER)) {
String clusterContent = LayoutTokensUtil.normalizeDehyphenizeText(cluster.concatTokens());
Element note = teiElement("note", clusterContent);
note.addAttribute(new Attribute("type", "other"));
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
addXmlId(note, "_" + divID);
}
curDiv.appendChild(note);
} else if (clusterLabel.equals(TaggingLabels.PARAGRAPH)) {
List<LayoutToken> clusterTokens = cluster.concatTokens();
int clusterPage = Iterables.getLast(clusterTokens).getPage();
List<Note> notesSamePage = null;
if (notes != null && notes.size() > 0) {
notesSamePage = notes.stream()
.filter(f -> !f.isIgnored() && f.getPageNumber() == clusterPage)
.collect(Collectors.toList());
}
if (notesSamePage == null) {
String clusterContent = LayoutTokensUtil.normalizeDehyphenizeText(clusterTokens);
if (isNewParagraph(lastClusterLabel, curParagraph)) {
if (curParagraph != null && config.isWithSentenceSegmentation()) {
segmentIntoSentences(curParagraph, curParagraphTokens, config, doc.getLanguage());
}
curParagraph = teiElement("p");
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
addXmlId(curParagraph, "_" + divID);
}
curDiv.appendChild(curParagraph);
curParagraphTokens = new ArrayList<>();
}
curParagraph.appendChild(clusterContent);
curParagraphTokens.addAll(clusterTokens);
} else {
if (isNewParagraph(lastClusterLabel, curParagraph)) {
if (curParagraph != null && config.isWithSentenceSegmentation()) {
segmentIntoSentences(curParagraph, curParagraphTokens, config, doc.getLanguage());
}
curParagraph = teiElement("p");
if (config.isGenerateTeiIds()) {
String divID = KeyGen.getKey().substring(0, 7);
addXmlId(curParagraph, "_" + divID);
}
curDiv.appendChild(curParagraph);
curParagraphTokens = new ArrayList<>();
}
// we need to cover several footnote callouts in the same paragraph segment;
// we also can't assume that the callouts appear in the text in the same order as
// the notes are defined in the note areas - this might not always be the case in
// ill-formed documents
// map the matched note labels to their corresponding note objects
Map<String, Note> labels2Notes = new TreeMap<>();
// map a note label (string) to a valid matching position in the sequence of Layout Tokens
// of the paragraph segment
List<Pair<String,OffsetPosition>> matchedLabelPosition = new ArrayList<>();
for (Note note : notesSamePage) {
Optional<LayoutToken> matching = clusterTokens
.stream()
.filter(t -> t.getText().equals(note.getLabel()) && t.isSuperscript())
.findFirst();
if (matching.isPresent()) {
int idx = clusterTokens.indexOf(matching.get());
note.setIgnored(true);
OffsetPosition matchingPosition = new OffsetPosition();
matchingPosition.start = idx;
matchingPosition.end = idx+1; // to be reviewed: the callout might span more than one layout token
matchedLabelPosition.add(Pair.of(note.getLabel(), matchingPosition));
labels2Notes.put(note.getLabel(), note);
}
}
// sort the matches by position
Collections.sort(matchedLabelPosition, (m1, m2) -> {
return m1.getRight().start - m2.getRight().start;
}
);
// position in the layout token index
int pos = 0;
// build the paragraph segment, match by match
for(Pair<String,OffsetPosition> matching : matchedLabelPosition) {
Note note = labels2Notes.get(matching.getLeft());
OffsetPosition matchingPosition = matching.getRight();
List<LayoutToken> before = clusterTokens.subList(pos, matchingPosition.start);
String clusterContentBefore = LayoutTokensUtil.normalizeDehyphenizeText(before);
if (CollectionUtils.isNotEmpty(before) && before.get(0).getText().equals(" ")) {
curParagraph.appendChild(new Text(" "));
}
curParagraph.appendChild(clusterContentBefore);
curParagraphTokens.addAll(before);
List<LayoutToken> calloutTokens = clusterTokens.subList(matchingPosition.start, matchingPosition.end);
Element ref = teiElement("ref");
ref.addAttribute(new Attribute("type", "foot"));
if (config.isGenerateTeiCoordinates("ref") ) {
String coords = LayoutTokensUtil.getCoordsString(calloutTokens);
if (coords != null) {
ref.addAttribute(new Attribute("coords", coords));
}
}
ref.appendChild(matching.getLeft());
ref.addAttribute(new Attribute("target", "#" + note.getIdentifier()));
curParagraph.appendChild(ref);
pos = matchingPosition.end;
}
// add last chunk of paragraph stuff (or whole paragraph if no note callout matching)
List<LayoutToken> remaining = clusterTokens.subList(pos, clusterTokens.size());
String remainingClusterContent = LayoutTokensUtil.normalizeDehyphenizeText(remaining);
if (CollectionUtils.isNotEmpty(remaining) && remaining.get(0).getText().equals(" ")) {
curParagraph.appendChild(new Text(" "));
}
curParagraph.appendChild(remainingClusterContent);
curParagraphTokens.addAll(remaining);
}
} else if (MARKER_LABELS.contains(clusterLabel)) {
List<LayoutToken> refTokens = cluster.concatTokens();
refTokens = LayoutTokensUtil.dehyphenize(refTokens);
String chunkRefString = LayoutTokensUtil.toText(refTokens);
Element parent = curParagraph != null ? curParagraph : curDiv;
parent.appendChild(new Text(" "));
List<Node> refNodes;
MarkerType citationMarkerType = null;
if (markerTypes != null && markerTypes.size()>0) {
citationMarkerType = markerTypes.get(0);
}
if (clusterLabel.equals(TaggingLabels.CITATION_MARKER)) {
refNodes = markReferencesTEILuceneBased(refTokens,
doc.getReferenceMarkerMatcher(),
config.isGenerateTeiCoordinates("ref"),
keepUnsolvedCallout, citationMarkerType);
} else if (clusterLabel.equals(TaggingLabels.FIGURE_MARKER)) {
refNodes = markReferencesFigureTEI(chunkRefString, refTokens, figures,
config.isGenerateTeiCoordinates("ref"));
} else if (clusterLabel.equals(TaggingLabels.TABLE_MARKER)) {
refNodes = markReferencesTableTEI(chunkRefString, refTokens, tables,
config.isGenerateTeiCoordinates("ref"));
} else if (clusterLabel.equals(TaggingLabels.EQUATION_MARKER)) {
refNodes = markReferencesEquationTEI(chunkRefString, refTokens, equations,
config.isGenerateTeiCoordinates("ref"));
} else {
throw new IllegalStateException("Unsupported marker type: " + clusterLabel);
}
if (refNodes != null) {
boolean footNoteCallout = false;
if (refNodes.size() == 1 && (refNodes.get(0) instanceof Text)) {
// a superscript reference marker filtered out (based on the defined citationMarkerType) might
// be a footnote callout - so in this particular case we need to try to match existing notes,
// similarly as within paragraphs
if (citationMarkerType == null || citationMarkerType != MarkerType.SUPERSCRIPT_NUMBER) {
// is refTokens superscript?
if (refTokens.size()>0 && refTokens.get(0).isSuperscript()) {
// check note callout matching
int clusterPage = Iterables.getLast(refTokens).getPage();
List<Note> notesSamePage = null;
if (notes != null && notes.size() > 0) {
notesSamePage = notes.stream()
.filter(f -> !f.isIgnored() && f.getPageNumber() == clusterPage)
.collect(Collectors.toList());
}
if (notesSamePage != null) {
for (Note note : notesSamePage) {
if (chunkRefString.trim().equals(note.getLabel())) {
footNoteCallout = true;
note.setIgnored(true);
Element ref = teiElement("ref");
ref.addAttribute(new Attribute("type", "foot"));
if (config.isGenerateTeiCoordinates("ref") ) {
String coords = LayoutTokensUtil.getCoordsString(refTokens);
if (coords != null) {
ref.addAttribute(new Attribute("coords", coords));
}
}
ref.appendChild(chunkRefString.trim());
ref.addAttribute(new Attribute("target", "#" + note.getIdentifier()));
parent.appendChild(ref);
if (chunkRefString.endsWith(" ")) {
parent.appendChild(new Text(" "));
}
}
}
}
}
}
}
if (!footNoteCallout) {
for (Node n : refNodes) {
parent.appendChild(n);
}
}
}
if (curParagraph != null)
curParagraphTokens.addAll(cluster.concatTokens());
} else if (clusterLabel.equals(TaggingLabels.FIGURE) || clusterLabel.equals(TaggingLabels.TABLE)) {
//figureBlock = true;
if (curParagraph != null)
curParagraph.appendChild(new Text(" "));
}
lastClusterLabel = cluster.getTaggingLabel();
}
// in case we segment paragraph into sentences, we still need to do it for the last paragraph
if (curParagraph != null && config.isWithSentenceSegmentation()) {
segmentIntoSentences(curParagraph, curParagraphTokens, config, doc.getLanguage());
}
// remove possibly empty div in the div list
if (divResults.size() != 0) {
for(int i = divResults.size()-1; i>=0; i--) {
Element theDiv = divResults.get(i);
if ( (theDiv.getChildElements() == null) || (theDiv.getChildElements().size() == 0) ) {
divResults.remove(i);
}
}
}
if (divResults.size() != 0)
buffer.append(XmlBuilderUtils.toXml(divResults));
else
buffer.append(XmlBuilderUtils.toXml(curDiv));
// we apply some overall cleaning and simplification
buffer = TextUtilities.replaceAll(buffer, "</head><head",
"</head>\n\t\t\t</div>\n\t\t\t<div>\n\t\t\t\t<head");
buffer = TextUtilities.replaceAll(buffer, "</p>\t\t\t\t<p>", " ");
//TODO: work on reconnection
// we evaluate the need to reconnect paragraphs cut by a figure or a table
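// mechanics (descriptive note): the temporary tag </p0> marks a paragraph that may have
// been cut; when the following <p> looks like a continuation, it is renamed <q> so the
// replacements below can merge the two chunks, and any remaining </p0> and <q> markers
// are then mapped back to </p> and <p>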
int indP1 = buffer.indexOf("</p0>", startPosition - 1);
while (indP1 != -1) {
int indP2 = buffer.indexOf("<p>", indP1 + 1);
if ((indP2 != -1) && (buffer.length() > indP2 + 5)) {
if (Character.isUpperCase(buffer.charAt(indP2 + 4)) &&
Character.isLowerCase(buffer.charAt(indP2 + 5))) {
// a marker for reconnecting the two paragraphs
buffer.setCharAt(indP2 + 1, 'q');
}
}
indP1 = buffer.indexOf("</p0>", indP1 + 1);
}
buffer = TextUtilities.replaceAll(buffer, "</p0>(\\n\\t)*<q>", " ");
buffer = TextUtilities.replaceAll(buffer, "</p0>", "</p>");
buffer = TextUtilities.replaceAll(buffer, "<q>", "<p>");
if (figures != null) {
for (Figure figure : figures) {
String figSeg = figure.toTEI(config, doc, this, markerTypes);
if (figSeg != null) {
buffer.append(figSeg).append("\n");
}
}
}
if (tables != null) {
for (Table table : tables) {
String tabSeg = table.toTEI(config, doc, this, markerTypes);
if (tabSeg != null) {
buffer.append(tabSeg).append("\n");
}
}
}
return buffer;
}
public static boolean isNewParagraph(TaggingLabel lastClusterLabel, Element curParagraph) {
return (!MARKER_LABELS.contains(lastClusterLabel) && lastClusterLabel != TaggingLabels.FIGURE
&& lastClusterLabel != TaggingLabels.TABLE) || curParagraph == null;
}
public void segmentIntoSentences(Element curParagraph, List<LayoutToken> curParagraphTokens, GrobidAnalysisConfig config, String lang) {
// in order to avoid having a sentence boundary in the middle of a ref element
// (which is frequent given the abbreviation in the reference expression, e.g. Fig.)
// we only consider for sentence segmentation texts under <p> and skip the text under <ref>.
if (curParagraph == null)
return;
// in xom, the following gives all the text under the element, for the whole subtree
String text = curParagraph.getValue();
if (text == null || text.length() == 0)
return;
// identify ref nodes, ref spans and ref positions
Map<Integer,Node> mapRefNodes = new HashMap<>();
List<Integer> refPositions = new ArrayList<>();
List<OffsetPosition> forbiddenPositions = new ArrayList<>();
int pos = 0;
for(int i=0; i<curParagraph.getChildCount(); i++) {
Node theNode = curParagraph.getChild(i);
if (theNode instanceof Text) {
String chunk = theNode.getValue();
pos += chunk.length();
} else if (theNode instanceof Element) {
// for readability in another conditional
if (((Element) theNode).getLocalName().equals("ref")) {
// map character offset of the node
mapRefNodes.put(Integer.valueOf(pos), theNode);
refPositions.add(Integer.valueOf(pos));
String chunk = theNode.getValue();
forbiddenPositions.add(new OffsetPosition(pos, pos+chunk.length()));
pos += chunk.length();
}
}
}
List<OffsetPosition> theSentences =
SentenceUtilities.getInstance().runSentenceDetection(text, forbiddenPositions, curParagraphTokens, new Language(lang));
/*if (theSentences.size() == 0) {
// this should normally not happen, but it happens (depending on sentence splitter, usually the text
// is just a punctuation)
// in this case we consider the current text as a unique sentence as fall back
theSentences.add(new OffsetPosition(0, text.length()));
}*/
// segment the list of layout tokens according to the sentence segmentation if the coordinates are needed
List<List<LayoutToken>> segmentedParagraphTokens = new ArrayList<>();
List<LayoutToken> currentSentenceTokens = new ArrayList<>();
pos = 0;
if (config.isGenerateTeiCoordinates("s")) {
int currentSentenceIndex = 0;
String sentenceChunk = text.substring(theSentences.get(currentSentenceIndex).start, theSentences.get(currentSentenceIndex).end);
for(int i=0; i<curParagraphTokens.size(); i++) {
LayoutToken token = curParagraphTokens.get(i);
if (token.getText() == null || token.getText().length() == 0)
continue;
int newPos = sentenceChunk.indexOf(token.getText(), pos);
if ((newPos != -1) || SentenceUtilities.toSkipToken(token.getText())) {
// just move on
currentSentenceTokens.add(token);
if (newPos != -1 && !SentenceUtilities.toSkipToken(token.getText()))
pos = newPos;
} else {
if (currentSentenceTokens.size() > 0) {
segmentedParagraphTokens.add(currentSentenceTokens);
currentSentenceIndex++;
if (currentSentenceIndex >= theSentences.size()) {
currentSentenceTokens = new ArrayList<>();
break;
}
sentenceChunk = text.substring(theSentences.get(currentSentenceIndex).start, theSentences.get(currentSentenceIndex).end);
}
currentSentenceTokens = new ArrayList<>();
currentSentenceTokens.add(token);
pos = 0;
}
if (currentSentenceIndex >= theSentences.size())
break;
}
// last sentence
if (currentSentenceTokens.size() > 0) {
// check sentence index too ?
segmentedParagraphTokens.add(currentSentenceTokens);
}
/*if (segmentedParagraphTokens.size() != theSentences.size()) {
System.out.println("ERROR, segmentedParagraphTokens size:" + segmentedParagraphTokens.size() + " vs theSentences size: " + theSentences.size());
System.out.println(text);
System.out.println(theSentences.toString());
int k = 0;
for (List<LayoutToken> segmentedParagraphToken : segmentedParagraphTokens) {
if (k < theSentences.size())
System.out.println(k + " sentence segmented text-only: " + text.substring(theSentences.get(k).start, theSentences.get(k).end));
else
System.out.println("no text-only sentence at index " + k);
System.out.print(k + " layout token segmented sentence: ");
System.out.println(segmentedParagraphToken);
k++;
}
}*/
}
// update the xml paragraph element
pos = 0;
int posInSentence = 0;
int refIndex = 0;
for(int i=0; i<theSentences.size(); i++) {
pos = theSentences.get(i).start;
posInSentence = 0;
Element sentenceElement = teiElement("s");
if (config.isGenerateTeiIds()) {
String sID = KeyGen.getKey().substring(0, 7);
addXmlId(sentenceElement, "_" + sID);
}
if (config.isGenerateTeiCoordinates("s")) {
if (segmentedParagraphTokens.size()>=i+1) {
currentSentenceTokens = segmentedParagraphTokens.get(i);
String coords = LayoutTokensUtil.getCoordsString(currentSentenceTokens);
if (coords != null) {
sentenceElement.addAttribute(new Attribute("coords", coords));
}
}
}
int sentenceLength = theSentences.get(i).end - pos;
// check if we have a ref between pos and pos+sentenceLength
for(int j=refIndex; j<refPositions.size(); j++) {
int refPos = refPositions.get(j).intValue();
if (refPos < pos+posInSentence)
continue;
if (refPos >= pos+posInSentence && refPos <= pos+sentenceLength) {
Node valueNode = mapRefNodes.get(Integer.valueOf(refPos));
if (pos+posInSentence < refPos) {
String local_text_chunk = text.substring(pos+posInSentence, refPos);
local_text_chunk = XmlBuilderUtils.stripNonValidXMLCharacters(local_text_chunk);
sentenceElement.appendChild(local_text_chunk);
}
valueNode.detach();
sentenceElement.appendChild(valueNode);
refIndex = j;
posInSentence = refPos+valueNode.getValue().length()-pos;
}
if (refPos > pos+sentenceLength) {
break;
}
}
if (pos+posInSentence <= theSentences.get(i).end) {
String local_text_chunk = text.substring(pos+posInSentence, theSentences.get(i).end);
local_text_chunk = XmlBuilderUtils.stripNonValidXMLCharacters(local_text_chunk);
sentenceElement.appendChild(local_text_chunk);
curParagraph.appendChild(sentenceElement);
}
}
for(int i=curParagraph.getChildCount()-1; i>=0; i--) {
Node theNode = curParagraph.getChild(i);
if (theNode instanceof Text) {
curParagraph.removeChild(theNode);
} else if (theNode instanceof Element) {
// for readability in another conditional
if (!((Element) theNode).getLocalName().equals("s")) {
curParagraph.removeChild(theNode);
}
}
}
}
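// Example of the effect of segmentIntoSentences (illustrative): a paragraph
// <p>First sentence. Second one cites <ref type="bibr" target="#b3">[4]</ref>.</p>
// becomes
// <p><s>First sentence.</s><s>Second one cites <ref type="bibr" target="#b3">[4]</ref>.</s></p>
// with each <ref> element preserved inside its sentence thanks to the forbidden
// positions passed to the sentence detector.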
/**
* Return the graphic objects in a given interval position in the document.
*/
private List<GraphicObject> getGraphicObject(List<GraphicObject> graphicObjects, int startPos, int endPos) {
List<GraphicObject> result = new ArrayList<GraphicObject>();
for (GraphicObject nto : graphicObjects) {
if ((nto.getStartPosition() >= startPos) && (nto.getStartPosition() <= endPos)) {
result.add(nto);
}
if (nto.getStartPosition() > endPos) {
break;
}
}
return result;
}
private org.grobid.core.utilities.Pair<String, String> getSectionNumber(String text) {
Matcher m1 = BasicStructureBuilder.headerNumbering1.matcher(text);
Matcher m2 = BasicStructureBuilder.headerNumbering2.matcher(text);
Matcher m3 = BasicStructureBuilder.headerNumbering3.matcher(text);
Matcher m = null;
String numb = null;
if (m1.find()) {
numb = m1.group(0);
m = m1;
} else if (m2.find()) {
numb = m2.group(0);
m = m2;
} else if (m3.find()) {
numb = m3.group(0);
m = m3;
}
if (numb != null) {
text = text.replace(numb, "").trim();
numb = numb.replace(" ", "");
return new org.grobid.core.utilities.Pair<>(text, numb);
} else {
return null;
}
}
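// e.g. (illustrative, assuming one of the header numbering patterns matches): for the
// input "3.2 Experimental results", getSectionNumber returns the pair
// ("Experimental results", "3.2"); it returns null when no numbering is detected.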
public StringBuilder toTEIReferences(StringBuilder tei,
List<BibDataSet> bds,
GrobidAnalysisConfig config) throws Exception {
tei.append("\t\t\t<div type=\"references\">\n\n");
if ((bds == null) || (bds.size() == 0))
tei.append("\t\t\t\t<listBibl/>\n");
else {
tei.append("\t\t\t\t<listBibl>\n");
int p = 0;
if (bds.size() > 0) {
for (BibDataSet bib : bds) {
BiblioItem bit = bib.getResBib();
bit.setReference(bib.getRawBib());
if (bit != null) {
tei.append("\n" + bit.toTEI(p, 0, config));
} else {
tei.append("\n");
}
p++;
}
}
tei.append("\n\t\t\t\t</listBibl>\n");
}
tei.append("\t\t\t</div>\n");
return tei;
}
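// Sketch of the output structure (the biblStruct content depends on BiblioItem.toTEI):
// <div type="references">
//     <listBibl>
//         <biblStruct xml:id="b0">...</biblStruct>
//         <biblStruct xml:id="b1">...</biblStruct>
//     </listBibl>
// </div>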
//bounding boxes should have already been calculated when calling this method
public static String getCoordsAttribute(List<BoundingBox> boundingBoxes, boolean generateCoordinates) {
if (!generateCoordinates || boundingBoxes == null || boundingBoxes.isEmpty()) {
return "";
}
String coords = Joiner.on(";").join(boundingBoxes);
return "coords=\"" + coords + "\"";
}
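// e.g. for two bounding boxes this yields an attribute string such as
// coords="2,59.0,623.2,231.4,9.8;2,59.0,635.3,102.0,9.8"
// (assuming BoundingBox.toString() renders one "page,x,y,width,height" group per box)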
/**
 * Mark using TEI annotations the identified references in the text body built with the machine learning model.
*/
public List<Node> markReferencesTEILuceneBased(List<LayoutToken> refTokens,
ReferenceMarkerMatcher markerMatcher,
boolean generateCoordinates,
boolean keepUnsolvedCallout) throws EntityMatcherException {
return markReferencesTEILuceneBased(refTokens, markerMatcher, generateCoordinates, keepUnsolvedCallout, null);
}
public List<Node> markReferencesTEILuceneBased(List<LayoutToken> refTokens,
ReferenceMarkerMatcher markerMatcher,
boolean generateCoordinates,
boolean keepUnsolvedCallout,
MarkerType citationMarkerType) throws EntityMatcherException {
// safety tests
if ( (refTokens == null) || (refTokens.size() == 0) )
return null;
String text = LayoutTokensUtil.toText(refTokens);
if (text == null || text.trim().length() == 0 || text.endsWith("</ref>") || text.startsWith("<ref") || markerMatcher == null)
return Collections.<Node>singletonList(new Text(text));
boolean spaceEnd = false;
text = text.replace("\n", " ");
if (text.endsWith(" "))
spaceEnd = true;
// check constraints on global marker type, we need to discard reference markers that do not follow the
// reference marker pattern of the document
if (citationMarkerType != null) {
// do we have superscript numbers in the ref tokens?
boolean hasSuperScriptNumber = false;
for(LayoutToken refToken : refTokens) {
if (refToken.isSuperscript()) {
hasSuperScriptNumber = true;
break;
}
}
if (citationMarkerType == MarkerType.SUPERSCRIPT_NUMBER) {
// we need to check that the reference tokens have some superscript numbers
if (!hasSuperScriptNumber) {
return Collections.<Node>singletonList(new Text(text));
}
} else {
// if the reference tokens has some superscript numbers, it is a callout for a different type of object
// (e.g. a foot note)
if (hasSuperScriptNumber) {
return Collections.<Node>singletonList(new Text(text));
}
}
// TBD: check other constraints and consistency issues
}
List<Node> nodes = new ArrayList<>();
List<ReferenceMarkerMatcher.MatchResult> matchResults = markerMatcher.match(refTokens);
if (matchResults != null) {
for (ReferenceMarkerMatcher.MatchResult matchResult : matchResults) {
// no need to HTMLEncode since XOM will take care about the correct escaping
String markerText = LayoutTokensUtil.normalizeText(matchResult.getText());
String coords = null;
if (generateCoordinates && matchResult.getTokens() != null) {
coords = LayoutTokensUtil.getCoordsString(matchResult.getTokens());
}
Element ref = teiElement("ref");
ref.addAttribute(new Attribute("type", "bibr"));
if (coords != null) {
ref.addAttribute(new Attribute("coords", coords));
}
ref.appendChild(markerText);
boolean solved = false;
if (matchResult.getBibDataSet() != null) {
ref.addAttribute(new Attribute("target", "#b" + matchResult.getBibDataSet().getResBib().getOrdinal()));
solved = true;
}
if (solved || keepUnsolvedCallout)
nodes.add(ref);
else
nodes.add(textNode(matchResult.getText()));
}
}
if (spaceEnd)
nodes.add(new Text(" "));
return nodes;
}
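// Example output nodes (illustrative): for the callout "[7]" matched against the
// bibliography, the method yields
// <ref type="bibr" target="#b6">[7]</ref>
// while an unmatched callout is kept as a plain text node unless keepUnsolvedCallout
// requests a <ref> without @target.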
public List<Node> markReferencesFigureTEI(String refText,
List<LayoutToken> allRefTokens,
List<Figure> figures,
boolean generateCoordinates) {
if (refText == null ||
refText.trim().isEmpty()) {
return null;
}
List<Node> nodes = new ArrayList<>();
if (refText.trim().length() == 1 && TextUtilities.fullPunctuations.contains(refText.trim())) {
// the reference text marker is a punctuation
nodes.add(new Text(refText));
return nodes;
}
List<org.grobid.core.utilities.Pair<String, List<LayoutToken>>> labels = null;
List<List<LayoutToken>> allYs = LayoutTokensUtil.split(allRefTokens, ReferenceMarkerMatcher.AND_WORD_PATTERN, true);
if (allYs.size() > 1) {
labels = new ArrayList<>();
for (List<LayoutToken> ys : allYs) {
labels.add(new org.grobid.core.utilities.Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(ys)), ys));
}
} else {
// possibly expand the range of reference numbers (as for numerical bibliographical markers)
labels = ReferenceMarkerMatcher.getNumberedLabels(allRefTokens, false);
}
if (labels == null || labels.size() <= 1) {
org.grobid.core.utilities.Pair<String, List<LayoutToken>> localLabel =
new org.grobid.core.utilities.Pair<>(refText, allRefTokens);
labels = new ArrayList<>();
labels.add(localLabel);
}
for (org.grobid.core.utilities.Pair<String, List<LayoutToken>> theLabel : labels) {
String text = theLabel.a;
List<LayoutToken> refTokens = theLabel.b;
String textLow = text.toLowerCase().trim();
String bestFigure = null;
if (figures != null) {
for (Figure figure : figures) {
if ((figure.getLabel() != null) && (figure.getLabel().length() > 0)) {
String label = TextUtilities.cleanField(figure.getLabel(), false);
if (label != null && (label.length() > 0) &&
(textLow.equals(label.toLowerCase()))) {
bestFigure = figure.getId();
break;
}
}
}
if (bestFigure == null) {
// second pass with relaxed figure marker matching
for(int i=figures.size()-1; i>=0; i--) {
Figure figure = figures.get(i);
if ((figure.getLabel() != null) && (figure.getLabel().length() > 0)) {
String label = TextUtilities.cleanField(figure.getLabel(), false);
if (label != null && (label.length() > 0) &&
(textLow.contains(label.toLowerCase()))) {
bestFigure = figure.getId();
break;
}
}
}
}
}
boolean spaceEnd = false;
text = text.replace("\n", " ");
if (text.endsWith(" "))
spaceEnd = true;
text = text.trim();
String andWordString = null;
if (text.endsWith("and") || text.endsWith("&")) {
// the AND_WORD_PATTERN case, we want to exclude the AND word from the tagged chunk
if (text.endsWith("and")) {
text = text.substring(0, text.length()-3);
andWordString = "and";
refTokens = refTokens.subList(0,refTokens.size()-1);
}
else if (text.endsWith("&")) {
text = text.substring(0, text.length()-1);
andWordString = "&";
refTokens = refTokens.subList(0,refTokens.size()-1);
}
if (text.endsWith(" ")) {
andWordString = " " + andWordString;
refTokens = refTokens.subList(0,refTokens.size()-1);
}
text = text.trim();
}
String coords = null;
if (generateCoordinates && refTokens != null) {
coords = LayoutTokensUtil.getCoordsString(refTokens);
}
Element ref = teiElement("ref");
ref.addAttribute(new Attribute("type", "figure"));
if (coords != null) {
ref.addAttribute(new Attribute("coords", coords));
}
ref.appendChild(text);
if (bestFigure != null) {
ref.addAttribute(new Attribute("target", "#fig_" + bestFigure));
}
nodes.add(ref);
if (andWordString != null) {
nodes.add(new Text(andWordString));
}
if (spaceEnd)
nodes.add(new Text(" "));
}
return nodes;
}
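// Illustrative example: the marker text "Figures 2 and 3" is split on the AND-word
// pattern and produces elements such as
// <ref type="figure" target="#fig_1">Figures 2</ref> and <ref type="figure" target="#fig_2">3</ref>
// (targets assume matching figure labels; unmatched labels yield a <ref> without @target).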
public List<Node> markReferencesTableTEI(String refText, List<LayoutToken> allRefTokens,
List<Table> tables,
boolean generateCoordinates) {
if (refText == null ||
refText.trim().isEmpty()) {
return null;
}
List<Node> nodes = new ArrayList<>();
if (refText.trim().length() == 1 && TextUtilities.fullPunctuations.contains(refText.trim())) {
// the reference text marker is a punctuation
nodes.add(new Text(refText));
return nodes;
}
List<org.grobid.core.utilities.Pair<String, List<LayoutToken>>> labels = null;
List<List<LayoutToken>> allYs = LayoutTokensUtil.split(allRefTokens, ReferenceMarkerMatcher.AND_WORD_PATTERN, true);
if (allYs.size() > 1) {
labels = new ArrayList<>();
for (List<LayoutToken> ys : allYs) {
labels.add(new org.grobid.core.utilities.Pair<>(LayoutTokensUtil.toText(LayoutTokensUtil.dehyphenize(ys)), ys));
}
} else {
// possibly expand the range of reference numbers (as for numerical bibliographical markers)
labels = ReferenceMarkerMatcher.getNumberedLabels(allRefTokens, false);
}
if (labels == null || labels.size() <= 1) {
org.grobid.core.utilities.Pair<String, List<LayoutToken>> localLabel =
new org.grobid.core.utilities.Pair<>(refText, allRefTokens);
labels = new ArrayList<>();
labels.add(localLabel);
}
for (org.grobid.core.utilities.Pair<String, List<LayoutToken>> theLabel : labels) {
String text = theLabel.a;
List<LayoutToken> refTokens = theLabel.b;
String textLow = text.toLowerCase().trim();
String bestTable = null;
if (tables != null) {
for (Table table : tables) {
if ((table.getLabel() != null) && (table.getLabel().length() > 0)) {
String label = TextUtilities.cleanField(table.getLabel(), false);
if (label != null && (label.length() > 0) &&
(textLow.equals(label.toLowerCase()))) {
bestTable = table.getId();
break;
}
}
}
if (bestTable == null) {
// second pass with relaxed table marker matching
for(int i=tables.size()-1; i>=0; i--) {
Table table = tables.get(i);
if ((table.getLabel() != null) && (table.getLabel().length() > 0)) {
String label = TextUtilities.cleanField(table.getLabel(), false);
if (label != null && (label.length() > 0) &&
(textLow.contains(label.toLowerCase()))) {
bestTable = table.getId();
break;
}
}
}
}
}
boolean spaceEnd = false;
text = text.replace("\n", " ");
if (text.endsWith(" "))
spaceEnd = true;
text = text.trim();
String andWordString = null;
if (text.endsWith("and") || text.endsWith("&")) {
// the AND_WORD_PATTERN case, we want to exclude the AND word from the tagged chunk
if (text.endsWith("and")) {
text = text.substring(0, text.length()-3);
andWordString = "and";
refTokens = refTokens.subList(0,refTokens.size()-1);
}
else if (text.endsWith("&")) {
text = text.substring(0, text.length()-1);
andWordString = "&";
refTokens = refTokens.subList(0,refTokens.size()-1);
}
if (text.endsWith(" ")) {
andWordString = " " + andWordString;
refTokens = refTokens.subList(0,refTokens.size()-1);
}
text = text.trim();
}
String coords = null;
if (generateCoordinates && refTokens != null) {
coords = LayoutTokensUtil.getCoordsString(refTokens);
}
Element ref = teiElement("ref");
ref.addAttribute(new Attribute("type", "table"));
if (coords != null) {
ref.addAttribute(new Attribute("coords", coords));
}
ref.appendChild(text);
if (bestTable != null) {
ref.addAttribute(new Attribute("target", "#tab_" + bestTable));
}
nodes.add(ref);
if (andWordString != null) {
nodes.add(new Text(andWordString));
}
if (spaceEnd)
nodes.add(new Text(" "));
}
return nodes;
}
private static final Pattern patternNumber = Pattern.compile("\\d+");
public List<Node> markReferencesEquationTEI(String text,
List<LayoutToken> refTokens,
List<Equation> equations,
boolean generateCoordinates) {
if (text == null || text.trim().isEmpty()) {
return null;
}
text = TextUtilities.cleanField(text, false);
String textNumber = null;
Matcher m = patternNumber.matcher(text);
if (m.find()) {
textNumber = m.group();
}
List<Node> nodes = new ArrayList<>();
String textLow = text.toLowerCase();
String bestFormula = null;
if (equations != null) {
for (Equation equation : equations) {
if ((equation.getLabel() != null) && (equation.getLabel().length() > 0)) {
String label = TextUtilities.cleanField(equation.getLabel(), false);
Matcher m2 = patternNumber.matcher(label);
String labelNumber = null;
if (m2.find()) {
labelNumber = m2.group();
}
//if ((label.length() > 0) &&
// (textLow.contains(label.toLowerCase()))) {
if ( (labelNumber != null && textNumber != null && labelNumber.length()>0 &&
labelNumber.equals(textNumber)) ||
((label.length() > 0) && (textLow.equals(label.toLowerCase()))) ) {
bestFormula = equation.getId();
break;
}
}
}
}
boolean spaceEnd = false;
text = text.replace("\n", " ");
if (text.endsWith(" "))
spaceEnd = true;
text = text.trim();
String coords = null;
if (generateCoordinates && refTokens != null) {
coords = LayoutTokensUtil.getCoordsString(refTokens);
}
Element ref = teiElement("ref");
ref.addAttribute(new Attribute("type", "formula"));
if (coords != null) {
ref.addAttribute(new Attribute("coords", coords));
}
ref.appendChild(text);
if (bestFormula != null) {
ref.addAttribute(new Attribute("target", "#formula_" + bestFormula));
}
nodes.add(ref);
if (spaceEnd)
nodes.add(new Text(" "));
return nodes;
}
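// Illustrative example: the callout "Eq. (12)" carries the number "12"; if an equation
// whose label contains "12" exists, the result is a node such as
// <ref type="formula" target="#formula_12">Eq. (12)</ref>
// (the target suffix is the equation id, not necessarily its printed number).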
private String normalizeText(String localText) {
localText = localText.trim();
localText = TextUtilities.dehyphenize(localText);
localText = localText.replace("\n", " ");
localText = localText.replace("\u00A0", " "); // normalize non-breaking spaces
return localText.trim();
}
/**
* In case, the coordinates of structural elements are provided in the TEI
* representation, we need the page sizes in order to scale the coordinates
* appropriately. These size information are provided via the TEI facsimile
* element, with a surface element for each page carrying the page size info.
*/
public StringBuilder toTEIPages(StringBuilder buffer,
Document doc,
GrobidAnalysisConfig config) throws Exception {
if (!config.isGenerateTeiCoordinates()) {
// no coordinates, nothing to do
return buffer;
}
// page height and width
List<Page> pages = doc.getPages();
int pageNumber = 1;
buffer.append("\t<facsimile>\n");
for(Page page : pages) {
buffer.append("\t\t<surface ");
buffer.append("n=\"" + pageNumber + "\" ");
buffer.append("ulx=\"0.0\" uly=\"0.0\" ");
buffer.append("lrx=\"" + page.getWidth() + "\" lry=\"" + page.getHeight() + "\"");
buffer.append("/>\n");
pageNumber++;
}
buffer.append("\t</facsimile>\n");
return buffer;
}
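// Sketch of the generated facsimile block (page sizes are illustrative, A4 in points):
// <facsimile>
//     <surface n="1" ulx="0.0" uly="0.0" lrx="595.0" lry="842.0"/>
//     <surface n="2" ulx="0.0" uly="0.0" lrx="595.0" lry="842.0"/>
// </facsimile>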
}
| 107,658 | 44.637558 | 148 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/DocumentNode.java
|
package org.grobid.core.document;
import java.util.ArrayList;
import java.util.List;
import org.grobid.core.layout.BoundingBox;
/**
 * Class corresponding to a node of the structure of a hierarchically organized document
 * (e.g. for a table of contents).
*
*/
public class DocumentNode {
private Integer id = null;
// Gorn address for tree structure
private String address = null;
// real numbering of the section, if any
private String realNumber = null;
// normalized numbering of the section, if any
private String normalizedNumber = null;
// the string attached to this document level, e.g. section title
private String label = null;
// list of child document nodes
private List<DocumentNode> children = null;
// offset relatively to the document tokenization (so token offset, NOT character offset)
public int startToken = -1;
public int endToken = -1;
// coordinates of the string attached to this document level, typically where an index link
// action point in the document
private BoundingBox boundingBox = null;
// parent document node, if null it is a root node
private DocumentNode father = null;
public DocumentNode() {
}
public DocumentNode(String label, String address) {
this.label = label;
this.address = address;
}
public String getRealNumber() {
return realNumber;
}
public void setRealNumber(String number) {
realNumber = number;
}
public String getNormalizedNumber() {
return normalizedNumber;
}
public void setNormalizedNumber(String number) {
normalizedNumber = number;
}
public String getAddress() {
return address;
}
public void setAddress(String theAddress) {
address = theAddress;
}
public String getLabel() {
return label;
}
public void setLabel(String theLabel) {
label = theLabel;
}
public List<DocumentNode> getChildren() {
return children;
}
public void setChildren(List<DocumentNode> nodes) {
children = nodes;
}
public BoundingBox getBoundingBox() {
return boundingBox;
}
public void setBoundingBox(BoundingBox box) {
boundingBox = box;
}
public DocumentNode getFather() {
return father;
}
public void setFather(DocumentNode parent) {
father = parent;
}
public void addChild(DocumentNode child) {
if (this.children == null) {
this.children = new ArrayList<DocumentNode>();
}
String addr = null;
if (this.address != null) {
if (this.address.equals("0")) {
addr = "" + (this.children.size() + 1);
} else {
addr = this.address + (this.children.size() + 1);
}
}
child.address = addr;
child.father = this;
if (child.endToken > this.endToken) {
this.endToken = child.endToken;
}
this.children.add(child);
}
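// Gorn addressing example (as implemented above): with a root node at address "0",
// its first child gets address "1"; the second child of that node gets "12", i.e.
// the parent address concatenated with the 1-based child position.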
public String toString() {
return toString(0);
}
public String toString(int tab) {
StringBuilder sb = new StringBuilder();
sb.append(id).append(" ").append(address).append(" ").append(label).append(" ").append(startToken).append(" ").append(endToken).append("\n");
if (children != null) {
for (DocumentNode node : children) {
for (int n = 0; n < tab + 1; n++) {
sb.append("\t");
}
sb.append(node.toString(tab + 1));
}
}
return sb.toString();
}
public DocumentNode clone() {
DocumentNode result = new DocumentNode();
result.address = this.address;
result.realNumber = this.realNumber;
result.label = this.label;
result.startToken = this.startToken;
result.endToken = this.endToken;
return result;
}
public DocumentNode getSpanningNode(int position) {
if ((startToken <= position) && (endToken >= position)) {
if (children != null) {
for (DocumentNode node : children) {
if ((node.startToken <= position) && (node.endToken >= position)) {
return node.getSpanningNode(position);
}
}
return this;
} else {
return this;
}
} else {
return null;
}
}
/*public DocumentNode nextSlibing() {
if ( (children != null) && (children.size() > 0) ) {
return children.get(0);
}
else if (father == null) {
return null;
}
else {
for (DocumentNode node : father.children) {
}
}
}*/
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
}
| 5,010 | 24.829897 | 149 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/OPSService.java
|
package org.grobid.core.document;
import java.io.*;
import java.net.*;
import java.util.*;
import org.xml.sax.*;
import org.xml.sax.helpers.*;
import javax.xml.parsers.*;
import java.util.regex.*;
import org.grobid.core.sax.TextSaxParser;
/**
*
 * Usage of the EPO OPS service for online interactive retrieval of patent documents.
 * We use a POST request since the EPO wsdl file results in a terrible mess with WSDL2Java.
 *
 * There is now, however, a newer REST interface that should be used instead of the SOAP one.
 *
 * Service "fair use" implies no more than 6 requests per minute and up to 20 queries per
 * batch SOAP envelope.
*
*/
public class OPSService {
public OPSService() {}
static String OPS_HOST = "ops.epo.org";
static int OPS_PORT = 80;
public String stripNonValidXMLCharacters(String in) {
StringBuffer out = new StringBuffer();
char current;
if (in == null || ("".equals(in))) {
return "";
}
for (int i = 0; i < in.length(); i++) {
current = in.charAt(i);
if ((current == 0x9) ||
(current == 0xA) ||
(current == 0xD) ||
((current >= 0x20) && (current <= 0xD7FF)) ||
((current >= 0xE000) && (current <= 0xFFFD)) ||
((current >= 0x10000) && (current <= 0x10FFFF)))
out.append(current);
}
return out.toString();
}
/**
* Access to full text for a given patent publication number
*/
public String descriptionRetrieval(String patentNumber) throws IOException,
ClassNotFoundException,
InstantiationException, IllegalAccessException {
try {
// header
String envelope = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
envelope += "<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:ops=\"http://ops.epo.org\" xmlns:exc=\"http://www.epo.org/exchange\">\n";
envelope += "<soapenv:Header/>\n";
envelope += "<soapenv:Body>\n";
envelope += "<ops:description-retrieval format=\"text-only\" format-version=\"1.0\">\n";
// body
envelope += "<exc:publication-reference data-format=\"epodoc\">\n";
envelope += "<exc:document-id>\n";
envelope += "<exc:doc-number>"+patentNumber+"</exc:doc-number>\n";
envelope += "</exc:document-id>\n";
envelope += "</exc:publication-reference>\n";
envelope += "</ops:description-retrieval>\n";
envelope += "</soapenv:Body>\n";
envelope += "</soapenv:Envelope>\n";
//Create socket
InetAddress addr = InetAddress.getByName(OPS_HOST);
Socket sock = new Socket(addr, OPS_PORT);
//Send header
String path = "/soap-services/description-retrieval";
BufferedWriter wr = new BufferedWriter(new OutputStreamWriter(sock.getOutputStream(),"UTF-8"));
wr.write("POST " + path + " HTTP/1.0\r\n");
wr.write("Host: "+ OPS_HOST +"\r\n");
wr.write("SOAPAction: description-retrieval" + "\r\n");
wr.write("Content-Length: " + envelope.length() + "\r\n");
wr.write("Content-Type: text/xml; charset=\"utf-8\"\r\n");
wr.write("\r\n");
//Send data
wr.write(envelope);
wr.flush();
// Response
BufferedReader rd = new BufferedReader(new InputStreamReader(sock.getInputStream()));
StringBuffer sb = new StringBuffer();
String line = null;
boolean toRead = false;
while((line = rd.readLine()) != null) {
if (line.startsWith("<?xml"))
toRead = true;
if (toRead) {
line = stripNonValidXMLCharacters(line);
sb.append(line);
}
}
TextSaxParser sax = new TextSaxParser();
// get a factory
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setValidating(false);
spf.setFeature("http://xml.org/sax/features/namespaces", false);
spf.setFeature("http://xml.org/sax/features/validation", false);
//get a new instance of parser
XMLReader reader = XMLReaderFactory.createXMLReader();
reader.setEntityResolver(new EntityResolver() {
public InputSource resolveEntity(String publicId, String systemId) {
return new InputSource(
new ByteArrayInputStream("<?xml version=\"1.0\" encoding=\"UTF-8\"?>".getBytes()));
}
});
reader.setContentHandler(sax);
InputSource input = new InputSource(new StringReader(sb.toString()));
input.setEncoding("UTF-8");
reader.parse(input);
String res = sax.getText();
if (res != null)
return res;
else
return null;
}
catch(Exception e) {
e.printStackTrace();
}
return null;
}
} // end of class
| 4,755 | 31.8 | 177 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/DocumentSource.java
|
package org.grobid.core.document;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.SystemUtils;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidExceptionStatus;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.process.ProcessRunner;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.KeyGen;
import org.grobid.core.utilities.Utilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Input document to be processed, which could come from a PDF or directly be an XML file.
* If from a PDF document, this is the place where pdfalto is called.
*/
public class DocumentSource {
private static final Logger LOGGER = LoggerFactory.getLogger(DocumentSource.class);
// private static final int DEFAULT_TIMEOUT = 30000;
private static final int KILLED_DUE_2_TIMEOUT = 143;
private static final int MISSING_LIBXML2 = 127;
private static final int MISSING_PDFALTO = 126;
public static final int PDFALTO_FILES_AMOUNT_LIMIT = 5000;
private File pdfFile;
private File xmlFile;
boolean cleanupXml = false;
private String md5Str = null;
private DocumentSource() {
}
public static DocumentSource fromPdf(File pdfFile) {
return fromPdf(pdfFile, -1, -1);
}
/**
* By default the XML extracted from the PDF is without images, to avoid flooding the grobid-home/tmp directory,
 * but with the extra annotation file and with the outline
*/
public static DocumentSource fromPdf(File pdfFile, int startPage, int endPage) {
return fromPdf(pdfFile, startPage, endPage, false, true, false);
}
public static DocumentSource fromPdf(File pdfFile, int startPage, int endPage,
boolean withImages, boolean withAnnotations, boolean withOutline) {
if (!pdfFile.exists() || pdfFile.isDirectory()) {
throw new GrobidException("Input PDF file " + pdfFile + " does not exist or a directory",
GrobidExceptionStatus.BAD_INPUT_DATA);
}
DocumentSource source = new DocumentSource();
source.cleanupXml = true;
try {
source.xmlFile = source.pdfalto(null, false, startPage, endPage, pdfFile,
GrobidProperties.getTempPath(), withImages, withAnnotations, withOutline);
} catch (Exception e) {
source.close(withImages, withAnnotations, withOutline);
throw e;
}
source.pdfFile = pdfFile;
return source;
}
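// Typical usage sketch (path is illustrative; an accessor for the resulting ALTO
// xmlFile is assumed to exist elsewhere in this class):
//   DocumentSource source = DocumentSource.fromPdf(new File("/tmp/article.pdf"));
//   // ... process the ALTO file ...
//   source.close(false, true, false);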
private String getPdfaltoCommand(boolean withImage, boolean withAnnotations, boolean withOutline) {
StringBuilder pdfToXml = new StringBuilder();
pdfToXml.append(GrobidProperties.getPdfaltoPath().getAbsolutePath());
// bat files sets the path env variable for cygwin dll
if (SystemUtils.IS_OS_WINDOWS) {
//pdfalto executable are separated to avoid dll conflicts
pdfToXml.append(File.separator + "pdfalto");
}
pdfToXml.append(
GrobidProperties.isContextExecutionServer() ? File.separator + "pdfalto_server" : File.separator + "pdfalto");
pdfToXml.append(" -fullFontName -noLineNumbers");
if (!withImage) {
pdfToXml.append(" -noImage ");
}
if (withAnnotations) {
pdfToXml.append(" -annotation ");
}
if (withOutline) {
pdfToXml.append(" -outline ");
}
// pdfToXml.append(" -readingOrder ");
// pdfToXml.append(" -ocr ");
pdfToXml.append(" -filesLimit 2000 ");
//System.out.println(pdfToXml);
//pdfToXml.append(" -conf <path to config> ");
return pdfToXml.toString();
}
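// Example of a command string assembled here (paths are illustrative and depend on the
// grobid-home configuration and platform):
//   /opt/grobid/grobid-home/pdfalto/lin-64/pdfalto -fullFontName -noLineNumbers -noImage -annotation -outline -filesLimit 2000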
/**
 * Create an XML representation from a pdf file. If timeout is null, the default
 * timeout is used. If force is true, the xml file is always regenerated,
 * even if already present (default is false, which can save up to 50% of overall
 * runtime). If withImages is true, the extraction also covers the images within the
 * pdf, which is relevant for fulltext extraction.
*/
public File pdfalto(Integer timeout, boolean force, int startPage,
int endPage, File pdfPath, File tmpPath, boolean withImages,
boolean withAnnotations, boolean withOutline) {
LOGGER.debug("start pdf to xml sub process");
long time = System.currentTimeMillis();
String pdftoxml0;
pdftoxml0 = getPdfaltoCommand(withImages, withAnnotations, withOutline);
if (startPage > 0)
pdftoxml0 += " -f " + startPage + " ";
if (endPage > 0)
pdftoxml0 += " -l " + endPage + " ";
// if the XML representation already exists, no need to redo the conversion,
// except if the force parameter is set to true
File tmpPathXML = new File(tmpPath, KeyGen.getKey() + ".lxml");
xmlFile = tmpPathXML;
File f = tmpPathXML;
if ((!f.exists()) || force) {
List<String> cmd = new ArrayList<>();
String[] tokens = pdftoxml0.split(" ");
for (String token : tokens) {
if (token.trim().length() > 0) {
cmd.add(token);
}
}
cmd.add(pdfPath.getAbsolutePath());
cmd.add(tmpPathXML.getAbsolutePath());
if (GrobidProperties.isContextExecutionServer()) {
cmd.add("--timeout");
cmd.add(String.valueOf(GrobidProperties.getPdfaltoTimeoutS()));
tmpPathXML = processPdfaltoServerMode(pdfPath, tmpPathXML, cmd);
} else {
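// on Linux, wrap the native call in bash with a soft virtual memory limit
// (ulimit -Sv takes KB, hence the MB * 1024 conversion) so that a runaway
// pdfalto process cannot exhaust the machine's memory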
if (!SystemUtils.IS_OS_WINDOWS && !SystemUtils.IS_OS_MAC) {
cmd = Arrays.asList("bash", "-c", "ulimit -Sv " +
GrobidProperties.getPdfaltoMemoryLimitMb() * 1024 + " && " + pdftoxml0 + " '" + pdfPath + "' " + tmpPathXML);
}
LOGGER.debug("Executing command: " + cmd);
tmpPathXML = processPdfaltoThreadMode(timeout, pdfPath, tmpPathXML, cmd);
}
File dataFolder = new File(tmpPathXML.getAbsolutePath() + "_data");
File[] files = dataFolder.listFiles();
if (files != null && files.length > PDFALTO_FILES_AMOUNT_LIMIT) {
//throw new GrobidException("The temp folder " + dataFolder + " contains " + files.length + " files and exceeds the limit",
// GrobidExceptionStatus.PARSING_ERROR);
LOGGER.warn("The temp folder " + dataFolder + " contains " + files.length +
" files and exceeds the limit, only the first " + PDFALTO_FILES_AMOUNT_LIMIT + " asset files will be kept.");
}
}
LOGGER.debug("pdf to xml sub process process finished. Time to process:" + (System.currentTimeMillis() - time) + "ms");
return tmpPathXML;
}
/**
* Process the conversion of pdfalto format using thread calling native
* executable.
* <p>
* Executed NOT in the server mode
*
 * @param timeout    timeout in ms; null to use the default
* @param pdfPath path to pdf
* @param tmpPathXML temporary path to save the converted file
* @param cmd arguments to call the executable pdfalto
 * @return the path to the converted file.
*/
private File processPdfaltoThreadMode(Integer timeout, File pdfPath,
File tmpPathXML, List<String> cmd) {
LOGGER.debug("Executing: " + cmd.toString());
ProcessRunner worker = new ProcessRunner(cmd, "pdfalto[" + pdfPath + "]", true);
worker.start();
try {
if (timeout != null) {
worker.join(timeout);
} else {
worker.join(GrobidProperties.getPdfaltoTimeoutMs()); // default upper bound applied even without a predefined timeout
}
if (worker.getExitStatus() == null) {
tmpPathXML = null;
//killing all child processes harshly
worker.killProcess();
close(true, true, true);
throw new GrobidException("PDF to XML conversion timed out", GrobidExceptionStatus.TIMEOUT);
}
if (worker.getExitStatus() != 0) {
String errorStreamContents = worker.getErrorStreamContents();
close(true, true, true);
throw new GrobidException("PDF to XML conversion failed on pdf file " + pdfPath + " " +
(StringUtils.isEmpty(errorStreamContents) ? "" : ("due to: " + errorStreamContents)),
GrobidExceptionStatus.PDFALTO_CONVERSION_FAILURE);
}
} catch (InterruptedException ex) {
tmpPathXML = null;
worker.interrupt();
Thread.currentThread().interrupt();
} finally {
worker.interrupt();
}
return tmpPathXML;
}
/**
* Process the conversion of pdf to xml format calling native executable. No
* thread used for the execution.
*
* @param pdfPath path to pdf
* @param tmpPathXML temporary path to save the converted file
* @param cmd arguments to call the executable pdfalto
 * @return the path to the converted file.
*/
private File processPdfaltoServerMode(File pdfPath, File tmpPathXML, List<String> cmd) {
LOGGER.debug("Executing: " + cmd.toString());
Integer exitCode = org.grobid.core.process.ProcessPdfToXml.process(cmd);
if (exitCode == null) {
throw new GrobidException("An error occurred while converting pdf " + pdfPath, GrobidExceptionStatus.BAD_INPUT_DATA);
} else if (exitCode == KILLED_DUE_2_TIMEOUT) {
throw new GrobidException("PDF to XML conversion timed out", GrobidExceptionStatus.TIMEOUT);
} else if (exitCode == MISSING_PDFALTO) {
throw new GrobidException("PDF to XML conversion failed. Cannot find pdfalto executable", GrobidExceptionStatus.PDFALTO_CONVERSION_FAILURE);
} else if (exitCode == MISSING_LIBXML2) {
throw new GrobidException("PDF to XML conversion failed. pdfalto cannot be executed correctly. Has libxml2 been installed in the system? More information can be found in the logs. ", GrobidExceptionStatus.PDFALTO_CONVERSION_FAILURE);
} else if (exitCode != 0) {
throw new GrobidException("PDF to XML conversion failed with error code: " + exitCode, GrobidExceptionStatus.BAD_INPUT_DATA);
}
return tmpPathXML;
}
private boolean cleanXmlFile(File pathToXml, boolean cleanImages, boolean cleanAnnotations, boolean cleanOutline) {
boolean success = false;
try {
if (pathToXml != null) {
if (pathToXml.exists()) {
success = pathToXml.delete();
if (!success) {
throw new GrobidResourceException("Deletion of a temporary XML file failed for file '" + pathToXml.getAbsolutePath() + "'");
}
File fff = new File(pathToXml + "_metadata.xml");
if (fff.exists()) {
success = Utilities.deleteDir(fff);
if (!success) {
throw new GrobidResourceException(
"Deletion of temporary metadata file failed for file '" + fff.getAbsolutePath() + "'");
}
}
}
}
} catch (Exception e) {
if (e instanceof GrobidResourceException) {
throw (GrobidResourceException) e;
} else {
throw new GrobidResourceException("An exception occurred while deleting an XML file '" + pathToXml + "'.", e);
}
}
// if cleanImages is true, we also remove the corresponding image
// resources subdirectory
if (cleanImages) {
try {
if (pathToXml != null) {
File fff = new File(pathToXml + "_data");
if (fff.exists()) {
if (fff.isDirectory()) {
success = Utilities.deleteDir(fff);
if (!success) {
throw new GrobidResourceException(
"Deletion of temporary image files failed for file '" + fff.getAbsolutePath() + "'");
}
}
}
}
} catch (Exception e) {
if (e instanceof GrobidResourceException) {
throw (GrobidResourceException) e;
} else {
throw new GrobidResourceException("An exception occurred while deleting an XML file '" + pathToXml + "'.", e);
}
}
}
// if cleanAnnotations is true, we also remove the additional annotation file
if (cleanAnnotations) {
try {
if (pathToXml != null) {
File fff = new File(pathToXml + "_annot.xml");
if (fff.exists()) {
success = fff.delete();
if (!success) {
throw new GrobidResourceException(
"Deletion of temporary annotation file failed for file '" + fff.getAbsolutePath() + "'");
}
}
}
} catch (Exception e) {
if (e instanceof GrobidResourceException) {
throw (GrobidResourceException) e;
} else {
throw new GrobidResourceException("An exception occurred while deleting an XML file '" + pathToXml + "'.", e);
}
}
}
// if cleanOutline is true, we also remove the additional outline file
if (cleanOutline) {
try {
if (pathToXml != null) {
File fff = new File(pathToXml + "_outline.xml");
if (fff.exists()) {
success = fff.delete();
if (!success) {
throw new GrobidResourceException(
"Deletion of temporary outline file failed for file '" + fff.getAbsolutePath() + "'");
}
}
}
} catch (Exception e) {
if (e instanceof GrobidResourceException) {
throw (GrobidResourceException) e;
} else {
throw new GrobidResourceException("An exception occurred while deleting an XML file '" + pathToXml + "'.", e);
}
}
}
return success;
}
public void close(boolean cleanImages, boolean cleanAnnotations, boolean cleanOutline) {
try {
if (cleanupXml) {
cleanXmlFile(xmlFile, cleanImages, cleanAnnotations, cleanOutline);
}
} catch (Exception e) {
LOGGER.error("Cannot cleanup resources (just printing exception):", e);
}
}
public static void close(DocumentSource source, boolean cleanImages, boolean cleanAnnotations, boolean cleanOutline) {
if (source != null) {
source.close(cleanImages, cleanAnnotations, cleanOutline);
}
}
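// Hedged usage sketch: create the source, process it, then always release the
// temporary pdfalto files; the fromPdf argument values are illustrative only.
//
// DocumentSource source = null;
// try {
//     source = DocumentSource.fromPdf(input, -1, -1, true, false, false);
//     // ... run processing on source.getXmlFile() ...
// } finally {
//     DocumentSource.close(source, true, true, true);
// }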
public File getPdfFile() {
return pdfFile;
}
public void setPdfFile(File pdfFile) {
this.pdfFile = pdfFile;
}
public File getXmlFile() {
return xmlFile;
}
public void setXmlFile(File xmlFile) {
this.xmlFile = xmlFile;
}
public double getByteSize() {
if (pdfFile != null)
return pdfFile.length();
return 0;
}
public String getMD5() {
return this.md5Str;
}
public void setMD5(String md5Str) {
this.md5Str = md5Str;
}
}
| 16,378 | 39.144608 | 245 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/xml/XmlBuilderUtils.java
|
package org.grobid.core.document.xml;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import nu.xom.Attribute;
import nu.xom.Builder;
import nu.xom.Document;
import nu.xom.Element;
import nu.xom.Text;
import nu.xom.Node;
import nu.xom.ParsingException;
import nu.xom.Serializer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.StringReader;
import java.util.List;
public class XmlBuilderUtils {
public static final String TEI_NS = "http://www.tei-c.org/ns/1.0";
public static final Function<Element, String> TO_XML_FUNCTION = new Function<Element, String>() {
@Override
public String apply(Element element) {
return toXml(element);
}
};
public static final String XML_NS = "http://www.w3.org/XML/1998/namespace";
public static Element fromString(String xml) {
Builder parser = new Builder();
Document doc;
try {
doc = parser.build(new StringReader(xml));
} catch (ParsingException | IOException e) {
throw new RuntimeException(e);
}
Element rootElement = doc.getRootElement();
return (Element) rootElement.copy();
// return rootElement;
}
public static String toXml(Element element) {
return element.toXML();
}
public static String toPrettyXml(Element element) {
OutputStream os = new ByteArrayOutputStream();
try {
Serializer serializer = new Serializer(os, "UTF-8");
serializer.setIndent(4);
serializer.write(new Document(element));
} catch (IOException e) {
throw new RuntimeException("Cannot serialize document", e);
}
return os.toString();
}
public static String toXml(List<Element> elements) {
return Joiner.on("\n").join(Iterables.transform(elements, TO_XML_FUNCTION));
}
public static Element teiElement(String name) {
return new Element(name, TEI_NS);
}
public static void addCoords(Element el, String coords) {
if (coords != null) {
el.addAttribute(new Attribute("coords", coords));
}
}
public static void addXmlId(Element el, String id) {
el.addAttribute(new Attribute("xml:id", XML_NS, id));
}
public static Node textNode(String text) {
return new Text(text);
}
public static Element teiElement(String name, String content) {
Element element = new Element(name, TEI_NS);
element.appendChild(content);
return element;
}
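// Hedged usage sketch combining the helpers above; the id and the coordinate
// string (page,x,y,width,height) are made-up values for illustration:
//
// Element p = teiElement("p", "some text");
// addXmlId(p, "p_1");
// addCoords(p, "1,45.2,120.0,200.5,10.1");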
public static void main(String[] args) throws ParsingException, IOException {
Element e = fromString("<div><a>Test</a></div>");
System.out.println(toXml(e));
}
public static String stripNonValidXMLCharacters(String in) {
StringBuffer out = new StringBuffer(); // Used to hold the output.
char current; // Used to reference the current character.
if (in == null || ("".equals(in)))
return "";
for (int i = 0; i < in.length(); i++) {
current = in.charAt(i);
if ((current == 0x9) ||
(current == 0xA) ||
(current == 0xD) ||
((current >= 0x20) && (current <= 0xD7FF)) ||
((current >= 0xE000) && (current <= 0xFFFD)) ||
((current >= 0x10000) && (current <= 0x10FFFF)))
out.append(current);
}
return out.toString();
}
}
| 3,976 | 31.333333 | 101 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/xml/NodesIterator.java
|
package org.grobid.core.document.xml;
import nu.xom.Node;
import nu.xom.Nodes;
import java.util.Iterator;
import java.util.NoSuchElementException;
public class NodesIterator implements Iterable<Node>, Iterator<Node> {
private Nodes nodes;
private int cur;
public NodesIterator(Nodes nodes) {
this.nodes = nodes;
cur = 0;
}
@Override
public Iterator<Node> iterator() {
return this;
}
@Override
public boolean hasNext() {
return cur < nodes.size();
}
@Override
public Node next() {
if (cur == nodes.size()) {
throw new NoSuchElementException();
}
return nodes.get(cur++);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
| 801 | 18.095238 | 70 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/document/xml/NodeChildrenIterator.java
|
package org.grobid.core.document.xml;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import nu.xom.Element;
import nu.xom.Node;
import nu.xom.Text;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
public class NodeChildrenIterator implements Iterable<Node>, Iterator<Node> {
private static class BlacklistElementNamePredicate implements Predicate<Node> {
private Set<String> blacklistElements = new HashSet<>();
public BlacklistElementNamePredicate(String... elementNames) {
this.blacklistElements.addAll(Arrays.asList(elementNames));
}
@Override
public boolean apply(Node input) {
return (input instanceof Text && !blacklistElements.contains("text")) || (input instanceof Element && !blacklistElements.contains(((Element) input).getLocalName()));
}
}
private Node node;
private int cur;
private NodeChildrenIterator(Node node) {
this.node = node;
cur = 0;
}
@Override
public Iterator<Node> iterator() {
return this;
}
@Override
public boolean hasNext() {
return node != null && cur < node.getChildCount();
}
@Override
public Node next() {
if (node == null || cur == node.getChildCount()) {
throw new NoSuchElementException();
}
return node.getChild(cur++);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
public static Iterable<Node> get(Node parent) {
return new NodeChildrenIterator(parent);
}
public static Iterable<Node> get(Node parent, String... blacklistedNodes) {
return Iterables.filter(get(parent), new BlacklistElementNamePredicate(blacklistedNodes));
}
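// Hedged usage sketch: iterate the element children of a parent node while
// skipping text nodes and, say, "note" elements (names illustrative):
//
// for (Node child : NodeChildrenIterator.get(parent, "text", "note")) {
//     // ... process child ...
// }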
}
| 1,898 | 25.746479 | 177 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/visualization/CitationsVisualizer.java
|
package org.grobid.core.visualization;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.google.common.collect.Multimap;
import net.sf.saxon.trans.XPathException;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.pdfbox.cos.COSArray;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.interactive.documentnavigation.destination.PDPageDestination;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.common.PDRectangle;
import org.apache.pdfbox.pdmodel.graphics.color.PDColor;
import org.apache.pdfbox.pdmodel.graphics.color.PDDeviceRGB;
import org.apache.pdfbox.pdmodel.interactive.action.PDActionGoTo;
import org.apache.pdfbox.pdmodel.interactive.action.PDActionURI;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAnnotationLink;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDBorderStyleDictionary;
import org.apache.pdfbox.pdmodel.interactive.documentnavigation.destination.PDPageFitWidthDestination;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.DataSetContext;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Person;
import org.grobid.core.data.Equation;
import org.grobid.core.data.Figure;
import org.grobid.core.data.Table;
import org.grobid.core.document.Document;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.Page;
import org.grobid.core.utilities.DataSetContextExtractor;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Utilities for visualizing citation markers and bibliographical references, either directly
* in the PDF using the PDF annotation layer or as annotations in JSON for supporting
* web based rendering (e.g. with PDF.js) and interactive HTML layer.
* See the web console/demo for actual examples of usage.
*/
public class CitationsVisualizer {
private static final Logger LOGGER = LoggerFactory.getLogger(CitationsVisualizer.class);
private static final JsonFactory jFactory = new JsonFactory();
/**
* Augment a PDF with bibliographical annotation, for bib. ref. and bib markers.
* The PDF annotation layer is used with "GoTo" and "URI" action links.
* The annotations of the bibliographical references can be associated with a URL in order
* to have clickable references directly in the PDF.
* The Apache PDFBox library is used.
*
* @param document PDDocument object resulting from the PDF parsing with PDFBox
* @param teiDoc the Document object resulting from the full document structuring
* @param resolvedBibRefUrl the list of URLs to be added to the bibliographical reference
* annotations; if null, the bib. ref. annotations are not associated with external URLs.
*/
public static PDDocument annotatePdfWithCitations(PDDocument document, Document teiDoc,
List<String> resolvedBibRefUrl) throws IOException, XPathException {
String tei = teiDoc.getTei();
//System.out.println(tei);
int totalBib = 0;
int totalMarkers1 = 0;
int totalMarkers2 = 0;
Multimap<String, DataSetContext> contexts = DataSetContextExtractor.getCitationReferences(tei);
Map<String, Pair<Integer, Integer>> dictionary = new HashMap<>();
int indexBib = 0;
for (BibDataSet cit : teiDoc.getBibDataSets()) {
String teiId = cit.getResBib().getTeiId();
totalBib++;
String theUrl = null;
if ( (resolvedBibRefUrl != null) &&
(resolvedBibRefUrl.size() > indexBib) &&
(resolvedBibRefUrl.get(indexBib) != null) )
theUrl = resolvedBibRefUrl.get(indexBib);
else {
// by default we put the existing url, doi or arXiv link
BiblioItem biblio = cit.getResBib();
if (!StringUtils.isEmpty(biblio.getDOI())) {
theUrl = "https://dx.doi.org/" + biblio.getDOI();
} else if (!StringUtils.isEmpty(biblio.getArXivId())) {
theUrl = "https://arxiv.org/abs/" + biblio.getArXivId();
} else if (!StringUtils.isEmpty(biblio.getWeb())) {
theUrl = biblio.getWeb();
}
}
if (cit.getResBib().getCoordinates() != null) {
for (BoundingBox b : cit.getResBib().getCoordinates()) {
annotatePage(document, b.toString(), teiId, theUrl, 1.5f, false, dictionary);
}
}
//annotating reference markers
for (DataSetContext c : contexts.get(teiId)) {
//System.out.println(c.getContext());
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
annotatePage(document, coords, teiId, null, 1.0f, true, dictionary);
totalMarkers1++;
}
}
}
indexBib++;
}
for (DataSetContext c : contexts.get("")) {
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
annotatePage(document, coords, null, null, 1.0f, true, dictionary);
totalMarkers2++;
}
}
}
if (teiDoc.getResHeader() != null && teiDoc.getResHeader().getFullAuthors() != null) {
for (Person p : teiDoc.getResHeader().getFullAuthors()) {
if (p.getLayoutTokens() != null) {
String coordsString = LayoutTokensUtil.getCoordsString(p.getLayoutTokens());
for (String coords : coordsString.split(";")) {
annotatePage(document, coords, "123", null,
// p.getLastName() == null ? 1 : p.getLastName().hashCode(),
1.0f, true, dictionary);
}
}
}
}
LOGGER.debug("totalBib: " + totalBib);
LOGGER.debug("totalMarkers1: " + totalMarkers1);
LOGGER.debug("totalMarkers2: " + totalMarkers2);
return document;
}
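// Hedged usage sketch (PDDocument.load as used elsewhere in this code base):
//
// PDDocument document = PDDocument.load(pdfFile);
// CitationsVisualizer.annotatePdfWithCitations(document, teiDoc, null);
// document.save(outputFile);
// document.close();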
private static void annotatePage(PDDocument document,
String coords,
String teiId,
String uri,
float lineWidth,
boolean isMarker,
Map<String, Pair<Integer, Integer>> dictionary) throws IOException {
//System.out.println("Annotating for coordinates: " + coords);
/*long seed = 0L;
if (teiId != null)
seed = teiId.hashCode();*/
if (StringUtils.isEmpty(coords)) {
return;
}
String[] split = coords.split(",");
Long pageNum = Long.valueOf(split[0], 10) - 1;
PDPage page = document.getDocumentCatalog().getPages().get(pageNum.intValue());
PDRectangle mediaBox = page.getCropBox();
if (mediaBox == null) {
mediaBox = page.getMediaBox();
// this will look for the main media box of the page up in the PDF element hierarchy
if (mediaBox == null) {
// last hope
mediaBox = page.getArtBox();
if (mediaBox == null) {
// we tried our best given PDFBox
LOGGER.warn("Media box for page " + pageNum.intValue() + " not found.");
return;
}
}
}
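// pdfalto coordinates use a top-left origin with y growing downwards, while
// PDF user space uses a bottom-left origin with y growing upwards; the
// conversion below therefore flips y against the page height and applies the
// media box offsets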
float height = mediaBox.getHeight();
float lowerX = mediaBox.getLowerLeftX();
float lowerY = mediaBox.getLowerLeftY();
float x = Float.parseFloat(split[1]);
float y = Float.parseFloat(split[2]);
float w = Float.parseFloat(split[3]);
float h = Float.parseFloat(split[4]);
float annX = x + lowerX;
float annY = (height - (y + h)) + lowerY;
float annRightX = x + w + lowerX;
float annTopY = height - y + lowerY;
PDRectangle rect = new PDRectangle();
rect.setLowerLeftX(annX);
rect.setLowerLeftY(annY);
rect.setUpperRightX(annRightX);
rect.setUpperRightY(annTopY);
PDBorderStyleDictionary borderULine = new PDBorderStyleDictionary();
borderULine.setStyle(PDBorderStyleDictionary.STYLE_BEVELED);
// so that a border is not visible at all
borderULine.setWidth(0);
PDAnnotationLink txtLink = new PDAnnotationLink();
txtLink.setBorderStyle(borderULine);
//white rectangle border color (ideally, should be transparent)
COSArray white = new COSArray();
white.setFloatArray(new float[]{1f, 1f, 1f});
txtLink.setColor(new PDColor(white, PDDeviceRGB.INSTANCE));
txtLink.setReadOnly(true);
txtLink.setHighlightMode(PDAnnotationLink.HIGHLIGHT_MODE_PUSH);
if (isMarker && (teiId != null)) {
Pair<Integer, Integer> thePlace = dictionary.get(teiId);
if (thePlace != null) {
PDPageFitWidthDestination destination = new PDPageFitWidthDestination();
PDPage pdpage = document.getPage(thePlace.getA());
destination.setPage(pdpage);
//destination.setPageNumber(thePlace.getA());
destination.setTop(thePlace.getB());
PDActionGoTo action = new PDActionGoTo();
action.setDestination(destination);
txtLink.setAction(action);
}
} else {
if (teiId != null) {
// register the object in the dictionary
if (dictionary.get(teiId) == null) {
Pair<Integer, Integer> thePlace =
new Pair<>(pageNum.intValue(), Math.round(annTopY + h));
dictionary.put(teiId, thePlace);
}
}
if (uri != null) {
PDActionURI action = new PDActionURI();
if (uri.endsWith("fulltext/original"))
uri = uri.replace("fulltext/original", "fulltext/pdf");
action.setURI(uri);
txtLink.setAction(action);
} else
return;
}
txtLink.setRectangle(rect);
// adding link to the reference
page.getAnnotations().add(txtLink);
//draw a line
PDBorderStyleDictionary borderThick = new PDBorderStyleDictionary();
borderThick.setWidth(1); // width in points (1/72 inch)
// adding line to the reference
PDPageContentStream stream = new PDPageContentStream(document, page, true, false, true);
//Random r = new Random(seed + 1);
// stream.setStrokingColor(85, 177, 245);
//stream.setStrokingColor(r.nextInt(255), r.nextInt(255), r.nextInt(255));
stream.setStrokingColor(0, 0, 255);
if (isMarker || (uri != null))
stream.setLineWidth(lineWidth);
else
stream.setLineWidth(0f);
stream.drawLine(annX, annY, annRightX, annY);
stream.close();
}
public static String getJsonAnnotations(Document teiDoc, List<String> resolvedBibRefUrl) throws IOException, XPathException {
return getJsonAnnotations(teiDoc, resolvedBibRefUrl, false);
}
/**
* Produce JSON annotations with PDF coordinates for web based PDF rendering. Annotations
* are given for bib. ref. and bib. markers separately, together with the dimensions of the
* different pages for client resizing.
* The annotations of the bibliographical references can be associated with a URL in
* order to support "clickable" bib. ref. annotations.
*
* @param teiDoc the Document object resulting from the full document structuring
* @param resolvedBibRefUrl the list of URLs to be added to the bibliographical reference
* annotations; if null, the bib. ref. annotations are not associated
* with external URLs.
* @param addFiguresTables if true, also annotate figure and table areas, plus the callout
* to figures and tables
*
*/
public static String getJsonAnnotations(Document teiDoc, List<String> resolvedBibRefUrl, boolean addFiguresTables) throws IOException, XPathException {
StringWriter refW = new StringWriter();
JsonGenerator jsonRef = jFactory.createGenerator(refW);
//jsonRef.useDefaultPrettyPrinter();
jsonRef.writeStartObject();
// page height and width
List<Page> pages = teiDoc.getPages();
int pageNumber = 1;
jsonRef.writeArrayFieldStart("pages");
for(Page page : pages) {
jsonRef.writeStartObject();
jsonRef.writeNumberField("page_height", page.getHeight());
jsonRef.writeNumberField("page_width", page.getWidth());
jsonRef.writeEndObject();
pageNumber++;
}
jsonRef.writeEndArray();
StringWriter markW = new StringWriter();
JsonGenerator jsonMark = jFactory.createGenerator(markW);
jsonMark.writeStartArray();
int totalMarkers1 = 0;
int totalMarkers2 = 0;
int totalBib = 0;
jsonRef.writeArrayFieldStart("refBibs");
String tei = teiDoc.getTei();
Multimap<String, DataSetContext> contexts =
DataSetContextExtractor.getCitationReferences(tei);
int bibIndex = 0;
for (BibDataSet cit : teiDoc.getBibDataSets()) {
String teiId = cit.getResBib().getTeiId();
totalBib++;
jsonRef.writeStartObject();
jsonRef.writeStringField("id", teiId);
// url if any - they are passed via the resolvedBibRefUrl vector provided as argument
if ( (resolvedBibRefUrl != null) &&
(resolvedBibRefUrl.size()>bibIndex) &&
(resolvedBibRefUrl.get(bibIndex) != null) ) {
jsonRef.writeStringField("url", resolvedBibRefUrl.get(bibIndex));
} else {
// by default we put the existing url, doi or arXiv link
BiblioItem biblio = cit.getResBib();
String theUrl = null;
if (!StringUtils.isEmpty(biblio.getOAURL())) {
theUrl = biblio.getOAURL();
} else if (!StringUtils.isEmpty(biblio.getDOI())) {
theUrl = "https://dx.doi.org/" + biblio.getDOI();
} else if (!StringUtils.isEmpty(biblio.getArXivId())) {
theUrl = "https://arxiv.org/abs/" + biblio.getArXivId();
} else if (!StringUtils.isEmpty(biblio.getWeb())) {
theUrl = biblio.getWeb();
}
if (theUrl != null)
jsonRef.writeStringField("url", theUrl);
}
jsonRef.writeArrayFieldStart("pos");
if (cit.getResBib().getCoordinates() != null) {
for (BoundingBox b : cit.getResBib().getCoordinates()) {
// reference string
jsonRef.writeStartObject();
b.writeJsonProps(jsonRef);
jsonRef.writeEndObject();
}
}
jsonRef.writeEndArray(); // pos
jsonRef.writeEndObject(); // refBibs element
// reference markers for this reference
for (DataSetContext c : contexts.get(teiId)) {
//System.out.println(c.getContext());
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if ((coords == null) || (coords.length() == 0))
continue;
//annotatePage(document, coords, teiId.hashCode(), 1.0f);
jsonMark.writeStartObject();
jsonMark.writeStringField("id", teiId);
BoundingBox b2 = BoundingBox.fromString(coords);
b2.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers1++;
}
}
}
bibIndex++;
}
jsonRef.writeEndArray(); // refBibs
// remaining reference markers which have not been solved with an actual full
// bibliographical reference object
for (DataSetContext c : contexts.get("")) {
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
//annotatePage(document, coords, 0, 1.0f);
BoundingBox b = BoundingBox.fromString(coords);
jsonMark.writeStartObject();
b.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers2++;
}
}
}
jsonMark.writeEndArray();
jsonMark.close();
LOGGER.debug("totalBib: " + totalBib);
LOGGER.debug("totalBibMarkers1: " + totalMarkers1);
LOGGER.debug("totalBibMarkers2: " + totalMarkers2);
jsonRef.writeFieldName("refMarkers");
jsonRef.writeRawValue(markW.toString());
// for the same price, we add the formulas
markW = new StringWriter();
jsonMark = jFactory.createGenerator(markW);
jsonMark.writeStartArray();
totalMarkers1 = 0;
totalMarkers2 = 0;
int totalFormulas = 0;
jsonRef.writeArrayFieldStart("formulas");
contexts = DataSetContextExtractor.getFormulaReferences(tei);
if (CollectionUtils.isNotEmpty(teiDoc.getEquations())) {
for (Equation formula : teiDoc.getEquations()) {
String teiId = formula.getTeiId();
totalFormulas++;
jsonRef.writeStartObject();
jsonRef.writeStringField("id", teiId);
jsonRef.writeArrayFieldStart("pos");
if (formula.getCoordinates() != null) {
for (BoundingBox b : formula.getCoordinates()) {
// reference string
jsonRef.writeStartObject();
b.writeJsonProps(jsonRef);
jsonRef.writeEndObject();
}
}
jsonRef.writeEndArray(); // pos
jsonRef.writeEndObject(); // formula element
// reference markers for this formula
for (DataSetContext c : contexts.get(teiId)) {
//System.out.println(c.getContext());
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length() > 0)) {
for (String coords : mrect.split(";")) {
if ((coords == null) || (coords.length() == 0))
continue;
//annotatePage(document, coords, teiId.hashCode(), 1.0f);
jsonMark.writeStartObject();
jsonMark.writeStringField("id", teiId);
BoundingBox b2 = BoundingBox.fromString(coords);
b2.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers1++;
}
}
}
}
}
jsonRef.writeEndArray(); // formulas
// remaining formula markers which have not been solved with an actual full
// formula object
for (DataSetContext c : contexts.get("")) {
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
//annotatePage(document, coords, 0, 1.0f);
BoundingBox b = BoundingBox.fromString(coords);
jsonMark.writeStartObject();
b.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers2++;
}
}
}
jsonMark.writeEndArray();
jsonMark.close();
jsonRef.writeFieldName("formulaMarkers");
jsonRef.writeRawValue(markW.toString());
LOGGER.debug("totalFormulas: " + totalBib);
LOGGER.debug("totalFormulaMarkers1: " + totalMarkers1);
LOGGER.debug("totalFormulaMarkers2: " + totalMarkers2);
// if requested, for the same price, we add the figures+tables
if (addFiguresTables) {
markW = new StringWriter();
jsonMark = jFactory.createGenerator(markW);
jsonMark.writeStartArray();
totalMarkers1 = 0;
totalMarkers2 = 0;
int totalFigures = 0;
jsonRef.writeArrayFieldStart("figures");
contexts = DataSetContextExtractor.getFigureReferences(tei);
for (Figure figure : teiDoc.getFigures()) {
String teiId = figure.getTeiId();
totalFigures++;
jsonRef.writeStartObject();
jsonRef.writeStringField("id", teiId);
jsonRef.writeArrayFieldStart("pos");
if (figure.getCoordinates() != null) {
for (BoundingBox b : figure.getCoordinates()) {
// reference string
jsonRef.writeStartObject();
b.writeJsonProps(jsonRef);
jsonRef.writeEndObject();
}
}
jsonRef.writeEndArray(); // pos
jsonRef.writeEndObject(); // figure element
// reference markers for this figure
for (DataSetContext c : contexts.get(teiId)) {
//System.out.println(c.getContext());
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if ((coords == null) || (coords.length() == 0))
continue;
//annotatePage(document, coords, teiId.hashCode(), 1.0f);
jsonMark.writeStartObject();
jsonMark.writeStringField("id", teiId);
BoundingBox b2 = BoundingBox.fromString(coords);
b2.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers1++;
}
}
}
}
jsonRef.writeEndArray(); // figures
// remaining reference markers which have not been solved with an actual
// figure object
for (DataSetContext c : contexts.get("")) {
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
//annotatePage(document, coords, 0, 1.0f);
BoundingBox b = BoundingBox.fromString(coords);
jsonMark.writeStartObject();
b.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers2++;
}
}
}
jsonMark.writeEndArray();
jsonMark.close();
jsonRef.writeFieldName("figureMarkers");
jsonRef.writeRawValue(markW.toString());
LOGGER.debug("totalFigures: " + totalBib);
LOGGER.debug("totalFigureMarkers1: " + totalMarkers1);
LOGGER.debug("totalFigureMarkers2: " + totalMarkers2);
// same for tables
markW = new StringWriter();
jsonMark = jFactory.createGenerator(markW);
jsonMark.writeStartArray();
totalMarkers1 = 0;
totalMarkers2 = 0;
int totalTables = 0;
jsonRef.writeArrayFieldStart("tables");
contexts = DataSetContextExtractor.getTableReferences(tei);
for (Table table : teiDoc.getTables()) {
String teiId = table.getTeiId();
totalTables++;
jsonRef.writeStartObject();
jsonRef.writeStringField("id", teiId);
jsonRef.writeArrayFieldStart("pos");
if (table.getCoordinates() != null) {
for (BoundingBox b : table.getCoordinates()) {
// reference string
jsonRef.writeStartObject();
b.writeJsonProps(jsonRef);
jsonRef.writeEndObject();
}
}
jsonRef.writeEndArray(); // pos
jsonRef.writeEndObject(); // table element
// reference markers for this table
for (DataSetContext c : contexts.get(teiId)) {
//System.out.println(c.getContext());
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if ((coords == null) || (coords.length() == 0))
continue;
//annotatePage(document, coords, teiId.hashCode(), 1.0f);
jsonMark.writeStartObject();
jsonMark.writeStringField("id", teiId);
BoundingBox b2 = BoundingBox.fromString(coords);
b2.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers1++;
}
}
}
}
jsonRef.writeEndArray(); // tables
// remaining reference markers which have not been solved with an actual full
// table object
for (DataSetContext c : contexts.get("")) {
String mrect = c.getDocumentCoords();
if ((mrect != null) && (mrect.trim().length()>0)) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
//annotatePage(document, coords, 0, 1.0f);
BoundingBox b = BoundingBox.fromString(coords);
jsonMark.writeStartObject();
b.writeJsonProps(jsonMark);
jsonMark.writeEndObject();
totalMarkers2++;
}
}
}
jsonMark.writeEndArray();
jsonMark.close();
jsonRef.writeFieldName("tableMarkers");
jsonRef.writeRawValue(markW.toString());
LOGGER.debug("totalTables: " + totalBib);
LOGGER.debug("totalTableMarkers1: " + totalMarkers1);
LOGGER.debug("totalTableMarkers2: " + totalMarkers2);
}
jsonRef.writeEndObject();
jsonRef.close();
return refW.toString();
}
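/*
 * Hedged sketch of the produced JSON (field names taken from the generator
 * calls above, values and box properties illustrative):
 *
 * {
 *   "pages": [ {"page_height": 792.0, "page_width": 612.0}, ... ],
 *   "refBibs": [ {"id": "b0", "url": "...", "pos": [ {...box props...} ]} ],
 *   "refMarkers": [ {"id": "b0", ...box props...}, ... ],
 *   "formulas": [...], "formulaMarkers": [...],
 *   "figures": [...], "figureMarkers": [...],   // only when addFiguresTables
 *   "tables": [...], "tableMarkers": [...]      // only when addFiguresTables
 * }
 */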
/*
 * A variant where annotations are provided page by page
*/
/*public static String getJsonAnnotationsPerPage(Document teiDoc, List<String> resolvedBibRefUrl) throws IOException, XPathException {
StringBuilder jsonRef = new StringBuilder();
jsonRef.append("{\"pages\" : [");
int totalMarkers1 = 0;
int totalMarkers2 = 0;
int totalBib = 0;
List<Page> pages = teiDoc.getPages();
int pageNumber = 1;
for(Page page : pages) {
if (pageNumber > 1)
jsonRef.append(", ");
// page height and width
jsonRef.append("{\"page_height\":" + page.getHeight());
jsonRef.append(", \"page_width\":" + page.getWidth());
boolean refBibOutput = false;
StringBuilder jsonMark = new StringBuilder();
boolean refMarkOutput = false;
String tei = teiDoc.getTei();
Multimap<String, DataSetContext> contexts = DataSetContextExtractor.getCitationReferences(tei);
boolean beginMark = true;
boolean begin = true;
for (BibDataSet cit : teiDoc.getBibDataSets()) {
String teiId = cit.getResBib().getTeiId();
boolean idOutput = false;
boolean begin2 = true;
if (cit.getResBib().getCoordinates() != null) {
for (BoundingBox b : cit.getResBib().getCoordinates()) {
if (b.getPage() == pageNumber) {
if (!refBibOutput) {
jsonRef.append(", \"refBibs\": [ ");
refBibOutput = true;
}
if (!idOutput) {
if (begin)
begin = false;
else
jsonRef.append(", ");
jsonRef.append("{\"id\":\"").append(teiId).append("\", ");
jsonRef.append("\"pos\":[");
idOutput = true;
}
// reference string
if (begin2)
begin2 = false;
else
jsonRef.append(", ");
jsonRef.append("{").append(b.toJson()).append("}");
totalBib++;
}
//annotatePage(document, b.toString(), teiId.hashCode(), contexts.containsKey(teiId) ? 1.5f : 0.5f);
}
}
// reference markers for this reference
for (DataSetContext c : contexts.get(teiId)) {
//System.out.println(c.getContext());
String mrect = c.getDocumentCoords();
if ( (mrect != null) && (mrect.trim().length()>0) ) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
//annotatePage(document, coords, teiId.hashCode(), 1.0f);
BoundingBox b2 = BoundingBox.fromString(coords);
if (b2.getPage() == pageNumber) {
if (!refMarkOutput) {
jsonMark.append(", \"refMarkers\": [");
refMarkOutput = true;
} else
jsonMark.append(", ");
jsonMark.append("{ \"id\":\"").append(teiId).append("\", ");
jsonMark.append(b2.toJson()).append(" }");
totalMarkers1++;
}
}
}
}
if (idOutput) {
jsonRef.append("] }");
}
}
for (DataSetContext c : contexts.get("")) {
String mrect = c.getDocumentCoords();
if ( (mrect != null) && (mrect.trim().length()>0) ) {
for (String coords : mrect.split(";")) {
if (coords.trim().length() == 0)
continue;
BoundingBox b = BoundingBox.fromString(coords);
if (b.getPage() == pageNumber) {
if (!refMarkOutput) {
jsonMark.append(", \"refMarkers\": [");
refMarkOutput = true;
} else
jsonMark.append(", ");
jsonMark.append("{").append(b.toJson()).append("}");
totalMarkers2++;
}
}
}
}
pageNumber++;
if (refBibOutput) {
jsonRef.append("]");
}
if (refMarkOutput) {
jsonRef.append(jsonMark.toString()).append("]");
}
jsonRef.append("}");
}
LOGGER.debug("totalBib: " + totalBib);
LOGGER.debug("totalMarkers1: " + totalMarkers1);
LOGGER.debug("totalMarkers2: " + totalMarkers2);
jsonRef.append("]}");
return jsonRef.toString();
}*/
}
| 34,028 | 42.349045 | 155 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/visualization/BlockVisualizer.java
|
package org.grobid.core.visualization;
import com.google.common.base.Predicate;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import net.sf.saxon.om.Item;
import net.sf.saxon.om.SequenceIterator;
import net.sf.saxon.trans.XPathException;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.layout.Block;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.XQueryProcessor;
import java.awt.*;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import static org.grobid.core.layout.VectorGraphicBoxCalculator.mergeBoxes;
/**
* Visualizing blocks
*/
public class BlockVisualizer {
public static PDDocument annotateBlocks(PDDocument document, File xmlFile, Document teiDoc,
boolean visualizeBlocks,
boolean visualizePageMainArea,
boolean visualizeVectorGraphics) throws IOException, XPathException {
Multimap<Integer, Block> blockMultimap = HashMultimap.create();
for (Block b : teiDoc.getBlocks()) {
if (visualizeBlocks) {
AnnotationUtil.annotatePage(document, b.getPageNumber() + "," + b.getX() + "," + b.getY() +
"," + b.getWidth() + "," + b.getHeight(), 0);
blockMultimap.put(b.getPageNumber(), b);
}
}
for (int pageNum = 1; pageNum <= document.getNumberOfPages(); pageNum++) {
BoundingBox mainPageArea = teiDoc.getPage(pageNum).getMainArea();
if (visualizePageMainArea) {
AnnotationUtil.annotatePage(document,
mainPageArea.toString(), 10);
}
File f = new File(xmlFile.getAbsolutePath() + "_data", "image-" + pageNum + ".svg");
if (f.exists()) {
String q = XQueryProcessor.getQueryFromResources("vector-coords.xq");
XQueryProcessor pr = new XQueryProcessor(f);
SequenceIterator it = pr.getSequenceIterator(q);
Item item;
List<BoundingBox> boxes = new ArrayList<>();
while ((item = it.next()) != null) {
String c = item.getStringValue();
String coords = pageNum + "," + c;
BoundingBox e = BoundingBox.fromString(coords);
if (!mainPageArea.contains(e) || e.area() / mainPageArea.area() > 0.8) {
continue;
}
AnnotationUtil.annotatePage(document, e.toString(), 3);
boxes.add(e);
}
if (visualizeVectorGraphics) {
List<BoundingBox> remainingBoxes = mergeBoxes(boxes);
for (int i = 0; i < remainingBoxes.size(); i++) {
Collection<Block> col = blockMultimap.get(pageNum);
for (Block bl : col) {
// if (!bl.getPage().getMainArea().contains(b)) {
// continue;
// }
BoundingBox b = BoundingBox.fromPointAndDimensions(pageNum, bl.getX(), bl.getY(), bl.getWidth(), bl.getHeight());
if (remainingBoxes.get(i).intersect(b)) {
remainingBoxes.set(i, remainingBoxes.get(i).boundBox(b));
}
}
}
remainingBoxes = mergeBoxes(remainingBoxes);
for (BoundingBox b : remainingBoxes) {
if (b.area() > 500) {
AnnotationUtil.annotatePage(document, b.toString(), 1);
}
}
}
}
}
return document;
}
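// Hedged usage sketch, with document/xmlFile/teiDoc obtained as in the other
// visualizers of this package:
//
// PDDocument out = BlockVisualizer.annotateBlocks(document, xmlFile, teiDoc,
//         true, true, true);
// out.save(new File("/tmp/blocks.pdf"));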
}
| 4,397 | 36.913793 | 141 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/visualization/FigureTableVisualizer.java
|
package org.grobid.core.visualization;
import com.google.common.collect.Lists;
import net.sf.saxon.om.Item;
import net.sf.saxon.om.SequenceIterator;
import net.sf.saxon.trans.XPathException;
import org.apache.commons.io.FileUtils;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.grobid.core.data.Figure;
import org.grobid.core.data.Table;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.GraphicObject;
import org.grobid.core.layout.GraphicObjectType;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.BoundingBoxCalculator;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.XQueryProcessor;
import java.awt.*;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.grobid.core.utilities.PathUtil.getOneFile;
/**
* Visualize figures and tables
*/
public class FigureTableVisualizer {
public static final boolean VISUALIZE_VECTOR_BOXES = true;
private static Set<Integer> blacklistedPages;
private static File inputPdf;
private static boolean annotated;
private static boolean annotatedFigure;
static boolean singleFile = true;
private static Set<Integer> getVectorGraphicPages(File pdfaltoDirectory) throws XPathException, IOException {
//TODO: temporary, vector graphic page detection is disabled: the guard below always returns an empty set
if (true) {
return new HashSet<>();
}
XQueryProcessor xq = new XQueryProcessor(getOneFile(pdfaltoDirectory, ".xml"));
String query = XQueryProcessor.getQueryFromResources("self-contained-images.xq");
SequenceIterator it = xq.getSequenceIterator(query);
Item item;
Set<Integer> blacklistedPages = new HashSet<>();
while ((item = it.next()) != null) {
blacklistedPages.add(Integer.parseInt(item.getStringValue()));
it.next();
}
return blacklistedPages;
}
private static void processPdfFile(File input, File outputFolder) throws Exception {
inputPdf = input;
annotated = false;
annotatedFigure = false;
final PDDocument document = PDDocument.load(input);
File outPdf = new File("/tmp/testFigures.pdf");
final Engine engine = setupEngine();
File contentDir = new File("/tmp/contentDir");
FileUtils.deleteDirectory(contentDir);
File assetPath = new File(contentDir, "tei");
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder()
.pdfAssetPath(assetPath)
.withPreprocessImages(false)
.generateTeiCoordinates(Lists.newArrayList("figure"))
.withProcessVectorGraphics(true)
.build();
DocumentSource documentSource = DocumentSource.fromPdf(input, -1, -1, true, false, false);
File pdfaltoDirectory = new File(contentDir, "pdfalto");
pdfaltoDirectory.mkdirs();
FileUtils.copyFileToDirectory(input, contentDir);
File copiedFile = new File(pdfaltoDirectory, "input.xml");
FileUtils.copyFile(documentSource.getXmlFile(), copiedFile);
FileUtils.copyDirectory(new File(documentSource.getXmlFile().getAbsolutePath() + "_data"), new File(pdfaltoDirectory, documentSource.getXmlFile().getName() + "_data"));
System.out.println(documentSource.getXmlFile());
blacklistedPages = getVectorGraphicPages(pdfaltoDirectory);
Document teiDoc = engine.fullTextToTEIDoc(documentSource, config);
PDDocument out = annotateFigureAndTables(
document, copiedFile, teiDoc,
false, false, true, true, VISUALIZE_VECTOR_BOXES);
if (out != null) {
out.save(outPdf);
if (singleFile) {
if (Desktop.isDesktopSupported()) {
Desktop.getDesktop().open(outPdf);
}
}
}
if (outputFolder != null) {
if (annotated) {
Engine.getCntManager().i("TABLES_TEST", "ANNOTATED_PDFS");
FileUtils.copyFile(outPdf, new File(outputFolder, annotated ?
(annotatedFigure ? input.getName() + "_annotatedFigure.pdf" : input.getName() + "_annotated.pdf")
: input.getName()));
}
}
}
private static Engine setupEngine() {
GrobidProperties.setGrobidHome("grobid-home");
GrobidProperties.setGrobidConfigPath("grobid-home/config/grobid.yaml");
LibraryLoader.load();
return GrobidFactory.getInstance().getEngine();
}
public static PDDocument annotateFigureAndTables(
PDDocument document,
File xmlFile, Document teiDoc,
boolean visualizeTeiFigures,
boolean visualizePdfaltoImages,
boolean visualizeGraphicObjects,
boolean visualizeTables,
boolean visualizeVectorBoxes
) throws IOException, XPathException {
String q = XQueryProcessor.getQueryFromResources("figure-table-coords.xq");
String tei = teiDoc.getTei();
XQueryProcessor pr = new XQueryProcessor(tei);
SequenceIterator it = pr.getSequenceIterator(q);
Item item;
// visualizing TEI image coords
if (visualizeTeiFigures) {
while ((item = it.next()) != null) {
String coords = item.getStringValue();
String stringValue = it.next().getStringValue();
boolean isFigure = Boolean.parseBoolean(stringValue);
AnnotationUtil.annotatePage(document, coords, isFigure ? 1 : 2);
}
}
//VISUALIZING "IMAGE" elements from pdfalto
if (visualizePdfaltoImages) {
q = XQueryProcessor.getQueryFromResources("figure-coords-pdfalto.xq");
pr = new XQueryProcessor(xmlFile);
it = pr.getSequenceIterator(q);
while ((item = it.next()) != null) {
String coords = item.getStringValue();
AnnotationUtil.annotatePage(document, coords, 3);
}
}
if (visualizeGraphicObjects) {
int i = 10;
if (teiDoc.getFigures() != null) {
for (Figure f : teiDoc.getFigures()) {
if (f == null) {
continue;
}
i++;
List<GraphicObject> boxedGo = f.getBoxedGraphicObjects();
if (f.getTextArea() != null) {
for (BoundingBox b : f.getTextArea()) {
annotated = true;
AnnotationUtil.annotatePage(document, b.toString(),
// AnnotationUtil.getCoordString(f.getPage(), f.getX(), f.getY(),
// f.getWidth(), f.getHeight()),
i, boxedGo == null ? 1 : 2
);
}
}
if (boxedGo != null) {
for (GraphicObject go : boxedGo) {
annotatedFigure = true;
AnnotationUtil.annotatePage(document,
AnnotationUtil.getCoordString(go.getPage(), go.getX(), go.getY(),
go.getWidth(), go.getHeight()), i, 2
);
}
}
}
}
}
if (visualizeVectorBoxes) {
if (teiDoc.getImages() != null) {
for (GraphicObject img : teiDoc.getImages()) {
if (img.getType() == GraphicObjectType.VECTOR_BOX) {
BoundingBox go = img.getBoundingBox();
AnnotationUtil.annotatePage(document,
AnnotationUtil.getCoordString(go.getPage(), go.getX(), go.getY(),
go.getWidth(), go.getHeight()), 12, 3
);
}
}
}
}
if (visualizeTables) {
boolean hasSomeTables = false;
if (teiDoc.getTables() != null) {
for (Table t : teiDoc.getTables()) {
hasSomeTables = true;
if (!t.isGoodTable()) {
//System.out.println("Skipping bad table on page: " + t.getTextArea().get(0).getPage());
Engine.getCntManager().i("TABLES_TEST", "BAD_TABLES");
continue;
}
BoundingBox contentBox = BoundingBoxCalculator.calculateOneBox(t.getContentTokens());
BoundingBox descBox = BoundingBoxCalculator.calculateOneBox(t.getFullDescriptionTokens());
System.out.println("Annotating TABLE on page: " + contentBox.getPage());
AnnotationUtil.annotatePage(document,
AnnotationUtil.getCoordString(descBox), 100, 2);
AnnotationUtil.annotatePage(document,
AnnotationUtil.getCoordString(contentBox), 101, 2);
annotatedFigure = true;
annotated = true;
Engine.getCntManager().i("TABLES_TEST", "ANNOTATED_TABLES");
}
}
if (hasSomeTables) {
Engine.getCntManager().i("TABLES_TEST", "PDF_HAS_SOME_TABLES");
}
}
return document;
}
}
| 9,879 | 37.745098 | 176 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/visualization/AnnotationUtil.java
|
package org.grobid.core.visualization;
import org.apache.pdfbox.cos.COSArray;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.common.PDRectangle;
import org.apache.pdfbox.pdmodel.graphics.color.PDColor;
import org.apache.pdfbox.pdmodel.graphics.color.PDDeviceRGB;
import org.apache.pdfbox.pdmodel.interactive.action.PDActionURI;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDAnnotationLink;
import org.apache.pdfbox.pdmodel.interactive.annotation.PDBorderStyleDictionary;
import org.grobid.core.layout.BoundingBox;
import java.io.IOException;
import java.util.Random;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utilities for annotating PDF
*/
public class AnnotationUtil {
private static final Logger LOGGER = LoggerFactory.getLogger(AnnotationUtil.class);
public static BoundingBox getBoundingBoxForPdf(PDDocument document, String coords) {
String[] split = coords.split(",");
Long pageNum = Long.valueOf(split[0], 10) - 1;
PDPage page = (PDPage) document.getDocumentCatalog().getPages().get(pageNum.intValue());
PDRectangle mediaBox = page.getCropBox();
if (mediaBox == null) {
mediaBox = page.getMediaBox();
// this will look for the main media box of the page up in the PDF element hierarchy
if (mediaBox == null) {
// last hope
mediaBox = page.getArtBox();
if (mediaBox == null) {
// we tried our best given PDFBox
LOGGER.warn("Media box for page " + pageNum.intValue() + " not found.");
return null;
}
}
}
float height = mediaBox.getHeight();
float lowerX = mediaBox.getLowerLeftX();
float lowerY = mediaBox.getLowerLeftY();
float x = Float.parseFloat(split[1]);
float y = Float.parseFloat(split[2]);
float w = Float.parseFloat(split[3]);
String nextString = split[4];
if (nextString.indexOf(";") != -1)
nextString = nextString.substring(0, nextString.indexOf(";"));
float h = Float.parseFloat(nextString);
float annX = x + lowerX;
float annY = (height - (y + h)) + lowerY;
float annRightX = x + w + lowerX;
float annTopY = height - y + lowerY;
return BoundingBox.fromTwoPoints(pageNum.intValue(), annX, annY, annRightX, annTopY);
}
public static void annotatePage(PDDocument document, String coords, int seed) throws IOException {
annotatePage(document, coords, seed, 1);
}
public static void annotatePage(PDDocument document, String coords, int seed, int lineWidth) throws IOException {
if (coords == null) {
return;
}
//System.out.println("Annotating for coordinates: " + coords);
BoundingBox box = getBoundingBoxForPdf(document, coords);
if (box == null) {
//System.out.println("Null bounding box for coords: " + coords);
// nothing to do
return;
}
PDPage page = document.getDocumentCatalog().getPages().get(box.getPage());
float annX = (float) box.getX();
float annY = (float) box.getY();
float annRightX = (float) box.getX2();
float annTopY = (float) box.getY2();
PDRectangle rect = new PDRectangle();
rect.setLowerLeftX(annX);
rect.setLowerLeftY(annY);
rect.setUpperRightX(annRightX);
rect.setUpperRightY(annTopY);
PDBorderStyleDictionary borderULine = new PDBorderStyleDictionary();
borderULine.setStyle(PDBorderStyleDictionary.STYLE_SOLID);
// so that a border is not visible at all
borderULine.setWidth(0);
PDAnnotationLink txtLink = new PDAnnotationLink();
txtLink.setBorderStyle(borderULine);
//linkColor rectangle border color (ideally, should be transparent)
COSArray linkColor = new COSArray();
Random r = new Random(seed);
// linkColor.setFloatArray(new float[]{r.nextInt(128) + 127, r.nextInt(255), r.nextInt(255)});
linkColor.setFloatArray(new float[]{224, 9, 56});
txtLink.setColor(new PDColor(linkColor, PDDeviceRGB.INSTANCE));
txtLink.setReadOnly(true);
// use a "push" visual effect when the link is activated
txtLink.setHighlightMode(PDAnnotationLink.HIGHLIGHT_MODE_PUSH);
PDActionURI action = new PDActionURI();
action.setURI("");
txtLink.setAction(action);
txtLink.setRectangle(rect);
// ADDING LINK TO THE REFERENCE
// page.getAnnotations().add(txtLink);
//draw a line
PDBorderStyleDictionary borderThick = new PDBorderStyleDictionary();
borderThick.setWidth(lineWidth); // width in points (1/72 inch)
// PDAnnotationLine line = new PDAnnotationLine();
// line.setLine(new float[]{annX, annY, annRightX, annY});
// line.setRectangle(rect);
// line.setBorderStyle(borderThick);
// line.setReadOnly(true);
// line.setLocked(true);
//
// COSArray rgLineColor = new COSArray();
// rgLineColor.setFloatArray(new float[]{85 / 255f, 177 / 255f, 245 / 255f});
// PDGamma col = new PDGamma(rgLineColor);
// line.setColour(col);
// ADDING LINE TO THE REFERENCE
// page.getAnnotations().add(line);
// ADDING LINE TO THE REFERENCE
PDPageContentStream stream = new PDPageContentStream(document, page, PDPageContentStream.AppendMode.APPEND, false, true);
// Random r = new Random(seed + 1);
//
//
//// stream.setStrokingColor(85, 177, 245);
stream.setStrokingColor(r.nextInt(255), r.nextInt(255), r.nextInt(255));
stream.setLineWidth(lineWidth);
stream.drawLine(annX, annY, annRightX, annY);
stream.drawLine(annX, annTopY, annRightX, annTopY);
stream.drawLine(annX, annY, annX, annTopY);
stream.drawLine(annRightX, annY, annRightX, annTopY);
stream.close();
}
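// Hedged usage sketch: outline an area of page 1 at (50,100) with size 200x20,
// coordinates given in the pdfalto convention handled by getBoundingBoxForPdf:
//
// AnnotationUtil.annotatePage(document, "1,50.0,100.0,200.0,20.0", 42, 2);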
public static String getCoordString(BoundingBox b) {
if (b == null) {
return null;
}
return b.getPage() + "," + b.getX() + "," + b.getY() + "," + b.getWidth() + "," + b.getHeight();
}
public static String getCoordString(int page, double x, double y, double w, double h) {
return page + "," + x + "," + y + "," + w + "," + h;
}
}
| 6,720 | 35.928571 | 129 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/exceptions/GrobidExceptionStatus.java
|
package org.grobid.core.exceptions;
public enum GrobidExceptionStatus {
BAD_INPUT_DATA,
TAGGING_ERROR,
PARSING_ERROR,
TIMEOUT,
TOO_MANY_BLOCKS,
NO_BLOCKS,
PDFALTO_CONVERSION_FAILURE,
TOO_MANY_TOKENS,
GENERAL
}
| 247 | 16.714286 | 35 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/exceptions/GrobidPropertyException.java
|
package org.grobid.core.exceptions;
public class GrobidPropertyException extends GrobidException {
private static final long serialVersionUID = -3337770841815682150L;
public GrobidPropertyException() {
super();
}
public GrobidPropertyException(String message) {
super(message);
}
public GrobidPropertyException(Throwable cause) {
super(cause);
}
public GrobidPropertyException(String message, Throwable cause) {
super(message, cause);
}
}
| 513 | 21.347826 | 71 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/exceptions/GrobidException.java
|
package org.grobid.core.exceptions;
public class GrobidException extends RuntimeException {
private GrobidExceptionStatus status = GrobidExceptionStatus.GENERAL;
private static final long serialVersionUID = -3337770841815682150L;
public GrobidException() {
super();
}
public GrobidException(GrobidExceptionStatus grobidExceptionStatus) {
super();
this.status = grobidExceptionStatus;
}
public GrobidException(String message) {
super(message);
}
public GrobidException(String message, GrobidExceptionStatus grobidExceptionStatus) {
super(message);
this.status = grobidExceptionStatus;
}
public GrobidException(Throwable cause, GrobidExceptionStatus grobidExceptionStatus) {
super(cause);
if (cause instanceof GrobidException) {
this.status = ((GrobidException) cause).getStatus();
} else {
this.status = grobidExceptionStatus;
}
}
public GrobidException(Throwable cause) {
super(cause);
}
public GrobidException(String message, Throwable cause) {
super(message, cause);
}
public GrobidException(String message, Throwable cause, GrobidExceptionStatus grobidExceptionStatus) {
super(message, cause);
this.status = grobidExceptionStatus;
}
@Override
public String getMessage() {
return status != null ? "[" + status + "] " + super.getMessage() : super.getMessage();
}
public GrobidExceptionStatus getStatus() {
return status;
}
}
| 1,582 | 26.77193 | 106 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/exceptions/GrobidResourceException.java
|
package org.grobid.core.exceptions;
public class GrobidResourceException extends GrobidException {
private static final long serialVersionUID = -3337770841815682150L;
public GrobidResourceException() {
super();
}
public GrobidResourceException(String message) {
super(message);
}
public GrobidResourceException(Throwable cause) {
super(cause);
}
public GrobidResourceException(String message, Throwable cause) {
super(message, cause);
}
}
| 513 | 21.347826 | 71 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/PDFAnnotation.java
|
package org.grobid.core.layout;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for representing an annotation present in a PDF source file. Annotations are areas in the PDF document
* associated with an action (URI for external web link, goto for internal document link).
*
*/
public class PDFAnnotation {
private String destination = null;
private List<BoundingBox> boundingBoxes = null;
// start position of the block in the original tokenization, if known
private int startToken = -1;
// end position of the block in the original tokenization, if known
private int endToken = -1;
// the page in the document where the annotation is located
// warning: in PDF, the page numbers start at 1
private int pageNumber = -1;
public enum Type {UNKNOWN, GOTO, URI, GOTOR};
private Type type = Type.UNKNOWN; // default
public PDFAnnotation() {
}
public void setType(Type t) {
type = t;
}
public Type getType() {
return type;
}
public List<BoundingBox> getBoundingBoxes() {
return boundingBoxes;
}
public void setBoundingBoxes(List<BoundingBox> boxes) {
boundingBoxes = boxes;
}
public void addBoundingBox(BoundingBox box) {
if (boundingBoxes == null) {
boundingBoxes = new ArrayList<BoundingBox>();
};
boundingBoxes.add(box);
}
public int getStartToken() {
return startToken;
}
public int getEndToken() {
return endToken;
}
public void setStartToken(int start) {
startToken = start;
}
public void setEndToken(int end) {
endToken = end;
}
public int getPageNumber() {
return pageNumber;
}
public void setPageNumber(int pageNumber) {
this.pageNumber = pageNumber;
}
public boolean isNull() {
if ( (boundingBoxes == null) && (startToken == -1) && (endToken == -1) && (type == null) ) {
return true;
}
else
return false;
}
public String getDestination() {
return destination;
}
public void setDestination(String destination) {
this.destination = destination;
}
@Override
public String toString() {
String res = "PDFAnnotation{" +
", pageNumber=" + pageNumber +
", startToken=" + startToken +
", endToken=" + endToken +
", type=" + type;
if (boundingBoxes != null)
res += ", boundingBoxes=" + boundingBoxes.toString() + '}';
return res;
}
/**
* Return true if the annotation covers the given LayoutToken, based on their
* respective coordinates.
*/
public boolean cover(LayoutToken token) {
if (token == null)
return false;
boolean res = false;
// do we have an entity annotation at this location?
// we need to check the coordinates
int pageToken = token.getPage();
if (pageToken == pageNumber) {
BoundingBox tokenBox = BoundingBox.fromLayoutToken(token);
for(BoundingBox box : boundingBoxes) {
if (box.intersect(tokenBox)) {
// bounding boxes are at least touching, but we also need to check
// whether a significant part of the token surface is covered
if (box.contains(tokenBox)) {
res = true;
break;
}
double areaToken = tokenBox.area();
// the bounding box of the intersection
BoundingBox intersectionBox = box.boundingBoxIntersection(tokenBox);
if (intersectionBox != null) {
double intersectionArea = intersectionBox.area();
if (intersectionArea > (areaToken / 4)) {
res = true;
break;
}
}
}
}
}
return res;
}
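/*
 * Illustrative usage sketch (added here for clarity, not part of the original
 * source): typical pattern for attaching URI annotations to tokens. The
 * variables "annotations" and "tokens" are hypothetical.
 *
 *   for (PDFAnnotation annotation : annotations) {
 *       if (annotation.getType() != PDFAnnotation.Type.URI)
 *           continue;
 *       for (LayoutToken token : tokens) {
 *           if (annotation.cover(token)) {
 *               String uri = annotation.getDestination();
 *               // the link area covers a significant part of this token
 *           }
 *       }
 *   }
 */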
/**
* Return the intersection box between token and annotation
*/
public BoundingBox getIntersectionBox(LayoutToken token) {
if (token == null)
return null;
BoundingBox intersectBox = null;
int pageToken = token.getPage();
if (pageToken == pageNumber) {
BoundingBox tokenBox = BoundingBox.fromLayoutToken(token);
for(BoundingBox box : boundingBoxes) {
if (box.intersect(tokenBox)) {
if (box.contains(tokenBox)) {
intersectBox = tokenBox;
break;
}
intersectBox = box.boundingBoxIntersection(tokenBox);
}
}
}
return intersectBox;
}
}
| 4,479 | 26.317073 | 111 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/BoundingBox.java
|
package org.grobid.core.layout;
import com.fasterxml.jackson.core.JsonGenerator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
 * Represents a bounding box identifying an area in the original PDF
*/
public class BoundingBox implements Comparable {
private static final Logger LOGGER = LoggerFactory.getLogger(BoundingBox.class);
private int page;
private double x, y, width, height;
private double x2, y2;
private BoundingBox(int page, double x, double y, double width, double height) {
this.page = page;
this.x = x;
this.y = y;
this.width = width;
this.height = height;
this.x2 = x + width;
this.y2 = y + height;
}
public static BoundingBox fromTwoPoints(int page, double x1, double y1, double x2, double y2) {
if (x1 > x2 || y1 > y2) {
throw new IllegalArgumentException("Invalid points provided: (" + x1 + ";" + y1 + ")-(" + x2 + ";" + y2 + ")");
}
return new BoundingBox(page, x1, y1, x2 - x1, y2 - y1);
}
public static BoundingBox fromString(String coords) {
try {
String[] split = coords.split(",");
Long pageNum = Long.valueOf(split[0], 10);
float x = Float.parseFloat(split[1]);
float y = Float.parseFloat(split[2]);
float w = Float.parseFloat(split[3]);
float h = Float.parseFloat(split[4]);
return new BoundingBox(pageNum.intValue(), x, y, w, h);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
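/*
 * Sketch (added for illustration): the expected input layout is
 * "page,x,y,width,height", the same format produced by toString() below.
 * The coordinate values are hypothetical.
 *
 *   BoundingBox box = BoundingBox.fromString("1,36.0,432.5,120.0,14.2");
 *   // box.getPage() == 1, box.getX() == 36.0, box.getX2() == 156.0
 */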
public static BoundingBox fromPointAndDimensions(int page, double x, double y, double width, double height) {
return new BoundingBox(page, x, y, width, height);
}
public static BoundingBox fromLayoutToken(LayoutToken tok) {
return BoundingBox.fromPointAndDimensions(tok.getPage(), tok.getX(), tok.getY(), tok.getWidth(), tok.getHeight());
}
public boolean intersect(BoundingBox b) {
double ax1 = this.x;
double ax2 = this.x2;
double ay1 = this.y;
double ay2 = this.y2;
double bx1 = b.x;
double bx2 = b.x2;
double by1 = b.y;
double by2 = b.y2;
if (ax2 < bx1) return false;
else if (ax1 > bx2) return false;
else if (ay2 < by1) return false;
else if (ay1 > by2) return false;
else
return true;
}
public int getPage() {
return page;
}
public double getX() {
return x;
}
public double getY() {
return y;
}
public double getWidth() {
return width;
}
public double getHeight() {
return height;
}
public double getX2() {
return x2;
}
public double getY2() {
return y2;
}
public BoundingBox boundBox(BoundingBox o) {
if (this.page != o.page) {
throw new IllegalStateException("Cannot compute a bounding box for different pages");
}
return fromTwoPoints(o.page, Math.min(this.x, o.x), Math.min(this.y, o.y), Math.max(this.x2, o.x2), Math.max(this.y2, o.y2));
}
public BoundingBox boundBoxExcludingAnotherPage(BoundingBox o) {
if (this.page != o.page) {
LOGGER.debug("Cannot compute a bounding box for different pages: " + this + " and " + o + "; skipping");
return this;
}
return fromTwoPoints(o.page, Math.min(this.x, o.x), Math.min(this.y, o.y), Math.max(this.x2, o.x2), Math.max(this.y2, o.y2));
}
public boolean contains(BoundingBox b) {
return x <= b.x && y <= b.y && x2 >= b.x2 && y2 >= b.y2;
}
private double dist(double x1, double y1, double x2, double y2) {
return Math.sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
}
public double verticalDistanceTo(BoundingBox to) {
// the current box is completely below the other one
boolean bottom = to.y2 < y;
boolean top = y2 < to.y;
if (bottom) {
return y - to.y2;
} else if (top) {
return to.y - y2;
}
return 0;
}
public double area() {
return width * height;
}
public double distanceTo(BoundingBox to) {
if (this.page != to.page) {
return 1000 * Math.abs(this.page - to.page);
}
// the current box is completely to the left of the other one
boolean left = x2 < to.x;
boolean right = to.x2 < x;
boolean bottom = to.y2 < y;
boolean top = y2 < to.y;
if (top && left) {
return dist(x2, y2, to.x, y);
} else if (left && bottom) {
return dist(x2, y, to.x, to.y2);
} else if (bottom && right) {
return dist(x, y, to.x2, to.y2);
} else if (right && top) {
return dist(x, y2, to.x2, to.y);
} else if (left) {
return to.x - x2;
} else if (right) {
return x - to.x2;
} else if (bottom) {
return y - to.y2;
} else if (top) {
return to.y - y2;
} else {
return 0;
}
}
public BoundingBox boundingBoxIntersection(BoundingBox b) {
if (!this.intersect(b))
return null;
double ax1 = this.x;
double ax2 = this.x2;
double ay1 = this.y;
double ay2 = this.y2;
double bx1 = b.x;
double bx2 = b.x2;
double by1 = b.y;
double by2 = b.y2;
double ix1 = 0.0;
if (ax1 > bx1)
ix1 = ax1;
else
ix1 = bx1;
double iy1 = 0.0;
if (ay1 > by1)
iy1 = ay1;
else
iy1 = by1;
double ix2 = 0.0;
if (ax2 > bx2)
ix2 = bx2;
else
ix2 = ax2;
double iy2 = 0.0;
if (ay2 > by2)
iy2 = by2;
else
iy2 = ay2;
return fromTwoPoints(page, ix1, iy1, ix2, iy2);
}
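/*
 * Sketch (added for illustration): estimating how much of box "a" is covered
 * by box "b" with the primitives above; "a" and "b" are hypothetical boxes
 * on the same page.
 *
 *   double coverage = 0.0;
 *   if (a.intersect(b)) {
 *       BoundingBox inter = a.boundingBoxIntersection(b);
 *       if (inter != null)
 *           coverage = inter.area() / a.area();
 *   }
 */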
@Override
public String toString() {
return String.format("%d,%.2f,%.2f,%.2f,%.2f", page, x, y, width, height);
}
public String toJson() {
StringBuilder builder = new StringBuilder();
builder.append("\"p\":").append(page).append(", ");
builder.append("\"x\":").append(x).append(", ");
builder.append("\"y\":").append(y).append(", ");
builder.append("\"w\":").append(width).append(", ");
builder.append("\"h\":").append(height);
return builder.toString();
}
public void writeJsonProps(JsonGenerator gen) throws IOException {
gen.writeNumberField("p", page);
gen.writeNumberField("x", x);
gen.writeNumberField("y", y);
gen.writeNumberField("w", width);
gen.writeNumberField("h", height);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof BoundingBox)) return false;
BoundingBox that = (BoundingBox) o;
if (getPage() != that.getPage()) return false;
if (Double.compare(that.getX(), getX()) != 0) return false;
if (Double.compare(that.getY(), getY()) != 0) return false;
if (Double.compare(that.getWidth(), getWidth()) != 0) return false;
return Double.compare(that.getHeight(), getHeight()) == 0;
}
@Override
public int compareTo(Object otherBox) {
if (this.equals(otherBox))
return 0;
if (!(otherBox instanceof BoundingBox))
return -1;
BoundingBox that = (BoundingBox) otherBox;
// the rest of the position comparison uses the barycenters of the boxes
double thisCenterX = x + (width/2);
double thisCenterY = y + (height/2);
double otherCenterX = that.x + (that.width/2);
double otherCenterY = that.y+ (that.height/2);
if (Double.compare(thisCenterY, otherCenterY) == 0)
return Double.compare(thisCenterX, otherCenterX);
else
return Double.compare(thisCenterY, otherCenterY);
}
@Override
public int hashCode() {
int result;
long temp;
result = getPage();
temp = Double.doubleToLongBits(getX());
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(getY());
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(getWidth());
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(getHeight());
result = 31 * result + (int) (temp ^ (temp >>> 32));
return result;
}
}
| 8,551 | 27.697987 | 133 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/Page.java
|
package org.grobid.core.layout;
import java.util.*;
/**
* Class for representing a page.
*
*/
public class Page {
private List<Block> blocks = null;
private double width = 0.0;
private double height = 0.0;
private int number = -1;
private int pageLengthChar = 0;
private BoundingBox mainArea;
public Page(int nb) {
number = nb;
}
public boolean isEven() {
return number % 2 == 0;
}
public void addBlock(Block b) {
if (blocks == null) {
blocks = new ArrayList<>();
}
blocks.add(b);
}
public List<Block> getBlocks() {
return blocks;
}
public void setHeight(double d) {
height = Math.abs(d);
}
public double getHeight() {
return height;
}
public void setWidth(double d) {
width = Math.abs(d);
}
public double getWidth() {
return width;
}
public void setPageLengthChar(int length) {
pageLengthChar = length;
}
public int getPageLengthChar() {
return pageLengthChar;
}
public void setNumber(int number) {
this.number = number;
}
public int getNumber() {
return number;
}
public BoundingBox getMainArea() {
return mainArea;
}
public void setMainArea(BoundingBox mainArea) {
this.mainArea = mainArea;
}
}
| 1,391 | 17.810811 | 51 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/Cluster.java
|
package org.grobid.core.layout;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for representing a cluster of document layout elements,
* typically all blocks having the same font parameters.
*
*/
public class Cluster {
private List<Block> blocks = null;
private List<Integer> blocks2 = null;
public double y = 0.0;
public double x = 0.0;
public double width = 0.0;
public double height = 0.0;
private String font = null;
private boolean bold = false;
private boolean italic = false;
private String colorFont = null;
private double fontSize = 0.0;
private int nbTokens = 0;
public Cluster() {
}
public void addBlock(Block b) {
if (blocks == null)
blocks = new ArrayList<Block>();
blocks.add(b);
}
public void addBlock2(Integer b) {
if (blocks2 == null)
blocks2 = new ArrayList<Integer>();
blocks2.add(b);
}
public List<Block> getBlocks() {
return blocks;
}
public List<Integer> getBlocks2() {
return blocks2;
}
public void setFont(String f) {
font = f;
}
public void setNbTokens(int t) {
nbTokens = t;
}
public String getFont() {
return font;
}
public int getNbBlocks() {
if (blocks == null)
return 0;
else
return blocks.size();
}
public void setBold(boolean b) {
bold = b;
}
public void setItalic(boolean i) {
italic = i;
}
public boolean getBold() {
return bold;
}
public boolean getItalic() {
return italic;
}
public void setFontSize(double d) {
fontSize = d;
}
public double getFontSize() {
return fontSize;
}
}
| 1,804 | 18.619565 | 62 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/LayoutToken.java
|
package org.grobid.core.layout;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.engines.label.TaggingLabel;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for representing the layout information associated with a PDF object.
*
*/
public class LayoutToken implements Comparable<LayoutToken>, Serializable {
private static final long serialVersionUID = 1L;
private String text = null;
public double y = -1.0;
public double x = -1.0;
public double width = 0.0;
public double height = 0.0;
private String font = null;
private boolean bold = false;
private boolean italic = false;
private String colorFont = null;
public double fontSize = 0.0;
private boolean rotation = false;
private int page = -1;
private boolean newLineAfter;
private int blockPtr;
private int offset = 0;
private boolean subscript = false;
private boolean superscript = false;
/**
* All TaggingLabel accumulated for this token
*/
private ArrayList<TaggingLabel> labels = null;
public LayoutToken() {
}
public LayoutToken(String text) {
this.text = text;
}
public LayoutToken(LayoutToken token) {
this.text = token.text;
this.y = token.y;
this.x = token.x;
this.width = token.width;
this.height = token.height;
this.font = token.font;
this.bold = token.bold;
this.italic = token.italic;
this.colorFont = token.colorFont;
this.fontSize = token.fontSize;
this.rotation = token.rotation;
this.page = token.page;
this.newLineAfter = token.newLineAfter;
this.blockPtr = token.blockPtr;
this.offset = token.offset;
this.subscript = token.subscript;
this.superscript = token.superscript;
// deep copy of the TaggingLabel list
if (token.labels != null) {
this.labels = new ArrayList<TaggingLabel>();
for(TaggingLabel l : token.labels) {
this.labels.add(l);
}
}
}
public LayoutToken(String text, TaggingLabel label) {
this(text);
this.addLabel(label);
}
public void setFont(String f) {
font = f;
}
public String getFont() {
return font;
}
public void setText(String f) {
//text = f.replaceAll("\n", "");
text = f;
}
public void setRotation(boolean b) {
rotation = b;
}
public boolean getRotation() {
return rotation;
}
public String getText() {
return text;
}
public String t() {
return text;
}
public void setColorFont(String f) {
colorFont = f;
}
public String getColorFont() {
return colorFont;
}
public void setBold(boolean b) {
bold = b;
}
public void setItalic(boolean i) {
italic = i;
}
public boolean isBold() {
return bold;
}
/** @deprecated use {@link #isBold()} */
@Deprecated
public boolean getBold() {
return bold;
}
public boolean isItalic() {
return italic;
}
/** @deprecated use {@link #isItalic()} */
@Deprecated
public boolean getItalic() {
return italic;
}
public boolean isSubscript() {
return subscript;
}
public void setSubscript(boolean script) {
this.subscript = script;
}
public boolean isSuperscript() {
return superscript;
}
public void setSuperscript(boolean script) {
this.superscript = script;
}
public void setFontSize(double d) {
fontSize = d;
}
public double getFontSize() {
return fontSize;
}
public void setX(double d) {
x = d;
}
public double getX() {
return x;
}
public void setY(double d) {
y = d;
}
public double getY() {
return y;
}
public void setHeight(double d) {
height = d;
}
public double getHeight() {
return height;
}
public void setWidth(double d) {
width = d;
}
public double getWidth() {
return width;
}
public int getPage() {
return page;
}
public void setPage(int page) {
this.page = page;
}
public boolean isNewLineAfter() {
return newLineAfter;
}
public void setNewLineAfter(boolean newLineAfter) {
this.newLineAfter = newLineAfter;
}
public int getBlockPtr() {
return blockPtr;
}
public void setBlockPtr(int blockPtr) {
this.blockPtr = blockPtr;
}
public int getOffset() {
return offset;
}
public void setOffset(int offset) {
this.offset = offset;
}
/**
* Get all TaggingLabel accumulated for this token
*/
public List<TaggingLabel> getLabels() {
if (this.labels == null)
return new ArrayList<TaggingLabel>();
return this.labels;
}
/**
* Check if a given TaggingLabel is associated with this token
*/
public boolean hasLabel(TaggingLabel label) {
return this.labels != null && this.labels.contains(label);
}
/**
* Add a TaggingLabel to this token
*/
public void addLabel(TaggingLabel label) {
if (this.labels == null)
this.labels = new ArrayList<TaggingLabel>();
if (!hasLabel(label))
this.labels.add(label);
}
@Override
public String toString() {
return text;
}
@Override
public int hashCode() {
int result;
long temp;
result = text != null ? text.hashCode() : 0;
temp = Double.doubleToLongBits(y);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(x);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(width);
result = 31 * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(height);
result = 31 * result + (int) (temp ^ (temp >>> 32));
result = 31 * result + (font != null ? font.hashCode() : 0);
result = 31 * result + (bold ? 1 : 0);
result = 31 * result + (italic ? 1 : 0);
result = 31 * result + (colorFont != null ? colorFont.hashCode() : 0);
temp = Double.doubleToLongBits(fontSize);
result = 31 * result + (int) (temp ^ (temp >>> 32));
result = 31 * result + (rotation ? 1 : 0);
result = 31 * result + page;
return result;
}
@Override
public int compareTo(LayoutToken token2) {
if (y != token2.y) {
if (y < token2.y)
return -1;
else
return 1;
}
else if (x != token2.x) {
if (x < token2.x)
return -1;
else
return 1;
}
else {
double area1 = height*width;
double area2 = token2.height*token2.width;
return Double.compare(area1, area2);
}
}
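/*
 * Sketch (added for illustration): since compareTo() orders tokens by y,
 * then x, then area, sorting a copy of a token list approximates the
 * top-to-bottom, left-to-right reading order of a page. The list "tokens"
 * is hypothetical.
 *
 *   List<LayoutToken> sorted = new ArrayList<>(tokens);
 *   Collections.sort(sorted);
 */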
}
| 6,980 | 21.446945 | 78 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/VectorGraphicBoxCalculator.java
|
package org.grobid.core.layout;
import com.google.common.base.Predicate;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import net.sf.saxon.om.Item;
import net.sf.saxon.om.SequenceIterator;
import net.sf.saxon.trans.XPathException;
import org.grobid.core.document.Document;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.XQueryProcessor;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Working with vector graphics
*/
public class VectorGraphicBoxCalculator {
private static final Logger LOGGER = LoggerFactory.getLogger(VectorGraphicBoxCalculator.class);
public static final int MINIMUM_VECTOR_BOX_AREA = 3000;
public static final int VEC_GRAPHICS_FILE_SIZE_LIMIT = 100 * 1024 * 1024;
public static Multimap<Integer, GraphicObject> calculate(Document document) throws IOException, XPathException {
Multimap<Integer, Block> blockMultimap = HashMultimap.create();
Multimap<Integer, GraphicObject> result = LinkedHashMultimap.create();
for (int pageNum = 1; pageNum <= document.getPages().size(); pageNum++) {
BoundingBox mainPageArea = document.getPage(pageNum).getMainArea();
String q = XQueryProcessor.getQueryFromResources("vector-coords.xq");
File vecFile = new File(document.getDocumentSource().getXmlFile().getAbsolutePath() + "_data", "image-" + pageNum + ".svg");
if (vecFile.exists()) {
if (vecFile.length() > VEC_GRAPHICS_FILE_SIZE_LIMIT) {
LOGGER.error("The vector file " + vecFile + " is too large to be processed, size: " + vecFile.length());
continue;
}
XQueryProcessor pr = new XQueryProcessor(vecFile);
SequenceIterator it = pr.getSequenceIterator(q);
Item item;
List<BoundingBox> boxes = new ArrayList<>();
while ((item = it.next()) != null) {
String c = item.getStringValue();
// TODO: figure out why such strings are returned at all
if (c.equals(",,,")) {
continue;
}
String coords = pageNum + "," + c;
BoundingBox e = BoundingBox.fromString(coords);
if (!mainPageArea.contains(e) || e.area() / mainPageArea.area() > 0.7) {
continue;
}
boxes.add(e);
}
List<BoundingBox> remainingBoxes = mergeBoxes(boxes);
for (int i = 0; i < remainingBoxes.size(); i++) {
Collection<Block> col = blockMultimap.get(pageNum);
for (Block bl : col) {
// if (!bl.getPage().getMainArea().contains(b)) {
// continue;
// }
BoundingBox b = BoundingBox.fromPointAndDimensions(pageNum, bl.getX(), bl.getY(), bl.getWidth(), bl.getHeight());
if (remainingBoxes.get(i).intersect(b)) {
remainingBoxes.set(i, remainingBoxes.get(i).boundBox(b));
}
}
}
remainingBoxes = mergeBoxes(remainingBoxes);
for (BoundingBox b : remainingBoxes) {
if (b.area() > MINIMUM_VECTOR_BOX_AREA) {
result.put(pageNum, new GraphicObject(b, GraphicObjectType.VECTOR_BOX));
}
}
}
}
return result;
}
public static List<BoundingBox> mergeBoxes(List<BoundingBox> boxes) {
boolean allMerged = false;
while (!allMerged) {
allMerged = true;
for (int i = 0; i < boxes.size(); i++) {
BoundingBox a = boxes.get(i);
if (a == null) continue;
for (int j = i + 1; j < boxes.size(); j++) {
BoundingBox b = boxes.get(j);
if (b != null) {
if (a.intersect(b)) {
allMerged = false;
a = a.boundBox(b);
boxes.set(i, a);
boxes.set(j, null);
}
}
}
}
}
return Lists.newArrayList(Iterables.filter(boxes, new Predicate<BoundingBox>() {
@Override
public boolean apply(BoundingBox boundingBox) {
if (boundingBox == null) {
return false;
}
if (boundingBox.getHeight() < 5 || boundingBox.getWidth() < 5) {
return false;
}
return true;
}
}));
}
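/*
 * Sketch (added for illustration): mergeBoxes() iterates to a fixpoint, so
 * transitively intersecting boxes collapse into a single one. With three
 * hypothetical chained boxes on page 1:
 *
 *   List<BoundingBox> boxes = new ArrayList<>();
 *   boxes.add(BoundingBox.fromTwoPoints(1, 0, 0, 10, 10));
 *   boxes.add(BoundingBox.fromTwoPoints(1, 5, 5, 20, 20));
 *   boxes.add(BoundingBox.fromTwoPoints(1, 15, 15, 30, 30));
 *   // mergeBoxes(boxes) returns a single box spanning (0,0)-(30,30)
 */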
}
| 5,215 | 38.515152 | 137 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/GraphicObjectType.java
|
package org.grobid.core.layout;
/**
* Type of extracted graphic objects
*/
public enum GraphicObjectType {
UNKNOWN,
BITMAP,
VECTOR,
VECTOR_BOX
}
| 164 | 12.75 | 36 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/GraphicObject.java
|
package org.grobid.core.layout;
import java.io.File;
/**
* Class for representing graphical objects occurring within a document.
*
*/
public class GraphicObject {
private String filePath = null;
private GraphicObjectType type = GraphicObjectType.UNKNOWN;
// position in the global tokenization
private int startPosition = -1;
private int endPosition = -1;
private int blockNumber = -1;
private BoundingBox boundingBox = null;
private boolean mask = false;
// in case of vector image, we don't have a boundingBox from pdfalto, simply the page information
private int page = -1;
public boolean used;
public GraphicObject() {
}
public GraphicObject(BoundingBox boundingBox, GraphicObjectType type) {
this.boundingBox = boundingBox;
this.type = type;
}
/**
* Return the full path of the file corresponding to the graphic object, useful
* as internal implementation information only
*/
public String getFilePath() {
return this.filePath;
}
/**
 * Return a URI for the file corresponding to the graphic object, in practice a
* portable relative path usable for data exchange
*/
public String getURI() {
if (filePath == null) {
return null;
}
int ind = filePath.lastIndexOf("/");
if (ind != -1) {
//int ind2 = filePath.substring(0, ind-1).lastIndexOf("/");
//if (ind2 != -1)
// return filePath.substring(ind2+1, filePath.length());
return filePath.substring(ind+1, filePath.length());
}
return new File(filePath).getName();
}
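/*
 * Sketch (added for illustration, hypothetical path): getURI() keeps only
 * the file name so that the reference stays portable across machines.
 *
 *   GraphicObject go = new GraphicObject();
 *   go.setFilePath("/tmp/paper.pdf_data/image-3.png");
 *   // go.getURI() returns "image-3.png"
 */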
public GraphicObjectType getType() {
return type;
}
public void setFilePath(String path) {
this.filePath = path;
}
public void setType(GraphicObjectType type) {
this.type = type;
}
public int getStartPosition() {
return startPosition;
}
public int getEndPosition() {
return endPosition;
}
public void setStartPosition(int startPosition) {
this.startPosition = startPosition;
}
public void setEndPosition(int endPosition) {
this.endPosition = endPosition;
}
/*public int getBlockNumber() {
return blockNumber;
}*/
public void setBlockNumber(int blockNumber) {
this.blockNumber = blockNumber;
}
public double getX() {
if (boundingBox != null)
return boundingBox.getX();
else
return 0.0;
}
public double getY() {
if (boundingBox != null)
return boundingBox.getY();
else
return 0.0;
}
public double getWidth() {
if (boundingBox != null)
return boundingBox.getWidth();
else
return 0.0;
}
public double getHeight() {
if (boundingBox != null)
return boundingBox.getHeight();
else
return 0.0;
}
public int getPage() {
if (boundingBox != null)
return boundingBox.getPage();
else
return page;
}
/*public void setX(double x1) {
this.x = Math.abs(x1);
}
public void setY(double y1) {
this.y = Math.abs(y1);
}
public void setWidth(double x2) {
this.width = Math.abs(x2);
}
public void setHeight(double y2) {
this.height = Math.abs(y2);
}*/
public void setPage(int page) {
this.page = page;
}
public BoundingBox getBoundingBox() {
return boundingBox;
}
public void setBoundingBox(BoundingBox box) {
boundingBox = box;
}
public String toString() {
StringBuilder res = new StringBuilder();
if (type == GraphicObjectType.BITMAP) {
res.append("Graphic Bitmap [");
} else if (type == GraphicObjectType.VECTOR) {
res.append("Vector Graphic [");
} else if (type == GraphicObjectType.VECTOR_BOX) {
res.append("Vector Box: [");
} else {
res.append("Unknown [");
}
if (startPosition != -1) {
res.append(startPosition);
}
res.append("-");
if (endPosition != -1) {
res.append(endPosition);
}
res.append("]: \t");
if (filePath != null) {
res.append(filePath + "\t");
} else {
res.append("\t");
}
res.append("(" + (boundingBox != null ? boundingBox.toString() : "no bounding box") + "\t");
return res.toString();
}
public boolean isUsed() {
return used;
}
public void setUsed(boolean used) {
this.used = used;
}
public boolean isMask() {
return mask;
}
public void setMask(boolean mask) {
this.mask = mask;
}
}
| 4,898 | 22.440191 | 101 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/Block.java
|
package org.grobid.core.layout;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for representing and exchanging a document block. A block is defined here relative to
* some properties in the document layout.
*/
public class Block {
private String text = null;
private BoundingBox boundingBox = null;
/*private double y = 0.0;
private double x = 0.0;
private double width = 0.0;
private double height = 0.0;*/
private String font = null;
private boolean bold = false;
private boolean italic = false;
private String colorFont = null;
public double fontSize = 0.0;
public List<LayoutToken> tokens = null;
// start position of the block in the original tokenization
private int startToken = -1;
// end position of the block in the original tokenization
private int endToken = -1;
// the page in the document where the block is located
private Page page = null;
public enum Type {DEFAULT, BULLET, FIGURE, TABLE, REFERENCE}
private Type type;
public Block() {
}
public void addToken(LayoutToken lt) {
if (tokens == null) {
tokens = new ArrayList<LayoutToken>();
}
tokens.add(lt);
}
public List<LayoutToken> getTokens() {
return tokens;
}
public void resetTokens() {
tokens = null;
}
public void setType(Type t) {
type = t;
}
public Type getType() {
return type;
}
public String getText() {
if (text != null && text.trim().startsWith("@"))
return text.trim();
else if (tokens == null)
return null;
else if (text != null)
return text;
else {
StringBuilder localText = new StringBuilder();
for(LayoutToken token : tokens) {
localText.append(token.getText());
}
text = localText.toString();
return text;
}
}
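/*
 * Sketch (added for illustration): getText() concatenates the token strings
 * lazily and caches the result, so repeated calls are cheap.
 *
 *   Block block = new Block();
 *   block.addToken(new LayoutToken("Hello"));
 *   block.addToken(new LayoutToken(" "));
 *   block.addToken(new LayoutToken("world"));
 *   // block.getText() returns "Hello world"
 */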
public int getNbTokens() {
if (tokens == null)
return 0;
else
return tokens.size();
}
public void setFont(String f) {
font = f;
}
public String getFont() {
return font;
}
public void setColorFont(String f) {
colorFont = f;
}
public String getColorFont() {
return colorFont;
}
public void setBold(boolean b) {
bold = b;
}
public void setItalic(boolean i) {
italic = i;
}
public boolean getBold() {
return bold;
}
public boolean getItalic() {
return italic;
}
public void setFontSize(double d) {
fontSize = d;
}
public double getFontSize() {
return fontSize;
}
public BoundingBox getBoundingBox() {
return boundingBox;
}
public void setBoundingBox(BoundingBox box) {
boundingBox = box;
}
/*public void setX(double d) {
x = Math.abs(d);
}*/
public double getX() {
if (boundingBox != null)
return boundingBox.getX();
else
return 0.0;
}
/*public void setY(double d) {
y = Math.abs(d);
}*/
public double getY() {
if (boundingBox != null)
return boundingBox.getY();
else
return 0.0;
}
/*public void setHeight(double d) {
height = Math.abs(d);
}*/
public double getHeight() {
if (boundingBox != null)
return boundingBox.getHeight();
else
return 0.0;
}
/*public void setWidth(double d) {
width = Math.abs(d);
}*/
public double getWidth() {
if (boundingBox != null)
return boundingBox.getWidth();
else
return 0.0;
}
public int getStartToken() {
return startToken;
}
public int getEndToken() {
if (endToken == -1) {
if (tokens == null || tokens.size() == 0) {
return getStartToken();
} else {
return getStartToken() + tokens.size();
}
} else
return endToken;
}
public void setStartToken(int start) {
startToken = start;
}
public void setEndToken(int end) {
endToken = end;
}
public Page getPage() {
return page;
}
public int getPageNumber() {
if (page != null) {
return page.getNumber();
} else {
return -1;
}
}
public void setPage(Page page) {
this.page = page;
}
public boolean isNull() {
return (tokens == null) && (startToken == -1) && (endToken == -1) && (type == null);
}
@Override
public String toString() {
String res = "Block{" +
"startToken=" + startToken +
", endToken=" + endToken +
", type=" + type;
if (boundingBox != null)
res += ", boundingBox=" + boundingBox.toString();
return res + '}';
}
}
| 5,154 | 20.84322 | 96 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/layout/LayoutTokenization.java
|
package org.grobid.core.layout;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for representing the tokenization of a document section where tokens include layout attributes.
 * Once built, it is possible to iterate either through the string tokens only, ignoring the layout
 * information, or through the layout token objects.
*
*/
public class LayoutTokenization {
// private List<LayoutToken> layoutTokenization = null;
private List<LayoutToken> tokenization = null; // this should ultimately be removed
public LayoutTokenization() {
// layoutTokenization = layoutTokens;
tokenization = new ArrayList<LayoutToken>();
}
public LayoutTokenization(List<LayoutToken> tokens) {
// layoutTokenization = layoutTokens;
tokenization = tokens;
}
// public List<LayoutToken> getLayoutTokens() {
// return layoutTokenization;
// }
public List<LayoutToken> getTokenization() {
return tokenization;
}
// public void addLayoutToken(LayoutToken token) {
// if (layoutTokenization == null)
// layoutTokenization = new ArrayList<LayoutToken>();
// else
// layoutTokenization.add(token);
// }
// public void setLayoutTokens(List<LayoutToken> layoutTokens) {
// this.layoutTokenization = layoutTokens;
// }
public void addToken(LayoutToken token) {
if (tokenization == null)
tokenization = new ArrayList<LayoutToken>();
tokenization.add(token);
}
public void addTokens(List<LayoutToken> tokens) {
if (tokenization == null)
tokenization = new ArrayList<LayoutToken>();
tokenization.addAll(tokens);
}
public void setTokenization(List<LayoutToken> tokens) {
tokenization = tokens;
}
public int size() {
if (tokenization == null)
return 0;
else
return tokenization.size();
}
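/*
 * Sketch (added for illustration): minimal construction of a tokenization;
 * addToken() creates the backing list on first use and appends the token.
 *
 *   LayoutTokenization layoutTokenization = new LayoutTokenization();
 *   layoutTokenization.addToken(new LayoutToken("Introduction"));
 *   // layoutTokenization.size() == 1
 */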
}
| 1,762 | 24.185714 | 106 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/CitationParser.java
|
package org.grobid.core.engines;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Date;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.citations.LabeledReferenceResult;
import org.grobid.core.engines.citations.ReferenceSegmenter;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.engines.counters.CitationParserCounters;
import org.grobid.core.engines.label.SegmentationLabels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorCitation;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.Consolidation;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.layout.PDFAnnotation;
import org.grobid.core.layout.PDFAnnotation.Type;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.label.TaggingLabels;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
public class CitationParser extends AbstractParser {
private static final Logger LOGGER = LoggerFactory.getLogger(CitationParser.class);
public Lexicon lexicon = Lexicon.getInstance();
private EngineParsers parsers;
public CitationParser(EngineParsers parsers, CntManager cntManager) {
super(GrobidModels.CITATION, cntManager);
this.parsers = parsers;
}
public CitationParser(EngineParsers parsers) {
super(GrobidModels.CITATION);
this.parsers = parsers;
}
/**
* Process one single raw reference string
*/
public BiblioItem processingString(String input, int consolidate) {
List<String> inputs = new ArrayList<>();
input = TextUtilities.removeLeadingAndTrailingChars(input, "[({.,])}: \n"," \n");
inputs.add(input);
List<BiblioItem> result = processingStringMultiple(inputs, consolidate);
if (result != null && result.size()>0)
return result.get(0);
else
return null;
}
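/*
 * Sketch (added for illustration): parsing one raw reference string without
 * consolidation (consolidate = 0). The reference text is hypothetical.
 *
 *   CitationParser parser = new CitationParser(new EngineParsers());
 *   BiblioItem item = parser.processingString(
 *       "Smith J. A study of widgets. Journal of Examples, 12(3), 2010.", 0);
 *   // item.getTitle(), item.getAuthors(), ... hold the parsed fields
 */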
/**
* Process a list of raw reference strings by taking advantage of batch processing
* when a DeLFT deep learning model is used
*/
public List<BiblioItem> processingStringMultiple(List<String> inputs, int consolidate) {
if (inputs == null || inputs.size() == 0)
return null;
List<List<LayoutToken>> tokenList = new ArrayList<>();
for(String input : inputs) {
if (StringUtils.isBlank(input))
tokenList.add(new ArrayList<LayoutToken>());
else {
// some cleaning
input = UnicodeUtil.normaliseText(input);
input = TextUtilities.removeLeadingAndTrailingChars(input, "[({.,])}: \n"," \n");
List<LayoutToken> tokens = analyzer.tokenizeWithLayoutToken(input);
tokens = analyzer.retokenizeSubdigitsFromLayoutToken(tokens);
tokenList.add(tokens);
}
}
List<BiblioItem> results = processingLayoutTokenMultiple(tokenList, consolidate);
if (results != null && results.size() == inputs.size()) {
// store original references to enable optional raw output
int i = 0;
for (BiblioItem result : results) {
if (result != null) {
String localInput = inputs.get(i);
localInput = TextUtilities.removeLeadingAndTrailingChars(localInput, "[({.,])}: \n"," \n");
result.setReference(localInput);
}
i++;
}
}
return results;
}
/**
* Process one single raw reference string tokenized as layout objects
*/
public BiblioItem processingLayoutToken(List<LayoutToken> tokens, int consolidate) {
List<List<LayoutToken>> tokenList = new ArrayList<>();
tokenList.add(tokens);
List<BiblioItem> result = processingLayoutTokenMultiple(tokenList, consolidate);
if (result != null && result.size()>0)
return result.get(0);
else
return null;
}
/**
 * Process a list of raw reference strings, each one tokenized as layout objects, taking advantage
* of batch processing when a DeLFT deep learning model is used
*/
public List<BiblioItem> processingLayoutTokenMultiple(List<List<LayoutToken>> tokenList, int consolidate) {
if (tokenList == null || tokenList.size() == 0)
return null;
List<BiblioItem> results = new ArrayList<>();
StringBuilder featuredInput = new StringBuilder();
int p = 0;
for(List<LayoutToken> tokens : tokenList) {
tokenList.set(p, analyzer.retokenizeSubdigitsFromLayoutToken(tokens));
p++;
}
for (List<LayoutToken> tokens : tokenList) {
if (CollectionUtils.isEmpty(tokens))
continue;
List<OffsetPosition> journalsPositions = lexicon.tokenPositionsJournalNames(tokens);
List<OffsetPosition> abbrevJournalsPositions = lexicon.tokenPositionsAbbrevJournalNames(tokens);
List<OffsetPosition> conferencesPositions = lexicon.tokenPositionsConferenceNames(tokens);
List<OffsetPosition> publishersPositions = lexicon.tokenPositionsPublisherNames(tokens);
List<OffsetPosition> locationsPositions = lexicon.tokenPositionsLocationNames(tokens);
List<OffsetPosition> collaborationsPositions = lexicon.tokenPositionsCollaborationNames(tokens);
List<OffsetPosition> identifiersPositions = lexicon.tokenPositionsIdentifierPattern(tokens);
List<OffsetPosition> urlPositions = lexicon.tokenPositionsUrlPattern(tokens);
try {
String featuredBlock = FeaturesVectorCitation.addFeaturesCitation(tokens, null, journalsPositions,
abbrevJournalsPositions, conferencesPositions, publishersPositions, locationsPositions,
collaborationsPositions, identifiersPositions, urlPositions);
featuredInput.append(featuredBlock);
featuredInput.append("\n\n");
} catch (Exception e) {
LOGGER.error("An exception occured while adding features for processing a citation.", e);
}
}
if (featuredInput.toString().length() == 0)
return null;
String allRes = null;
try {
allRes = label(featuredInput.toString());
} catch (Exception e) {
LOGGER.error("An exception occured while labeling a citation.", e);
throw new GrobidException(
"An exception occured while labeling a citation.", e);
}
if (allRes == null || allRes.length() == 0)
return null;
String[] resBlocks = allRes.split("\n\n");
int i = 0;
for (List<LayoutToken> tokens : tokenList) {
if (CollectionUtils.isEmpty(tokens))
results.add(null);
else {
String res = resBlocks[i];
i++;
BiblioItem resCitation = resultExtractionLayoutTokens(res, true, tokens);
// post-processing (additional field parsing and cleaning)
if (resCitation != null) {
BiblioItem.cleanTitles(resCitation);
resCitation.setOriginalAuthors(resCitation.getAuthors());
try {
resCitation.setFullAuthors(parsers.getAuthorParser().processingCitation(resCitation.getAuthors()));
} catch (Exception e) {
LOGGER.error("An exception occured when processing author names of a citation.", e);
}
if (resCitation.getPublicationDate() != null) {
List<Date> dates = parsers.getDateParser().processing(resCitation
.getPublicationDate());
if (dates != null) {
Date bestDate = null;
if (dates.size() > 0) {
// we take the earliest most specified date
for (Date theDate : dates) {
if (bestDate == null) {
bestDate = theDate;
} else {
if (bestDate.compareTo(theDate) > 0) {
bestDate = theDate;
}
}
}
if (bestDate != null) {
resCitation.setNormalizedPublicationDate(bestDate);
}
}
}
}
resCitation.setPageRange(TextUtilities.cleanField(
resCitation.getPageRange(), true));
resCitation.setPublisher(TextUtilities.cleanField(
resCitation.getPublisher(), true));
resCitation.setJournal(TextUtilities.cleanField(
resCitation.getJournal(), true));
resCitation.postProcessPages();
// editors (they are human persons in theory)
resCitation.setOriginalEditors(resCitation.getEditors());
try {
resCitation.setFullEditors(parsers.getAuthorParser().processingCitation(resCitation.getEditors()));
} catch (Exception e) {
LOGGER.error("An exception occured when processing editor names of a citation.", e);
}
}
resCitation = consolidateCitation(resCitation, LayoutTokensUtil.toText(tokens), consolidate);
results.add(resCitation);
}
}
return results;
}
public List<BibDataSet> processingReferenceSection(String referenceTextBlock, ReferenceSegmenter referenceSegmenter) {
List<LabeledReferenceResult> segm = referenceSegmenter.extract(referenceTextBlock);
List<BibDataSet> results = new ArrayList<>();
List<List<LayoutToken>> allRefBlocks = new ArrayList<>();
if (segm == null || segm.size() == 0)
return results;
for (LabeledReferenceResult ref : segm) {
if (ref.getTokens() == null || ref.getTokens().size() == 0)
continue;
List<LayoutToken> localTokens = ref.getTokens();
localTokens = TextUtilities.removeLeadingAndTrailingCharsLayoutTokens(localTokens, "[({.,])}: \n"," \n");
allRefBlocks.add(localTokens);
}
List<BiblioItem> bibList = processingLayoutTokenMultiple(allRefBlocks, 0);
int i = 0;
for (LabeledReferenceResult ref : segm) {
if (ref.getTokens() == null || ref.getTokens().size() == 0)
continue;
BiblioItem bib = bibList.get(i);
i++;
if ((bib != null) && !bib.rejectAsReference()) {
BibDataSet bds = new BibDataSet();
String localLabel = ref.getLabel();
if (localLabel != null && localLabel.length()>0) {
// cleaning the label for matching
localLabel = TextUtilities.removeLeadingAndTrailingChars(localLabel, "([{<,. \n", ")}]>,.: \n");
}
String localRef = ref.getReferenceText();
localRef = TextUtilities.removeLeadingAndTrailingChars(localRef, "[({.,])}: \n"," \n");
bds.setRefSymbol(localLabel);
bib.setReference(localRef);
bds.setResBib(bib);
bds.setRawBib(localRef);
bds.getResBib().setCoordinates(ref.getCoordinates());
results.add(bds);
}
}
return results;
}
public List<BibDataSet> processingReferenceSection(Document doc, ReferenceSegmenter referenceSegmenter, int consolidate) {
List<BibDataSet> results = new ArrayList<>();
String referencesStr = doc.getDocumentPartText(SegmentationLabels.REFERENCES);
if (StringUtils.isEmpty(referencesStr)) {
cntManager.i(CitationParserCounters.EMPTY_REFERENCES_BLOCKS);
return results;
}
cntManager.i(CitationParserCounters.NOT_EMPTY_REFERENCES_BLOCKS);
List<LabeledReferenceResult> references = referenceSegmenter.extract(doc);
if (references == null) {
cntManager.i(CitationParserCounters.NULL_SEGMENTED_REFERENCES_LIST);
return results;
} else {
cntManager.i(CitationParserCounters.SEGMENTED_REFERENCES, references.size());
}
// consolidation, if selected, is not done individually for each citation but
// in a second stage for all the citations at once
if (references != null) {
List<String> refTexts = new ArrayList<>();
for (LabeledReferenceResult ref : references) {
// paranoiac check
if (ref == null)
continue;
String localRef = ref.getReferenceText();
localRef = TextUtilities.removeLeadingAndTrailingChars(localRef, "[({.,])}: \n"," \n");
refTexts.add(localRef);
}
List<BiblioItem> bibList = processingStringMultiple(refTexts, 0);
if (bibList != null && bibList.size()>0) {
int i = 0;
for (LabeledReferenceResult ref : references) {
// paranoiac check
if (ref == null)
continue;
//BiblioItem bib = processingString(ref.getReferenceText(), 0);
BiblioItem bib = bibList.get(i);
i++;
if (bib == null)
continue;
// check if we have an interesting url annotation over this bib. ref.
List<LayoutToken> refTokens = ref.getTokens();
if ((refTokens != null) && (refTokens.size() > 0)) {
List<Integer> localPages = new ArrayList<Integer>();
for(LayoutToken token : refTokens) {
if (!localPages.contains(token.getPage())) {
localPages.add(token.getPage());
}
}
for(PDFAnnotation annotation : doc.getPDFAnnotations()) {
if (annotation.getType() != Type.URI)
continue;
if (!localPages.contains(annotation.getPageNumber()))
continue;
for(LayoutToken token : refTokens) {
if (annotation.cover(token)) {
// annotation covers tokens, let's look at the href
String uri = annotation.getDestination();
// is it a DOI?
Matcher doiMatcher = TextUtilities.DOIPattern.matcher(uri);
if (doiMatcher.find()) {
// the BiblioItem setter will take care of the prefix and DOI cleaning
bib.setDOI(uri);
}
// TBD: is it something else?
}
}
}
}
if (!bib.rejectAsReference()) {
BibDataSet bds = new BibDataSet();
String localLabel = ref.getLabel();
if (localLabel != null && localLabel.length()>0) {
// cleaning the label for matching
localLabel = TextUtilities.removeLeadingAndTrailingChars(localLabel, "([{<,. \n", ")}]>,.: \n");
}
String localRef = ref.getReferenceText();
localRef = TextUtilities.removeLeadingAndTrailingChars(localRef, "[({.,])}: \n"," \n");
bds.setRefSymbol(localLabel);
bds.setResBib(bib);
bib.setReference(localRef);
bds.setRawBib(localRef);
bds.getResBib().setCoordinates(ref.getCoordinates());
results.add(bds);
}
}
}
}
// consolidate the set
if (consolidate != 0) {
Consolidation consolidator = Consolidation.getInstance();
if (consolidator.getCntManager() == null)
consolidator.setCntManager(cntManager);
Map<Integer,BiblioItem> resConsolidation = null;
try {
resConsolidation = consolidator.consolidate(results);
} catch(Exception e) {
throw new GrobidException(
"An exception occured while running consolidation on bibliographical references.", e);
}
if (resConsolidation != null) {
for(int i=0; i<results.size(); i++) {
BiblioItem resCitation = results.get(i).getResBib();
BiblioItem bibo = resConsolidation.get(Integer.valueOf(i));
if (bibo != null) {
if (consolidate == 1)
BiblioItem.correct(resCitation, bibo);
else if (consolidate == 2)
BiblioItem.injectIdentifiers(resCitation, bibo);
}
}
}
}
doc.setBibDataSets(results);
return results;
}
public List<BibDataSet> processingReferenceSection(File input,
ReferenceSegmenter referenceSegmenter,
int consolidate) {
DocumentSource documentSource = DocumentSource.fromPdf(input);
return processingReferenceSection(documentSource, referenceSegmenter, consolidate);
}
public List<BibDataSet> processingReferenceSection(File input,
String md5Str,
ReferenceSegmenter referenceSegmenter,
int consolidate) {
DocumentSource documentSource = DocumentSource.fromPdf(input);
documentSource.setMD5(md5Str);
return processingReferenceSection(documentSource, referenceSegmenter, consolidate);
}
public List<BibDataSet> processingReferenceSection(DocumentSource documentSource,
ReferenceSegmenter referenceSegmenter,
int consolidate) {
List<BibDataSet> results;
try {
Document doc = parsers.getSegmentationParser().processing(documentSource,
GrobidAnalysisConfig.builder().consolidateCitations(consolidate).build());
results = processingReferenceSection(doc, referenceSegmenter, consolidate);
} catch (GrobidException e) {
LOGGER.error("An exception occured while running Grobid.", e);
throw e;
} catch (Exception e) {
LOGGER.error("An exception occured while running Grobid.", e);
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return results;
}
/**
* Extract results from a labeled sequence.
*
 * @param result the labeled sequence
 * @param volumePostProcess whether to post-process the volume block
 * @param tokenizations list of tokens
 * @return biblio item
*/
public BiblioItem resultExtractionLayoutTokens(String result,
boolean volumePostProcess,
List<LayoutToken> tokenizations) {
BiblioItem biblio = new BiblioItem();
TaggingLabel lastClusterLabel = null;
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.CITATION, result, tokenizations);
String tokenLabel = null;
List<TaggingTokenCluster> clusters = clusteror.cluster();
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
//String clusterContent = LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(cluster.concatTokens()));
//String clusterContent = LayoutTokensUtil.toText(cluster.concatTokens());
String clusterContent = LayoutTokensUtil.normalizeDehyphenizeText(cluster.concatTokens());
//String clusterNonDehypenizedContent = LayoutTokensUtil.toText(cluster.concatTokens());
if (clusterLabel.equals(TaggingLabels.CITATION_TITLE)) {
if (biblio.getTitle() == null)
biblio.setTitle(clusterContent);
else if (biblio.getTitle().length() >= clusterContent.length())
biblio.setNote(clusterContent);
else {
biblio.setNote(biblio.getTitle());
biblio.setTitle(clusterContent);
}
} else if (clusterLabel.equals(TaggingLabels.CITATION_AUTHOR)) {
if (biblio.getAuthors() == null)
biblio.setAuthors(clusterContent);
else
biblio.setAuthors(biblio.getAuthors() + " ; " + clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_TECH)) {
biblio.setBookType(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_LOCATION)) {
if (biblio.getLocation() != null)
biblio.setLocation(biblio.getLocation() + "; " + clusterContent);
else
biblio.setLocation(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_DATE)) {
if (biblio.getPublicationDate() != null)
biblio.setPublicationDate(biblio.getPublicationDate() + ". " + clusterContent);
else
biblio.setPublicationDate(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_BOOKTITLE)) {
if (biblio.getBookTitle() == null)
biblio.setBookTitle(clusterContent);
else if (biblio.getBookTitle().length() >= clusterContent.length())
biblio.setNote(clusterContent);
else {
biblio.setNote(biblio.getBookTitle());
biblio.setBookTitle(clusterContent);
}
} else if (clusterLabel.equals(TaggingLabels.CITATION_SERIES)) {
if (biblio.getSerieTitle() == null)
biblio.setSerieTitle(clusterContent);
else if (biblio.getSerieTitle().length() >= clusterContent.length())
biblio.setNote(clusterContent);
else {
biblio.setNote(biblio.getSerieTitle());
biblio.setSerieTitle(clusterContent);
}
} else if (clusterLabel.equals(TaggingLabels.CITATION_PAGES)) {
String clusterNonDehypenizedContent = LayoutTokensUtil.toText(cluster.concatTokens());
biblio.setPageRange(clusterNonDehypenizedContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_PUBLISHER)) {
biblio.setPublisher(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_COLLABORATION)) {
if (biblio.getCollaboration() != null)
biblio.setCollaboration(biblio.getCollaboration() + " ; " + clusterContent);
else
biblio.setCollaboration(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_JOURNAL)) {
if (biblio.getJournal() == null)
biblio.setJournal(clusterContent);
else if (biblio.getJournal().length() >= clusterContent.length())
biblio.setNote(clusterContent);
else {
biblio.setNote(biblio.getJournal());
biblio.setJournal(clusterContent);
}
} else if (clusterLabel.equals(TaggingLabels.CITATION_VOLUME)) {
if (biblio.getVolumeBlock() == null)
biblio.setVolumeBlock(clusterContent, volumePostProcess);
} else if (clusterLabel.equals(TaggingLabels.CITATION_ISSUE)) {
if (biblio.getIssue() == null)
biblio.setIssue(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_EDITOR)) {
biblio.setEditors(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_INSTITUTION)) {
if (biblio.getInstitution() != null)
biblio.setInstitution(biblio.getInstitution() + " ; " + clusterContent);
else
biblio.setInstitution(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_NOTE)) {
if (biblio.getNote() != null)
biblio.setNote(biblio.getNote()+ ". " + clusterContent);
else
biblio.setNote(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.CITATION_PUBNUM)) {
String clusterNonDehypenizedContent = LayoutTokensUtil.toText(cluster.concatTokens());
biblio.setPubnum(clusterNonDehypenizedContent);
biblio.checkIdentifier();
} else if (clusterLabel.equals(TaggingLabels.CITATION_WEB)) {
String clusterNonDehypenizedContent = LayoutTokensUtil.toText(cluster.concatTokens());
biblio.setWeb(clusterNonDehypenizedContent);
}
}
return biblio;
}
/**
 * Consolidate a single recognized citation based on access to external
 * internet bibliographic services.
 *
 * @param resCitation the recognized citation to consolidate
 * @param rawCitation the raw citation string
 * @param consolidate consolidation mode: 0 none, 1 correct the fields, 2 inject identifiers only
 * @return consolidated biblio item
 */
public BiblioItem consolidateCitation(BiblioItem resCitation, String rawCitation, int consolidate) {
if (consolidate == 0) {
// no consolidation
return resCitation;
}
Consolidation consolidator = null;
try {
consolidator = Consolidation.getInstance();
if (consolidator.getCntManager() == null)
consolidator.setCntManager(cntManager);
List<BibDataSet> biblios = new ArrayList<BibDataSet>();
BibDataSet theBib = new BibDataSet();
theBib.setResBib(resCitation);
biblios.add(theBib);
Map<Integer,BiblioItem> bibis = consolidator.consolidate(biblios);
//BiblioItem bibo = consolidator.consolidate(resCitation, rawCitation);
BiblioItem bibo = bibis.get(0);
if (bibo != null) {
if (consolidate == 1)
BiblioItem.correct(resCitation, bibo);
else if (consolidate == 2)
BiblioItem.injectIdentifiers(resCitation, bibo);
}
} catch (Exception e) {
LOGGER.error("An exception occurred while running bibliographical data consolidation.", e);
throw new GrobidException(
"An exception occurred while running bibliographical data consolidation.", e);
}
return resCitation;
}
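/*
 * Sketch (added for illustration, variables hypothetical): the consolidate
 * flag selects the behaviour: 0 leaves the item untouched, 1 overwrites
 * fields with the consolidated record (BiblioItem.correct), 2 only injects
 * identifiers such as the DOI (BiblioItem.injectIdentifiers).
 *
 *   BiblioItem enriched = parser.consolidateCitation(item, rawText, 2);
 */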
/**
* Extract results from a list of citation strings in the training format
* without any string modification.
*
* @param inputs list of input data
* @return result
*/
public StringBuilder trainingExtraction(List<String> inputs) {
StringBuilder buffer = new StringBuilder();
try {
if (inputs == null)
return null;
if (inputs.size() == 0)
return null;
List<OffsetPosition> journalsPositions = null;
List<OffsetPosition> abbrevJournalsPositions = null;
List<OffsetPosition> conferencesPositions = null;
List<OffsetPosition> publishersPositions = null;
List<OffsetPosition> locationsPositions = null;
List<OffsetPosition> collaborationsPositions = null;
List<OffsetPosition> identifiersPositions = null;
List<OffsetPosition> urlPositions = null;
for (String input : inputs) {
if (input == null)
continue;
List<LayoutToken> tokenizations = analyzer.tokenizeWithLayoutToken(input);
tokenizations = analyzer.retokenizeSubdigitsFromLayoutToken(tokenizations);
if (tokenizations.size() == 0)
return null;
journalsPositions = lexicon.tokenPositionsJournalNames(tokenizations);
abbrevJournalsPositions = lexicon.tokenPositionsAbbrevJournalNames(tokenizations);
conferencesPositions = lexicon.tokenPositionsConferenceNames(tokenizations);
publishersPositions = lexicon.tokenPositionsPublisherNames(tokenizations);
locationsPositions = lexicon.tokenPositionsLocationNames(tokenizations);
collaborationsPositions = lexicon.tokenPositionsCollaborationNames(tokenizations);
identifiersPositions = lexicon.tokenPositionsIdentifierPattern(tokenizations);
urlPositions = lexicon.tokenPositionsUrlPattern(tokenizations);
String ress = FeaturesVectorCitation.addFeaturesCitation(tokenizations,
null, journalsPositions, abbrevJournalsPositions,
conferencesPositions, publishersPositions, locationsPositions,
collaborationsPositions, identifiersPositions, urlPositions);
String res = label(ress);
String lastTag = null;
String lastTag0;
String currentTag0 = null;
boolean start = true;
String s1 = null;
String s2 = null;
int p = 0;
// extract results from the processed file
StringTokenizer st = new StringTokenizer(res, "\n");
while (st.hasMoreTokens()) {
boolean addSpace = false;
String tok = st.nextToken().trim();
if (tok.length() == 0) {
// new citation
//buffer.append("/t<bibl>\n");
start = true;
continue;
}
StringTokenizer stt = new StringTokenizer(tok, "\t");
int i = 0;
boolean newLine = false;
int ll = stt.countTokens();
while (stt.hasMoreTokens()) {
String s = stt.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s);
//s2 = s;
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p).t();
if (tokOriginal.equals(" ")
|| tokOriginal.equals("\u00A0")) {
addSpace = true;
} else if (tokOriginal.equals(s)) {
strop = true;
}
p++;
}
} else if (i == ll - 1) {
s1 = s;
}
i++;
}
if (start && (s1 != null)) {
buffer.append("\t<bibl>");
start = false;
}
lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
//tagClosed = lastTag0 != null &&
if ((lastTag0 != null) && (currentTag0 != null))
testClosingTag(buffer, currentTag0, lastTag0);
String output = writeField(s1, lastTag0, s2, "<title>", "<title level=\"a\">", addSpace, 0);
if (output == null) {
output = writeField(s1, lastTag0, s2, "<other>", "", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<author>", "<author>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<journal>", "<title level=\"j\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<series>", "<title level=\"s\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<booktitle>", "<title level=\"m\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<date>", "<date>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<volume>", "<biblScope unit=\"volume\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<publisher>", "<publisher>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<location>", "<pubPlace>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<editor>", "<editor>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<pages>", "<biblScope unit=\"page\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<tech>", "<note type=\"report\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<issue>", "<biblScope unit=\"issue\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<web>", "<ptr type=\"web\">", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<note>", "<note>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<institution>", "<orgName>", addSpace, 0);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<collaboration>", "<orgName type=\"collaboration\">", addSpace, 0);
}
if (output == null) {
String localTag = null;
String cleanS2 = StringUtils.normalizeSpace(s2);
cleanS2 = cleanS2.replace(" ", "");
Matcher doiMatcher = TextUtilities.DOIPattern.matcher(cleanS2);
if (doiMatcher.find())
localTag = "<idno type=\"DOI\">";
if (localTag == null) {
Matcher arxivMatcher = TextUtilities.arXivPattern.matcher(cleanS2);
if (arxivMatcher.find())
localTag = "<idno type=\"arXiv\">";
}
if (localTag == null) {
Matcher pmidMatcher = TextUtilities.pmidPattern.matcher(cleanS2);
if (pmidMatcher.find())
localTag = "<idno type=\"PMID\">";
}
if (localTag == null) {
Matcher pmcidMatcher = TextUtilities.pmcidPattern.matcher(cleanS2);
if (pmcidMatcher.find())
localTag = "<idno type=\"PMC\">";
}
if (localTag == null) {
if (cleanS2.toLowerCase().indexOf("issn") != -1) {
localTag = "<idno type=\"ISSN\">";
}
}
if (localTag == null) {
if (cleanS2.toLowerCase().indexOf("isbn") != -1) {
localTag = "<idno type=\"ISBN\">";
}
}
// TODO: PII
if (localTag == null)
localTag = "<idno>";
output = writeField(s1, lastTag0, s2, "<pubnum>", localTag, addSpace, 0);
}
if (output != null) {
buffer.append(output);
lastTag = s1;
continue;
}
lastTag = s1;
}
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
currentTag0 = "";
testClosingTag(buffer, currentTag0, lastTag0);
buffer.append("</bibl>\n");
}
}
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
return buffer;
}
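    /**
     * Returns the TEI output for one labeled token: null when the current label does not
     * match the given field; the bare token (with a leading space when needed) when the
     * field continues; and the token prefixed with the opening tag in outField when a
     * new field starts.
     */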
private String writeField(String s1, String lastTag0, String s2,
String field, String outField, boolean addSpace, int nbIndent) {
String result = null;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
if (s1.equals(lastTag0) || s1.equals("I-" + lastTag0)) {
if (addSpace)
result = " " + s2;
else
result = s2;
} else {
result = "";
/*for (int i = 0; i < nbIndent; i++) {
result += "\t";
}*/
if (addSpace) {
result += " " + outField + s2;
} else {
result += outField + s2;
}
}
}
return result;
}
private boolean writeField2(StringBuilder buffer, String s1, String lastTag0, String s2, String field, String outField, boolean addSpace) {
boolean result = false;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
result = true;
if (s1.equals(lastTag0) || (s1).equals("I-" + lastTag0)) {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
} else {
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
}
}
return result;
}
private boolean testClosingTag(StringBuilder buffer, String currentTag0,
String lastTag0) {
boolean res = false;
if (!currentTag0.equals(lastTag0)) {
res = true;
// we close the current tag
if (lastTag0.equals("<other>")) {
buffer.append("");
} else if (lastTag0.equals("<title>")) {
buffer.append("</title>");
} else if (lastTag0.equals("<series>")) {
buffer.append("</title>");
} else if (lastTag0.equals("<author>")) {
buffer.append("</author>");
} else if (lastTag0.equals("<tech>")) {
buffer.append("</note>");
} else if (lastTag0.equals("<location>")) {
buffer.append("</pubPlace>");
} else if (lastTag0.equals("<date>")) {
buffer.append("</date>");
} else if (lastTag0.equals("<booktitle>")) {
buffer.append("</title>");
} else if (lastTag0.equals("<pages>")) {
buffer.append("</biblScope>");
} else if (lastTag0.equals("<publisher>")) {
buffer.append("</publisher>");
} else if (lastTag0.equals("<journal>")) {
buffer.append("</title>");
} else if (lastTag0.equals("<volume>")) {
buffer.append("</biblScope>");
} else if (lastTag0.equals("<issue>")) {
buffer.append("</biblScope>");
} else if (lastTag0.equals("<editor>")) {
buffer.append("</editor>");
} else if (lastTag0.equals("<pubnum>")) {
buffer.append("</idno>");
} else if (lastTag0.equals("<web>")) {
buffer.append("</ptr>");
} else if (lastTag0.equals("<note>")) {
buffer.append("</note>");
} else if (lastTag0.equals("<institution>")) {
buffer.append("</orgName>");
} else if (lastTag0.equals("<collaboration>")) {
buffer.append("</orgName>");
} else {
res = false;
}
}
return res;
}
@Override
public void close() throws IOException {
super.close();
}
}
| 44,474 | 44.945248 | 143 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/EngineParsers.java
|
package org.grobid.core.engines;
import org.grobid.core.engines.entities.ChemicalParser;
import org.grobid.core.engines.patent.ReferenceExtractor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
public class EngineParsers implements Closeable {
public static final Logger LOGGER = LoggerFactory.getLogger(EngineParsers.class);
private AuthorParser authorParser = null;
private AffiliationAddressParser affiliationAddressParser = null;
private HeaderParser headerParser = null;
private DateParser dateParser = null;
private CitationParser citationParser = null;
private FullTextParser fullTextParser = null;
private ReferenceExtractor referenceExtractor = null;
private ChemicalParser chemicalParser = null;
private Segmentation segmentationParser = null;
private ReferenceSegmenterParser referenceSegmenterParser = null;
private FigureParser figureParser = null;
private TableParser tableParser = null;
private MonographParser monographParser = null;
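    // All parsers below are created lazily, using double-checked locking, so that each
    // parser (and the model it loads) is instantiated at most once per EngineParsers instance.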
public AffiliationAddressParser getAffiliationAddressParser() {
if (affiliationAddressParser == null) {
synchronized (this) {
if (affiliationAddressParser == null) {
affiliationAddressParser = new AffiliationAddressParser();
}
}
}
return affiliationAddressParser;
}
public AuthorParser getAuthorParser() {
if (authorParser == null) {
synchronized (this) {
if (authorParser == null) {
authorParser = new AuthorParser();
}
}
}
return authorParser;
}
public HeaderParser getHeaderParser() {
if (headerParser == null) {
synchronized (this) {
if (headerParser == null) {
headerParser = new HeaderParser(this);
}
}
}
return headerParser;
}
public DateParser getDateParser() {
if (dateParser == null) {
synchronized (this) {
if (dateParser == null) {
dateParser = new DateParser();
}
}
}
return dateParser;
}
public CitationParser getCitationParser() {
if (citationParser == null) {
synchronized (this) {
if (citationParser == null) {
citationParser = new CitationParser(this);
}
}
}
return citationParser;
}
public FullTextParser getFullTextParser() {
if (fullTextParser == null) {
synchronized (this) {
if (fullTextParser == null) {
fullTextParser = new FullTextParser(this);
}
}
}
return fullTextParser;
}
public Segmentation getSegmentationParser() {
if (segmentationParser == null) {
synchronized (this) {
if (segmentationParser == null) {
segmentationParser = new Segmentation();
}
}
}
return segmentationParser;
}
public ReferenceExtractor getReferenceExtractor() {
if (referenceExtractor == null) {
synchronized (this) {
if (referenceExtractor == null) {
referenceExtractor = new ReferenceExtractor(this);
}
}
}
return referenceExtractor;
}
public ReferenceSegmenterParser getReferenceSegmenterParser() {
if (referenceSegmenterParser == null) {
synchronized (this) {
if (referenceSegmenterParser == null) {
referenceSegmenterParser = new ReferenceSegmenterParser();
}
}
}
return referenceSegmenterParser;
}
public ChemicalParser getChemicalParser() {
if (chemicalParser == null) {
synchronized (this) {
if (chemicalParser == null) {
chemicalParser = new ChemicalParser();
}
}
}
return chemicalParser;
}
public FigureParser getFigureParser() {
if (figureParser == null) {
synchronized (this) {
if (figureParser == null) {
figureParser = new FigureParser();
}
}
}
return figureParser;
}
public TableParser getTableParser() {
if (tableParser == null) {
synchronized (this) {
if (tableParser == null) {
tableParser = new TableParser();
}
}
}
return tableParser;
}
public MonographParser getMonographParser() {
if (monographParser == null) {
synchronized (this) {
if (monographParser == null) {
monographParser = new MonographParser();
}
}
}
return monographParser;
}
/**
     * Init all models; this will also load the models into memory
*/
public void initAll() {
affiliationAddressParser = getAffiliationAddressParser();
authorParser = getAuthorParser();
headerParser = getHeaderParser();
dateParser = getDateParser();
citationParser = getCitationParser();
fullTextParser = getFullTextParser();
//referenceExtractor = getReferenceExtractor();
segmentationParser = getSegmentationParser();
referenceSegmenterParser = getReferenceSegmenterParser();
figureParser = getFigureParser();
tableParser = getTableParser();
//MonographParser monographParser = getMonographParser();
}
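    // Usage sketch (illustrative, not part of the original source): EngineParsers is
    // Closeable, so it can be used with try-with-resources; models are otherwise loaded
    // lazily on the first call to each getter.
    //   try (EngineParsers parsers = new EngineParsers()) {
    //       parsers.initAll(); // optional eager loading
    //       HeaderParser headerParser = parsers.getHeaderParser();
    //   }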
@Override
public void close() throws IOException {
LOGGER.debug("==> Closing all resources...");
if (authorParser != null) {
authorParser.close();
authorParser = null;
LOGGER.debug("CLOSING authorParser");
}
if (affiliationAddressParser != null) {
affiliationAddressParser.close();
affiliationAddressParser = null;
LOGGER.debug("CLOSING affiliationAddressParser");
}
if (headerParser != null) {
headerParser.close();
headerParser = null;
LOGGER.debug("CLOSING headerParser");
}
if (dateParser != null) {
dateParser.close();
dateParser = null;
LOGGER.debug("CLOSING dateParser");
}
if (citationParser != null) {
citationParser.close();
citationParser = null;
LOGGER.debug("CLOSING citationParser");
}
if (segmentationParser != null) {
segmentationParser.close();
segmentationParser = null;
LOGGER.debug("CLOSING segmentationParser");
}
if (fullTextParser != null) {
fullTextParser.close();
fullTextParser = null;
LOGGER.debug("CLOSING fullTextParser");
}
if (referenceExtractor != null) {
referenceExtractor.close();
referenceExtractor = null;
LOGGER.debug("CLOSING referenceExtractor");
}
if (referenceSegmenterParser != null) {
referenceSegmenterParser.close();
referenceSegmenterParser = null;
LOGGER.debug("CLOSING referenceSegmenterParser");
}
if (chemicalParser != null) {
chemicalParser.close();
chemicalParser = null;
LOGGER.debug("CLOSING chemicalParser");
}
if (figureParser != null) {
figureParser.close();
figureParser = null;
LOGGER.debug("CLOSING figureParser");
}
if (tableParser != null) {
tableParser.close();
tableParser = null;
LOGGER.debug("CLOSING tableParser");
}
if (monographParser != null) {
monographParser.close();
monographParser = null;
LOGGER.debug("CLOSING monographParser");
}
LOGGER.debug("==> All resources closed");
}
}
| 8,349 | 29.363636 | 85 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/ReferenceSegmenterParser.java
|
package org.grobid.core.engines;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.grobid.core.GrobidModels;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentPiece;
import org.grobid.core.document.DocumentPointer;
import org.grobid.core.engines.citations.LabeledReferenceResult;
import org.grobid.core.engines.citations.ReferenceSegmenter;
import org.grobid.core.engines.label.SegmentationLabels;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.engines.tagging.GrobidCRFEngine;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeatureFactory;
import org.grobid.core.features.FeaturesVectorReferenceSegmenter;
import org.grobid.core.layout.Block;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.tokenization.LabeledTokensContainer;
import org.grobid.core.tokenization.TaggingTokenSynchronizer;
import org.grobid.core.utilities.BoundingBoxCalculator;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.Triple;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
import java.util.regex.Matcher;
import org.apache.commons.lang3.tuple.Pair;
public class ReferenceSegmenterParser extends AbstractParser implements ReferenceSegmenter {
private static final Logger LOGGER = LoggerFactory.getLogger(ReferenceSegmenterParser.class);
// projection scale for line length
private static final int LINESCALE = 10;
protected ReferenceSegmenterParser() {
super(GrobidModels.REFERENCE_SEGMENTER);
}
@Override
public List<LabeledReferenceResult> extract(String referenceBlock) {
Document res = Document.createFromText(referenceBlock);
DocumentPiece piece = new DocumentPiece(
new DocumentPointer(0, 0, 0),
new DocumentPointer(0, res.getTokenizations().size() - 1, res.getTokenizations().size() - 1));
return extract(res, Sets.newTreeSet(Collections.singletonList(piece)), false);
}
/**
     * @param doc Document object
     * @return a list of (reference_label, reference_string) pairs; the label is null when
     * no label was detected.
     * Example: ("[1]", "Hu W., Barkana, R., & Gruzinov A. Phys. Rev. Lett. 85, 1158")
*/
public List<LabeledReferenceResult> extract(Document doc) {
return extract(doc, false);
}
public List<LabeledReferenceResult> extract(Document doc, boolean training) {
SortedSet<DocumentPiece> referencesParts = doc.getDocumentPart(SegmentationLabels.REFERENCES);
return extract(doc, referencesParts, training);
}
public List<LabeledReferenceResult> extract(Document doc, SortedSet<DocumentPiece> referencesParts, boolean training) {
Pair<String,List<LayoutToken>> featSeg = getReferencesSectionFeatured(doc, referencesParts);
String res;
List<LayoutToken> tokenizationsReferences;
        if (featSeg == null) {
            // featSeg being null usually means that no reference segment was found in the
            // document segmentation
            return null;
        }
        String featureVector = featSeg.getLeft();
tokenizationsReferences = featSeg.getRight();
try {
            // To support long sequences when an RNN is used, we segment the input into pieces
            // shorter than max_sequence_length, with a quite significant overlap between pieces.
            // This does not apply to CRF, which can process arbitrarily long input sequences.
            // This is relevant to the reference segmenter RNN model, whose application is
            // position-free, but it cannot be generalized to long inputs of other RNN or
            // transformer models.
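            // Illustrative example (not part of the original source): with
            // originalMaxSequence = 2000, maxSequence becomes max(500, 2000 - 1000) = 1000,
            // and a 4700-line input is cut into the overlapping pieces
            // [0,1500), [1000,2500), [2000,3500), [3000,4700); adjacent pieces share
            // 500 lines, within which the labeled results are later re-joined.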
if (GrobidProperties.getGrobidCRFEngine(GrobidModels.REFERENCE_SEGMENTER) == GrobidCRFEngine.DELFT) {
String[] featureVectorLines = featureVector.split("\n");
/*for(LayoutToken token : tokenizationsReferences) {
System.out.print(token.getText());
}
System.out.println("\n");
System.out.println("total input lines: " + featureVectorLines.length + " - " + tokenizationsReferences.size() + " tokens");*/
int originalMaxSequence = 2000;
if (GrobidProperties.getInstance().getDelftRuntimeMaxSequenceLength(GrobidModels.REFERENCE_SEGMENTER.getModelName()) != -1) {
originalMaxSequence = GrobidProperties.getInstance().getDelftRuntimeMaxSequenceLength(GrobidModels.REFERENCE_SEGMENTER.getModelName());
}
if (featureVectorLines.length < originalMaxSequence || originalMaxSequence < 600) {
                    // if the input is shorter than the max sequence length, there is no need to segment;
                    // if the max sequence length is too small (e.g. transformer), we won't be able to
                    // manage overlaps adapted to references
res = label(featureVector);
} else {
// we adjust max sequence value to take into account 500 token lines overlap
int maxSequence = Math.max(500, originalMaxSequence - 1000);
//System.out.println("originalMaxSequence: " + originalMaxSequence + " / maxSequence adjusted to: " + maxSequence);
List<List<String>> featureVectorPieces = new ArrayList<>();
// segment the input vectors in overlapping sequences, according to the model max_sequence_length parameter
for(int i=0; (i*maxSequence) < featureVectorLines.length; i++) {
int lowerBound = i*maxSequence;
// overlapping: this localRes has 500 extra lines after the normal end
int upperBound = Math.min( ((i+1)*maxSequence)+500, featureVectorLines.length );
if (featureVectorLines.length - lowerBound < originalMaxSequence)
upperBound = featureVectorLines.length;
//System.out.println("lowerBound: " + lowerBound + " - upperBound: " + upperBound);
List<String> featureVectorPiece = new ArrayList<>();
for(int j=lowerBound; j<upperBound; j++)
featureVectorPiece.add(featureVectorLines[j]);
featureVectorPieces.add(featureVectorPiece);
if (upperBound == featureVectorLines.length)
break;
}
/*System.out.println("featureVectorPieces.size(): " + featureVectorPieces.size());
for(List<String> featureVectorPiece : featureVectorPieces) {
System.out.println(featureVectorPiece.size());
}*/
// label every pieces in batch
List<String> allRes = new ArrayList<>();
List<String> allVectors = new ArrayList<>();
for(List<String> featureVectorPiece : featureVectorPieces) {
StringBuilder localFeatureVector = new StringBuilder();
for(int j=0; j<featureVectorPiece.size(); j++) {
localFeatureVector.append(featureVectorPiece.get(j)).append("\n");
}
allVectors.add(localFeatureVector.toString());
}
// parallel labeling of the input segments
String fullRes = label(allVectors);
                    // segment this result to get back the input chunk alignment (with extra 500 overlapping lines)
String[] fullResLines = fullRes.split("\n");
int pos = 0;
for(List<String> featureVectorPiece : featureVectorPieces) {
StringBuilder localRes = new StringBuilder();
int localSize = featureVectorPiece.size();
for(int i=pos; i<pos+localSize; i++) {
localRes.append(fullResLines[i]).append("\n");
}
allRes.add(localRes.toString());
pos += localSize;
}
// combine results and reconnect smoothly overlaps
StringBuilder resBuilder = new StringBuilder();
int previousTransitionPos = 0;
for(int i=0; i<allRes.size(); i++) {
String localRes = allRes.get(i);
String[] localResLines = localRes.split("\n");
//System.out.println("localResLines.length: " + localResLines.length);
int transitionPos = localResLines.length;
if (i != allRes.size()-1) {
                            // in the trailing redundant part (the last 500 lines), we identify the line
                            // index of the last "closing" label; this is the point where we reconnect
                            // the labeled segments so that no labeled field is broken
for(int k=localResLines.length-1; k>=0; k--) {
if (localResLines.length-k == 500) {
// this is the max overlap, we don't go beyond!
transitionPos = k;
break;
}
String line = localResLines[k];
if (line.endsWith(TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX+"<label>") ||
line.endsWith(TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX+"<reference>")) {
// we can stop the line before this one
transitionPos = k;
break;
}
}
}
// else: we are at the last chunk, so we take the content until the very end
//System.out.println("previousTransitionPos: " + previousTransitionPos);
//System.out.println("transitionPos: " + transitionPos + "\n");
List<String> selectedlocalResLines = new ArrayList<>();
for(int j= previousTransitionPos; j<transitionPos; j++) {
if (j == previousTransitionPos && previousTransitionPos != 0) {
// we want to be sure to have a starting label
String localLine = localResLines[j];
if (localLine.indexOf(TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX) == -1) {
localLine = localLine.replace("<label>", TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX+"<label>");
localLine = localLine.replace("<reference>", TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX+"<reference>");
}
selectedlocalResLines.add(localLine);
} else if (j == previousTransitionPos && previousTransitionPos == 0 && i != 0) {
                                // previousTransitionPos is 0 and we are not at the first segment: we had a
                                // non-overlapping transition, so we want to avoid a starting label at this point
String localLine = localResLines[j];
if (localLine.indexOf(TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX) != -1) {
localLine = localLine.replace(TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX+"<label>", "<label>");
localLine = localLine.replace(TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX+"<reference>", "<reference>");
}
selectedlocalResLines.add(localLine);
} else {
selectedlocalResLines.add(localResLines[j]);
}
}
for(String localResLine : selectedlocalResLines)
resBuilder.append(localResLine).append("\n");
previousTransitionPos = transitionPos-maxSequence;
}
res = resBuilder.toString();
}
} else
res = label(featureVector);
}
catch(Exception e) {
throw new GrobidException("Labeling in ReferenceSegmenter fails.", e);
}
if (res == null) {
return null;
}
// if we extract for generating training data, we also give back the used features
List<Triple<String, String, String>> labeled = GenericTaggerUtils.getTokensWithLabelsAndFeatures(res, training);
return getExtractionResult(tokenizationsReferences, labeled);
}
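    /**
     * Re-assembles the labeled tokens into LabeledReferenceResult objects: the pending
     * (label, reference) pair is flushed each time a new labeled field begins, and the
     * last pending pair is flushed when the iteration ends.
     */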
private List<LabeledReferenceResult> getExtractionResult(List<LayoutToken> tokenizations, List<Triple<String, String, String>> labeled) {
final List<LabeledReferenceResult> resultList = new ArrayList<>();
final StringBuilder reference = new StringBuilder();
final List<LayoutToken> referenceTokens = new ArrayList<>();
final StringBuilder features = new StringBuilder();
final StringBuilder referenceLabel = new StringBuilder();
TaggingTokenSynchronizer synchronizer = new TaggingTokenSynchronizer(null, labeled, tokenizations);
Function<LabeledTokensContainer, Void> function = new Function<LabeledTokensContainer, Void>() {
@Override public Void apply(LabeledTokensContainer container) {
features.append(container.getFeatureString());
features.append('\n');
if (container.isBeginning()) {
if (reference.length() != 0) {
resultList.add(new LabeledReferenceResult(referenceLabel.length() == 0 ? null :
referenceLabel.toString().trim(), reference.toString().trim(), Lists.newArrayList(referenceTokens),
features.toString(), BoundingBoxCalculator.calculate(referenceTokens)));
reference.setLength(0);
referenceLabel.setLength(0);
features.setLength(0);
referenceTokens.clear();
}
}
return null;
}
};
Iterator<LabeledTokensContainer> iterator = synchronizer.iterator();
while (iterator.hasNext()) {
LabeledTokensContainer container = iterator.next();
if (container == null)
continue;
String tok = container.getToken();
String plainLabel = container.getPlainLabel();
if ("<label>".equals(plainLabel)) {
function.apply(container);
referenceLabel.append(tok);
if (container.isTrailingSpace() || container.isTrailingNewLine()) {
referenceLabel.append(' ');
}
} else if (plainLabel.equals("<reference>")) {
function.apply(container);
reference.append(tok);
if (container.isTrailingSpace()) {
reference.append(' ');
}
if (container.isTrailingNewLine()) {
reference.append('\n');
}
referenceTokens.addAll(container.getLayoutTokens());
} else if (plainLabel.equals("<other>")) {
// NOP
}
// Handle last one.
if (!iterator.hasNext()) {
resultList.add(new LabeledReferenceResult(referenceLabel.length() == 0 ? null :
referenceLabel.toString().trim(), reference.toString().trim(),
referenceTokens, features.toString(),
BoundingBoxCalculator.calculate(referenceTokens)));
reference.setLength(0);
referenceLabel.setLength(0);
}
}
return resultList;
}
public Pair<String,String> createTrainingData(Document doc, int id) {
SortedSet<DocumentPiece> referencesParts = doc.getDocumentPart(SegmentationLabels.REFERENCES);
Pair<String,List<LayoutToken>> featSeg = getReferencesSectionFeatured(doc, referencesParts);
String res;
List<LayoutToken> tokenizations;
        if (featSeg == null) {
            // featSeg being null usually means that no reference segment was found in the
            // document segmentation
            return null;
        }
        String featureVector = featSeg.getLeft();
tokenizations = featSeg.getRight();
try {
res = label(featureVector);
}
catch(Exception e) {
throw new GrobidException("CRF labeling in ReferenceSegmenter fails.", e);
}
if (res == null) {
return null;
}
List<Pair<String, String>> labeled = GenericTaggerUtils.getTokensAndLabels(res);
StringBuilder sb = new StringBuilder();
//noinspection StringConcatenationInsideStringBufferAppend
sb.append("<tei xml:space=\"preserve\">\n" +
" <teiHeader>\n" +
" <fileDesc xml:id=\"_" + id + "\"/>\n" +
" </teiHeader>\n" +
" <text xml:lang=\"en\">\n" +
" <listBibl>\n");
int tokPtr = 0;
boolean addSpace = false;
boolean addEOL = false;
String lastTag = null;
boolean refOpen = false;
for (Pair<String, String> l : labeled) {
String tok = l.getLeft();
String label = l.getRight();
int tokPtr2 = tokPtr;
for(; tokPtr2 < tokenizations.size(); tokPtr2++) {
if (tokenizations.get(tokPtr2).t().equals(" ")) {
addSpace = true;
}
else if (tokenizations.get(tokPtr2).t().equals("\n") ||
tokenizations.get(tokPtr).t().equals("\r") ) {
addEOL = true;
}
else {
break;
}
}
tokPtr = tokPtr2;
if (tokPtr >= tokenizations.size()) {
LOGGER.error("Implementation error: Reached the end of tokenizations, but current token is " + tok);
// we add a space to avoid concatenated text
addSpace = true;
}
else {
String tokenizationToken = tokenizations.get(tokPtr).getText();
if ((tokPtr != tokenizations.size()) && !tokenizationToken.equals(tok)) {
// and we add a space by default to avoid concatenated text
addSpace = true;
if (!tok.startsWith(tokenizationToken)) {
                    // this is a very exceptional case due to a sequence of accents/diacritics: we skip
                    // a shift in the tokenizations list and continue on the basis of the labeled token
// we check one ahead
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we try another position forward (second hope!)
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we try another position forward (last hope!)
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we return to the initial position
tokPtr = tokPtr-3;
tokenizationToken = tokenizations.get(tokPtr).getText();
LOGGER.error("Implementation error, tokens out of sync: " +
tokenizationToken + " != " + tok + ", at position " + tokPtr);
}
}
}
}
                    // note: if the above condition is true, this is an exceptional case due to a
                    // sequence of accents/diacritics and we can go on as a full string match
}
}
String plainLabel = GenericTaggerUtils.getPlainLabel(label);
boolean tagClosed = (lastTag != null) && testClosingTag(sb, label, lastTag, addSpace, addEOL);
if (tagClosed) {
addSpace = false;
addEOL = false;
}
if (tagClosed && lastTag.equals("<reference>")) {
refOpen = false;
}
String output;
String field;
if (refOpen) {
field = "<label>";
}
else {
field = "<bibl><label>";
}
output = writeField(label, lastTag, tok, "<label>", field, addSpace, addEOL, 2);
if (output != null) {
sb.append(output);
refOpen = true;
}
else {
if (refOpen) {
field = "";
}
else {
field = "<bibl>";
}
output = writeField(label, lastTag, tok, "<reference>", field, addSpace, addEOL, 2);
if (output != null) {
sb.append(output);
                        refOpen = true;
}
else {
output = writeField(label, lastTag, tok, "<other>", "", addSpace, addEOL, 2);
if (output != null) {
sb.append(output);
refOpen = false;
}
}
}
lastTag = plainLabel;
addSpace = false;
addEOL = false;
tokPtr++;
}
if (refOpen) {
sb.append("</bibl>");
}
sb.append("\n </listBibl>\n" +
" </text>\n" +
"</tei>\n");
return Pair.of(sb.toString(), featureVector);
}
private boolean testClosingTag(StringBuilder buffer,
String currentTag,
String lastTag,
boolean addSpace,
boolean addEOL) {
boolean res = false;
if (!currentTag.equals(lastTag)) {
res = true;
// we close the current tag
if (lastTag.equals("<other>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("\n");
} else if (lastTag.equals("<label>")) {
buffer.append("</label>");
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
} else if (lastTag.equals("<reference>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</bibl>\n");
} else {
res = false;
}
}
return res;
}
private String writeField(String currentTag,
String lastTag,
String token,
String field,
String outField,
boolean addSpace,
boolean addEOL,
int nbIndent) {
String result = null;
if (currentTag.endsWith(field)) {
if (currentTag.endsWith("<other>")) {
result = "";
if (currentTag.equals("I-<other>")) {
result += "\n";
for (int i = 0; i < nbIndent; i++) {
result += " ";
}
}
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
result += TextUtilities.HTMLEncode(token);
}
else if ((lastTag != null) && currentTag.endsWith(lastTag)) {
result = "";
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
if (currentTag.startsWith("I-"))
result += outField;
result += TextUtilities.HTMLEncode(token);
}
else {
result = "";
if (outField.length() > 0) {
for (int i = 0; i < nbIndent; i++) {
result += " ";
}
}
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
result += outField + TextUtilities.HTMLEncode(token);
}
}
return result;
}
static public Pair<String,List<LayoutToken>> getReferencesSectionFeatured(Document doc,
SortedSet<DocumentPiece> referencesParts) {
if ((referencesParts == null) || (referencesParts.size() == 0)) {
return null;
}
FeatureFactory featureFactory = FeatureFactory.getInstance();
List<Block> blocks = doc.getBlocks();
if ( (blocks == null) || blocks.size() == 0) {
return null;
}
StringBuilder citations = new StringBuilder();
boolean newline;
int n; // overall token number
FeaturesVectorReferenceSegmenter features;
FeaturesVectorReferenceSegmenter previousFeatures = null;
boolean endblock;
boolean startblock;
//int mm = 0; // token position in the sentence
int nn; // token position in the line
double lineStartX = Double.NaN;
boolean indented = false;
List<LayoutToken> tokenizationsReferences = new ArrayList<LayoutToken>();
List<LayoutToken> tokenizations = doc.getTokenizations();
int maxLineLength = 1;
//List<Integer> lineLengths = new ArrayList<Integer>();
int currentLineLength = 0;
//int lineIndex = 0;
        // we calculate the current max line length and initialize the body tokenization structure
for(DocumentPiece docPiece : referencesParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
int tokens = dp1.getTokenDocPos();
int tokene = dp2.getTokenDocPos();
for (int i = tokens; i <= tokene; i++) {
tokenizationsReferences.add(tokenizations.get(i));
currentLineLength += tokenizations.get(i).getText().length();
if (tokenizations.get(i).t().equals("\n") || tokenizations.get(i).t().equals("\r") ) {
//lineLengths.add(currentLineLength);
if (currentLineLength > maxLineLength)
maxLineLength = currentLineLength;
currentLineLength = 0;
}
}
}
for(DocumentPiece docPiece : referencesParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
/*for(int i=dp1.getTokenDocPos(); i<dp2.getTokenDocPos(); i++) {
System.out.print(tokenizations.get(i));
}
System.out.println("");
*/
//currentLineLength = lineLengths.get(lineIndex);
nn = 0;
int tokenIndex = 0;
int blockIndex = dp1.getBlockPtr();
Block block = null;
List<LayoutToken> tokens;
boolean previousNewline = true;
currentLineLength = 0;
String currentLineProfile = null;
for (n = dp1.getTokenDocPos(); n <= dp2.getTokenDocPos(); n++) {
String text = tokenizations.get(n).getText();
if (text == null) {
continue;
}
// set corresponding block
if ( (block != null) && (n > block.getEndToken()) ) {
blockIndex++;
tokenIndex = 0;
currentLineLength = 0;
currentLineProfile = null;
}
if (blockIndex<blocks.size()) {
block = blocks.get(blockIndex);
if (n == block.getStartToken()) {
startblock = true;
endblock = false;
}
else if (n == block.getEndToken()) {
startblock = false;
endblock = true;
}
else {
startblock = false;
endblock = false;
}
}
else {
block = null;
startblock = false;
endblock = false;
}
// set corresponding token
if (block != null)
tokens = block.getTokens();
else
tokens = null;
if (text.equals("\n") || text.equals("\r")) {
previousNewline = true;
nn = 0;
currentLineLength = 0;
currentLineProfile = null;
//lineIndex++;
//currentLineLength = lineLengths.get(lineIndex);
continue;
}
else {
newline = false;
                    nn += text.length(); // accumulate the token length within the current line
}
if (text.equals(" ") || text.equals("\t")) {
nn++;
continue;
}
if (text.trim().length() == 0) {
continue;
}
LayoutToken token = null;
if (tokens != null) {
int i = tokenIndex;
while (i < tokens.size()) {
token = tokens.get(i);
if (text.equals(token.getText())) {
tokenIndex = i;
break;
}
i++;
}
}
if (previousNewline) {
newline = true;
previousNewline = false;
if (token != null && previousFeatures != null) {
double previousLineStartX = lineStartX;
lineStartX = token.getX();
double characterWidth = token.width / token.getText().length();
if (!Double.isNaN(previousLineStartX)) {
// Indentation if line start is > 1 character width to the right of previous line start
if (lineStartX - previousLineStartX > characterWidth)
indented = true;
// Indentation ends if line start is > 1 character width to the left of previous line start
else if (previousLineStartX - lineStartX > characterWidth)
indented = false;
// Otherwise indentation is unchanged
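                            // e.g. with ~5pt-wide glyphs, a line starting at x=72 after a line
                            // starting at x=60 (72 - 60 > 5) switches indented to true; a later
                            // line back at x=60 switches it to false again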
}
}
}
if (TextUtilities.filterLine(text)) {
continue;
}
features = new FeaturesVectorReferenceSegmenter();
features.token = token;
features.string = text;
if (newline) {
features.lineStatus = "LINESTART";
}
Matcher m0 = featureFactory.isPunct.matcher(text);
if (m0.find()) {
features.punctType = "PUNCT";
}
if (text.equals("(") || text.equals("[")) {
features.punctType = "OPENBRACKET";
} else if (text.equals(")") || text.equals("]")) {
features.punctType = "ENDBRACKET";
} else if (text.equals(".")) {
features.punctType = "DOT";
} else if (text.equals(",")) {
features.punctType = "COMMA";
} else if (text.equals("-")) {
features.punctType = "HYPHEN";
} else if (text.equals("\"") || text.equals("\'") || text.equals("`")) {
features.punctType = "QUOTE";
}
if ( (n == 0) || (previousNewline) ) {
features.lineStatus = "LINESTART";
if (n == 0)
features.blockStatus = "BLOCKSTART";
nn = 0;
}
if (indented) {
features.alignmentStatus = "LINEINDENT";
}
else {
features.alignmentStatus = "ALIGNEDLEFT";
}
{
// look ahead...
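                    // (scan forward to the next line break to decide whether the current token
                    // ends its line and/or block, and to compute the line length and
                    // punctuation profile used as features)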
boolean endline = true;
int ii = 1;
boolean endloop = false;
String accumulated = text;
while ((n + ii < tokenizations.size()) && (!endloop)) {
String tok = tokenizations.get(n + ii).getText();
if (tok != null) {
if (currentLineProfile == null)
accumulated += tok;
if (tok.equals("\n") || tok.equals("\r")) {
endloop = true;
                                if (currentLineLength == 0) {
currentLineLength = accumulated.length();
}
if (currentLineProfile == null) {
currentLineProfile = TextUtilities.punctuationProfile(accumulated);
}
}
else if (!tok.equals(" ") && !tok.equals("\t")) {
endline = false;
}
else {
if (TextUtilities.filterLine(tok)) {
endloop = true;
                                    if (currentLineLength == 0) {
currentLineLength = accumulated.length();
}
if (currentLineProfile == null) {
currentLineProfile = TextUtilities.punctuationProfile(accumulated);
}
}
}
}
if (n + ii >= tokenizations.size() - 1) {
endblock = true;
endline = true;
}
if (endline && (block != null) && (n+ii == block.getEndToken())) {
endblock = true;
}
ii++;
}
if ((!endline) && !(newline)) {
features.lineStatus = "LINEIN";
}
else if (!newline) {
features.lineStatus = "LINEEND";
previousNewline = true;
}
if (startblock) {
features.blockStatus = "BLOCKSTART";
}
if ((!endblock) && (features.blockStatus == null))
features.blockStatus = "BLOCKIN";
else if (features.blockStatus == null) {
features.blockStatus = "BLOCKEND";
}
}
if (text.length() == 1) {
features.singleChar = true;
}
if (Character.isUpperCase(text.charAt(0))) {
features.capitalisation = "INITCAP";
}
if (featureFactory.test_all_capital(text)) {
features.capitalisation = "ALLCAP";
}
if (featureFactory.test_digit(text)) {
features.digit = "CONTAINSDIGITS";
}
if (featureFactory.test_common(text)) {
features.commonName = true;
}
if (featureFactory.test_names(text)) {
features.properName = true;
}
if (featureFactory.test_month(text)) {
features.month = true;
}
Matcher m = featureFactory.isDigit.matcher(text);
if (m.find()) {
features.digit = "ALLDIGIT";
}
Matcher m2 = featureFactory.year.matcher(text);
if (m2.find()) {
features.year = true;
}
Matcher m3 = featureFactory.email.matcher(text);
if (m3.find()) {
features.email = true;
}
Matcher m4 = featureFactory.http.matcher(text);
if (m4.find()) {
features.http = true;
}
if ( (token != null) && (token.getBold()) )
features.bold = true;
if ( (token != null) && (token.getItalic()) )
features.italic = true;
if (features.capitalisation == null)
features.capitalisation = "NOCAPS";
if (features.digit == null)
features.digit = "NODIGIT";
if (features.punctType == null)
features.punctType = "NOPUNCT";
//System.out.println(nn + "\t" + currentLineLength + "\t" + maxLineLength);
features.lineLength = featureFactory
.linearScaling(currentLineLength, maxLineLength, LINESCALE);
features.relativePosition = featureFactory
.linearScaling(nn, currentLineLength, LINESCALE);
features.punctuationProfile = currentLineProfile;
if (previousFeatures != null)
citations.append(previousFeatures.printVector());
//mm++;
previousFeatures = features;
}
}
if (previousFeatures != null)
citations.append(previousFeatures.printVector());
return Pair.of(citations.toString(), tokenizationsReferences);
}
}
| 33,948 | 35.582974 | 141 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/ModelMap.java
|
package org.grobid.core.engines;
import org.chasen.crfpp.Model;
import org.chasen.crfpp.Tagger;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
/**
 * Class that creates a tagger from a given model, or reuses it if it already exists.
*
*/
@Deprecated
public class ModelMap {
/**
* The logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(ModelMap.class);
/**
* Map that contains all the models loaded in memory.
*/
private static Map<String, Model> models = null;
/**
     * Return a CRF++ (CRFPP) tagger corresponding to the model given as argument.
*
* @param grobidModel
* the model to use for the creation of the tagger.
* @return Tagger
*
*/
@Deprecated
public static Tagger getTagger(GrobidModel grobidModel) {
LOGGER.debug("start getTagger");
Tagger tagger;
try {
LOGGER.debug("Creating tagger");
Model model = getModel(grobidModel.getModelPath());
tagger = model.createTagger();
} catch (Throwable thb) {
throw new GrobidException("Cannot instantiate a tagger", thb);
}
LOGGER.debug("end getTagger");
return tagger;
}
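    // Usage sketch (illustrative only; this class is deprecated). The returned Tagger is
    // the CRF++ (org.chasen.crfpp) binding; the add()/parse() calls below are its standard
    // API, assumed here:
    //   Tagger tagger = ModelMap.getTagger(GrobidModels.DATE);
    //   tagger.clear();
    //   for (String featureLine : featureLines) {
    //       tagger.add(featureLine);
    //   }
    //   tagger.parse(); // run decoding over the added sequence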
/**
* Loading of the CRFPP models.
*/
@Deprecated
public static synchronized void initModels() {
LOGGER.info("Loading models");
GrobidModels[] models = GrobidModels.values();
for (GrobidModels model : models) {
if (new File(model.getModelPath()).exists()) {
getModel(model.getModelPath());
}
else {
LOGGER.info("Loading model " + model.getModelPath() + " failed because the path is not valid.");
}
}
LOGGER.info("Models loaded");
}
public static Model getModel(GrobidModel grobidModel) {
return getModel(grobidModel.getModelPath());
}
/**
* Return the model corresponding to the given path. Models are loaded in
* memory if they don't exist.
*
* @param modelPath
* the path to the model
* @return the model corresponding to the given path.
*/
@Deprecated
protected static Model getModel(String modelPath) {
LOGGER.debug("start getModel");
if (models == null) {
models = new HashMap<String, Model>();
}
if (models.get(modelPath) == null) {
getNewModel(modelPath);
}
LOGGER.debug("end getModel");
return models.get(modelPath);
}
/**
     * Load the model at the given path and add it to the model map.
*
* @param modelPath
* The path of the model to use.
*/
@Deprecated
protected static synchronized void getNewModel(String modelPath) {
LOGGER.info("Loading model " + modelPath + " in memory");
models.put(modelPath, new Model("-m " + modelPath + " "));
}
}
| 2,781 | 23.839286 | 100 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/AffiliationAddressParser.java
|
package org.grobid.core.engines;
import org.chasen.crfpp.Tagger;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.Affiliation;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorAffiliationAddress;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
public class AffiliationAddressParser extends AbstractParser {
public Lexicon lexicon = Lexicon.getInstance();
public AffiliationAddressParser() {
super(GrobidModels.AFFILIATION_ADDRESS);
}
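    /**
     * Processes a raw affiliation/address string: the input is normalised, tokenized and
     * featurized (including gazetteer positions of city names), labeled with the
     * affiliation-address model, and the result is assembled into Affiliation objects.
     */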
public ArrayList<Affiliation> processing(String input) {
try {
if ((input == null) || (input.length() == 0)) {
return null;
}
input = UnicodeUtil.normaliseText(input);
input = input.trim();
input = TextUtilities.dehyphenize(input);
// TBD: pass the language object to the tokenizer
List<LayoutToken> tokenizations = analyzer.tokenizeWithLayoutToken(input);
List<String> affiliationBlocks = getAffiliationBlocks(tokenizations);
List<List<OffsetPosition>> placesPositions = new ArrayList<List<OffsetPosition>>();
placesPositions.add(lexicon.tokenPositionsCityNames(tokenizations));
List<List<LayoutToken>> allTokens = new ArrayList<List<LayoutToken>>();
allTokens.add(tokenizations);
String header = FeaturesVectorAffiliationAddress.addFeaturesAffiliationAddress(affiliationBlocks, allTokens, placesPositions);
String res = label(header);
return resultBuilder(res, tokenizations, false); // don't use pre-labels
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
protected static List<String> getAffiliationBlocks(List<LayoutToken> tokenizations) {
ArrayList<String> affiliationBlocks = new ArrayList<String>();
for(LayoutToken tok : tokenizations) {
if (tok.getText().length() == 0)
continue;
if (!tok.getText().equals(" ")) {
if (tok.getText().equals("\n")) {
affiliationBlocks.add("@newline");
} else
affiliationBlocks.add(tok + " <affiliation>");
}
}
return affiliationBlocks;
}
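    // Illustrative example (not part of the original source): the tokens
    // ["INRIA", " ", "Paris", "\n", "France"] yield the blocks
    // ["INRIA <affiliation>", "Paris <affiliation>", "@newline", "France <affiliation>"]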
/**
     * Post-processing of the extracted affiliation and address fields.
     * The input string comes from a previous parser: its segmentation can be kept,
     * and we filter in all tokens labelled <address> or <affiliation>.
     * We also need to keep the original tokenization information to recreate the
     * exact initial string.
*/
public List<Affiliation> processReflow(String result, List<LayoutToken> tokenizations) {
if ((result == null) || (result.length() == 0)) {
return null;
}
List<String> affiliationBlocks = new ArrayList<String>();
List<LayoutToken> subTokenizations = new ArrayList<LayoutToken>();
filterAffiliationAddress(result, tokenizations, affiliationBlocks, subTokenizations);
return processingReflow(affiliationBlocks, subTokenizations);
}
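    /**
     * Walks the labeling result of the upstream parser in sync with the original
     * tokenization and keeps only the tokens labeled <affiliation> or <address>,
     * rewriting them as "token label" blocks (with "@newline" markers at line ends)
     * so they can be re-labeled by this parser.
     */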
private void filterAffiliationAddress(String result,
List<LayoutToken> tokenizations,
List<String> affiliationBlocks,
List<LayoutToken> subTokenizations) {
StringTokenizer st = new StringTokenizer(result, "\n");
String lastLabel = null;
int p = 0;
List<LayoutToken> tokenizationsBuffer = null;
while (st.hasMoreTokens() && (p < tokenizations.size())) {
String line = st.nextToken();
if (line.trim().length() == 0) {
affiliationBlocks.add("\n");
lastLabel = null;
}
else {
String delimiter = "\t";
if (line.indexOf(delimiter) == -1)
delimiter = " ";
String[] s = line.split(delimiter);
String s0 = s[0].trim();
boolean isEndLine = false;
if (line.contains("LINEEND")) {
isEndLine = true;
}
int p0 = p;
boolean strop = false;
tokenizationsBuffer = new ArrayList<LayoutToken>();
String tokOriginal = null;
while ((!strop) && (p < tokenizations.size())) {
tokOriginal = tokenizations.get(p).getText().trim();
tokenizationsBuffer.add(tokenizations.get(p));
if (tokOriginal.equals(s0)) {
strop = true;
}
p++;
}
if (p == tokenizations.size()) {
                    // either we are at the end of the header, or we might have
                    // a problematic token in the tokenization for some reason
                    if ((p - p0) > 2) {
                        // we lost synchronization, so we reset p for the next token
p = p0;
continue;
}
}
int ll = s.length;
String label = s[ll-1];
if ((tokOriginal != null) && ( ((label.indexOf("affiliation") != -1) || (label.indexOf("address") != -1)) )) {
affiliationBlocks.add(tokOriginal + " " + label);
// add the content of tokenizationsBuffer
for(LayoutToken tokk : tokenizationsBuffer) {
subTokenizations.add(tokk);
}
if (tokenizationsBuffer.size() > 0 && isEndLine) {
affiliationBlocks.add("@newline");
}
}
else if (lastLabel != null) {
affiliationBlocks.add("\n");
}
if ((label.indexOf("affiliation") != -1) || (label.indexOf("address") != -1)) {
lastLabel = label;
} else {
lastLabel = null;
}
}
}
//System.out.println(subTokenizations.toString());
//System.out.println(affiliationBlocks.toString());
}
private ArrayList<Affiliation> processingReflow(List<String> affiliationBlocks, List<LayoutToken> tokenizations) {
String res = runReflow(affiliationBlocks, tokenizations);
        return resultBuilder(res, tokenizations, false); // pre-labels would be available here (reflow) but are not used
}
private String runReflow(List<String> affiliationBlocks,
List<LayoutToken> tokenizations) {
try {
List<List<OffsetPosition>> placesPositions = new ArrayList<List<OffsetPosition>>();
placesPositions.add(lexicon.tokenPositionsCityNames(tokenizations));
List<List<LayoutToken>> allTokens = new ArrayList<List<LayoutToken>>();
allTokens.add(tokenizations);
String header =
FeaturesVectorAffiliationAddress.addFeaturesAffiliationAddress(affiliationBlocks, allTokens, placesPositions);
if ((header == null) || (header.trim().length() == 0)) {
return null;
}
return label(header);
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
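    /**
     * Assembles Affiliation objects from the labeled sequence: tokens are re-synchronized
     * with the original tokenization to restore spacing, and a new Affiliation is started
     * on empty lines, on a new marker, or on an I- label opening a field after an address
     * has already been seen. When usePreLabel is true, the pre-labels coming from the
     * upstream model (<affiliation>/<address>) gate where each token is assigned.
     */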
protected ArrayList<Affiliation> resultBuilder(String result,
List<LayoutToken> tokenizations,
boolean usePreLabel) {
ArrayList<Affiliation> fullAffiliations = null;
if (result == null) {
return fullAffiliations;
}
result = result.replace("\n\n", "\n \n"); // force empty line between affiliation blocks
try {
//System.out.println(tokenizations.toString());
// extract results from the processed file
if ((result == null) || (result.length() == 0)) {
return null;
}
StringTokenizer st2 = new StringTokenizer(result, "\n");
String lastTag = null;
org.grobid.core.data.Affiliation aff = new Affiliation();
int lineCount = 0;
            boolean hasInstitution = false;
            boolean hasDepartment = false;
            boolean hasAddress = false;
            boolean hasLaboratory = false;
boolean newMarker = false;
boolean useMarker = false;
String currentMarker = null;
int p = 0;
while (st2.hasMoreTokens()) {
boolean addSpace = false;
String line = st2.nextToken();
Integer lineCountInt = lineCount;
if (line.trim().length() == 0) {
if (aff.notNull()) {
if (fullAffiliations == null) {
fullAffiliations = new ArrayList<Affiliation>();
}
fullAffiliations.add(aff);
aff = new Affiliation();
currentMarker = null;
}
hasInstitution = false;
hasDepartment = false;
hasLaboratory = false;
hasAddress = false;
continue;
}
String delimiter = "\t";
if (line.indexOf(delimiter) == -1)
delimiter = " ";
StringTokenizer st3 = new StringTokenizer(line, delimiter);
int ll = st3.countTokens();
int i = 0;
String s1 = null; // predicted label
String s2 = null; // lexical token
String s3 = null; // pre-label
ArrayList<String> localFeatures = new ArrayList<String>();
while (st3.hasMoreTokens()) {
String s = st3.nextToken().trim();
if (i == 0) {
s2 = s; // lexical token
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p).getText();
if (tokOriginal.equals(" ")) {
addSpace = true;
} else if (tokOriginal.equals(s)) {
strop = true;
}
p++;
}
} else if (i == ll - 2) {
s3 = s; // pre-label
} else if (i == ll - 1) {
s1 = s; // label
} else {
localFeatures.add(s);
}
i++;
}
if (s1.equals("<marker>")) {
if (currentMarker == null)
currentMarker = s2;
else {
if (addSpace) {
currentMarker += " " + s2;
} else
currentMarker += s2;
}
aff.setMarker(currentMarker);
newMarker = false;
useMarker = true;
} else if (s1.equals("I-<marker>")) {
currentMarker = s2;
newMarker = true;
useMarker = true;
}
if (newMarker) {
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
}
aff = new Affiliation();
hasInstitution = false;
hasLaboratory = false;
hasDepartment = false;
hasAddress = false;
if (currentMarker != null) {
aff.setMarker(currentMarker);
}
newMarker = false;
} else if (s1.equals("<institution>") || s1.equals("I-<institution>")) {
if ((!usePreLabel) ||
((usePreLabel) && (s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))
) {
hasInstitution = true;
if (aff.getInstitutions() != null) {
if (s1.equals("I-<institution>") &&
(localFeatures.contains("LINESTART"))) {
// new affiliation
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
}
hasInstitution = true;
hasDepartment = false;
hasLaboratory = false;
hasAddress = false;
aff = new Affiliation();
aff.addInstitution(s2);
if (currentMarker != null)
aff.setMarker(currentMarker.trim());
} else if (s1.equals("I-<institution>") && hasInstitution && hasAddress &&
(!lastTag.equals("<institution>"))) {
// new affiliation
if (aff.notNull()) {
if (fullAffiliations == null) {
fullAffiliations = new ArrayList<Affiliation>();
}
fullAffiliations.add(aff);
}
hasInstitution = true;
hasDepartment = false;
hasLaboratory = false;
hasAddress = false;
aff = new Affiliation();
aff.addInstitution(s2);
if (currentMarker != null) {
aff.setMarker(currentMarker.trim());
}
} else if (s1.equals("I-<institution>")) {
// we have multiple institutions for this affiliation
//aff.addInstitution(aff.institution);
aff.addInstitution(s2);
} else if (addSpace) {
aff.extendLastInstitution(" " + s2);
} else {
aff.extendLastInstitution(s2);
}
} else {
aff.addInstitution(s2);
}
} else if ((usePreLabel) && (s3.equals("<address>") || s3.equals("I-<address>"))) {
// that's a piece of the address badly labelled according to the model
if (aff.getAddressString() != null) {
if (addSpace) {
aff.setAddressString(aff.getAddressString() + " " + s2);
} else {
aff.setAddressString(aff.getAddressString() + s2);
}
} else {
aff.setAddressString(s2);
}
}
} else if (s1.equals("<addrLine>") || s1.equals("I-<addrLine>")) {
if ((!usePreLabel) ||
((usePreLabel) && ((s3.equals("<address>") || s3.equals("I-<address>"))))) {
if (aff.getAddrLine() != null) {
if (s1.equals(lastTag) || lastTag.equals("I-<addrLine>")) {
if (s1.equals("I-<addrLine>")) {
aff.setAddrLine(aff.getAddrLine() + " ; " + s2);
} else if (addSpace) {
aff.setAddrLine(aff.getAddrLine() + " " + s2);
} else {
aff.setAddrLine(aff.getAddrLine() + s2);
}
} else {
aff.setAddrLine(aff.getAddrLine() + ", " + s2);
}
} else {
aff.setAddrLine(s2);
}
hasAddress = true;
} else if ((usePreLabel) && ((s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))) {
if (aff.getAffiliationString() != null) {
if (s1.equals(lastTag)) {
if (addSpace) {
aff.setAffiliationString(aff.getAffiliationString() + " " + s2);
} else {
aff.setAffiliationString(aff.getAffiliationString() + s2);
}
} else {
aff.setAffiliationString(aff.getAffiliationString() + " ; " + s2);
}
} else {
aff.setAffiliationString(s2);
}
}
} else if (s1.equals("<department>") || s1.equals("I-<department>")) {
if ((!usePreLabel) ||
((usePreLabel) && (s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))
) {
if (aff.getDepartments() != null) {
/*if (localFeatures.contains("LINESTART"))
aff.department += " " + s2;*/
if ((s1.equals("I-<department>")) &&
(localFeatures.contains("LINESTART"))
) {
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
}
hasInstitution = false;
hasDepartment = true;
hasLaboratory = false;
hasAddress = false;
aff = new Affiliation();
aff.addDepartment(s2);
if (currentMarker != null) {
aff.setMarker(currentMarker.trim());
}
} else if ((s1.equals("I-<department>")) && hasDepartment && hasAddress &&
!lastTag.equals("<department>")) {
if (aff.notNull()) {
if (fullAffiliations == null) {
fullAffiliations = new ArrayList<Affiliation>();
}
fullAffiliations.add(aff);
}
hasInstitution = false;
hasDepartment = true;
hasAddress = false;
hasLaboratory = false;
aff = new Affiliation();
aff.addDepartment(s2);
if (currentMarker != null) {
aff.setMarker(currentMarker.trim());
}
} else if (s1.equals("I-<department>")) {
// we have multiple departments for this affiliation
aff.addDepartment(s2);
//aff.department = s2;
} else if (addSpace) {
//aff.extendFirstDepartment(" " + s2);
aff.extendLastDepartment(" " + s2);
} else {
//aff.extendFirstDepartment(s2);
aff.extendLastDepartment(s2);
}
} else if (aff.getInstitutions() != null) {
/*if (localFeatures.contains("LINESTART"))
aff.department += " " + s2;*/
if ((s1.equals("I-<department>")) && hasAddress &&
(localFeatures.contains("LINESTART"))
) {
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
}
hasInstitution = false;
hasDepartment = true;
hasLaboratory = false;
hasAddress = false;
aff = new Affiliation();
aff.addDepartment(s2);
if (currentMarker != null) {
aff.setMarker(currentMarker.trim());
}
} else {
aff.addDepartment(s2);
}
} else {
aff.addDepartment(s2);
}
} else if ((usePreLabel) && (s3.equals("<address>") || s3.equals("I-<address>"))) {
if (aff.getAddressString() != null) {
if (addSpace) {
aff.setAddressString(aff.getAddressString() + " " + s2);
} else {
aff.setAddressString(aff.getAddressString() + s2);
}
} else {
aff.setAddressString(s2);
}
}
} else if (s1.equals("<laboratory>") || s1.equals("I-<laboratory>")) {
if ((!usePreLabel) ||
((usePreLabel) && (s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))
) {
hasLaboratory = true;
if (aff.getLaboratories() != null) {
if (s1.equals("I-<laboratory>") &&
(localFeatures.contains("LINESTART"))) {
// new affiliation
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
}
hasInstitution = false;
hasLaboratory = true;
hasDepartment = false;
hasAddress = false;
aff = new Affiliation();
aff.addLaboratory(s2);
if (currentMarker != null) {
aff.setMarker(currentMarker.trim());
}
} else if (s1.equals("I-<laboratory>")
&& hasLaboratory
&& hasAddress
&& (!lastTag.equals("<laboratory>"))) {
// new affiliation
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
}
hasInstitution = false;
hasLaboratory = true;
hasDepartment = false;
hasAddress = false;
aff = new Affiliation();
aff.addLaboratory(s2);
if (currentMarker != null) {
aff.setMarker(currentMarker.trim());
}
} else if (s1.equals("I-<laboratory>")) {
// we have multiple laboratories for this affiliation
aff.addLaboratory(s2);
} else if (addSpace) {
aff.extendLastLaboratory(" " + s2);
} else {
aff.extendLastLaboratory(s2);
}
} else {
aff.addLaboratory(s2);
}
} else if ((usePreLabel) && (s3.equals("<address>") || s3.equals("I-<address>"))) {
// that's a piece of the address badly labelled
if (aff.getAddressString() != null) {
if (addSpace) {
aff.setAddressString(aff.getAddressString() + " " + s2);
} else {
aff.setAddressString(aff.getAddressString() + s2);
}
} else {
aff.setAddressString(s2);
}
}
} else if (s1.equals("<country>") || s1.equals("I-<country>")) {
if ((!usePreLabel) ||
((usePreLabel) && ((s3.equals("<address>") || s3.equals("I-<address>"))))) {
if (aff.getCountry() != null) {
if (s1.equals("I-<country>")) {
aff.setCountry(aff.getCountry() + ", " + s2);
} else if (addSpace) {
aff.setCountry(aff.getCountry() + " " + s2);
} else {
aff.setCountry(aff.getCountry() + s2);
}
} else {
aff.setCountry(s2);
}
hasAddress = true;
} else if ((usePreLabel) && ((s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))) {
if (aff.getAffiliationString() != null) {
if (addSpace) {
aff.setAffiliationString(aff.getAffiliationString() + " " + s2);
} else {
aff.setAffiliationString(aff.getAffiliationString() + s2);
}
} else {
aff.setAffiliationString(s2);
}
}
} else if (s1.equals("<postCode>") || s1.equals("I-<postCode>")) {
if ((!usePreLabel) ||
((usePreLabel) && ((s3.equals("<address>") || s3.equals("I-<address>"))))) {
if (aff.getPostCode() != null) {
if (s1.equals("I-<postCode>")) {
aff.setPostCode(aff.getPostCode() + ", " + s2);
} else if (addSpace) {
aff.setPostCode(aff.getPostCode() + " " + s2);
} else {
aff.setPostCode(aff.getPostCode() + s2);
}
} else {
aff.setPostCode(s2);
}
} else if ((usePreLabel) && ((s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))) {
if (aff.getAffiliationString() != null) {
if (addSpace) {
aff.setAffiliationString(aff.getAffiliationString() + " " + s2);
} else {
aff.setAffiliationString(aff.getAffiliationString() + s2);
}
} else {
aff.setAffiliationString(s2);
}
}
} else if (s1.equals("<postBox>") || s1.equals("I-<postBox>")) {
if ((!usePreLabel) ||
((usePreLabel) && ((s3.equals("<address>") || s3.equals("I-<address>"))))) {
if (aff.getPostBox() != null) {
if (s1.equals("I-<postBox>")) {
aff.setPostBox(aff.getPostBox() + ", " + s2);
} else if (addSpace) {
aff.setPostBox(aff.getPostBox() + " " + s2);
} else {
aff.setPostBox(aff.getPostBox() + s2);
}
} else {
aff.setPostBox(s2);
}
} else if ((usePreLabel) && ((s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))) {
if (aff.getAffiliationString() != null) {
if (addSpace) {
aff.setAffiliationString(aff.getAffiliationString() + " " + s2);
} else {
aff.setAffiliationString(aff.getAffiliationString() + s2);
}
} else {
aff.setAffiliationString(s2);
}
}
} else if (s1.equals("<region>") || s1.equals("I-<region>")) {
if ((!usePreLabel) ||
((usePreLabel) && ((s3.equals("<address>") || s3.equals("I-<address>"))))) {
if (aff.getRegion() != null) {
if (s1.equals("I-<region>")) {
aff.setRegion(aff.getRegion() + ", " + s2);
} else if (addSpace) {
aff.setRegion(aff.getRegion() + " " + s2);
} else {
aff.setRegion(aff.getRegion() + s2);
}
} else {
aff.setRegion(s2);
}
} else if ((usePreLabel) && ((s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))) {
if (aff.getAffiliationString() != null) {
if (addSpace) {
aff.setAffiliationString(aff.getAffiliationString() + " " + s2);
} else {
aff.setAffiliationString(aff.getAffiliationString() + s2);
}
} else {
aff.setAffiliationString(s2);
}
}
} else if (s1.equals("<settlement>") || s1.equals("I-<settlement>")) {
if ((!usePreLabel) ||
((usePreLabel) && ((s3.equals("<address>") || s3.equals("I-<address>"))))) {
if (aff.getSettlement() != null) {
if (s1.equals("I-<settlement>")) {
aff.setSettlement(aff.getSettlement() + ", " + s2);
} else if (addSpace) {
aff.setSettlement(aff.getSettlement() + " " + s2);
} else {
aff.setSettlement(aff.getSettlement() + s2);
}
} else {
aff.setSettlement(s2);
}
hasAddress = true;
} else if ((usePreLabel) && ((s3.equals("<affiliation>") || s3.equals("I-<affiliation>")))) {
if (aff.getAffiliationString() != null) {
if (addSpace) {
aff.setAffiliationString(aff.getAffiliationString() + " " + s2);
} else {
aff.setAffiliationString(aff.getAffiliationString() + s2);
}
} else {
aff.setAffiliationString(s2);
}
}
}
if (!s1.endsWith("<marker>")) {
if (aff.getRawAffiliationString() == null) {
aff.setRawAffiliationString(s2);
} else if (addSpace) {
aff.setRawAffiliationString(aff.getRawAffiliationString() + " " + s2);
} else {
aff.setRawAffiliationString(aff.getRawAffiliationString() + s2);
}
}
lastTag = s1;
lineCount++;
newMarker = false;
}
if (aff.notNull()) {
if (fullAffiliations == null)
fullAffiliations = new ArrayList<Affiliation>();
fullAffiliations.add(aff);
hasInstitution = false;
hasDepartment = false;
hasAddress = false;
}
            // final clean-up pass over the collected affiliations
if (fullAffiliations != null) {
for (Affiliation affi : fullAffiliations) {
affi.clean();
}
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return fullAffiliations;
}
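    /*
     * Illustrative usage (a sketch, not part of the original source; the
     * affiliation string is made up). The parser instance is normally obtained
     * through EngineParsers, as in Engine.processAffiliation():
     *
     *   List<Affiliation> affs = parsers.getAffiliationAddressParser().processing(
     *       "Department of Physics, University of Example, 75005 Paris, France");
     */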
/**
     * Extract affiliation training data from a labelled header in the training format without any string modification.
*/
public StringBuilder trainingExtraction(String result,
List<LayoutToken> tokenizations) {
if ((result == null) || (result.length() == 0)) {
return null;
}
List<String> affiliationBlocks = new ArrayList<String>();
List<LayoutToken> tokenizationsAffiliation = new ArrayList<LayoutToken>();
filterAffiliationAddress(result, tokenizations, affiliationBlocks, tokenizationsAffiliation);
String resultAffiliation = runReflow(affiliationBlocks, tokenizationsAffiliation);
StringBuilder bufferAffiliation = new StringBuilder();
if (resultAffiliation == null) {
return bufferAffiliation;
}
StringTokenizer st = new StringTokenizer(resultAffiliation, "\n");
String s1 = null;
String s2 = null;
String lastTag = null;
int p = 0;
String currentTag0 = null;
String lastTag0 = null;
boolean hasAddressTag = false;
boolean hasAffiliationTag = false;
boolean hasAddress = false;
boolean hasAffiliation = false;
boolean start = true;
boolean tagClosed = false;
while (st.hasMoreTokens()) {
boolean addSpace = false;
String tok = st.nextToken().trim();
if (tok.length() == 0) {
continue;
}
StringTokenizer stt = new StringTokenizer(tok, "\t");
ArrayList<String> localFeatures = new ArrayList<String>();
int i = 0;
boolean newLine = false;
int ll = stt.countTokens();
while (stt.hasMoreTokens()) {
String s = stt.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s);
boolean strop = false;
while ((!strop) && (p < tokenizationsAffiliation.size())) {
String tokOriginal = tokenizationsAffiliation.get(p).getText();
if (tokOriginal.equals(" ")) {
addSpace = true;
} else if (tokOriginal.equals(s)) {
strop = true;
}
p++;
}
} else if (i == ll - 1) {
s1 = s;
} else {
localFeatures.add(s);
if (s.equals("LINESTART") && !start) {
newLine = true;
start = false;
} else if (s.equals("LINESTART")) {
start = false;
}
}
i++;
}
lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
currentTag0 = null;
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
if (lastTag != null) {
tagClosed = testClosingTag(bufferAffiliation, currentTag0, lastTag0);
} else
tagClosed = false;
if (newLine) {
if (tagClosed) {
bufferAffiliation.append("\t\t\t\t\t\t\t<lb/>\n");
} else {
bufferAffiliation.append("<lb/>");
}
}
String output = writeField(s1, lastTag0, s2, "<marker>", "<marker>", addSpace, 7);
if (output != null) {
if (hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t</address>\n");
hasAddressTag = false;
}
if (hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
}
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n" + output);
hasAffiliationTag = true;
hasAddressTag = false;
hasAddress = false;
hasAffiliation = false;
lastTag = s1;
continue;
} else {
output = writeField(s1, lastTag0, s2, "<institution>", "<orgName type=\"institution\">", addSpace, 7);
}
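            // each writeField call below returns a non-null string only when the
            // current label s1 matches the tried field; the pattern is therefore
            // inverted: the if branch attempts the next label while the else
            // branch flushes the output of the label matched just above, appends
            // it to the buffer and moves on to the next token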
if (output == null) {
output = writeField(s1, lastTag0, s2, "<department>", "<orgName type=\"department\">", addSpace, 7);
} else {
if (hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t</address>\n");
hasAddressTag = false;
}
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
}
bufferAffiliation.append(output);
hasAffiliation = true;
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<laboratory>", "<orgName type=\"laboratory\">", addSpace, 7);
} else {
if (hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t</address>\n");
hasAddressTag = false;
}
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
hasAffiliation = true;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<addrLine>", "<addrLine>", addSpace, 8);
} else {
if (hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t</address>\n");
hasAddressTag = false;
}
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
}
bufferAffiliation.append(output);
hasAffiliation = true;
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<postCode>", "<postCode>", addSpace, 8);
} else {
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
hasAddressTag = false;
}
if (!hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t<address>\n");
hasAddressTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<postBox>", "<postBox>", addSpace, 8);
} else {
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
hasAddressTag = false;
}
if (!hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t<address>\n");
hasAddressTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<region>", "<region>", addSpace, 8);
} else {
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
hasAddressTag = false;
}
if (!hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t<address>\n");
hasAddressTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<settlement>", "<settlement>", addSpace, 8);
} else {
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
hasAddressTag = false;
}
if (!hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t<address>\n");
hasAddressTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<country>", "<country>", addSpace, 8);
} else {
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
hasAddressTag = false;
}
if (!hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t<address>\n");
hasAddressTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<other>", "<other>", addSpace, 8);
} else {
if (hasAddress && hasAffiliation) {
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
hasAffiliationTag = false;
hasAddress = false;
hasAffiliation = false;
hasAddressTag = false;
}
if (!hasAffiliationTag) {
bufferAffiliation.append("\t\t\t\t\t\t<affiliation>\n");
hasAffiliationTag = true;
hasAddressTag = false;
}
if (!hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t<address>\n");
hasAddressTag = true;
}
bufferAffiliation.append(output);
lastTag = s1;
continue;
}
if (output != null) {
if (bufferAffiliation.length() > 0) {
if (bufferAffiliation.charAt(bufferAffiliation.length() - 1) == '\n') {
bufferAffiliation.deleteCharAt(bufferAffiliation.length() - 1);
}
}
bufferAffiliation.append(output);
}
lastTag = s1;
}
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
currentTag0 = "";
testClosingTag(bufferAffiliation, currentTag0, lastTag0);
if (hasAddressTag) {
bufferAffiliation.append("\t\t\t\t\t\t\t</address>\n");
}
bufferAffiliation.append("\t\t\t\t\t\t</affiliation>\n");
}
return bufferAffiliation;
}
private String writeField(String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent) {
String result = null;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
if ((s1.equals("<other>") || s1.equals("I-<other>"))) {
//result = "";
/*for(int i=0; i<nbIndent; i++) {
result += "\t";
}*/
if (addSpace)
result = " " + s2;
else
result = s2;
} else if (s1.equals(lastTag0) || s1.equals("I-" + lastTag0)) {
if (addSpace)
result = " " + s2;
else
result = s2;
} else {
result = "";
for (int i = 0; i < nbIndent; i++) {
result += "\t";
}
result += outField + s2;
}
}
return result;
}
private boolean testClosingTag(StringBuilder buffer,
String currentTag0,
String lastTag0) {
boolean res = false;
if (!currentTag0.equals(lastTag0)) {
res = true;
// we close the current tag
if (lastTag0.equals("<institution>")) {
buffer.append("</orgName>\n");
} else if (lastTag0.equals("<department>")) {
buffer.append("</orgName>\n");
} else if (lastTag0.equals("<laboratory>")) {
buffer.append("</orgName>\n");
} else if (lastTag0.equals("<addrLine>")) {
buffer.append("</addrLine>\n");
} else if (lastTag0.equals("<postCode>")) {
buffer.append("</postCode>\n");
} else if (lastTag0.equals("<postBox>")) {
buffer.append("</postBox>\n");
} else if (lastTag0.equals("<region>")) {
buffer.append("</region>\n");
} else if (lastTag0.equals("<settlement>")) {
buffer.append("</settlement>\n");
} else if (lastTag0.equals("<country>")) {
buffer.append("</country>\n");
} else if (lastTag0.equals("<marker>")) {
buffer.append("</marker>\n");
} else if (lastTag0.equals("<other>")) {
buffer.append("\n");
} else {
res = false;
}
}
return res;
}
}
| 53,208 | 44.361466 | 138 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/Engine.java
|
package org.grobid.core.engines;
import org.apache.commons.lang3.tuple.Pair;
import org.grobid.core.data.Affiliation;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.ChemicalEntity;
import org.grobid.core.data.PatentItem;
import org.grobid.core.data.Person;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.factory.GrobidPoolingFactory;
import org.grobid.core.lang.Language;
import org.grobid.core.utilities.Consolidation;
import org.grobid.core.utilities.LanguageUtilities;
import org.grobid.core.utilities.Utilities;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.counters.impl.CntManagerFactory;
import org.grobid.core.utilities.crossref.CrossrefClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.*;
import java.nio.charset.StandardCharsets;
/**
* Class for managing the extraction of bibliographical information from PDF
* documents or raw text.
*
*/
public class Engine implements Closeable {
private static final Logger LOGGER = LoggerFactory.getLogger(Engine.class);
private final EngineParsers parsers = new EngineParsers();
    //TODO: when using a single Engine instance in e.g. grobid-service, make this field non-static
private static CntManager cntManager = CntManagerFactory.getCntManager();
    // The list of accepted languages.
    // The languages are encoded as ISO 639-1 codes.
    // If null, all languages are accepted.
private List<String> acceptedLanguages = null;
/**
     * Parse a sequence of authors from a header, i.e. possibly containing
     * reference markers.
*
* @param authorSequence - the string corresponding to a raw sequence of names
* @return the list of structured author object
*/
public List<Person> processAuthorsHeader(String authorSequence) throws Exception {
List<Person> result = parsers.getAuthorParser().processingHeader(authorSequence);
return result;
}
/**
* Parse a sequence of authors from a citation, i.e. containing no reference
* markers.
*
* @param authorSequence - the string corresponding to a raw sequence of names
* @return the list of structured author object
*/
public List<Person> processAuthorsCitation(String authorSequence) throws Exception {
List<Person> result = parsers.getAuthorParser().processingCitation(authorSequence);
return result;
}
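    /*
     * Illustrative call (a sketch, not part of the original source; the input
     * string is made up):
     *
     *   List<Person> authors = engine.processAuthorsCitation("J. Smith, A. B. Jones");
     *
     * Each Person in the result holds the structured name fields recognized by
     * the author model.
     */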
/**
* Parse a list of independent sequences of authors from citations.
*
* @param authorSequences - the list of strings corresponding each to a raw sequence of
* names.
* @return the list of all recognized structured author objects for each
* sequence of authors.
*/
    public List<List<Person>> processAuthorsCitationLists(List<String> authorSequences) throws Exception {
        // not implemented yet: this method currently always returns null
        return null;
    }
/**
* Parse a text block corresponding to an affiliation+address.
*
* @param addressBlock - the string corresponding to a raw affiliation+address
* @return the list of all recognized structured affiliation objects.
* @throws IOException
*/
public List<Affiliation> processAffiliation(String addressBlock) throws IOException {
return parsers.getAffiliationAddressParser().processing(addressBlock);
}
/**
* Parse a list of text blocks corresponding to an affiliation+address.
*
* @param addressBlocks - the list of strings corresponding each to a raw
* affiliation+address.
* @return the list of all recognized structured affiliation objects for
* each sequence of affiliation + address block.
*/
public List<List<Affiliation>> processAffiliations(List<String> addressBlocks) throws Exception {
List<List<Affiliation>> results = null;
for (String addressBlock : addressBlocks) {
List<Affiliation> localRes = parsers.getAffiliationAddressParser().processing(addressBlock);
if (results == null) {
results = new ArrayList<List<Affiliation>>();
}
results.add(localRes);
}
return results;
}
/**
* Parse a raw string containing dates.
*
* @param dateBlock - the string containing raw dates.
* @return the list of all structured date objects recognized in the string.
* @throws IOException
*/
public List<org.grobid.core.data.Date> processDate(String dateBlock) throws IOException {
List<org.grobid.core.data.Date> result = parsers.getDateParser().processing(dateBlock);
return result;
}
/**
* Parse a list of raw dates.
*
* @param dateBlocks - the list of strings each containing raw dates.
* @return the list of all structured date objects recognized in the string
     * for each input string.
*/
/*public List<List<org.grobid.core.data.Date>> processDates(List<String> dateBlocks) {
return null;
}*/
/**
* Apply a parsing model for a given single raw reference string based on CRF
*
* @param reference the reference string to be processed
     * @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving citation
     *                    information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @return the recognized bibliographical object
*/
public BiblioItem processRawReference(String reference, int consolidate) {
if (reference != null) {
reference = reference.replaceAll("\\\\", "");
}
return parsers.getCitationParser().processingString(reference, consolidate);
}
/**
* Apply a parsing model for a set of raw reference text based on CRF
*
* @param references the list of raw reference strings to be processed
     * @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving citation
     *                    information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @return the list of recognized bibliographical objects
*/
public List<BiblioItem> processRawReferences(List<String> references, int consolidate) throws Exception {
List<BiblioItem> finalResults = new ArrayList<BiblioItem>();
if (references == null || references.size() == 0)
return finalResults;
List<BiblioItem> results = parsers.getCitationParser().processingStringMultiple(references, 0);
        if (results == null || results.size() == 0)
return finalResults;
// consolidation in a second stage to take advantage of parallel calls
if (consolidate == 0) {
return results;
} else {
// prepare for set consolidation
List<BibDataSet> bibDataSetResults = new ArrayList<BibDataSet>();
for (BiblioItem bib : results) {
BibDataSet bds = new BibDataSet();
bds.setResBib(bib);
bds.setRawBib(bib.getReference());
bibDataSetResults.add(bds);
}
Consolidation consolidator = Consolidation.getInstance();
if (consolidator.getCntManager() == null)
consolidator.setCntManager(cntManager);
Map<Integer,BiblioItem> resConsolidation = null;
try {
resConsolidation = consolidator.consolidate(bibDataSetResults);
} catch(Exception e) {
throw new GrobidException(
"An exception occured while running consolidation on bibliographical references.", e);
}
if (resConsolidation != null) {
for(int i=0; i<bibDataSetResults.size(); i++) {
BiblioItem resCitation = bibDataSetResults.get(i).getResBib();
BiblioItem bibo = resConsolidation.get(Integer.valueOf(i));
if (bibo != null) {
if (consolidate == 1)
BiblioItem.correct(resCitation, bibo);
else if (consolidate == 2)
BiblioItem.injectIdentifiers(resCitation, bibo);
}
finalResults.add(resCitation);
}
}
}
return finalResults;
}
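    /*
     * Minimal usage sketch (illustrative only, not part of the original source;
     * the reference strings are made up):
     *
     *   List<String> refs = Arrays.asList(
     *       "Smith J. A title. Journal of Examples, 12(3):1-10, 2010.",
     *       "Jones A. Another title. In Proc. ExampleConf, 2012.");
     *   // consolidate == 2: parse first, then consolidate the whole set against
     *   // Crossref in a second stage and inject DOIs only
     *   List<BiblioItem> parsed = engine.processRawReferences(refs, 2);
     */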
/**
* Constructor for the Grobid engine instance.
*/
public Engine(boolean loadModels) {
/*
* Runtime.getRuntime().addShutdownHook(new Thread() {
*
* @Override public void run() { try { close(); } catch (IOException e)
* { LOGGER.error("Failed to close all resources: " + e); } } });
*/
if (loadModels)
parsers.initAll();
}
/**
* Apply a parsing model to the reference block of a PDF file based on CRF
*
* @param inputFile the path of the PDF file to be processed
     * @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving citation
     *                    information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @return the list of parsed references as bibliographical objects enriched
* with citation contexts
*/
public List<BibDataSet> processReferences(File inputFile, int consolidate) {
return parsers.getCitationParser()
.processingReferenceSection(inputFile, null, parsers.getReferenceSegmenterParser(), consolidate);
}
/**
* Apply a parsing model to the reference block of a PDF file based on CRF
*
* @param inputFile the path of the PDF file to be processed
* @param md5Str MD5 digest of the PDF file to be processed
     * @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving citation
     *                    information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @return the list of parsed references as bibliographical objects enriched
* with citation contexts
*/
public List<BibDataSet> processReferences(File inputFile, String md5Str, int consolidate) {
return parsers.getCitationParser()
.processingReferenceSection(inputFile, md5Str, parsers.getReferenceSegmenterParser(), consolidate);
}
/**
* Download a PDF file.
*
* @param url URL of the PDF to download
* @param dirName directory where to store the downloaded PDF
* @param name file name
*/
public String downloadPDF(String url, String dirName, String name) {
return Utilities.uploadFile(url, dirName, name);
}
/**
     * Give the list of languages for which extraction is allowed. If null,
     * all languages will be processed.
     *
     * @return the list of languages to be processed, coded in ISO 639-1.
*/
public List<String> getAcceptedLanguages() {
return acceptedLanguages;
}
/**
* Add a language to the list of accepted languages.
*
     * @param lang the language in ISO 639-1 to be added
*/
public void addAcceptedLanguages(String lang) {
if (acceptedLanguages == null) {
acceptedLanguages = new ArrayList<String>();
}
acceptedLanguages.add(lang);
}
/**
     * Perform language identification on the text companion file of a document.
     *
     * @param filePath the path of the processed file; its last three characters
     *                 are replaced by the given extension to locate the text file
     * @param ext      the extension of the text file to analyse (e.g. "body")
     * @return the identified language
*/
public Language runLanguageId(String filePath, String ext) {
try {
            // we read approximately the first 5000 characters of the text
            // (skipping empty lines), which should give close to 100% accuracy
            // for the supported languages
String text = "";
FileInputStream fileIn = new FileInputStream(filePath.substring(0, filePath.length() - 3) + ext);
InputStreamReader reader = new InputStreamReader(fileIn, "UTF-8");
BufferedReader bufReader = new BufferedReader(reader);
String line;
int nbChar = 0;
while (((line = bufReader.readLine()) != null) && (nbChar < 5000)) {
if (line.length() == 0)
continue;
text += " " + line;
nbChar += line.length();
}
bufReader.close();
LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
return languageUtilities.runLanguageId(text);
} catch (IOException e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
/**
* Basic run for language identification, default is on the body of the
* current document.
*
* @return language id
*/
public Language runLanguageId(String filePath) {
return runLanguageId(filePath, "body");
}
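    // e.g. runLanguageId("/tmp/doc.pdf") reads "/tmp/doc.body" (the last three
    // characters of the path are swapped for the extension) and returns the
    // detected Language; the path here is purely illustrative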
/**
* Apply a parsing model for the header of a PDF file based on CRF, using
* first three pages of the PDF
*
* @param inputFile the path of the PDF file to be processed
* @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving header
* information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @param result bib result
* @return the TEI representation of the extracted bibliographical
* information
*/
public String processHeader(
String inputFile,
int consolidate,
boolean includeRawAffiliations,
BiblioItem result
) {
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder()
.startPage(0)
.endPage(2)
.consolidateHeader(consolidate)
.includeRawAffiliations(includeRawAffiliations)
.build();
return processHeader(inputFile, null, config, result);
}
/**
* Apply a parsing model for the header of a PDF file based on CRF, using
* first three pages of the PDF
*
* @param inputFile the path of the PDF file to be processed
* @param md5Str MD5 digest of the processed file
* @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving header
* information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @param result bib result
* @return the TEI representation of the extracted bibliographical
* information
*/
public String processHeader(
String inputFile,
String md5Str,
int consolidate,
boolean includeRawAffiliations,
BiblioItem result
) {
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder()
.startPage(0)
.endPage(2)
.consolidateHeader(consolidate)
.includeRawAffiliations(includeRawAffiliations)
.build();
return processHeader(inputFile, md5Str, config, result);
}
/**
* Apply a parsing model for the header of a PDF file based on CRF, using
* dynamic range of pages as header
*
* @param inputFile : the path of the PDF file to be processed
* @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving header
* information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
* @param result bib result
*
* @return the TEI representation of the extracted bibliographical
* information
*/
    public String processHeader(String inputFile, int consolidate, BiblioItem result) {
        // propagate the consolidation option instead of silently dropping it
        GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder()
                .consolidateHeader(consolidate)
                .build();
        return processHeader(inputFile, null, config, result);
    }
public String processHeader(String inputFile, GrobidAnalysisConfig config, BiblioItem result) {
return processHeader(inputFile, null, config, result);
}
public String processHeader(String inputFile, String md5Str, GrobidAnalysisConfig config, BiblioItem result) {
        // normally the BiblioItem reference should not be null; if it is, we
        // still continue with a fresh instance so that the resulting TEI
        // string is still delivered
if (result == null) {
result = new BiblioItem();
}
Pair<String, Document> resultTEI = parsers.getHeaderParser().processing(new File(inputFile), md5Str, result, config);
return resultTEI.getLeft();
}
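    /*
     * Illustrative call (a sketch; the PDF path is an assumption):
     *
     *   BiblioItem resHeader = new BiblioItem();
     *   String teiHeader = engine.processHeader("/tmp/article.pdf", 1, true, resHeader);
     *   System.out.println(resHeader.getTitle());
     *
     * The method returns the TEI representation and fills resHeader as a side
     * effect; a null resHeader is replaced by a fresh instance.
     */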
/**
* Create training data for the monograph model based on the application of
* the current monograph text model on a new PDF
*
* @param inputFile : the path of the PDF file to be processed
* @param pathRaw : the path where to put the CRF feature file
* @param pathTEI : the path where to put the annotated TEI representation (the
* file to be corrected for gold-level training data)
* @param id : an optional ID to be used in the TEI file and the full text
* file, -1 if not used
*/
public void createTrainingMonograph(File inputFile, String pathRaw, String pathTEI, int id) {
        parsers.getMonographParser().createTrainingFromPDF(inputFile, pathRaw, pathTEI, id);
}
/**
     * Generate blank training data from a provided PDF document, i.e. a TEI file with text only,
     * without tags. This can be used to start any new model from scratch.
*
* @param inputFile : the path of the PDF file to be processed
* @param pathRaw : the path where to put the CRF feature file
* @param pathTEI : the path where to put the annotated TEI representation (the
* file to be annotated for "from scratch" training data)
* @param id : an optional ID to be used in the TEI file and the full text
* file, -1 if not used
*/
public void createTrainingBlank(File inputFile, String pathRaw, String pathTEI, int id) {
parsers.getSegmentationParser().createBlankTrainingData(inputFile, pathRaw, pathTEI, id);
}
/**
* Create training data for all models based on the application of
* the current full text model on a new PDF
*
* @param inputFile : the path of the PDF file to be processed
* @param pathRaw : the path where to put the CRF feature file
* @param pathTEI : the path where to put the annotated TEI representation (the
* file to be corrected for gold-level training data)
* @param id : an optional ID to be used in the TEI file, -1 if not used
*/
public void createTraining(File inputFile, String pathRaw, String pathTEI, int id) {
System.out.println(inputFile.getPath());
        parsers.getFullTextParser().createTraining(inputFile, pathRaw, pathTEI, id);
}
/**
*
* //TODO: remove invalid JavaDoc once refactoring is done and tested (left for easier reference)
     * Parse and convert the current article into TEI; this method performs the
     * whole parsing and conversion process. If onlyHeader is true, then only
     * the TEI header data will be created.
*
* @param inputFile - absolute path to the pdf to be processed
* @param config - Grobid config
* @return the resulting structured document as a TEI string.
*/
public String fullTextToTEI(File inputFile,
GrobidAnalysisConfig config) throws Exception {
return fullTextToTEIDoc(inputFile, null, config).getTei();
}
/**
*
* //TODO: remove invalid JavaDoc once refactoring is done and tested (left for easier reference)
     * Parse and convert the current article into TEI; this method performs the
     * whole parsing and conversion process. If onlyHeader is true, then only
     * the TEI header data will be created.
*
* @param inputFile - absolute path to the pdf to be processed
* @param md5Str - MD5 digest of the PDF file to be processed
* @param config - Grobid config
* @return the resulting structured document as a TEI string.
*/
public String fullTextToTEI(File inputFile,
String md5Str,
GrobidAnalysisConfig config) throws Exception {
return fullTextToTEIDoc(inputFile, md5Str, config).getTei();
}
public Document fullTextToTEIDoc(File inputFile,
String md5Str,
GrobidAnalysisConfig config) throws Exception {
FullTextParser fullTextParser = parsers.getFullTextParser();
Document resultDoc;
LOGGER.debug("Starting processing fullTextToTEI on " + inputFile);
long time = System.currentTimeMillis();
resultDoc = fullTextParser.processing(inputFile, md5Str, config);
LOGGER.debug("Ending processing fullTextToTEI on " + inputFile + ". Time to process: "
+ (System.currentTimeMillis() - time) + "ms");
return resultDoc;
}
public Document fullTextToTEIDoc(File inputFile,
GrobidAnalysisConfig config) throws Exception {
return fullTextToTEIDoc(inputFile, null, config);
}
public Document fullTextToTEIDoc(DocumentSource documentSource,
GrobidAnalysisConfig config) throws Exception {
FullTextParser fullTextParser = parsers.getFullTextParser();
Document resultDoc;
LOGGER.debug("Starting processing fullTextToTEI on " + documentSource);
long time = System.currentTimeMillis();
resultDoc = fullTextParser.processing(documentSource, config);
LOGGER.debug("Ending processing fullTextToTEI on " + documentSource + ". Time to process: "
+ (System.currentTimeMillis() - time) + "ms");
return resultDoc;
}
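    /*
     * End-to-end usage sketch (illustrative only; the file path is an
     * assumption):
     *
     *   GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder()
     *       .consolidateHeader(1)
     *       .build();
     *   String tei = engine.fullTextToTEI(new File("/tmp/article.pdf"), config);
     */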
/**
* Process all the PDF in a given directory with a segmentation process and
* produce the corresponding training data format files for manual
* correction. The goal of this method is to help to produce additional
     * training data based on an existing model.
*
* @param directoryPath - the path to the directory containing PDF to be processed.
* @param resultPath - the path to the directory where the results as XML files
* shall be written.
* @param ind - identifier integer to be included in the resulting files to
* identify the training case. This is optional: no identifier
* will be included if ind = -1
* @return the number of processed files.
*/
public int batchCreateTraining(String directoryPath, String resultPath, int ind) {
try {
File path = new File(directoryPath);
// we process all pdf files in the directory
File[] refFiles = path.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
System.out.println(name);
return name.endsWith(".pdf") || name.endsWith(".PDF");
}
});
if (refFiles == null)
return 0;
System.out.println(refFiles.length + " files to be processed.");
int n = 0;
if (ind == -1) {
                // for an undefined identifier (ind == -1), ind + n below resolves to 0
n = 1;
}
for (final File pdfFile : refFiles) {
try {
createTraining(pdfFile, resultPath, resultPath, ind + n);
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the following pdf: "
+ pdfFile.getPath(), exp);
}
if (ind != -1)
n++;
}
return refFiles.length;
} catch (final Exception exp) {
throw new GrobidException("An exception occured while running Grobid batch.", exp);
}
}
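    // e.g. engine.batchCreateTraining("/data/pdf-in", "/data/train-out", -1)
    // processes every PDF under /data/pdf-in and writes the generated training
    // files to /data/train-out (paths purely illustrative)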
/**
* Process all the PDF in a given directory with a monograph process and
* produce the corresponding training data format files for manual
* correction. The goal of this method is to help to produce additional
     * training data based on an existing model.
*
* @param directoryPath - the path to the directory containing PDF to be processed.
* @param resultPath - the path to the directory where the results as XML files
* and CRF feature files shall be written.
* @param ind - identifier integer to be included in the resulting files to
* identify the training case. This is optional: no identifier
* will be included if ind = -1
* @return the number of processed files.
*/
public int batchCreateTrainingMonograph(String directoryPath, String resultPath, int ind) {
try {
File path = new File(directoryPath);
// we process all pdf files in the directory
File[] refFiles = path.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
System.out.println(name);
return name.endsWith(".pdf") || name.endsWith(".PDF");
}
});
if (refFiles == null)
return 0;
System.out.println(refFiles.length + " files to be processed.");
int n = 0;
if (ind == -1) {
                // for an undefined identifier (ind == -1), ind + n below resolves to 0
n = 1;
}
for (final File pdfFile : refFiles) {
try {
createTrainingMonograph(pdfFile, resultPath, resultPath, ind + n);
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the following pdf: "
+ pdfFile.getPath(), exp);
}
if (ind != -1)
n++;
}
return refFiles.length;
} catch (final Exception exp) {
throw new GrobidException("An exception occured while running Grobid batch.", exp);
}
}
/**
* Process all the PDF in a given directory with a pdf extraction and
* produce blank training data, i.e. TEI files with text only
* without tags. This can be used to start from scratch any new model.
*
* @param directoryPath - the path to the directory containing PDF to be processed.
* @param resultPath - the path to the directory where the results as XML files
* and default CRF feature files shall be written.
* @param ind - identifier integer to be included in the resulting files to
* identify the training case. This is optional: no identifier
* will be included if ind = -1
* @return the number of processed files.
*/
public int batchCreateTrainingBlank(String directoryPath, String resultPath, int ind) {
try {
File path = new File(directoryPath);
// we process all pdf files in the directory
File[] refFiles = path.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
System.out.println(name);
return name.endsWith(".pdf") || name.endsWith(".PDF");
}
});
if (refFiles == null)
return 0;
System.out.println(refFiles.length + " files to be processed.");
int n = 0;
if (ind == -1) {
                // for an undefined identifier (ind == -1), ind + n below resolves to 0
n = 1;
}
for (final File pdfFile : refFiles) {
try {
createTrainingBlank(pdfFile, resultPath, resultPath, ind + n);
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the following pdf: "
+ pdfFile.getPath(), exp);
}
if (ind != -1)
n++;
}
return refFiles.length;
} catch (final Exception exp) {
throw new GrobidException("An exception occured while running Grobid batch.", exp);
}
}
/**
* Get the TEI XML string corresponding to the recognized header text
*/
public static String header2TEI(BiblioItem resHeader) {
return resHeader.toTEI(0);
}
/**
* Get the BibTeX string corresponding to the recognized header text
*/
public static String header2BibTeX(BiblioItem resHeader) {
return resHeader.toBibTeX();
}
/**
* Get the TEI XML string corresponding to the recognized citation section,
* with pointers and advanced structuring
*/
public static String references2TEI(String path, List<BibDataSet> resBib) {
StringBuilder result = new StringBuilder();
result.append("<listbibl>\n");
int p = 0;
for (BibDataSet bib : resBib) {
BiblioItem bit = bib.getResBib();
bit.setPath(path);
result.append("\n").append(bit.toTEI(p));
p++;
}
result.append("\n</listbibl>\n");
return result.toString();
}
/**
* Get the BibTeX string corresponding to the recognized citation section
*/
public String references2BibTeX(String path, List<BibDataSet> resBib) {
StringBuilder result = new StringBuilder();
for (BibDataSet bib : resBib) {
BiblioItem bit = bib.getResBib();
bit.setPath(path);
result.append("\n").append(bit.toBibTeX());
}
return result.toString();
}
/**
* Get the TEI XML string corresponding to the recognized citation section
* for a particular citation
*/
public static String reference2TEI(String path, List<BibDataSet> resBib, int i) {
StringBuilder result = new StringBuilder();
if (resBib != null) {
            if (i < resBib.size()) {
BibDataSet bib = resBib.get(i);
BiblioItem bit = bib.getResBib();
bit.setPath(path);
result.append(bit.toTEI(i));
}
}
return result.toString();
}
/**
* Get the BibTeX string corresponding to the recognized citation section
* for a given citation
*/
public static String reference2BibTeX(String path, List<BibDataSet> resBib, int i) {
StringBuilder result = new StringBuilder();
if (resBib != null) {
            if (i < resBib.size()) {
BibDataSet bib = resBib.get(i);
BiblioItem bit = bib.getResBib();
bit.setPath(path);
result.append(bit.toBibTeX());
}
}
return result.toString();
}
/**
     * Extract and parse both patent and non-patent references within a patent text. Results are provided as a BibDataSet
     * with offset positions instantiated relative to the input text and as PatentItem containing both "WYSIWYG" results (the
     * patent reference attributes as they appear in the text) and the attributes in DOCDB format (format according to
     * WIPO and ISO standards). Patent references' offset positions are also given in the PatentItem object.
*
* @param text the string corresponding to the text body of the patent.
     * @param nplResults           the list of extracted and parsed non-patent references as BiblioItem objects. This
     *                             list must be instantiated before calling the method for receiving the results.
     * @param patentResults        the list of extracted and parsed patent references as PatentItem objects. This list
     *                             must be instantiated before calling the method for receiving the results.
     * @param consolidateCitations the consolidation option allows GROBID to exploit Crossref web services for improving
     *                             citation information. 0 (no consolidation, default value), 1 (consolidate the citation
     *                             and inject extra metadata) or 2 (consolidate the citation and inject DOI only)
     * @return the list of extracted and parsed patent and non-patent references encoded in TEI.
*/
public String processAllCitationsInPatent(String text,
List<BibDataSet> nplResults,
List<PatentItem> patentResults,
int consolidateCitations,
boolean includeRawCitations) throws Exception {
if ((nplResults == null) && (patentResults == null)) {
return null;
}
// we initialize the attribute individually for readability...
boolean filterDuplicate = false;
return parsers.getReferenceExtractor().extractAllReferencesString(text, filterDuplicate,
consolidateCitations, includeRawCitations, patentResults, nplResults);
}
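    /*
     * Usage sketch (illustrative only): as stated in the Javadoc above, the
     * result lists must be instantiated by the caller and are populated as a
     * side effect.
     *
     *   List<BibDataSet> npl = new ArrayList<>();
     *   List<PatentItem> patents = new ArrayList<>();
     *   String tei = engine.processAllCitationsInPatent(patentText, npl, patents, 0, false);
     */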
/**
     * Extract and parse both patent and non-patent references within a patent in ST.36 format. Results are provided as a
     * BibDataSet with offset positions instantiated relative to the input text and as PatentItem containing both "WYSIWYG"
     * results (the patent reference attributes as they appear in the text) and the attributes in DOCDB format (format
     * according to WIPO and ISO standards). Patent references' offset positions are also given in the PatentItem
* object.
*
     * @param nplResults           the list of extracted and parsed non-patent references as BiblioItem objects. This
     *                             list must be instantiated before calling the method for receiving the results.
     * @param patentResults        the list of extracted and parsed patent references as PatentItem objects. This list
     *                             must be instantiated before calling the method for receiving the results.
     * @param consolidateCitations the consolidation option allows GROBID to exploit Crossref web services for improving
     *                             citation information. 0 (no consolidation, default value), 1 (consolidate the citation
     *                             and inject extra metadata) or 2 (consolidate the citation and inject DOI only)
     * @return the list of extracted and parsed patent and non-patent references encoded in TEI.
     * @throws Exception if something went wrong
*/
public String processAllCitationsInXMLPatent(String xmlPath, List<BibDataSet> nplResults,
List<PatentItem> patentResults,
int consolidateCitations,
boolean includeRawCitations) throws Exception {
if ((nplResults == null) && (patentResults == null)) {
return null;
}
// we initialize the attribute individually for readability...
boolean filterDuplicate = false;
return parsers.getReferenceExtractor().extractAllReferencesXMLFile(xmlPath, filterDuplicate,
consolidateCitations, includeRawCitations, patentResults, nplResults);
}
/**
     * Extract and parse both patent and non-patent references within a patent
     * in PDF format. Results are provided as a BibDataSet with offset positions
     * instantiated relative to the input text and as PatentItem containing both
     * "WYSIWYG" results (the patent reference attributes as they appear in the
* text) and the attributes in DOCDB format (format according to WIPO and
* ISO standards). Patent references' offset positions are also given in the
* PatentItem object.
*
* @param pdfPath pdf path
     * @param nplResults           the list of extracted and parsed non-patent references as
     *                             BiblioItem objects. This list must be instantiated before
* calling the method for receiving the results.
     * @param patentResults        the list of extracted and parsed patent references as
     *                             PatentItem objects. This list must be instantiated before
* calling the method for receiving the results.
     * @param consolidateCitations the consolidation option allows GROBID to exploit Crossref web services for improving
     *                             citation information. 0 (no consolidation, default value), 1 (consolidate the citation
     *                             and inject extra metadata) or 2 (consolidate the citation and inject DOI only)
     * @return the list of extracted and parsed patent and non-patent references
* encoded in TEI.
     * @throws Exception if something went wrong
*/
public String processAllCitationsInPDFPatent(String pdfPath, List<BibDataSet> nplResults,
List<PatentItem> patentResults,
int consolidateCitations,
boolean includeRawCitations) throws Exception {
if ((nplResults == null) && (patentResults == null)) {
return null;
}
// we initialize the attribute individually for readability...
boolean filterDuplicate = false;
return parsers.getReferenceExtractor().extractAllReferencesPDFFile(pdfPath, filterDuplicate,
consolidateCitations, includeRawCitations, patentResults, nplResults);
}
/**
     * Extract and parse both patent and non-patent references within a patent
     * in PDF format. Results are provided as JSON annotations with coordinates
     * of the annotations in the original PDF and reference information in DOCDB
* format (format according to WIPO and ISO standards).
*
* @param pdfPath pdf path
     * @param consolidateCitations the consolidation option allows GROBID to exploit Crossref web services for improving
     *                             citation information. 0 (no consolidation, default value), 1 (consolidate the citation
* and inject extra metadata) or 2 (consolidate the citation and inject DOI only)
*
* @return JSON annotations with extracted and parsed patent and non-patent references
* together with coordinates in the original PDF.
*/
public String annotateAllCitationsInPDFPatent(String pdfPath,
int consolidateCitations,
boolean includeRawCitations) throws Exception {
List<BibDataSet> nplResults = new ArrayList<BibDataSet>();
List<PatentItem> patentResults = new ArrayList<PatentItem>();
// we initialize the attribute individually for readability...
boolean filterDuplicate = false;
return parsers.getReferenceExtractor().annotateAllReferencesPDFFile(pdfPath, filterDuplicate,
consolidateCitations, includeRawCitations, patentResults, nplResults);
}
/*public void processCitationPatentTEI(String teiPath, String outTeiPath,
int consolidateCitations) throws Exception {
try {
InputStream inputStream = new FileInputStream(new File(teiPath));
OutputStream output = new FileOutputStream(new File(outTeiPath));
final TeiStAXParser parser = new TeiStAXParser(inputStream, output, false,
consolidateCitations);
parser.parse();
inputStream.close();
output.close();
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
}*/
/**
* Process an XML patent document with a patent citation extraction and
* produce the corresponding training data format files for manual
* correction. The goal of this method is to help to produce additional
     * training data based on an existing model.
*
* @param pathXML - the path to the XML patent document to be processed.
* @param resultPath - the path to the directory where the results as XML files
* shall be written.
*/
public void createTrainingPatentCitations(String pathXML, String resultPath)
throws Exception {
parsers.getReferenceExtractor().generateTrainingData(pathXML, resultPath);
}
/**
* Process all the XML patent documents in a given directory with a patent
* citation extraction and produce the corresponding training data format
* files for manual correction. The goal of this method is to help to
     * produce additional training data based on an existing model.
*
* @param directoryPath - the path to the directory containing XML files to be
* processed.
* @param resultPath - the path to the directory where the results as XML files
* shall be written.
* @return the number of processed files.
*/
public int batchCreateTrainingPatentcitations(String directoryPath, String resultPath)
throws Exception {
try {
File path = new File(directoryPath);
// we process all xml files in the directory
File[] refFiles = path.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml") || name.endsWith(".XML") ||
name.endsWith(".xml.gz") || name.endsWith(".XML.gz");
}
});
if (refFiles == null)
return 0;
// System.out.println(refFiles.length + " files to be processed.");
int n = 0;
for (; n < refFiles.length; n++) {
File xmlFile = refFiles[n];
createTrainingPatentCitations(xmlFile.getPath(), resultPath);
}
return refFiles.length;
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
/**
* Extract chemical names from text.
*
* @param text - text to be processed.
* @return List of chemical entites as POJO.
*/
public List<ChemicalEntity> extractChemicalEntities(String text) throws Exception {
return parsers.getChemicalParser().extractChemicalEntities(text);
}
/**
* Print the abstract content. Useful for term extraction.
*/
public String getAbstract(Document doc) throws Exception {
String abstr = doc.getResHeader().getAbstract();
abstr = abstr.replace("@BULLET", " • ");
return abstr;
}
/**
     * Process all the .txt files in a given directory to generate pre-labeled training data for
* the citation model. Input file expects one raw reference string per line.
*
* @param directoryPath - the path to the directory containing .txt to be processed.
* @param resultPath - the path to the directory where the results as XML training files
* shall be written.
**/
public int batchCreateTrainingCitation(String directoryPath, String resultPath) {
try {
File path = new File(directoryPath);
            // we process all txt files in the directory
File[] refFiles = path.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
System.out.println(name);
return name.endsWith(".txt");
}
});
if (refFiles == null)
return 0;
System.out.println(refFiles.length + " files to be processed.");
int n = 0;
for (final File txtFile : refFiles) {
try {
// read file line by line, assuming one reference string per line
List<String> allInput = new ArrayList<>();
BufferedReader reader;
try {
reader = new BufferedReader(new FileReader(txtFile));
String line = reader.readLine();
while (line != null) {
allInput.add(line.trim());
line = reader.readLine();
}
reader.close();
} catch (IOException e) {
e.printStackTrace();
}
// process the training generation
StringBuilder bufferReference = parsers.getCitationParser().trainingExtraction(allInput);
// write the XML training file
if (bufferReference != null) {
bufferReference.append("\n");
Writer writerReference = new OutputStreamWriter(new FileOutputStream(new File(resultPath +
File.separator +
txtFile.getName().replace(".txt", ".training.references.tei.xml")), false), StandardCharsets.UTF_8);
writerReference.write("<?xml version=\"1.0\" ?>\n<TEI xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\" " +
"xmlns:xlink=\"http://www.w3.org/1999/xlink\" " +
"\n xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">\n");
writerReference.write("\t<teiHeader>\n\t\t<fileDesc xml:id=\"_" + n +
"\"/>\n\t</teiHeader>\n\t<text>\n\t\t<front/>\n\t\t<body/>\n\t\t<back>\n");
writerReference.write("<listBibl>\n");
writerReference.write(bufferReference.toString());
writerReference.write("\t\t</listBibl>\n\t</back>\n\t</text>\n</TEI>\n");
writerReference.close();
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the following pdf: "
+ txtFile.getPath(), exp);
}
n++;
}
return refFiles.length;
} catch (final Exception exp) {
throw new GrobidException("An exception occured while running Grobid batch.", exp);
}
}
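    // Example content of an input .txt file for batchCreateTrainingCitation
    // (one raw reference string per line; the references are illustrative):
    //
    //   Smith J. A title. Journal of Examples, 12(3):1-10, 2010.
    //   Jones A., Lee B. Another title. In Proc. ExampleConf, 2012.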
/**
     * Return all the reference titles. May be useful for term extraction.
*/
public String printRefTitles(List<BibDataSet> resBib) throws Exception {
StringBuilder accumulated = new StringBuilder();
for (BibDataSet bib : resBib) {
BiblioItem bit = bib.getResBib();
if (bit.getTitle() != null) {
accumulated.append(bit.getTitle()).append("\n");
}
}
return accumulated.toString();
}
@Override
public synchronized void close() throws IOException {
CrossrefClient.getInstance().close();
parsers.close();
}
public static void setCntManager(CntManager cntManager) {
Engine.cntManager = cntManager;
}
public static CntManager getCntManager() {
return cntManager;
}
public EngineParsers getParsers() {
return parsers;
}
/**
* @return a new engine from GrobidFactory if the execution is parallel,
* else return the instance of engine.
*/
/*public static Engine getEngine(boolean isparallelExec) {
return isparallelExec ? GrobidPoolingFactory.getEngineFromPool()
: GrobidFactory.getInstance().getEngine();
}*/
public static Engine getEngine(boolean preload) {
return GrobidPoolingFactory.getEngineFromPool(preload);
}
}
| 49,526 | 43.659152 | 142 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/DateParser.java
|
package org.grobid.core.engines;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.Date;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorDate;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.TextUtilities;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
public class DateParser extends AbstractParser {
private static final Pattern NEWLINE_REGEX_PATTERN = Pattern.compile("[ \n]");
public DateParser() {
super(GrobidModels.DATE);
}
DateParser(GrobidModel model) {
super(model);
}
/**
     * @deprecated use {@link #process(String)} instead
**/
@Deprecated
public List<Date> processing(String input) {
return process(input);
}
public List<Date> process(String input) {
List<String> dateBlocks = new ArrayList<>();
// force English language for the tokenization only
List<String> tokenizations = analyzer.tokenize(input, new Language("en", 1.0));
if (CollectionUtils.isEmpty(tokenizations)) {
return null;
}
for(String tok : tokenizations) {
if (!" ".equals(tok) && !"\n".equals(tok)) {
                // final sanitisation: strip any remaining spaces and newlines
                tok = NEWLINE_REGEX_PATTERN.matcher(tok).replaceAll("");
dateBlocks.add(tok + " <date>");
}
}
return processCommon(dateBlocks);
}
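    /*
     * Illustrative usage (a sketch, not part of the original source; the input
     * string is made up):
     *
     *   List<Date> dates = new DateParser().process("14 August 2003");
     *
     * Each recognized Date carries the raw day/month/year strings together with
     * their normalized values after normalizeAndClean().
     */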
public List<Date> process(List<LayoutToken> input) {
List<String> dateBlocks = new ArrayList<>();
for(LayoutToken tok : input) {
if (!" ".equals(tok.getText()) && !"\n".equals(tok.getText())) {
                // final sanitisation: strip any remaining spaces and newlines
                String normalizedText = NEWLINE_REGEX_PATTERN.matcher(tok.getText()).replaceAll("");
dateBlocks.add(normalizedText + " <date>");
}
}
return processCommon(dateBlocks);
}
protected List<Date> processCommon(List<String> input) {
if (CollectionUtils.isEmpty(input))
return null;
try {
String features = FeaturesVectorDate.addFeaturesDate(input);
String res = label(features);
List<LayoutToken> tokenization = input.stream()
.map(token -> new LayoutToken(token.split(" ")[0]))
.collect(Collectors.toList());
// extract results from the processed file
return resultExtraction(res, tokenization);
} catch (Exception e) {
throw new GrobidException("An exception on " + this.getClass().getName() + " occured while running Grobid.", e);
}
}
public List<Date> resultExtractionOld(String result) {
List<Date> dates = null;
StringTokenizer st2 = new StringTokenizer(result, "\n");
String lastTag = null;
org.grobid.core.data.Date date = new Date();
int lineCount = 0;
String currentMarker = null;
while (st2.hasMoreTokens()) {
String line = st2.nextToken();
if ((line.trim().length() == 0)) {
if (date.isNotNull()) {
if (dates == null)
dates = new ArrayList<>();
Date normalizedDate = normalizeAndClean(date);
dates.add(normalizedDate);
}
date = new Date();
continue;
}
StringTokenizer st3 = new StringTokenizer(line, "\t ");
int ll = st3.countTokens();
int i = 0;
String s1 = null;
String s2 = null;
while (st3.hasMoreTokens()) {
String s = st3.nextToken().trim();
if (i == 0) {
s2 = s; // string
} else if (i == ll - 1) {
s1 = s; // label
}
i++;
}
if ("<year>".equals(s1) || "I-<year>".equals(s1)) {
} else if ("<month>".equals(s1) || "I-<month>".equals(s1)) {
} else if ("<day>".equals(s1) || "I-<day>".equals(s1)) {
}
lastTag = s1;
lineCount++;
}
if (date.isNotNull()) {
if (dates == null)
dates = new ArrayList<>();
Date normalizedDate = normalizeAndClean(date);
dates.add(normalizedDate);
}
return dates;
}
public List<Date> resultExtraction(String result, List<LayoutToken> tokenizations) {
List<Date> dates = new ArrayList<>();
Date date = new Date();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.DATE, result, tokenizations);
List<TaggingTokenCluster> clusters = clusteror.cluster();
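        // a repeated field (e.g. a second <year> while one is already set)
        // marks the start of a new date: the current Date is normalized and
        // flushed into the result list before the new value is stored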
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
String clusterText = LayoutTokensUtil.toText(cluster.concatTokens());
if (clusterLabel.equals(TaggingLabels.DATE_YEAR)) {
if (isNotBlank(date.getYearString())) {
if (date.isNotNull()) {
Date normalizedDate = normalizeAndClean(date);
dates.add(normalizedDate);
date = new Date();
}
date.setYearString(clusterText);
} else {
date.setYearString(clusterText);
}
} else if (clusterLabel.equals(TaggingLabels.DATE_DAY)) {
if (isNotBlank(date.getDayString())) {
if (date.isNotNull()) {
Date normalizedDate = normalizeAndClean(date);
dates.add(normalizedDate);
date = new Date();
}
date.setDayString(clusterText);
} else {
date.setDayString(clusterText);
}
} else if (clusterLabel.equals(TaggingLabels.DATE_MONTH)) {
if (isNotBlank(date.getMonthString())) {
if (date.isNotNull()) {
Date normalizedDate = normalizeAndClean(date);
dates.add(normalizedDate);
date = new Date();
}
date.setMonthString(clusterText);
} else {
date.setMonthString(clusterText);
}
}
}
if (date.isNotNull()) {
Date normalizedDate = normalizeAndClean(date);
dates.add(normalizedDate);
}
return dates;
}
public static final Pattern jan =
Pattern.compile("([Jj]an$|[Jj]anuary$|[Jj]anvier$|[Jj]annewaori$|[Jj]anuar$|[Ee]nero$|[Jj]anuaro$|[Jj]anuari$|[Jj]aneiro$|[Gg]ennaio$|[Gg]en$|[Oo]cak$|[Jj]a$|(^1$)|(^01$)|(1月))");
public static final Pattern feb =
Pattern.compile("([Ff]eb$|[Ff]ebruary$|[Ff][eé]vrier$|[Ff]ebruar$|[Ff]ebrewaori$|[Ff]ebrero$|[Ff]evereiro$|[Ff]ebbraio$|[Ll]uty$|[Ss]tyczeń$|Ş$|ubat$|[Ff]e$|^2$|^02$|2月)");
public static final Pattern mar =
Pattern.compile("([Mm]ar$|[Mm]arch$|[Mm]ars$|[Mm]eert$|[Mm]ärz$|[Mm]arzo$|[Mm]arço$|[Mm]art$|[Mm]a$|[Mm]a$|^3$|^03$|3月)");
public static final Pattern apr =
Pattern.compile("([Aa]pr$|[Aa]br$|[Aa]vr$|[Aa]pril$|[Aa]vril$|[Aa]pril$|[Aa]prile$|[Aa]bril$|[Nn]isan$|[Aa]p$|^4$|^04$|4月)");
public static final Pattern may =
Pattern.compile("([Mm]ay$|[Mm]ai$|[Mm]ay$|[Mm]ayıs$|[Mm]ei$|[Mm]aio$|[Mm]aggio$|[Mm]eie$|[Mm]a$|^5$|^05$|5月)");
public static final Pattern jun =
Pattern.compile("([Jj]un$|[Jj]une$|[Jj]uin$|[Jj]uni$|[Jj]unho$|[Gg]iugno$|[Hh]aziran$|^6$|^06$|6月)");
public static final Pattern jul =
Pattern.compile("([Jj]ul$|[Jj]uly$|[Jj]uillet$|[Jj]uli$|[Tt]emmuz$|[Ll]uglio$|[Jj]ulho$|^7$|^07$|7月)");
public static final Pattern aug =
Pattern.compile("([Aa]ug$|[Aa]ugust$|[Aa]o[uû]t$|[Aa]ugust$|[Aa]gosto$|[Aa]ugustus$|[Aa]ğustos$|^8$|^08$|8月)");
public static final Pattern sep =
Pattern.compile("([Ss]ep$|[Ss]ept$|[Ss]eptember$|[Ss]eptembre$|[Ss]eptember$|[Ss]ettembre$|[Ss]etembro$|[Ee]ylül$|^9$|^09$|9月)");
public static final Pattern oct =
Pattern.compile("([Oo]ct$|[Oo]cto$|[Oo]ctober$|[Oo]ctobre$|[Ee]kim$|[Oo]ktober$|[Oo]ttobre$|[Oo]utubro$|^10$|10月)");
public static final Pattern nov =
Pattern.compile("([Nn]ov$|[Nn]ovember$|[Nn]ovembre$|[Kk]asım$|[Nn]oviembre$|[Nn]ovembro$|^11$|11月)");
public static final Pattern dec =
Pattern.compile("([Dd]ec$|[Dd]ecember$|[Dd][eé]cembre$|[Dd]iciembre$|[Aa]ralık$|^12$|12月)");
public static final Pattern[] months = {jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec};
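    // Worked example (comment only): normalize() scans the `months` array in
    // order and keeps the first pattern whose matcher finds a hit, so "March",
    // "mars", "03" and "3月" all resolve to month 3, while "Ekim" (Turkish)
    // resolves to month 10.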
public Date normalizeAndClean(Date date) {
return cleaning(normalize(date));
}
public Date normalize(Date date) {
Date normalizedDate = new Date();
// normalize day
if (isNotBlank(date.getDayString())) {
StringBuilder dayStringBis = new StringBuilder();
String dayString = date.getDayString().trim();
normalizedDate.setDayString(dayString);
for (int n = 0; n < dayString.length(); n++) {
char c = dayString.charAt(n);
if (Character.isDigit(c)) {
dayStringBis.append(c);
}
}
try {
int day = Integer.parseInt(dayStringBis.toString());
normalizedDate.setDay(day);
} catch (Exception e) {
//e.printStackTrace();
}
}
        // normalize month
if (isNotBlank(date.getMonthString())) {
String month = date.getMonthString().trim();
normalizedDate.setMonthString(month);
int n = 0;
while (n < 12) {
Matcher ma = months[n].matcher(month);
if (ma.find()) {
normalizedDate.setMonth(n + 1);
break;
}
n++;
}
}
        // normalize year
        if (isNotBlank(date.getYearString())) {
StringBuilder yearStringBis = new StringBuilder();
String yearString = date.getYearString().trim();
normalizedDate.setYearString(yearString);
for (int n = 0; n < yearString.length(); n++) {
char c = yearString.charAt(n);
if (Character.isDigit(c)) {
yearStringBis.append(c);
}
}
try {
int year = Integer.parseInt(yearStringBis.toString());
if ((year >= 20) && (year < 100)) {
year = year + 1900;
} else if ((year >= 0) && (year < 20)) {
year = year + 2000;
}
normalizedDate.setYear(year);
} catch (Exception e) {
//e.printStackTrace();
}
}
// if we don't have day and month, but a year with 8 digits, we might have a YYYYMMDD pattern
int maxYear = Calendar.getInstance().getWeekYear() + 4;
if (date.getDay() == -1 && date.getMonth() == -1 && date.getYear() != -1 && date.getYear() > 19000000 && date.getYear() < maxYear * 10000+1231) {
int yearPart = date.getYear() / 10000;
if (yearPart > 1900 && yearPart < maxYear) {
String yearString = ""+date.getYear();
String theMonthString = yearString.substring(4,6);
String theDayString = yearString.substring(6,8);
int dayPart = -1;
try {
dayPart = Integer.parseInt(theDayString);
} catch (Exception e) {
//e.printStackTrace();
}
int monthPart = -1;
try {
monthPart = Integer.parseInt(theMonthString);
} catch (Exception e) {
//e.printStackTrace();
}
if (dayPart != -1 && monthPart != -1) {
if (dayPart > 0 && dayPart < 32 && monthPart > 0 && monthPart < 13) {
normalizedDate.setDay(dayPart);
normalizedDate.setDayString(theDayString);
normalizedDate.setMonth(monthPart);
normalizedDate.setMonthString(theMonthString);
normalizedDate.setYear(yearPart);
}
}
}
}
return normalizedDate;
}
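    // Worked examples for normalize() above (comment only, derived from the code):
    //  - two-digit years use a fixed pivot of 20: "93" -> 1993, "07" -> 2007;
    //  - a day/month-less 8-digit year such as "20150312" is reinterpreted as a
    //    YYYYMMDD pattern, yielding year=2015, month=3, day=12, provided the year
    //    part falls between 1900 and the current year + 4.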
    /**
     * Simple and loose date validation, checking that:
     * - the year has no more than 4 digits
     * - the month and day have no more than 2 digits
     *
     * Assuming that incomplete dates of any form and nature can pass by here, only the information that is "out of bounds" is reverted.
     *
     * @return the date where invalid information is removed or reverted
     */
public static Date cleaning(Date originalDate) {
Date validatedDate = new Date();
if (originalDate.getDay() > -1) {
if (String.valueOf(originalDate.getDay()).length() < 3) {
validatedDate.setDay(originalDate.getDay());
validatedDate.setDayString(originalDate.getDayString());
}
}
if (originalDate.getMonth() > -1) {
if (String.valueOf(originalDate.getMonth()).length() < 3) {
validatedDate.setMonth(originalDate.getMonth());
validatedDate.setMonthString(originalDate.getMonthString());
}
}
if (originalDate.getYear() > -1) {
if (String.valueOf(originalDate.getYear()).length() < 5) {
validatedDate.setYear(originalDate.getYear());
validatedDate.setYearString(originalDate.getYearString());
}
}
return validatedDate;
}
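    // Example (comment only): cleaning() only reverts out-of-bounds values, so a
    // parsed year of 20159 (5 digits) is dropped while a valid day and month on
    // the same Date instance are preserved.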
    /**
     * Generate training data in the expected tagged format from raw date strings, without any string modification.
     */
public StringBuilder trainingExtraction(List<String> inputs) {
StringBuilder buffer = new StringBuilder();
try {
if (inputs == null)
return null;
if (inputs.size() == 0)
return null;
List<String> tokenizations = null;
List<String> dateBlocks = new ArrayList<String>();
for (String input : inputs) {
if (input == null)
continue;
//StringTokenizer st = new StringTokenizer(input, " \t\n"+TextUtilities.fullPunctuations, true);
//StringTokenizer st = new StringTokenizer(input, "([" + TextUtilities.punctuations, true);
tokenizations = analyzer.tokenize(input);
//if (st.countTokens() == 0)
if (tokenizations.size() == 0)
return null;
//while (st.hasMoreTokens()) {
// String tok = st.nextToken();
for(String tok : tokenizations) {
if (tok.equals("\n")) {
dateBlocks.add("@newline");
} else if (!tok.equals(" ")) {
dateBlocks.add(tok + " <date>");
}
//tokenizations.add(tok);
}
dateBlocks.add("\n");
}
String headerDate = FeaturesVectorDate.addFeaturesDate(dateBlocks);
String res = label(headerDate);
// extract results from the processed file
//System.out.print(res.toString());
StringTokenizer st2 = new StringTokenizer(res, "\n");
String lastTag = null;
boolean tagClosed = false;
int q = 0;
boolean addSpace;
boolean hasYear = false;
boolean hasMonth = false;
boolean hasDay = false;
String lastTag0;
String currentTag0;
boolean start = true;
while (st2.hasMoreTokens()) {
String line = st2.nextToken();
addSpace = false;
if ((line.trim().length() == 0)) {
// new date
buffer.append("</date>\n");
hasYear = false;
hasMonth = false;
hasDay = false;
buffer.append("\t<date>");
continue;
} else {
String theTok = tokenizations.get(q);
while (theTok.equals(" ")) {
addSpace = true;
q++;
theTok = tokenizations.get(q);
}
q++;
}
StringTokenizer st3 = new StringTokenizer(line, "\t");
int ll = st3.countTokens();
int i = 0;
String s1 = null;
String s2 = null;
//String s3 = null;
//List<String> localFeatures = new ArrayList<String>();
while (st3.hasMoreTokens()) {
String s = st3.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s); // string
} /*else if (i == ll - 2) {
s3 = s; // pre-label, in this case it should always be <date>
} */
else if (i == ll - 1) {
s1 = s; // label
}
i++;
}
if (start && (s1 != null)) {
buffer.append("\t<date>");
start = false;
}
lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
currentTag0 = null;
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
tagClosed = lastTag0 != null && testClosingTag(buffer, currentTag0, lastTag0);
/*if (newLine) {
if (tagClosed) {
buffer.append("\t\t\t\t\t\t\t<lb/>\n");
}
else {
buffer.append("<lb/>");
}
}*/
                // try each field in turn: <day>, <other>, <month>, <year>
                String output = writeField(s1, lastTag0, s2, "<day>", "<day>", addSpace, 0);
                if (output != null) {
                    // a repeated day starts a new date
                    if ((lastTag0 != null) && hasDay && !lastTag0.equals("<day>")) {
                        buffer.append("</date>\n");
                        hasYear = false;
                        hasMonth = false;
                        buffer.append("\t<date>");
                    }
                    hasDay = true;
                    buffer.append(output);
                    lastTag = s1;
                    continue;
                }
                output = writeField(s1, lastTag0, s2, "<other>", "<other>", addSpace, 0);
                if (output != null) {
                    buffer.append(output);
                    lastTag = s1;
                    continue;
                }
                output = writeField(s1, lastTag0, s2, "<month>", "<month>", addSpace, 0);
                if (output != null) {
                    // a repeated month starts a new date
                    if ((lastTag0 != null) && hasMonth && !lastTag0.equals("<month>")) {
                        buffer.append("</date>\n");
                        hasYear = false;
                        hasDay = false;
                        buffer.append("\t<date>");
                    }
                    buffer.append(output);
                    hasMonth = true;
                    lastTag = s1;
                    continue;
                }
                output = writeField(s1, lastTag0, s2, "<year>", "<year>", addSpace, 0);
                if (output != null) {
                    // a repeated year starts a new date
                    if ((lastTag0 != null) && hasYear && !lastTag0.equals("<year>")) {
                        buffer.append("</date>\n");
                        hasDay = false;
                        hasMonth = false;
                        buffer.append("\t<date>");
                    }
                    buffer.append(output);
                    hasYear = true;
                    lastTag = s1;
                    continue;
                }
lastTag = s1;
}
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
currentTag0 = "";
testClosingTag(buffer, currentTag0, lastTag0);
buffer.append("</date>\n");
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
return buffer;
}
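    // Sketch of the training output produced above (comment only): each detected
    // date is serialized as a TEI-like fragment, e.g.
    //   <date><day>10</day> <month>January</month> <year>2015</year></date>
    // and a new <date> element is opened whenever a field repeats.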
private String writeField(String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent) {
String result = null;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
if ((s1.equals("<other>") || s1.equals("I-<other>"))) {
if (addSpace)
result = " " + s2;
else
result = s2;
} else if (s1.equals(lastTag0) || s1.equals("I-" + lastTag0)) {
if (addSpace)
result = " " + s2;
else
result = s2;
} else {
result = "";
for (int i = 0; i < nbIndent; i++) {
result += "\t";
}
if (addSpace)
result += " " + outField + s2;
else
result += outField + s2;
}
}
return result;
}
private boolean testClosingTag(StringBuilder buffer,
String currentTag0,
String lastTag0) {
boolean res = false;
if (!currentTag0.equals(lastTag0)) {
res = true;
// we close the current tag
if (lastTag0.equals("<other>")) {
buffer.append("");
} else if (lastTag0.equals("<day>")) {
buffer.append("</day>");
} else if (lastTag0.equals("<month>")) {
buffer.append("</month>");
} else if (lastTag0.equals("<year>")) {
buffer.append("</year>");
} else {
res = false;
}
}
return res;
}
}
| 24,864 | 37.550388 | 191 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/Segmentation.java
|
package org.grobid.core.engines;
import eugfc.imageio.plugins.PNMRegistry;
import org.apache.commons.io.FileUtils;
import org.grobid.core.GrobidModels;
import org.grobid.core.document.BasicStructureBuilder;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidExceptionStatus;
import org.grobid.core.features.FeatureFactory;
import org.grobid.core.features.FeaturesVectorSegmentation;
import org.grobid.core.layout.*;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.LanguageUtilities;
import org.grobid.core.utilities.TextUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.*;
import java.util.*;
import java.util.regex.Matcher;
import static org.apache.commons.lang3.StringUtils.*;
// for image conversion we're using an ImageIO plugin for PPM format support
// see https://github.com/eug/imageio-pnm
// the jar for this plugin is located in the local repository
/**
 * Realise a high-level segmentation of a document into cover page, document header, page footer,
 * page header, note in margin, document body, bibliographical section (including each
 * bibliographical reference) and the possible annexes.
 *
 */
public class Segmentation extends AbstractParser {
/*
13 labels for this model:
cover page <cover>,
document header <header>,
page footer <footnote>,
page header <headnote>,
note in margin <marginnote>,
document body <body>,
bibliographical section <references>,
page number <page>,
annexes <annex>,
acknowledgement <acknowledgement>,
availability <availability>,
funding <funding>,
other <other>,
toc <toc> -> not yet used because not yet training data for this
*/
private static final Logger LOGGER = LoggerFactory.getLogger(Segmentation.class);
// default bins for relative position
private static final int NBBINS_POSITION = 12;
// default bins for inter-block spacing
private static final int NBBINS_SPACE = 5;
// default bins for block character density
private static final int NBBINS_DENSITY = 5;
// projection scale for line length
private static final int LINESCALE = 10;
private LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
private FeatureFactory featureFactory = FeatureFactory.getInstance();
    /**
     * Default constructor, loading the segmentation model.
     */
public Segmentation() {
super(GrobidModels.SEGMENTATION);
}
/**
* Segment a PDF document into high level zones: cover page, document header,
* page footer, page header, body, page numbers, biblio section and annexes.
*
* @param documentSource document source
* @return Document object with segmentation information
*/
public Document processing(DocumentSource documentSource, GrobidAnalysisConfig config) {
try {
Document doc = new Document(documentSource);
if (config.getAnalyzer() != null)
doc.setAnalyzer(config.getAnalyzer());
doc.addTokenizedDocument(config);
doc = prepareDocument(doc);
// if assets is true, the images are still there under directory pathXML+"_data"
// we copy them to the assetPath directory
File assetFile = config.getPdfAssetPath();
if (assetFile != null) {
dealWithImages(documentSource, doc, assetFile, config);
}
return doc;
} finally {
// keep it clean when leaving...
/*if (config.getPdfAssetPath() == null) {
// remove the pdfalto tmp file
DocumentSource.close(documentSource, false, true, true);
} else*/ {
// remove the pdfalto tmp files, including the sub-directories
DocumentSource.close(documentSource, true, true, true);
}
}
}
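    // Usage sketch (not part of the original class; the PDF path is hypothetical):
    //
    //   DocumentSource source = DocumentSource.fromPdf(new File("/path/to/article.pdf"));
    //   Document doc = new Segmentation().processing(source, GrobidAnalysisConfig.defaultInstance());
    //   // doc.getDocumentPart(...) then exposes the segmented zones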
public Document processing(String text) {
Document doc = Document.createFromText(text);
return prepareDocument(doc);
}
public Document prepareDocument(Document doc) {
List<LayoutToken> tokenizations = doc.getTokenizations();
if (tokenizations.size() > GrobidProperties.getPdfTokensMax()) {
throw new GrobidException("The document has " + tokenizations.size() + " tokens, but the limit is " + GrobidProperties.getPdfTokensMax(),
GrobidExceptionStatus.TOO_MANY_TOKENS);
}
doc.produceStatistics();
String content = getAllLinesFeatured(doc);
if (isNotEmpty(trim(content))) {
String labelledResult = label(content);
// set the different sections of the Document object
doc = BasicStructureBuilder.generalResultSegmentation(doc, labelledResult, tokenizations);
}
return doc;
}
private void dealWithImages(DocumentSource documentSource, Document doc, File assetFile, GrobidAnalysisConfig config) {
if (assetFile != null) {
// copy the files under the directory pathXML+"_data" (the asset files) into the path specified by assetPath
if (!assetFile.exists()) {
// we create it
if (assetFile.mkdir()) {
LOGGER.debug("Directory created: " + assetFile.getPath());
} else {
LOGGER.error("Failed to create directory: " + assetFile.getPath());
}
}
PNMRegistry.registerAllServicesProviders();
// filter all .jpg and .png files
File directoryPath = new File(documentSource.getXmlFile().getAbsolutePath() + "_data");
if (directoryPath.exists()) {
File[] files = directoryPath.listFiles();
if (files != null) {
int nbFiles = 0;
for (final File currFile : files) {
if (nbFiles > DocumentSource.PDFALTO_FILES_AMOUNT_LIMIT)
break;
String toLowerCaseName = currFile.getName().toLowerCase();
if (toLowerCaseName.endsWith(".png") || !config.isPreprocessImages()) {
try {
if (toLowerCaseName.endsWith(".svg")) {
continue;
}
FileUtils.copyFileToDirectory(currFile, assetFile);
nbFiles++;
} catch (IOException e) {
LOGGER.error("Cannot copy file " + currFile.getAbsolutePath() + " to " + assetFile.getAbsolutePath(), e);
}
} else if (toLowerCaseName.endsWith(".jpg")
|| toLowerCaseName.endsWith(".ppm")
// || currFile.getName().toLowerCase().endsWith(".pbm")
) {
String outputFilePath = "";
try {
final BufferedImage bi = ImageIO.read(currFile);
if (toLowerCaseName.endsWith(".jpg")) {
outputFilePath = assetFile.getPath() + File.separator +
toLowerCaseName.replace(".jpg", ".png");
}
/*else if (currFile.getName().toLowerCase().endsWith(".pbm")) {
outputFilePath = assetFile.getPath() + File.separator +
currFile.getName().toLowerCase().replace(".pbm",".png");
}*/
else {
outputFilePath = assetFile.getPath() + File.separator +
toLowerCaseName.replace(".ppm", ".png");
}
ImageIO.write(bi, "png", new File(outputFilePath));
nbFiles++;
} catch (IOException e) {
LOGGER.error("Cannot convert file " + currFile.getAbsolutePath() + " to " + outputFilePath, e);
}
}
}
}
}
// update the path of the image description stored in Document
if (config.isPreprocessImages()) {
List<GraphicObject> images = doc.getImages();
if (images != null) {
String subPath = assetFile.getPath();
int ind = subPath.lastIndexOf("/");
if (ind != -1)
subPath = subPath.substring(ind + 1, subPath.length());
for (GraphicObject image : images) {
String fileImage = image.getFilePath();
if (fileImage == null) {
continue;
}
fileImage = fileImage.replace(".ppm", ".png")
.replace(".jpg", ".png");
ind = fileImage.indexOf("/");
image.setFilePath(subPath + fileImage.substring(ind, fileImage.length()));
}
}
}
}
}
    /**
     * Addition of the features at line level for the complete document.
     * <p/>
     * This is an alternative to the token level, where the unit for labeling is the line - allowing faster
     * processing and involving fewer features.
     * Lexical features become the line prefix and suffix, and the textual feature unit is the first 10 characters of the
     * line without spaces.
     * The dictionary flags are at line level (i.e. the line contains a name mention, a place mention, a year, etc.)
     * Regarding layout features: font, size and style are the ones associated with the first token of the line.
     */
public String getAllLinesFeatured(Document doc) {
List<Block> blocks = doc.getBlocks();
if ((blocks == null) || blocks.size() == 0) {
return null;
}
        //guaranteeing quality of service. Otherwise, some PDFs may contain 300k blocks and thousands of extracted "images" that ruin performance
if (blocks.size() > GrobidProperties.getPdfBlocksMax()) {
throw new GrobidException("Postprocessed document is too big, contains: " + blocks.size(), GrobidExceptionStatus.TOO_MANY_BLOCKS);
}
//boolean graphicVector = false;
//boolean graphicBitmap = false;
// list of textual patterns at the head and foot of pages which can be re-occur on several pages
// (typically indicating a publisher foot or head notes)
Map<String, Integer> patterns = new TreeMap<String, Integer>();
Map<String, Boolean> firstTimePattern = new TreeMap<String, Boolean>();
for (Page page : doc.getPages()) {
// we just look at the two first and last blocks of the page
if ((page.getBlocks() != null) && (page.getBlocks().size() > 0)) {
for(int blockIndex=0; blockIndex < page.getBlocks().size(); blockIndex++) {
if ( (blockIndex < 2) || (blockIndex > page.getBlocks().size()-2)) {
Block block = page.getBlocks().get(blockIndex);
String localText = block.getText();
if ((localText != null) && (localText.length() > 0)) {
String[] lines = localText.split("[\\n\\r]");
if (lines.length > 0) {
String line = lines[0];
String pattern = featureFactory.getPattern(line);
if (pattern.length() > 8) {
Integer nb = patterns.get(pattern);
if (nb == null) {
patterns.put(pattern, Integer.valueOf(1));
firstTimePattern.put(pattern, false);
}
else
patterns.put(pattern, Integer.valueOf(nb+1));
}
}
}
}
}
}
}
String featuresAsString = getFeatureVectorsAsString(doc,
patterns, firstTimePattern);
return featuresAsString;
}
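    // Worked example for the repetitive pattern detection above (comment only):
    // a running footer such as "Journal of X, Vol. 3" occurring in the first or
    // last blocks of several pages produces the same pattern string with a count
    // greater than 1, so the lines carrying it are flagged repetitivePattern and
    // only the first occurrence is flagged firstRepetitivePattern.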
private String getFeatureVectorsAsString(Document doc, Map<String, Integer> patterns,
Map<String, Boolean> firstTimePattern) {
StringBuilder fulltext = new StringBuilder();
int documentLength = doc.getDocumentLenghtChar();
String currentFont = null;
int currentFontSize = -1;
boolean newPage;
boolean start = true;
int mm = 0; // page position
int nn = 0; // document position
int pageLength = 0; // length of the current page
double pageHeight = 0.0;
// vector for features
FeaturesVectorSegmentation features;
FeaturesVectorSegmentation previousFeatures = null;
for (Page page : doc.getPages()) {
pageHeight = page.getHeight();
newPage = true;
double spacingPreviousBlock = 0.0; // discretized
double lowestPos = 0.0;
pageLength = page.getPageLengthChar();
BoundingBox pageBoundingBox = page.getMainArea();
mm = 0;
//endPage = true;
if ((page.getBlocks() == null) || (page.getBlocks().size() == 0))
continue;
for(int blockIndex=0; blockIndex < page.getBlocks().size(); blockIndex++) {
Block block = page.getBlocks().get(blockIndex);
/*if (start) {
newPage = true;
start = false;
}*/
boolean graphicVector = false;
boolean graphicBitmap = false;
boolean lastPageBlock = false;
boolean firstPageBlock = false;
if (blockIndex == page.getBlocks().size()-1) {
lastPageBlock = true;
}
if (blockIndex == 0) {
firstPageBlock = true;
}
//endblock = false;
/*if (endPage) {
newPage = true;
mm = 0;
}*/
// check if we have a graphical object connected to the current block
List<GraphicObject> localImages = Document.getConnectedGraphics(block, doc);
if (localImages != null) {
for(GraphicObject localImage : localImages) {
if (localImage.getType() == GraphicObjectType.BITMAP)
graphicBitmap = true;
if (localImage.getType() == GraphicObjectType.VECTOR || localImage.getType() == GraphicObjectType.VECTOR_BOX)
graphicVector = true;
}
}
if (lowestPos > block.getY()) {
// we have a vertical shift, which can be due to a change of column or other particular layout formatting
spacingPreviousBlock = doc.getMaxBlockSpacing() / 5.0; // default
} else
spacingPreviousBlock = block.getY() - lowestPos;
String localText = block.getText();
if (localText == null)
continue;
// character density of the block
double density = 0.0;
if ( (block.getHeight() != 0.0) && (block.getWidth() != 0.0) &&
(block.getText() != null) && (!block.getText().contains("@PAGE")) &&
(!block.getText().contains("@IMAGE")) )
density = (double)block.getText().length() / (block.getHeight() * block.getWidth());
// is the current block in the main area of the page or not?
boolean inPageMainArea = true;
BoundingBox blockBoundingBox = BoundingBox.fromPointAndDimensions(page.getNumber(),
block.getX(), block.getY(), block.getWidth(), block.getHeight());
if (pageBoundingBox == null || (!pageBoundingBox.contains(blockBoundingBox) && !pageBoundingBox.intersect(blockBoundingBox)))
inPageMainArea = false;
String[] lines = localText.split("[\\n\\r]");
// set the max length of the lines in the block, in number of characters
int maxLineLength = 0;
for(int p=0; p<lines.length; p++) {
if (lines[p].length() > maxLineLength)
maxLineLength = lines[p].length();
}
List<LayoutToken> tokens = block.getTokens();
if ((tokens == null) || (tokens.size() == 0)) {
continue;
}
for (int li = 0; li < lines.length; li++) {
String line = lines[li];
/*boolean firstPageBlock = false;
boolean lastPageBlock = false;
if (newPage)
firstPageBlock = true;
if (endPage)
lastPageBlock = true;
*/
// for the layout information of the block, we take simply the first layout token
LayoutToken token = null;
if (tokens.size() > 0)
token = tokens.get(0);
double coordinateLineY = token.getY();
features = new FeaturesVectorSegmentation();
features.token = token;
features.line = line;
if ( (blockIndex < 2) || (blockIndex > page.getBlocks().size()-2)) {
String pattern = featureFactory.getPattern(line);
Integer nb = patterns.get(pattern);
if ((nb != null) && (nb > 1)) {
features.repetitivePattern = true;
Boolean firstTimeDone = firstTimePattern.get(pattern);
if ((firstTimeDone != null) && !firstTimeDone) {
features.firstRepetitivePattern = true;
firstTimePattern.put(pattern, true);
}
}
}
// we consider the first token of the line as usual lexical CRF token
// and the second token of the line as feature
StringTokenizer st2 = new StringTokenizer(line, " \t\f\u00A0");
// alternatively, use a grobid analyser
String text = null;
String text2 = null;
if (st2.hasMoreTokens())
text = st2.nextToken();
if (st2.hasMoreTokens())
text2 = st2.nextToken();
if (text == null)
continue;
// final sanitisation and filtering
text = text.replaceAll("[ \n\r]", "");
text = text.trim();
if ( (text.length() == 0) ||
// (text.equals("\n")) ||
// (text.equals("\r")) ||
// (text.equals("\n\r")) ||
(TextUtilities.filterLine(line))) {
continue;
}
features.string = text;
features.secondString = text2;
features.firstPageBlock = firstPageBlock;
features.lastPageBlock = lastPageBlock;
//features.lineLength = line.length() / LINESCALE;
features.lineLength = featureFactory
.linearScaling(line.length(), maxLineLength, LINESCALE);
features.punctuationProfile = TextUtilities.punctuationProfile(line);
if (graphicBitmap) {
features.bitmapAround = true;
}
if (graphicVector) {
features.vectorAround = true;
}
features.lineStatus = null;
features.punctType = null;
if ((li == 0) ||
((previousFeatures != null) && previousFeatures.blockStatus.equals("BLOCKEND"))) {
features.blockStatus = "BLOCKSTART";
} else if (li == lines.length - 1) {
features.blockStatus = "BLOCKEND";
//endblock = true;
} else if (features.blockStatus == null) {
features.blockStatus = "BLOCKIN";
}
if (newPage) {
features.pageStatus = "PAGESTART";
newPage = false;
//endPage = false;
if (previousFeatures != null)
previousFeatures.pageStatus = "PAGEEND";
} else {
features.pageStatus = "PAGEIN";
newPage = false;
//endPage = false;
}
if (text.length() == 1) {
features.singleChar = true;
}
if (Character.isUpperCase(text.charAt(0))) {
features.capitalisation = "INITCAP";
}
if (featureFactory.test_all_capital(text)) {
features.capitalisation = "ALLCAP";
}
if (featureFactory.test_digit(text)) {
features.digit = "CONTAINSDIGITS";
}
if (featureFactory.test_common(text)) {
features.commonName = true;
}
if (featureFactory.test_names(text)) {
features.properName = true;
}
if (featureFactory.test_month(text)) {
features.month = true;
}
Matcher m = featureFactory.isDigit.matcher(text);
if (m.find()) {
features.digit = "ALLDIGIT";
}
Matcher m2 = featureFactory.year.matcher(text);
if (m2.find()) {
features.year = true;
}
Matcher m3 = featureFactory.email.matcher(text);
if (m3.find()) {
features.email = true;
}
Matcher m4 = featureFactory.http.matcher(text);
if (m4.find()) {
features.http = true;
}
if (currentFont == null) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else if (!currentFont.equals(token.getFont())) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else
features.fontStatus = "SAMEFONT";
int newFontSize = (int) token.getFontSize();
if (currentFontSize == -1) {
currentFontSize = newFontSize;
features.fontSize = "HIGHERFONT";
} else if (currentFontSize == newFontSize) {
features.fontSize = "SAMEFONTSIZE";
} else if (currentFontSize < newFontSize) {
features.fontSize = "HIGHERFONT";
currentFontSize = newFontSize;
} else if (currentFontSize > newFontSize) {
features.fontSize = "LOWERFONT";
currentFontSize = newFontSize;
}
if (token.getBold())
features.bold = true;
if (token.getItalic())
features.italic = true;
// HERE horizontal information
// CENTERED
// LEFTAJUSTED
// CENTERED
if (features.capitalisation == null)
features.capitalisation = "NOCAPS";
if (features.digit == null)
features.digit = "NODIGIT";
//if (features.punctType == null)
// features.punctType = "NOPUNCT";
features.relativeDocumentPosition = featureFactory
.linearScaling(nn, documentLength, NBBINS_POSITION);
//System.out.println(nn + " " + documentLength + " " + NBBINS_POSITION + " " + features.relativeDocumentPosition);
features.relativePagePositionChar = featureFactory
.linearScaling(mm, pageLength, NBBINS_POSITION);
//System.out.println(mm + " " + pageLength + " " + NBBINS_POSITION + " " + features.relativePagePositionChar);
int pagePos = featureFactory
.linearScaling(coordinateLineY, pageHeight, NBBINS_POSITION);
//System.out.println(coordinateLineY + " " + pageHeight + " " + NBBINS_POSITION + " " + pagePos);
if (pagePos > NBBINS_POSITION)
pagePos = NBBINS_POSITION;
features.relativePagePosition = pagePos;
//System.out.println(coordinateLineY + "\t" + pageHeight);
if (spacingPreviousBlock != 0.0) {
features.spacingWithPreviousBlock = featureFactory
.linearScaling(spacingPreviousBlock-doc.getMinBlockSpacing(), doc.getMaxBlockSpacing()-doc.getMinBlockSpacing(), NBBINS_SPACE);
}
features.inMainArea = inPageMainArea;
if (density != -1.0) {
features.characterDensity = featureFactory
.linearScaling(density-doc.getMinCharacterDensity(), doc.getMaxCharacterDensity()-doc.getMinCharacterDensity(), NBBINS_DENSITY);
//System.out.println((density-doc.getMinCharacterDensity()) + " " + (doc.getMaxCharacterDensity()-doc.getMinCharacterDensity()) + " " + NBBINS_DENSITY + " " + features.characterDensity);
}
if (previousFeatures != null) {
String vector = previousFeatures.printVector();
fulltext.append(vector);
}
previousFeatures = features;
}
//System.out.println((spacingPreviousBlock-doc.getMinBlockSpacing()) + " " + (doc.getMaxBlockSpacing()-doc.getMinBlockSpacing()) + " " + NBBINS_SPACE + " "
// + featureFactory.linearScaling(spacingPreviousBlock-doc.getMinBlockSpacing(), doc.getMaxBlockSpacing()-doc.getMinBlockSpacing(), NBBINS_SPACE));
// lowest position of the block
lowestPos = block.getY() + block.getHeight();
// update page-level and document-level positions
if (tokens != null) {
mm += tokens.size();
nn += tokens.size();
}
}
}
if (previousFeatures != null)
fulltext.append(previousFeatures.printVector());
return fulltext.toString();
}
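    // Note on the discretized features above (comment only, assuming
    // FeatureFactory.linearScaling bins a value proportionally to its range):
    // with NBBINS_POSITION = 12, a token at character position 500 of a
    // 2000-character document falls in bin 500 * 12 / 2000 = 3 for
    // relativeDocumentPosition; block spacing and character density are binned
    // the same way over 5 bins after subtracting the document-level minimum.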
/**
* Process the content of the specified pdf and format the result as training data.
*
* @param inputFile input file
* @param pathFullText path to fulltext
* @param pathTEI path to TEI
* @param id id
*/
public void createTrainingSegmentation(String inputFile,
String pathFullText,
String pathTEI,
int id) {
DocumentSource documentSource = null;
try {
File file = new File(inputFile);
//documentSource = DocumentSource.fromPdf(file);
documentSource = DocumentSource.fromPdf(file, -1, -1, true, true, true);
Document doc = new Document(documentSource);
String PDFFileName = file.getName();
doc.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
if (doc.getBlocks() == null) {
throw new Exception("PDF parsing resulted in empty content");
}
doc.produceStatistics();
String fulltext = //getAllTextFeatured(doc, false);
getAllLinesFeatured(doc);
//List<LayoutToken> tokenizations = doc.getTokenizationsFulltext();
List<LayoutToken> tokenizations = doc.getTokenizations();
// we write the full text untagged (but featurized)
String outPathFulltext = pathFullText + File.separator +
PDFFileName.replace(".pdf", ".training.segmentation");
Writer writer = new OutputStreamWriter(new FileOutputStream(new File(outPathFulltext), false), "UTF-8");
writer.write(fulltext + "\n");
writer.close();
// also write the raw text as seen before segmentation
StringBuffer rawtxt = new StringBuffer();
for(LayoutToken txtline : tokenizations) {
rawtxt.append(txtline.getText());
}
String outPathRawtext = pathFullText + File.separator +
PDFFileName.replace(".pdf", ".training.segmentation.rawtxt");
FileUtils.writeStringToFile(new File(outPathRawtext), rawtxt.toString(), "UTF-8");
if (isNotBlank(fulltext)) {
String rese = label(fulltext);
StringBuffer bufferFulltext = trainingExtraction(rese, tokenizations, doc);
                // write the TEI file to reflect the exact layout of the text as extracted from the pdf
writer = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator +
PDFFileName.replace(".pdf", ".training.segmentation.tei.xml")), false), "UTF-8");
writer.write("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader>\n\t\t<fileDesc xml:id=\"" + id +
"\"/>\n\t</teiHeader>\n\t<text xml:lang=\"en\">\n");
writer.write(bufferFulltext.toString());
writer.write("\n\t</text>\n</tei>\n");
writer.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid training" +
" data generation for segmentation model.", e);
} finally {
DocumentSource.close(documentSource, true, true, true);
}
}
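    // For an input "article.pdf", the method above writes three files (names
    // derived from the code, directories from the supplied paths):
    //   article.training.segmentation         - featurized lines for the model
    //   article.training.segmentation.rawtxt  - raw text as seen before segmentation
    //   article.training.segmentation.tei.xml - pre-labelled TEI to correct manually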
    /**
     * Get the content of the pdf and produce a blank training data TEI file, i.e. a text-only TEI file
     * without any tags. This is useful to create training data from scratch at the same
     * level as the segmentation parser.
     *
     * @param file         input PDF file
     * @param pathFullText path to fulltext
     * @param pathTEI      path to TEI
     * @param id           id
     */
public void createBlankTrainingData(File file,
String pathFullText,
String pathTEI,
int id) {
DocumentSource documentSource = null;
try {
//File file = new File(inputFile);
//documentSource = DocumentSource.fromPdf(file);
documentSource = DocumentSource.fromPdf(file, -1, -1, true, true, true);
Document doc = new Document(documentSource);
String PDFFileName = file.getName();
doc.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
if (doc.getBlocks() == null) {
throw new Exception("PDF parsing resulted in empty content");
}
doc.produceStatistics();
String fulltext = //getAllTextFeatured(doc, false);
getAllLinesFeatured(doc);
//List<LayoutToken> tokenizations = doc.getTokenizationsFulltext();
List<LayoutToken> tokenizations = doc.getTokenizations();
// we write the full text untagged (but featurized)
String outPathFulltext = pathFullText + File.separator +
PDFFileName.replace(".pdf", ".training.blank");
Writer writer = new OutputStreamWriter(new FileOutputStream(new File(outPathFulltext), false), "UTF-8");
writer.write(fulltext + "\n");
writer.close();
// also write the raw text as seen before segmentation
StringBuffer rawtxt = new StringBuffer();
for(LayoutToken txtline : tokenizations) {
rawtxt.append(TextUtilities.HTMLEncode(txtline.getText()));
}
fulltext = rawtxt.toString();
if (isNotBlank(fulltext)) {
                // write the TEI file to reflect the exact layout of the text as extracted from the pdf
writer = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator +
PDFFileName.replace(".pdf", ".training.blank.tei.xml")), false), "UTF-8");
writer.write("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader>\n\t\t<fileDesc xml:id=\"f" + id +
"\"/>\n\t</teiHeader>\n\t<text xml:lang=\"en\">\n");
writer.write(fulltext);
writer.write("\n\t</text>\n</tei>\n");
writer.close();
}
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid training" +
                    " data generation for segmentation model.", e);
} finally {
DocumentSource.close(documentSource, true, true, true);
}
}
    /**
     * Extract results from a labelled full text in the training format without any string modification.
     *
     * @param result        the labelled result
     * @param tokenizations the document tokenization
     * @param doc           the document
     * @return extraction
     */
public StringBuffer trainingExtraction(String result,
List<LayoutToken> tokenizations,
Document doc) {
// this is the main buffer for the whole full text
StringBuffer buffer = new StringBuffer();
try {
List<Block> blocks = doc.getBlocks();
int currentBlockIndex = 0;
int indexLine = 0;
StringTokenizer st = new StringTokenizer(result, "\n");
String s1 = null; // current label/tag
String s2 = null; // current lexical token
String s3 = null; // current second lexical token
String lastTag = null;
// current token position
int p = 0;
boolean start = true;
while (st.hasMoreTokens()) {
boolean addSpace = false;
String tok = st.nextToken().trim();
String line = null; // current line
if (tok.length() == 0) {
continue;
}
StringTokenizer stt = new StringTokenizer(tok, " \t");
List<String> localFeatures = new ArrayList<String>();
int i = 0;
boolean newLine = true;
int ll = stt.countTokens();
while (stt.hasMoreTokens()) {
String s = stt.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s); // lexical token
} else if (i == 1) {
s3 = TextUtilities.HTMLEncode(s); // second lexical token
} else if (i == ll - 1) {
s1 = s; // current label
} else {
localFeatures.add(s); // we keep the feature values in case they appear useful
}
i++;
}
// as we process the document segmentation line by line, we don't use the usual
// tokenization to rebuild the text flow, but we get each line again from the
// text stored in the document blocks (similarly as when generating the features)
line = null;
while ((line == null) && (currentBlockIndex < blocks.size())) {
Block block = blocks.get(currentBlockIndex);
List<LayoutToken> tokens = block.getTokens();
if (tokens == null) {
currentBlockIndex++;
indexLine = 0;
continue;
}
String localText = block.getText();
if ((localText == null) || (localText.trim().length() == 0)) {
currentBlockIndex++;
indexLine = 0;
continue;
}
//String[] lines = localText.split("\n");
String[] lines = localText.split("[\\n\\r]");
if ((lines.length == 0) || (indexLine >= lines.length)) {
currentBlockIndex++;
indexLine = 0;
continue;
} else {
line = lines[indexLine];
indexLine++;
if (line.trim().length() == 0) {
line = null;
continue;
}
if (TextUtilities.filterLine(line)) {
line = null;
continue;
}
}
}
                if (line == null)
                    continue;
                line = TextUtilities.HTMLEncode(line);
if (newLine && !start) {
buffer.append("<lb/>");
}
String lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
String currentTag0 = null;
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
//boolean closeParagraph = false;
if (lastTag != null) {
//closeParagraph =
testClosingTag(buffer, currentTag0, lastTag0, s1);
}
boolean output;
output = writeField(buffer, line, s1, lastTag0, s2, "<header>", "<front>", addSpace, 3);
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<other>", "", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<headnote>", "<note place=\"headnote\">",
addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<footnote>", "<note place=\"footnote\">",
addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<marginnote>", "<note place=\"margin\">",
addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<page>", "<page>", addSpace, 3);
}
if (!output) {
//output = writeFieldBeginEnd(buffer, s1, lastTag0, s2, "<reference>", "<listBibl>", addSpace, 3);
output = writeField(buffer, line, s1, lastTag0, s2, "<references>", "<listBibl>", addSpace, 3);
}
if (!output) {
//output = writeFieldBeginEnd(buffer, s1, lastTag0, s2, "<body>", "<body>", addSpace, 3);
output = writeField(buffer, line, s1, lastTag0, s2, "<body>", "<body>", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<cover>", "<titlePage>", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<toc>", "<div type=\"toc\">", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<annex>", "<div type=\"annex\">", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<acknowledgement>", "<div type=\"acknowledgement\">", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<availability>", "<div type=\"availability\">", addSpace, 3);
}
if (!output) {
output = writeField(buffer, line, s1, lastTag0, s2, "<funding>", "<div type=\"funding\">", addSpace, 3);
}
lastTag = s1;
if (!st.hasMoreTokens()) {
if (lastTag != null) {
testClosingTag(buffer, "", currentTag0, s1);
}
}
if (start) {
start = false;
}
}
return buffer;
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
    /**
     * Write the current line to the buffer, opening the corresponding TEI element
     * when a new labelled field starts.
     *
     * @param buffer   output buffer
     * @param line     current line content
     * @param s1       current label
     * @param lastTag0 previous label, without the I- prefix
     * @param s2       current lexical token
     * @param field    label to match
     * @param outField opening TEI element to emit for this label
     * @param addSpace whether a space should be added before the token
     * @param nbIndent indentation level
     * @return true if the label matched and the line was written
     */
private boolean writeField(StringBuffer buffer,
String line,
String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent) {
boolean result = false;
// filter the output path
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
result = true;
line = line.replace("@BULLET", "\u2022");
            // if previous and current tag are the same, we output the token as-is
            if (s1.equals(lastTag0) || s1.equals("I-" + lastTag0)) {
                buffer.append(line);
            } else {
                // a new field starts: output the opening xml tag before the line
                for (int i = 0; i < nbIndent; i++) {
                    buffer.append("\t");
                }
                buffer.append(outField).append(line);
            }
}
return result;
}
/**
* This is for writing fields for fields where begin and end of field matter, like paragraph or item
*
* @param buffer
* @param s1
* @param lastTag0
* @param s2
* @param field
* @param outField
* @param addSpace
* @param nbIndent
* @return
*/
/*private boolean writeFieldBeginEnd(StringBuffer buffer,
String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent) {
boolean result = false;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
result = true;
if (lastTag0.equals("I-" + field)) {
if (addSpace)
buffer.append(" " + s2);
else
buffer.append(s2);
} /*else if (lastTag0.equals(field) && s1.equals(field)) {
if (addSpace)
buffer.append(" " + s2);
else
buffer.append(s2);
} else if (!lastTag0.equals("<citation_marker>") && !lastTag0.equals("<figure_marker>")
&& !lastTag0.equals("<figure>") && !lastTag0.equals("<reference_marker>")) {
for (int i = 0; i < nbIndent; i++) {
buffer.append("\t");
}
buffer.append(outField + s2);
}
else {
if (addSpace)
buffer.append(" " + s2);
else
buffer.append(s2);
}
}
return result;
}*/
    /**
     * Close the TEI element corresponding to the previous label when the label changes.
     *
     * @param buffer      output buffer
     * @param currentTag0 current label, without the I- prefix
     * @param lastTag0    previous label, without the I- prefix
     * @param currentTag  current label, as produced by the model
     * @return true if a closing tag was written
     */
private boolean testClosingTag(StringBuffer buffer,
String currentTag0,
String lastTag0,
String currentTag) {
boolean res = false;
        if (!currentTag0.equals(lastTag0)) {
// we close the current tag
if (lastTag0.equals("<header>")) {
buffer.append("</front>\n\n");
res = true;
} else if (lastTag0.equals("<body>")) {
buffer.append("</body>\n\n");
res = true;
} else if (lastTag0.equals("<headnote>")) {
buffer.append("</note>\n\n");
res = true;
} else if (lastTag0.equals("<footnote>")) {
buffer.append("</note>\n\n");
res = true;
} else if (lastTag0.equals("<marginnote>")) {
buffer.append("</note>\n\n");
res = true;
} else if (lastTag0.equals("<references>")) {
buffer.append("</listBibl>\n\n");
res = true;
} else if (lastTag0.equals("<page>")) {
buffer.append("</page>\n\n");
res = true;
} else if (lastTag0.equals("<cover>")) {
buffer.append("</titlePage>\n\n");
res = true;
} else if (lastTag0.equals("<toc>")) {
buffer.append("</div>\n\n");
res = true;
} else if (lastTag0.equals("<annex>")) {
buffer.append("</div>\n\n");
res = true;
} else if (lastTag0.equals("<acknowledgement>")) {
buffer.append("</div>\n\n");
res = true;
} else if (lastTag0.equals("<availability>")) {
buffer.append("</div>\n\n");
res = true;
} else if (lastTag0.equals("<funding>")) {
buffer.append("</div>\n\n");
res = true;
} else if (lastTag0.equals("<other>")) {
buffer.append("\n\n");
} else {
res = false;
}
}
return res;
}
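    // Mapping between segmentation labels and the TEI elements opened by
    // writeField() and closed by testClosingTag() above (comment only):
    //   <header> -> <front>                  <references> -> <listBibl>
    //   <body> -> <body>                     <cover> -> <titlePage>
    //   <headnote>/<footnote>/<marginnote> -> <note place="...">
    //   <toc>/<annex>/<acknowledgement>/<availability>/<funding> -> <div type="...">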
@Override
public void close() throws IOException {
super.close();
// ...
}
}
| 49,637 | 41.865285 | 199 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/HeaderParser.java
|
package org.grobid.core.engines;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Date;
import org.grobid.core.data.Keyword;
import org.grobid.core.data.Person;
import org.grobid.core.document.*;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.engines.label.SegmentationLabels;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeatureFactory;
import org.grobid.core.features.FeaturesVectorHeader;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.Block;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.tokenization.LabeledTokensContainer;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.*;
import org.grobid.core.utilities.counters.CntManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.commons.collections4.CollectionUtils.isNotEmpty;
import static org.grobid.core.data.Date.toISOString;
public class HeaderParser extends AbstractParser {
private static final Logger LOGGER = LoggerFactory.getLogger(HeaderParser.class);
private LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
private EngineParsers parsers;
// default bins for relative position
private static final int NBBINS_POSITION = 12;
// default bins for inter-block spacing
private static final int NBBINS_SPACE = 5;
// default bins for block character density
private static final int NBBINS_DENSITY = 5;
// projection scale for line length
private static final int LINESCALE = 10;
private Lexicon lexicon = Lexicon.getInstance();
public HeaderParser(EngineParsers parsers, CntManager cntManager) {
super(GrobidModels.HEADER, cntManager);
this.parsers = parsers;
GrobidProperties.getInstance();
}
public HeaderParser(EngineParsers parsers) {
super(GrobidModels.HEADER);
this.parsers = parsers;
GrobidProperties.getInstance();
}
/**
* Processing with application of the segmentation model
*/
public Pair<String, Document> processing(File input, String md5Str, BiblioItem resHeader, GrobidAnalysisConfig config) {
DocumentSource documentSource = null;
try {
documentSource = DocumentSource.fromPdf(input, config.getStartPage(), config.getEndPage());
documentSource.setMD5(md5Str);
Document doc = parsers.getSegmentationParser().processing(documentSource, config);
String tei = processingHeaderSection(config, doc, resHeader, true);
return new ImmutablePair<String, Document>(tei, doc);
} finally {
if (documentSource != null) {
documentSource.close(true, true, true);
}
}
}
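    // Usage sketch (not part of the original class; the path is hypothetical and
    // the md5 argument may be null):
    //
    //   BiblioItem resHeader = new BiblioItem();
    //   HeaderParser parser = new HeaderParser(new EngineParsers());
    //   Pair<String, Document> result = parser.processing(new File("/path/to/article.pdf"),
    //           null, resHeader, GrobidAnalysisConfig.defaultInstance());
    //   // result.getLeft() holds the TEI header, resHeader the structured metadata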
/**
* Header processing after application of the segmentation model
*/
public String processingHeaderSection(GrobidAnalysisConfig config, Document doc, BiblioItem resHeader, boolean serialize) {
try {
SortedSet<DocumentPiece> documentHeaderParts = doc.getDocumentPart(SegmentationLabels.HEADER);
List<LayoutToken> tokenizations = doc.getTokenizations();
if (documentHeaderParts != null) {
// List<LayoutToken> tokenizationsHeader = Document.getTokenizationParts(documentHeaderParts, tokenizations);
//String header = getSectionHeaderFeatured(doc, documentHeaderParts, true);
Pair<String, List<LayoutToken>> featuredHeader = getSectionHeaderFeatured(doc, documentHeaderParts);
String header = featuredHeader.getLeft();
List<LayoutToken> headerTokenization = featuredHeader.getRight();
String res = null;
if (StringUtils.isNotBlank(header)) {
res = label(header);
resHeader = resultExtraction(res, headerTokenization, resHeader);
}
// language identification
StringBuilder contentSample = new StringBuilder();
if (resHeader.getTitle() != null) {
contentSample.append(resHeader.getTitle());
}
if (resHeader.getAbstract() != null) {
contentSample.append("\n");
contentSample.append(resHeader.getAbstract());
}
if (contentSample.length() < 200) {
// we can exploit more textual content to ensure that the language identification will be
// correct
SortedSet<DocumentPiece> documentBodyParts = doc.getDocumentPart(SegmentationLabels.BODY);
if (documentBodyParts != null) {
String stringSample = Document.getTokenizationParts(documentBodyParts, tokenizations)
.stream().map(LayoutToken::toString)
.collect(Collectors.joining(" "));
contentSample.append(stringSample);
}
}
Language langu = languageUtilities.runLanguageId(contentSample.toString());
if (langu != null) {
String lang = langu.getLang();
doc.setLanguage(lang);
resHeader.setLanguage(lang);
}
if (resHeader.getAbstract() != null) {
resHeader.setAbstract(TextUtilities.dehyphenizeHard(resHeader.getAbstract()));
//resHeader.setAbstract(TextUtilities.dehyphenize(resHeader.getAbstract()));
}
BiblioItem.cleanTitles(resHeader);
if (resHeader.getTitle() != null) {
// String temp =
// utilities.dehyphenizeHard(resHeader.getTitle());
String temp = TextUtilities.dehyphenize(resHeader.getTitle());
temp = temp.trim();
if (temp.length() > 1) {
if (temp.startsWith("1"))
temp = temp.substring(1, temp.length());
temp = temp.trim();
}
resHeader.setTitle(temp);
}
if (resHeader.getBookTitle() != null) {
resHeader.setBookTitle(TextUtilities.dehyphenize(resHeader.getBookTitle()));
}
resHeader.setOriginalAuthors(resHeader.getAuthors());
boolean fragmentedAuthors = false;
boolean hasMarker = false;
List<Integer> authorsBlocks = new ArrayList<>();
List<List<LayoutToken>> authorSegments = new ArrayList<>();
List<LayoutToken> authorLayoutTokens = resHeader.getAuthorsTokensWorkingCopy();
if (isNotEmpty(authorLayoutTokens)) {
// split the list of layout tokens when token "\t" is met
List<LayoutToken> currentSegment = new ArrayList<>();
for(LayoutToken theToken : authorLayoutTokens) {
if (theToken.getText() != null && theToken.getText().equals("\t")) {
if (currentSegment.size() > 0)
authorSegments.add(currentSegment);
currentSegment = new ArrayList<>();
} else
currentSegment.add(theToken);
}
// last segment
if (currentSegment.size() > 0)
authorSegments.add(currentSegment);
if (authorSegments.size() > 1) {
fragmentedAuthors = true;
}
for (int k = 0; k < authorSegments.size(); k++) {
if (authorSegments.get(k).size() == 0)
continue;
List<Person> localAuthors = parsers.getAuthorParser()
.processingHeaderWithLayoutTokens(authorSegments.get(k), doc.getPDFAnnotations());
if (localAuthors != null) {
for (Person pers : localAuthors) {
resHeader.addFullAuthor(pers);
if (pers.getMarkers() != null) {
hasMarker = true;
}
authorsBlocks.add(k);
}
}
}
}
// remove invalid authors (no last name, noise, etc.)
resHeader.setFullAuthors(Person.sanityCheck(resHeader.getFullAuthors()));
resHeader.setFullAffiliations(
parsers.getAffiliationAddressParser().processReflow(res, tokenizations));
resHeader.attachEmails();
boolean attached = false;
if (fragmentedAuthors && !hasMarker) {
if (resHeader.getFullAffiliations() != null) {
if (resHeader.getFullAffiliations().size() == authorSegments.size()) {
int k = 0;
List<Person> persons = resHeader.getFullAuthors();
for (Person pers : persons) {
if (k < authorsBlocks.size()) {
int indd = authorsBlocks.get(k);
if (indd < resHeader.getFullAffiliations().size()) {
pers.addAffiliation(resHeader.getFullAffiliations().get(indd));
}
}
k++;
}
attached = true;
resHeader.setFullAffiliations(null);
resHeader.setAffiliation(null);
}
}
}
if (!attached) {
resHeader.attachAffiliations();
}
// remove duplicated authors
resHeader.setFullAuthors(Person.deduplicate(resHeader.getFullAuthors()));
if (resHeader.getEditors() != null) {
// TBD: consider segments also for editors, like for authors above
resHeader.setFullEditors(parsers.getAuthorParser().processingHeader(resHeader.getEditors()));
}
                // Below, the reference strings could be used to improve the metadata extraction;
                // this would have to be reviewed for something safer than a straightforward correction.
/*if (resHeader.getReference() != null) {
BiblioItem refer = parsers.getCitationParser().processingString(resHeader.getReference(), 0);
BiblioItem.correct(resHeader, refer);
}*/
// keyword post-processing
if (resHeader.getKeyword() != null) {
String keywords = TextUtilities.dehyphenize(resHeader.getKeyword());
keywords = BiblioItem.cleanKeywords(keywords);
//resHeader.setKeyword(keywords.replace("\n", " ").replace(" ", " "));
resHeader.setKeyword(keywords);
List<Keyword> keywordsSegmented = BiblioItem.segmentKeywords(keywords);
if ((keywordsSegmented != null) && (keywordsSegmented.size() > 0))
resHeader.setKeywords(keywordsSegmented);
}
// DOI pass
List<String> dois = doc.getDOIMatches();
if (isNotEmpty(dois) && dois.size() == 1) {
resHeader.setDOI(dois.get(0));
}
// normalization of dates
if (resHeader != null) {
if (resHeader.getNormalizedPublicationDate() == null) {
Optional<Date> normalisedPublicationDate = getNormalizedDate(resHeader.getPublicationDate());
if (normalisedPublicationDate.isPresent()) {
resHeader.setNormalizedPublicationDate(normalisedPublicationDate.get());
}
} else {
resHeader.setPublicationDate(toISOString(resHeader.getNormalizedPublicationDate()));
}
if (resHeader.getNormalizedSubmissionDate() == null) {
Optional<Date> normalizedSubmissionDate = getNormalizedDate(resHeader.getSubmissionDate());
if(normalizedSubmissionDate.isPresent()) {
resHeader.setNormalizedSubmissionDate(normalizedSubmissionDate.get());
}
} else {
resHeader.setSubmissionDate(toISOString(resHeader.getNormalizedSubmissionDate()));
}
if (resHeader.getNormalizedDownloadDate() == null) {
Optional<Date> normalizedDownloadDate = getNormalizedDate(resHeader.getDownloadDate());
if (normalizedDownloadDate.isPresent()) {
resHeader.setNormalizedDownloadDate(normalizedDownloadDate.get());
}
}else {
resHeader.setDownloadDate(toISOString(resHeader.getNormalizedDownloadDate()));
}
if (resHeader.getNormalizedServerDate() == null) {
Optional<Date> normalizedServerDate = getNormalizedDate(resHeader.getServerDate());
if(normalizedServerDate.isPresent()) {
resHeader.setNormalizedServerDate(normalizedServerDate.get());
}
} else {
resHeader.setServerDate(toISOString(resHeader.getNormalizedServerDate()));
}
}
resHeader = consolidateHeader(resHeader, config.getConsolidateHeader());
// we don't need to serialize if we process the full text (it would be done 2 times)
if (serialize) {
TEIFormatter teiFormatter = new TEIFormatter(doc, null);
StringBuilder tei = teiFormatter.toTEIHeader(resHeader, null, null, null, config);
tei.append("\t</text>\n");
tei.append("</TEI>\n");
return tei.toString();
} else
return null;
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return null;
}
/**
* Return the date, normalised using the DateParser
*/
private Optional<Date> getNormalizedDate(String rawDate) {
if (rawDate != null) {
List<Date> dates = parsers.getDateParser().processing(rawDate);
// TODO: most basic heuristic, we take the first date
        // LF: perhaps we could also validate that the dates are formatted decently
if (isNotEmpty(dates)) {
return Optional.of(dates.get(0));
} else {
return Optional.empty();
}
} else {
return Optional.empty();
}
}
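    /*
     * Illustrative usage sketch (added for clarity, not part of the original source;
     * the raw date string below is a made-up example):
     *
     *   Optional<Date> normalized = getNormalizedDate("10 September 2012");
     *   normalized.ifPresent(resHeader::setNormalizedPublicationDate);
     *
     * Per the heuristic above, the first date returned by the DateParser wins.
     */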
/**
* Return the header section with features to be processed by the sequence labelling model
*/
public Pair<String, List<LayoutToken>> getSectionHeaderFeatured(Document doc,
SortedSet<DocumentPiece> documentHeaderParts) {
FeatureFactory featureFactory = FeatureFactory.getInstance();
StringBuilder header = new StringBuilder();
String currentFont = null;
int currentFontSize = -1;
// vector for features
FeaturesVectorHeader features;
FeaturesVectorHeader previousFeatures = null;
double lineStartX = Double.NaN;
boolean indented = false;
boolean centered = false;
boolean endblock;
//for (Integer blocknum : blockDocumentHeaders) {
List<Block> blocks = doc.getBlocks();
if ((blocks == null) || blocks.size() == 0) {
return null;
}
List<LayoutToken> headerTokenizations = new ArrayList<LayoutToken>();
// find the largest, smallest and average size font on the header section
// note: only largest font size information is used currently
double largestFontSize = 0.0;
double smallestFontSize = 100000.0;
double averageFontSize;
double accumulatedFontSize = 0.0;
int nbTokens = 0;
for (DocumentPiece docPiece : documentHeaderParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
for (int blockIndex = dp1.getBlockPtr(); blockIndex <= dp2.getBlockPtr(); blockIndex++) {
Block block = blocks.get(blockIndex);
List<LayoutToken> tokens = block.getTokens();
if ((tokens == null) || (tokens.size() == 0)) {
continue;
}
for(LayoutToken token : tokens) {
/*if (" ".equals(token.getText()) || "\n".equals(token.getText())) {
// blank separators has font size 0.0,
// unicode normalization reduce to these 2 characters all the variants
continue;
}*/
if (token.getFontSize() > largestFontSize) {
largestFontSize = token.getFontSize();
}
if (token.getFontSize() < smallestFontSize) {
smallestFontSize = token.getFontSize();
}
accumulatedFontSize += token.getFontSize();
nbTokens++;
}
}
}
averageFontSize = accumulatedFontSize / nbTokens;
// TBD: this would need to be made more efficient, by applying the regex only to a limited
// part of the tokens
/*List<LayoutToken> tokenizations = doc.getTokenizations();
List<OffsetPosition> locationPositions = lexicon.tokenPositionsLocationNames(tokenizations);
List<OffsetPosition> urlPositions = lexicon.tokenPositionsUrlPattern(tokenizations);
List<OffsetPosition> emailPositions = lexicon.tokenPositionsEmailPattern(tokenizations);*/
for (DocumentPiece docPiece : documentHeaderParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
for (int blockIndex = dp1.getBlockPtr(); blockIndex <= dp2.getBlockPtr(); blockIndex++) {
Block block = blocks.get(blockIndex);
boolean newline = false;
boolean previousNewline = true;
endblock = false;
double spacingPreviousBlock = 0.0; // discretized
if (previousFeatures != null)
previousFeatures.blockStatus = "BLOCKEND";
List<LayoutToken> tokens = block.getTokens();
if ((tokens == null) || (tokens.size() == 0)) {
continue;
}
String localText = block.getText();
if (localText == null)
continue;
int startIndex = 0;
int n = 0;
if (blockIndex == dp1.getBlockPtr()) {
//n = block.getStartToken();
n = dp1.getTokenDocPos() - block.getStartToken();
startIndex = dp1.getTokenDocPos() - block.getStartToken();
}
// character density of the block
double density = 0.0;
if ( (block.getHeight() != 0.0) && (block.getWidth() != 0.0) &&
(block.getText() != null) && (!block.getText().contains("@PAGE")) &&
(!block.getText().contains("@IMAGE")) )
density = (double)block.getText().length() / (block.getHeight() * block.getWidth());
String[] lines = localText.split("[\\n\\r]");
// set the max length of the lines in the block, in number of characters
int maxLineLength = 0;
for(int p=0; p<lines.length; p++) {
if (lines[p].length() > maxLineLength)
maxLineLength = lines[p].length();
}
/*for (int li = 0; li < lines.length; li++) {
String line = lines[li];
features.lineLength = featureFactory
.linearScaling(line.length(), maxLineLength, LINESCALE);
features.punctuationProfile = TextUtilities.punctuationProfile(line);
}*/
List<OffsetPosition> locationPositions = lexicon.tokenPositionsLocationNames(tokens);
List<OffsetPosition> emailPositions = lexicon.tokenPositionsEmailPattern(tokens);
List<OffsetPosition> urlPositions = lexicon.tokenPositionsUrlPattern(tokens);
/*for (OffsetPosition position : emailPositions) {
System.out.println(position.start + " " + position.end + " / " + tokens.get(position.start) + " ... " + tokens.get(position.end));
}*/
while (n < tokens.size()) {
if (blockIndex == dp2.getBlockPtr()) {
if (n > dp2.getTokenDocPos() - block.getStartToken()) {
break;
}
}
LayoutToken token = tokens.get(n);
headerTokenizations.add(token);
String text = token.getText();
if (text == null) {
n++;
continue;
}
text = text.replace(" ", "");
if (text.length() == 0) {
n++;
continue;
}
if (text.equals("\n") || text.equals("\r")) {
previousNewline = true;
newline = false;
n++;
continue;
}
if (previousNewline) {
newline = true;
previousNewline = false;
if (previousFeatures != null) {
double previousLineStartX = lineStartX;
lineStartX = token.getX();
double characterWidth = token.width / token.getText().length();
if (!Double.isNaN(previousLineStartX)) {
// Indentation if line start is > 1 character width to the right of previous line start
if (lineStartX - previousLineStartX > characterWidth)
indented = true;
// Indentation ends if line start is > 1 character width to the left of previous line start
else if (previousLineStartX - lineStartX > characterWidth)
indented = false;
// Otherwise indentation is unchanged
}
}
} else{
newline = false;
}
// centered ?
// final sanitisation and filtering for the token
text = text.replaceAll("[ \n]", "");
if (TextUtilities.filterLine(text)) {
n++;
continue;
}
features = new FeaturesVectorHeader();
features.token = token;
features.string = text;
if (newline)
features.lineStatus = "LINESTART";
Matcher m0 = featureFactory.isPunct.matcher(text);
if (m0.find()) {
features.punctType = "PUNCT";
}
if (text.equals("(") || text.equals("[")) {
features.punctType = "OPENBRACKET";
} else if (text.equals(")") || text.equals("]")) {
features.punctType = "ENDBRACKET";
} else if (text.equals(".")) {
features.punctType = "DOT";
} else if (text.equals(",")) {
features.punctType = "COMMA";
} else if (text.equals("-")) {
features.punctType = "HYPHEN";
} else if (text.equals("\"") || text.equals("\'") || text.equals("`")) {
features.punctType = "QUOTE";
}
if (n == startIndex) {
// beginning of block
features.lineStatus = "LINESTART";
features.blockStatus = "BLOCKSTART";
} else if ((n == tokens.size() - 1) || (n+1 > dp2.getTokenDocPos() - block.getStartToken())) {
// end of block
features.lineStatus = "LINEEND";
previousNewline = true;
features.blockStatus = "BLOCKEND";
endblock = true;
} else {
// look ahead to see if we are at the end of a line within the block
boolean endline = false;
int ii = 1;
boolean endloop = false;
while ((n + ii < tokens.size()) && (!endloop)) {
LayoutToken tok = tokens.get(n + ii);
if (tok != null) {
String toto = tok.getText();
if (toto != null) {
if (toto.equals("\n") || text.equals("\r")) {
endline = true;
endloop = true;
} else {
if ((toto.trim().length() != 0)
&& (!text.equals("\u00A0"))
&& (!(toto.contains("@IMAGE")))
&& (!(toto.contains("@PAGE")))
&& (!text.contains(".pbm"))
&& (!text.contains(".ppm"))
&& (!text.contains(".png"))
&& (!text.contains(".svg"))
&& (!text.contains(".jpg"))) {
endloop = true;
}
}
}
}
if (n + ii == tokens.size() - 1) {
endblock = true;
endline = true;
}
ii++;
}
if ((!endline) && !(newline)) {
features.lineStatus = "LINEIN";
} else if (!newline) {
features.lineStatus = "LINEEND";
previousNewline = true;
}
if ((!endblock) && (features.blockStatus == null))
features.blockStatus = "BLOCKIN";
else if (features.blockStatus == null)
features.blockStatus = "BLOCKEND";
}
if (indented) {
features.alignmentStatus = "LINEINDENT";
}
else {
features.alignmentStatus = "ALIGNEDLEFT";
}
if (text.length() == 1) {
features.singleChar = true;
}
if (Character.isUpperCase(text.charAt(0))) {
features.capitalisation = "INITCAP";
}
if (featureFactory.test_all_capital(text)) {
features.capitalisation = "ALLCAP";
}
if (featureFactory.test_digit(text)) {
features.digit = "CONTAINSDIGITS";
}
Matcher m = featureFactory.isDigit.matcher(text);
if (m.find()) {
features.digit = "ALLDIGIT";
}
if (featureFactory.test_common(text)) {
features.commonName = true;
}
if (featureFactory.test_names(text)) {
features.properName = true;
}
if (featureFactory.test_month(text)) {
features.month = true;
}
Matcher m2 = featureFactory.year.matcher(text);
if (m2.find()) {
features.year = true;
}
// check token offsets for email and http address, or known location
if (locationPositions != null) {
for(OffsetPosition thePosition : locationPositions) {
if (n >= thePosition.start && n <= thePosition.end) {
features.locationName = true;
break;
}
}
}
if (emailPositions != null) {
for(OffsetPosition thePosition : emailPositions) {
if (n >= thePosition.start && n <= thePosition.end) {
features.email = true;
break;
}
}
}
if (urlPositions != null) {
for(OffsetPosition thePosition : urlPositions) {
if (n >= thePosition.start && n <= thePosition.end) {
features.http = true;
break;
}
}
}
if (currentFont == null) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else if (!currentFont.equals(token.getFont())) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else
features.fontStatus = "SAMEFONT";
int newFontSize = (int) token.getFontSize();
if (currentFontSize == -1) {
currentFontSize = newFontSize;
features.fontSize = "HIGHERFONT";
} else if (currentFontSize == newFontSize) {
features.fontSize = "SAMEFONTSIZE";
} else if (currentFontSize < newFontSize) {
features.fontSize = "HIGHERFONT";
currentFontSize = newFontSize;
} else if (currentFontSize > newFontSize) {
features.fontSize = "LOWERFONT";
currentFontSize = newFontSize;
}
if (token.getFontSize() == largestFontSize)
features.largestFont = true;
if (token.getFontSize() == smallestFontSize)
features.smallestFont = true;
if (token.getFontSize() > averageFontSize)
features.largerThanAverageFont = true;
// not used
/*if (token.isSuperscript())
features.superscript = true;*/
if (token.getBold())
features.bold = true;
if (token.getItalic())
features.italic = true;
if (features.capitalisation == null)
features.capitalisation = "NOCAPS";
if (features.digit == null)
features.digit = "NODIGIT";
if (features.punctType == null)
features.punctType = "NOPUNCT";
/*if (spacingPreviousBlock != 0.0) {
features.spacingWithPreviousBlock = featureFactory
.linearScaling(spacingPreviousBlock-doc.getMinBlockSpacing(), doc.getMaxBlockSpacing()-doc.getMinBlockSpacing(), NBBINS_SPACE);
}*/
if (density != -1.0) {
features.characterDensity = featureFactory
.linearScaling(density-doc.getMinCharacterDensity(), doc.getMaxCharacterDensity()-doc.getMinCharacterDensity(), NBBINS_DENSITY);
//System.out.println((density-doc.getMinCharacterDensity()) + " " + (doc.getMaxCharacterDensity()-doc.getMinCharacterDensity()) + " " + NBBINS_DENSITY + " " + features.characterDensity);
}
if (previousFeatures != null)
header.append(previousFeatures.printVector());
previousFeatures = features;
n++;
}
if (previousFeatures != null) {
previousFeatures.blockStatus = "BLOCKEND";
previousFeatures.lineStatus = "LINEEND";
header.append(previousFeatures.printVector());
previousFeatures = null;
}
}
}
return Pair.of(header.toString(), headerTokenizations);
}
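    /*
     * Downstream usage sketch (illustrative only, using names from this class): the
     * left element of the returned pair feeds the sequence labelling model, while
     * the right element keeps the LayoutToken alignment so that predicted labels
     * can be re-attached to the PDF layout:
     *
     *   Pair<String, List<LayoutToken>> featured = getSectionHeaderFeatured(doc, documentHeaderParts);
     *   String labelledResult = label(featured.getLeft());
     *   BiblioItem biblio = resultExtraction(labelledResult, featured.getRight(), new BiblioItem());
     */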
/**
* Extract results from a labelled header.
*
* @param result result
* @param tokenizations list of tokens
* @param biblio biblio item
* @return a biblio item
*/
public BiblioItem resultExtraction(String result, List<LayoutToken> tokenizations, BiblioItem biblio) {
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.HEADER, result, tokenizations);
List<TaggingTokenCluster> clusters = clusteror.cluster();
biblio.generalResultMapping(result, tokenizations);
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
String clusterContent = LayoutTokensUtil.normalizeDehyphenizeText(cluster.concatTokens());
String clusterNonDehypenizedContent = LayoutTokensUtil.toText(cluster.concatTokens());
if (clusterLabel.equals(TaggingLabels.HEADER_TITLE)) {
/*if (biblio.getTitle() != null && isDifferentContent(biblio.getTitle(), clusterContent))
biblio.setTitle(biblio.getTitle() + clusterContent);
else*/
if (biblio.getTitle() == null) {
biblio.setTitle(clusterContent);
}
} else if (clusterLabel.equals(TaggingLabels.HEADER_AUTHOR)) {
//if (biblio.getAuthors() != null && isDifferentandNotIncludedContent(biblio.getAuthors(), clusterContent)) {
if (biblio.getAuthors() != null) {
biblio.setAuthors(biblio.getAuthors() + "\t" + clusterNonDehypenizedContent);
//biblio.addAuthorsToken(new LayoutToken("\n", TaggingLabels.HEADER_AUTHOR));
biblio.collectAuthorsToken(new LayoutToken("\t", TaggingLabels.HEADER_AUTHOR));
List<LayoutToken> tokens = cluster.concatTokens();
biblio.collectAuthorsTokens(tokens);
} else {
biblio.setAuthors(clusterNonDehypenizedContent);
List<LayoutToken> tokens = cluster.concatTokens();
biblio.collectAuthorsTokens(tokens);
}
} /*else if (clusterLabel.equals(TaggingLabels.HEADER_TECH)) {
biblio.setItem(BiblioItem.TechReport);
if (biblio.getBookType() != null) {
biblio.setBookType(biblio.getBookType() + clusterContent);
} else
biblio.setBookType(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_LOCATION)) {
if (biblio.getLocation() != null) {
biblio.setLocation(biblio.getLocation() + clusterContent);
} else
biblio.setLocation(clusterContent);
}*/
else if (clusterLabel.equals(TaggingLabels.HEADER_MEETING)) {
if (biblio.getMeeting() != null) {
biblio.setMeeting(biblio.getMeeting() + ", " + clusterContent);
} else
biblio.setMeeting(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_DATE)) {
// it appears that the same date is quite often repeated,
// we should check, before adding a new date segment, if it is
// not already present
// alternatively we can only keep the first continuous date
/*if (biblio.getPublicationDate() != null && isDifferentandNotIncludedContent(biblio.getPublicationDate(), clusterContent))
biblio.setPublicationDate(biblio.getPublicationDate() + " " + clusterContent);
else*/
// for checking if the date is a server date, we simply look at the string
/*if (biblio.getServerDate() == null) {
if (clusterContent.toLowerCase().indexOf("server") != -1) {
biblio.setServerDate(clusterNonDehypenizedContent);
continue;
}
}*/
if (biblio.getPublicationDate() != null && biblio.getPublicationDate().length() < clusterNonDehypenizedContent.length())
biblio.setPublicationDate(clusterNonDehypenizedContent);
else if (biblio.getPublicationDate() == null)
biblio.setPublicationDate(clusterNonDehypenizedContent);
} /*else if (clusterLabel.equals(TaggingLabels.HEADER_DATESUB)) {
// it appears that the same date is quite often repeated,
// we should check, before adding a new date segment, if it is
// not already present
if (biblio.getSubmissionDate() != null && isDifferentandNotIncludedContent(biblio.getSubmissionDate(), clusterNonDehypenizedContent)) {
biblio.setSubmissionDate(biblio.getSubmissionDate() + " " + clusterNonDehypenizedContent);
} else
biblio.setSubmissionDate(clusterNonDehypenizedContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_DOWNLOAD)) {
// it appears that the same date is quite often repeated,
// we should check, before adding a new date segment, if it is
// not already present
if (biblio.getDownloadDate() != null && isDifferentandNotIncludedContent(biblio.getDownloadDate(), clusterNonDehypenizedContent)) {
biblio.setDownloadDate(biblio.getDownloadDate() + " " + clusterNonDehypenizedContent);
} else
biblio.setDownloadDate(clusterNonDehypenizedContent);
}*/ else if (clusterLabel.equals(TaggingLabels.HEADER_PAGE)) {
/*if (biblio.getPageRange() != null) {
biblio.setPageRange(biblio.getPageRange() + clusterContent);
}*/
if (biblio.getPageRange() == null)
biblio.setPageRange(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_EDITOR)) {
if (biblio.getEditors() != null) {
biblio.setEditors(biblio.getEditors() + "\n" + clusterNonDehypenizedContent);
} else
biblio.setEditors(clusterNonDehypenizedContent);
} /*else if (clusterLabel.equals(TaggingLabels.HEADER_INSTITUTION)) {
if (biblio.getInstitution() != null) {
biblio.setInstitution(biblio.getInstitution() + clusterContent);
} else
biblio.setInstitution(clusterContent);
}*/ else if (clusterLabel.equals(TaggingLabels.HEADER_NOTE)) {
if (biblio.getNote() != null) {
biblio.setNote(biblio.getNote() + " " + clusterContent);
} else
biblio.setNote(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_ABSTRACT)) {
if (biblio.getAbstract() != null) {
// this will need to be reviewed with more training data, for the moment
// avoid concatenation for abstracts as it brings more noise than correct pieces
//biblio.setAbstract(biblio.getAbstract() + " " + clusterContent);
} else {
biblio.setAbstract(clusterContent);
List<LayoutToken> tokens = cluster.concatTokens();
biblio.collectAbstractTokens(tokens);
}
} else if (clusterLabel.equals(TaggingLabels.HEADER_REFERENCE)) {
//if (biblio.getReference() != null) {
if (biblio.getReference() != null && biblio.getReference().length() < clusterNonDehypenizedContent.length()) {
biblio.setReference(clusterNonDehypenizedContent);
} else
biblio.setReference(clusterNonDehypenizedContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_FUNDING)) {
if (biblio.getFunding() != null) {
biblio.setFunding(biblio.getFunding() + " \n " + clusterContent);
} else
biblio.setFunding(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_COPYRIGHT)) {
if (biblio.getCopyright() != null) {
biblio.setCopyright(biblio.getCopyright() + " " + clusterContent);
} else
biblio.setCopyright(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_AFFILIATION)) {
                // affiliation **markers** should be marked SINGLECHAR LINESTART
if (biblio.getAffiliation() != null) {
biblio.setAffiliation(biblio.getAffiliation() + " ; " + clusterContent);
} else
biblio.setAffiliation(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_ADDRESS)) {
if (biblio.getAddress() != null) {
biblio.setAddress(biblio.getAddress() + " " + clusterContent);
} else
biblio.setAddress(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_EMAIL)) {
if (biblio.getEmail() != null) {
biblio.setEmail(biblio.getEmail() + "\t" + clusterNonDehypenizedContent);
} else
biblio.setEmail(clusterNonDehypenizedContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_PUBNUM)) {
if (biblio.getPubnum() != null && isDifferentandNotIncludedContent(biblio.getPubnum(), clusterContent)) {
String currentPubnum = biblio.getPubnum();
biblio.setPubnum(clusterContent);
biblio.checkIdentifier();
biblio.setPubnum(currentPubnum);
} else {
biblio.setPubnum(clusterContent);
biblio.checkIdentifier();
}
} else if (clusterLabel.equals(TaggingLabels.HEADER_KEYWORD)) {
if (biblio.getKeyword() != null) {
biblio.setKeyword(biblio.getKeyword() + " \n " + clusterContent);
} else
biblio.setKeyword(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_AVAILABILITY)) {
if (StringUtils.isNotBlank(biblio.getAvailabilityStmt())) {
biblio.setAvailabilityStmt(biblio.getAvailabilityStmt() + " \n " + clusterContent);
} else{
biblio.setAvailabilityStmt(clusterContent);
}
} else if (clusterLabel.equals(TaggingLabels.HEADER_PHONE)) {
if (biblio.getPhone() != null) {
biblio.setPhone(biblio.getPhone() + clusterNonDehypenizedContent);
} else
biblio.setPhone(clusterNonDehypenizedContent);
} /*else if (clusterLabel.equals(TaggingLabels.HEADER_DEGREE)) {
if (biblio.getDegree() != null) {
biblio.setDegree(biblio.getDegree() + clusterContent);
} else
biblio.setDegree(clusterContent);
}*/ else if (clusterLabel.equals(TaggingLabels.HEADER_WEB)) {
if (biblio.getWeb() != null) {
biblio.setWeb(biblio.getWeb() + clusterNonDehypenizedContent);
} else
biblio.setWeb(clusterNonDehypenizedContent);
} /*else if (clusterLabel.equals(TaggingLabels.HEADER_DEDICATION)) {
if (biblio.getDedication() != null) {
biblio.setDedication(biblio.getDedication() + clusterContent);
} else
biblio.setDedication(clusterContent);
}*/ else if (clusterLabel.equals(TaggingLabels.HEADER_SUBMISSION)) {
if (biblio.getSubmission() != null) {
biblio.setSubmission(biblio.getSubmission() + " " + clusterContent);
} else
biblio.setSubmission(clusterContent);
} /*else if (clusterLabel.equals(TaggingLabels.HEADER_ENTITLE)) {
if (biblio.getEnglishTitle() != null) {
// if (cluster.getFeatureBlock().contains("LINESTART")) {
// biblio.setEnglishTitle(biblio.getEnglishTitle() + " " + clusterContent);
// } else
biblio.setEnglishTitle(biblio.getEnglishTitle() + clusterContent);
} else
biblio.setEnglishTitle(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_VERSION)) {
if (biblio.getVersion() != null && isDifferentandNotIncludedContent(biblio.getVersion(), clusterNonDehypenizedContent)) {
biblio.setVersion(biblio.getVersion() + clusterNonDehypenizedContent);
} else
biblio.setVersion(clusterNonDehypenizedContent);
}*/ else if (clusterLabel.equals(TaggingLabels.HEADER_DOCTYPE)) {
if (biblio.getDocumentType() != null && isDifferentContent(biblio.getDocumentType(), clusterContent)) {
biblio.setDocumentType(biblio.getDocumentType() + " \n " + clusterContent);
} else
biblio.setDocumentType(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_WORKINGGROUP)) {
/*if (biblio.getWorkingGroup() != null && isDifferentandNotIncludedContent(biblio.getWorkingGroup(), clusterContent)) {
biblio.setWorkingGroup(biblio.getWorkingGroup() + " " + clusterContent);
}*/
if (biblio.getWorkingGroup() == null)
biblio.setWorkingGroup(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_PUBLISHER)) {
/*if (biblio.getPublisher() != null && isDifferentandNotIncludedContent(biblio.getPublisher(), clusterContent)) {
biblio.setPublisher(biblio.getPublisher() + " " + clusterContent);
}*/
if (biblio.getPublisher() == null)
biblio.setPublisher(clusterContent);
} else if (clusterLabel.equals(TaggingLabels.HEADER_JOURNAL)) {
/*if (biblio.getJournal() != null && isDifferentandNotIncludedContent(biblio.getJournal(), clusterContent)) {
biblio.setJournal(biblio.getJournal() + " " + clusterContent);
}*/
if (biblio.getJournal() == null)
biblio.setJournal(clusterContent);
}
/*else if (clusterLabel.equals(TaggingLabels.HEADER_INTRO)) {
return biblio;
}*/
}
return biblio;
}
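    /*
     * Illustrative example (not part of the original source): the clusteror groups
     * contiguous tokens sharing the same label, so a labelled sequence such as
     *
     *   Deep      I-<title>
     *   Learning  <title>
     *   John      I-<author>
     *   Doe       <author>
     *
     * yields two clusters, mapped above to biblio.setTitle("Deep Learning") and
     * biblio.setAuthors("John Doe").
     */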
/**
     * In the context of field extraction, check that a newly extracted content is not redundant
     * with the already extracted content. Returns true if the new content is different.
*/
private boolean isDifferentContent(String existingContent, String newContent) {
if (existingContent == null) {
return true;
}
if (newContent == null) {
return false;
}
String newContentSimplified = newContent.toLowerCase();
newContentSimplified = newContentSimplified.replace(" ", "").trim();
        String existingContentSimplified = existingContent.toLowerCase();
        existingContentSimplified = existingContentSimplified.replace(" ", "").trim();
        return !newContentSimplified.equals(existingContentSimplified);
}
/**
     * In the context of field extraction, this variant of the previous method checks that a newly
     * extracted content is neither redundant with, nor a substring of, the already
     * extracted content. Returns true if the new content is genuinely new.
*/
private boolean isDifferentandNotIncludedContent(String existingContent, String newContent) {
if (existingContent == null) {
return true;
}
if (newContent == null) {
return false;
}
String newContentSimplified = newContent.toLowerCase();
newContentSimplified = newContentSimplified.replace(" ", "").trim();
newContentSimplified = newContentSimplified.replace("-", "").trim();
String existingContentSimplified = existingContent.toLowerCase();
existingContentSimplified = existingContentSimplified.replace(" ", "").trim();
existingContentSimplified = existingContentSimplified.replace("-", "").trim();
        return !(newContentSimplified.equals(existingContentSimplified)
                || existingContentSimplified.contains(newContentSimplified));
}
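    /*
     * Worked example (illustrative only): after lowercasing and removing spaces and
     * hyphens, "Self-Attention" and "self attention" both reduce to "selfattention",
     * so isDifferentandNotIncludedContent("Self-Attention", "self attention")
     * returns false, while genuinely new content such as "Transformers" returns true.
     */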
private List<LayoutToken> getLayoutTokens(TaggingTokenCluster cluster) {
List<LayoutToken> tokens = new ArrayList<>();
for (LabeledTokensContainer container : cluster.getLabeledTokensContainers()) {
tokens.addAll(container.getLayoutTokens());
}
return tokens;
}
/**
* Extract results from a labelled header in the training format without any
* string modification.
*
* @param result result
* @param tokenizations list of tokens
* @return a result
*/
public StringBuilder trainingExtraction(String result, List<LayoutToken> tokenizations) {
// this is the main buffer for the whole header
StringBuilder buffer = new StringBuilder();
StringTokenizer st = new StringTokenizer(result, "\n");
String s1 = null;
String s2 = null;
String lastTag = null;
int p = 0;
while (st.hasMoreTokens()) {
boolean addSpace = false;
String tok = st.nextToken().trim();
if (tok.length() == 0) {
continue;
}
StringTokenizer stt = new StringTokenizer(tok, "\t");
// List<String> localFeatures = new ArrayList<String>();
int i = 0;
boolean newLine = false;
int ll = stt.countTokens();
while (stt.hasMoreTokens()) {
String s = stt.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s);
//s2 = s;
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p).t();
if (tokOriginal.equals(" ")
|| tokOriginal.equals("\u00A0")) {
addSpace = true;
} else if (tokOriginal.equals(s)) {
strop = true;
}
p++;
}
} else if (i == ll - 1) {
s1 = s;
} else {
if (s.equals("LINESTART"))
newLine = true;
// localFeatures.add(s);
}
i++;
}
if (newLine) {
buffer.append("<lb/>");
}
String lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
String currentTag0 = null;
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
if (lastTag != null) {
testClosingTag(buffer, currentTag0, lastTag0);
}
boolean output;
output = writeField(buffer, s1, lastTag0, s2, "<title>", "<docTitle>\n\t<titlePart>", addSpace);
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<author>", "<byline>\n\t<docAuthor>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<location>", "<address>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<address>", "<address>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<date>", "<date>", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<date-submission>", "<date type=\"submission\">", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<booktitle>", "<booktitle>", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<page>", "<page>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<publisher>", "<publisher>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<journal>", "<journal>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<institution>", "<byline>\n\t<affiliation>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<affiliation>", "<byline>\n\t<affiliation>", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<volume>", "<volume>", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<editor>", "<editor>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<note>", "", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<abstract>", "<div type=\"abstract\">", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<email>", "<email>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<pubnum>", "<idno>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<keyword>", "<keyword>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<phone>", "<phone>", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<degree>", "<note type=\"degree\">", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<web>", "<ptr type=\"web\">", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<dedication>", "<dedication>", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<meeting>", "<meeting>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<submission>", "<note type=\"submission\">", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<entitle>", "<note type=\"title\">", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<reference>", "<reference>", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<copyright>", "<note type=\"copyright\">", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<funding>", "<note type=\"funding\">", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<intro>", "<p type=\"introduction\">", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<doctype>", "<note type=\"doctype\">", addSpace);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<version>", "<note type=\"version\">", addSpace);
}*/
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<date-download>", "<date type=\"download\">", addSpace);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<group>", "<note type=\"group\">", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<availability>", "<note type=\"availability\">", addSpace);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<other>", "", addSpace);
}
/*if (((s1.equals("<intro>")) || (s1.equals("I-<intro>"))) && intro) {
break;
}*/
lastTag = s1;
if (!st.hasMoreTokens()) {
if (lastTag != null) {
testClosingTag(buffer, "", currentTag0);
}
}
}
return buffer;
}
private void testClosingTag(StringBuilder buffer, String currentTag0, String lastTag0) {
if (!currentTag0.equals(lastTag0)) {
// we close the current tag
if (lastTag0.equals("<title>")) {
buffer.append("</titlePart>\n\t</docTitle>\n");
} else if (lastTag0.equals("<author>")) {
buffer.append("</docAuthor>\n\t</byline>\n");
} else if (lastTag0.equals("<location>")) {
buffer.append("</address>\n");
} else if (lastTag0.equals("<meeting>")) {
buffer.append("</meeting>\n");
} else if (lastTag0.equals("<date>")) {
buffer.append("</date>\n");
} else if (lastTag0.equals("<abstract>")) {
buffer.append("</div>\n");
} else if (lastTag0.equals("<address>")) {
buffer.append("</address>\n");
} else if (lastTag0.equals("<date-submission>")) {
buffer.append("</date>\n");
} else if (lastTag0.equals("<booktitle>")) {
buffer.append("</booktitle>\n");
} else if (lastTag0.equals("<pages>")) {
buffer.append("</pages>\n");
} else if (lastTag0.equals("<email>")) {
buffer.append("</email>\n");
} else if (lastTag0.equals("<publisher>")) {
buffer.append("</publisher>\n");
} else if (lastTag0.equals("<institution>")) {
buffer.append("</affiliation>\n\t</byline>\n");
} else if (lastTag0.equals("<keyword>")) {
buffer.append("</keyword>\n");
} else if (lastTag0.equals("<affiliation>")) {
buffer.append("</affiliation>\n\t</byline>\n");
} else if (lastTag0.equals("<note>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<reference>")) {
buffer.append("</reference>\n");
} else if (lastTag0.equals("<copyright>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<funding>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<entitle>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<submission>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<dedication>")) {
buffer.append("</dedication>\n");
} else if (lastTag0.equals("<web>")) {
buffer.append("</ptr>\n");
} else if (lastTag0.equals("<phone>")) {
buffer.append("</phone>\n");
} else if (lastTag0.equals("<pubnum>")) {
buffer.append("</idno>\n");
} else if (lastTag0.equals("<degree>")) {
buffer.append("</note>\n");
} /*else if (lastTag0.equals("<intro>")) {
buffer.append("</p>\n");
}*/ else if (lastTag0.equals("<editor>")) {
buffer.append("</editor>\n");
} else if (lastTag0.equals("<version>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<doctype>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<date-download>")) {
buffer.append("</date>\n");
} else if (lastTag0.equals("<group>")) {
buffer.append("</note>\n");
} else if (lastTag0.equals("<availability>")) {
buffer.append("</note>\n");
}
}
}
private boolean writeField(StringBuilder buffer, String s1, String lastTag0, String s2, String field, String outField, boolean addSpace) {
boolean result = false;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
result = true;
            if (s1.equals(lastTag0) || s1.equals("I-" + lastTag0)) {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
} else
buffer.append("\n\t").append(outField).append(s2);
}
return result;
}
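    /*
     * Illustrative example (not part of the original source): given s1 = "I-<title>",
     * lastTag0 = null and s2 = "Deep", the call
     *
     *   writeField(buffer, s1, lastTag0, s2, "<title>", "<docTitle>\n\t<titlePart>", false);
     *
     * appends "\n\t<docTitle>\n\t<titlePart>Deep" to the buffer; subsequent tokens
     * labelled "<title>" are appended as plain text, and testClosingTag(...) later
     * emits "</titlePart>\n\t</docTitle>\n" when the label changes.
     */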
/**
     * Consolidate the extracted header metadata of the document based on access to
     * external bibliographic databases.
     *
     * @param resHeader original biblio item
     * @param consolidate consolidation mode: 0 none, 1 or 3 correct the extracted fields, 2 inject identifiers only
* @return consolidated biblio item
*/
public BiblioItem consolidateHeader(BiblioItem resHeader, int consolidate) {
if (consolidate == 0) {
// no consolidation
return resHeader;
}
Consolidation consolidator = null;
try {
consolidator = Consolidation.getInstance();
if (consolidator.getCntManager() == null)
consolidator.setCntManager(cntManager);
BiblioItem bib = consolidator.consolidate(resHeader, null, consolidate);
if (bib != null) {
if (consolidate == 1 || consolidate == 3)
BiblioItem.correct(resHeader, bib);
else if (consolidate == 2)
BiblioItem.injectIdentifiers(resHeader, bib);
}
} catch (Exception e) {
throw new GrobidException("An exception occured while running bibliographical data consolidation.", e);
}
return resHeader;
}
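    /*
     * Usage sketch (illustrative only): with consolidate = 1 (or 3) the extracted
     * fields are corrected against the consolidated record, while with
     * consolidate = 2 only the identifiers (e.g. DOI) are injected:
     *
     *   BiblioItem corrected = consolidateHeader(resHeader, 1);
     */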
@Override
public void close() throws IOException {
super.close();
}
}
| 67,033 | 45.746165 | 199 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/FigureParser.java
|
package org.grobid.core.engines;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.Figure;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.TextUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.lang3.tuple.Pair;
import java.util.List;
import static org.grobid.core.engines.label.TaggingLabels.*;
class FigureParser extends AbstractParser {
private static final Logger LOGGER = LoggerFactory.getLogger(FigureParser.class);
FigureParser() {
super(GrobidModels.FIGURE);
}
/**
* The processing here is called from the full text parser in cascade.
     * Start and end positions in the higher-level tokenization are indicated in
* the resulting Figure object.
*/
public Figure processing(List<LayoutToken> tokenizationFigure, String featureVector) {
String res;
try {
//System.out.println("---------------------featureVector-----------------------");
//System.out.println(featureVector);
            res = label(featureVector);
//System.out.println("---------------------res-----------------------");
//System.out.println(res);
} catch (Exception e) {
throw new GrobidException("CRF labeling with figure model fails.", e);
}
if (res == null) {
return null;
}
return getExtractionResult(tokenizationFigure, res);
}
private Figure getExtractionResult(List<LayoutToken> tokenizations, String result) {
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FIGURE, result, tokenizations);
List<TaggingTokenCluster> clusters = clusteror.cluster();
Figure figure = new Figure();
figure.setLayoutTokens(tokenizations);
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
String clusterContent = LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(cluster.concatTokens()));
if (clusterLabel.equals(FIG_DESC)) {
figure.appendCaption(clusterContent);
figure.appendCaptionLayoutTokens(cluster.concatTokens());
} else if (clusterLabel.equals(FIG_HEAD)) {
figure.appendHeader(clusterContent);
} else if (clusterLabel.equals(FIG_LABEL)) {
figure.appendLabel(clusterContent);
//label should also go to head
figure.appendHeader(" " + clusterContent + " ");
} else if (clusterLabel.equals(FIG_OTHER)) {
} else if (clusterLabel.equals(FIG_CONTENT)) {
figure.appendContent(clusterContent);
} else {
LOGGER.warn("Unexpected figure model label - " + clusterLabel.getLabel() + " for " + clusterContent);
}
}
return figure;
}
/**
* The training data creation is called from the full text training creation in cascade.
*/
public Pair<String, String> createTrainingData(List<LayoutToken> tokenizations,
String featureVector, String id) {
//System.out.println(tokenizations.toString() + "\n" );
String res = null;
try {
res = label(featureVector);
} catch (Exception e) {
LOGGER.error("CRF labeling in FigureParser fails.", e);
}
if (res == null) {
return Pair.of(null, featureVector);
}
//System.out.println(res + "\n" );
List<Pair<String, String>> labeled = GenericTaggerUtils.getTokensAndLabels(res);
StringBuilder sb = new StringBuilder();
int tokPtr = 0;
boolean addSpace = false;
boolean addEOL = false;
String lastTag = null;
boolean figOpen = false;
for (Pair<String, String> l : labeled) {
String tok = l.getLeft();
String label = l.getRight();
int tokPtr2 = tokPtr;
for (; tokPtr2 < tokenizations.size(); tokPtr2++) {
if (tokenizations.get(tokPtr2).getText().equals(" ")) {
addSpace = true;
} else if (tokenizations.get(tokPtr2).getText().equals("\n") ||
tokenizations.get(tokPtr).getText().equals("\r")) {
addEOL = true;
} else {
break;
}
}
tokPtr = tokPtr2;
if (tokPtr >= tokenizations.size()) {
LOGGER.error("Implementation error: Reached the end of tokenizations, but current token is " + tok);
// we add a space to avoid concatenated text
addSpace = true;
} else {
String tokenizationToken = tokenizations.get(tokPtr).getText();
if ((tokPtr != tokenizations.size()) && !tokenizationToken.equals(tok)) {
// and we add a space by default to avoid concatenated text
addSpace = true;
if (!tok.startsWith(tokenizationToken)) {
                    // this is a very exceptional case due to a sequence of accents/diacritics, in this case we skip
// a shift in the tokenizations list and continue on the basis of the labeled token
// we check one ahead
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken) && (tokenizations.size() > tokPtr+1)) {
// we try another position forward (second hope!)
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken) && (tokenizations.size() > tokPtr+1)) {
// we try another position forward (last hope!)
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we return to the initial position
tokPtr = tokPtr - 3;
tokenizationToken = tokenizations.get(tokPtr).getText();
LOGGER.error("Implementation error, tokens out of sync: " +
tokenizationToken + " != " + tok + ", at position " + tokPtr);
}
}
}
}
// note: if the above condition is true, this is an exceptional case due to a
                    // sequence of accents/diacritics and we can go on as a full string match
}
}
String plainLabel = GenericTaggerUtils.getPlainLabel(label);
String output;
if (lastTag != null) {
testClosingTag(sb, plainLabel, lastTag, addSpace, addEOL);
}
output = writeField(label, lastTag, tok, "<figure_head>", "<head>", addSpace, addEOL, 3);
String figureOpening = " <figure>\n";
if (output != null) {
if (!figOpen) {
sb.append(figureOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<figDesc>", "<figDesc>", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(figureOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<label>", "<label>", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(figureOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<content>", "", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(figureOpening);
figOpen = true;
}
sb.append(output);
//continue;
}
output = writeField(label, lastTag, tok, "<other>", "", addSpace, addEOL, 2);
if (output != null) {
sb.append(output);
}
lastTag = plainLabel;
addSpace = false;
addEOL = false;
tokPtr++;
}
if (figOpen) {
testClosingTag(sb, "", lastTag, addSpace, addEOL);
sb.append(" </figure>\n");
}
return Pair.of(sb.toString(), featureVector);
}
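    /*
     * Illustrative output sketch (made-up caption; the exact segmentation depends on
     * the model output): for a figure zone "Fig. 1: A cat.", the generated training
     * fragment has the shape
     *
     *   <figure>
     *       <head>Fig.</head>
     *       <label>1</label>
     *       <figDesc>: A cat.</figDesc>
     *   </figure>
     */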
public String getTEIHeader(String id) {
return "<tei>\n" +
" <teiHeader>\n" +
" <fileDesc xml:id=\"_" + id + "\"/>\n" +
" </teiHeader>\n" +
" <text xml:lang=\"en\">\n";
}
private boolean testClosingTag(StringBuilder buffer,
String currentTag,
String lastTag,
boolean addSpace,
boolean addEOL) {
boolean res = false;
if (!currentTag.equals(lastTag)) {
res = true;
// we close the current tag
switch (lastTag) {
case "<other>":
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("\n");
break;
case "<figure_head>":
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</head>\n");
break;
case "<figDesc>":
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</figDesc>\n");
break;
case "<label>":
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</label>\n");
break;
case "<content>":
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</content>\n");
break;
default:
res = false;
break;
}
}
return res;
}
private String writeField(String currentTag,
String lastTag,
String token,
String field,
String outField,
boolean addSpace,
boolean addEOL,
int nbIndent) {
String result = null;
if (currentTag.endsWith(field)) {
if (currentTag.endsWith("<other>") || currentTag.endsWith("<content>")) {
result = "";
if (currentTag.startsWith("I-") || (lastTag == null)) {
result += "\n";
for (int i = 0; i < nbIndent; i++) {
result += " ";
}
}
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
result += TextUtilities.HTMLEncode(token);
} else if ((lastTag != null) && currentTag.endsWith(lastTag)) {
result = "";
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
if (currentTag.startsWith("I-"))
result += outField;
result += TextUtilities.HTMLEncode(token);
} else {
result = "";
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
result += "\n";
if (outField.length() > 0) {
for (int i = 0; i < nbIndent; i++) {
result += " ";
}
}
result += outField + TextUtilities.HTMLEncode(token);
}
}
return result;
}
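    /*
     * Illustrative example (not part of the original source): with currentTag =
     * "I-<figDesc>", lastTag = "<figure_head>", token = "A" and nbIndent = 3, the
     * last branch above emits a newline, three indentation steps and "<figDesc>A".
     */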
}
| 13,600 | 39.120944 | 118 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/AbstractParser.java
|
package org.grobid.core.engines;
import org.grobid.core.GrobidModel;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.engines.tagging.*;
import org.grobid.core.utilities.counters.CntManager;
import org.grobid.core.utilities.counters.impl.CntManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
public abstract class AbstractParser implements GenericTagger, Closeable {
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractParser.class);
private GenericTagger genericTagger;
protected GrobidAnalyzer analyzer = GrobidAnalyzer.getInstance();
protected CntManager cntManager = CntManagerFactory.getNoOpCntManager();
protected AbstractParser(GrobidModel model) {
this(model, CntManagerFactory.getNoOpCntManager());
}
protected AbstractParser(GrobidModel model, CntManager cntManager) {
this.cntManager = cntManager;
genericTagger = TaggerFactory.getTagger(model);
}
protected AbstractParser(GrobidModel model, CntManager cntManager, GrobidCRFEngine engine) {
this.cntManager = cntManager;
genericTagger = TaggerFactory.getTagger(model, engine);
}
protected AbstractParser(GrobidModel model, CntManager cntManager, GrobidCRFEngine engine, String architecture) {
this.cntManager = cntManager;
genericTagger = TaggerFactory.getTagger(model, engine, architecture);
}
@Override
public String label(Iterable<String> data) {
return genericTagger.label(data);
}
@Override
public String label(String data) {
return genericTagger.label(data);
}
@Override
public void close() throws IOException {
try {
genericTagger.close();
} catch (Exception e) {
LOGGER.warn("Cannot close the parser: " + e.getMessage());
//no op
}
}
}
| 1,948 | 31.483333 | 117 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/FullTextParser.java
|
package org.grobid.core.engines;
import com.google.common.collect.Iterables;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.io.FileUtils;
import java.nio.charset.StandardCharsets;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Figure;
import org.grobid.core.data.Table;
import org.grobid.core.data.Equation;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentPiece;
import org.grobid.core.document.DocumentPointer;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.document.TEIFormatter;
import org.grobid.core.engines.citations.LabeledReferenceResult;
import org.grobid.core.engines.citations.ReferenceSegmenter;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.engines.counters.CitationParserCounters;
import org.grobid.core.engines.label.SegmentationLabels;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.features.FeatureFactory;
import org.grobid.core.features.FeaturesVectorFulltext;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.*;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.LanguageUtilities;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.KeyGen;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.Consolidation;
import org.grobid.core.utilities.matching.ReferenceMarkerMatcher;
import org.grobid.core.utilities.matching.EntityMatcherException;
import org.grobid.core.engines.citations.CalloutAnalyzer;
import org.grobid.core.engines.citations.CalloutAnalyzer.MarkerType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.SortedSet;
import java.util.StringTokenizer;
import java.util.TreeSet;
import java.util.regex.Matcher;
import static org.apache.commons.lang3.StringUtils.*;
public class FullTextParser extends AbstractParser {
private static final Logger LOGGER = LoggerFactory.getLogger(FullTextParser.class);
protected File tmpPath = null;
// default bins for relative position
private static final int NBBINS_POSITION = 12;
// default bins for inter-block spacing
private static final int NBBINS_SPACE = 5;
// default bins for block character density
private static final int NBBINS_DENSITY = 5;
// projection scale for line length
private static final int LINESCALE = 10;
protected EngineParsers parsers;
public FullTextParser(EngineParsers parsers) {
super(GrobidModels.FULLTEXT);
this.parsers = parsers;
tmpPath = GrobidProperties.getTempPath();
}
public Document processing(File inputPdf,
GrobidAnalysisConfig config) throws Exception {
DocumentSource documentSource =
DocumentSource.fromPdf(inputPdf, config.getStartPage(), config.getEndPage(),
config.getPdfAssetPath() != null, true, false);
return processing(documentSource, config);
}
public Document processing(File inputPdf,
String md5Str,
GrobidAnalysisConfig config) throws Exception {
DocumentSource documentSource =
DocumentSource.fromPdf(inputPdf, config.getStartPage(), config.getEndPage(),
config.getPdfAssetPath() != null, true, false);
documentSource.setMD5(md5Str);
return processing(documentSource, config);
}
/**
* Machine-learning recognition of the complete full text structures.
*
* @param documentSource input
* @param config config
* @return the document object with built TEI
*/
public Document processing(DocumentSource documentSource,
GrobidAnalysisConfig config) {
if (tmpPath == null) {
throw new GrobidResourceException("Cannot process pdf file, because temp path is null.");
}
if (!tmpPath.exists()) {
throw new GrobidResourceException("Cannot process pdf file, because temp path '" +
tmpPath.getAbsolutePath() + "' does not exists.");
}
try {
// general segmentation
Document doc = parsers.getSegmentationParser().processing(documentSource, config);
SortedSet<DocumentPiece> documentBodyParts = doc.getDocumentPart(SegmentationLabels.BODY);
// header processing
BiblioItem resHeader = new BiblioItem();
Pair<String, LayoutTokenization> featSeg = null;
// using the segmentation model to identify the header zones
parsers.getHeaderParser().processingHeaderSection(config, doc, resHeader, false);
// The commented part below makes use of the PDF embedded metadata (the so-called XMP) if available
            // as a fallback to set the author and title if they have not been found.
// However tests on PMC set 1942 did not improve recognition. This will have to be re-evaluated with
// another, more diverse, testing set and with further updates of the header model.
// ---> DO NOT DELETE !
/*if (isBlank(resHeader.getTitle()) || isBlank(resHeader.getAuthors()) || CollectionUtils.isEmpty(resHeader.getFullAuthors())) {
// try to exploit PDF embedded metadata (the so-called XMP) if we are still without title/authors
// this is risky as those metadata are highly unreliable, but as last chance, why not :)
Metadata metadata = doc.getMetadata();
if (metadata != null) {
boolean titleUpdated = false;
boolean authorsUpdated = false;
if (isNotBlank(metadata.getTitle()) && isBlank(resHeader.getTitle())) {
if (!endsWithAny(lowerCase(metadata.getTitle()), ".doc", ".pdf", ".tex", ".dvi", ".docx", ".odf", ".odt", ".txt")) {
resHeader.setTitle(metadata.getTitle());
titleUpdated = true;
}
}
if (isNotBlank(metadata.getAuthor())
&& (isBlank(resHeader.getAuthors()) || CollectionUtils.isEmpty(resHeader.getFullAuthors()))) {
resHeader.setAuthors(metadata.getAuthor());
resHeader.setOriginalAuthors(metadata.getAuthor());
authorsUpdated = true;
List<Person> localAuthors = parsers.getAuthorParser().processingHeader(metadata.getAuthor());
if (localAuthors != null) {
for (Person pers : localAuthors) {
resHeader.addFullAuthor(pers);
}
}
}
// if title and author have been updated with embedded PDF metadata, we try to consolidate
// again as required
if ( titleUpdated || authorsUpdated ) {
parsers.getHeaderParser().consolidateHeader(resHeader, config.getConsolidateHeader());
}
}
}*/
// structure the abstract using the fulltext model
if (isNotBlank(resHeader.getAbstract())) {
//List<LayoutToken> abstractTokens = resHeader.getLayoutTokens(TaggingLabels.HEADER_ABSTRACT);
List<LayoutToken> abstractTokens = resHeader.getAbstractTokensWorkingCopy();
if (CollectionUtils.isNotEmpty(abstractTokens)) {
abstractTokens = BiblioItem.cleanAbstractLayoutTokens(abstractTokens);
Pair<String, List<LayoutToken>> abstractProcessed = processShort(abstractTokens, doc);
if (abstractProcessed != null) {
// neutralize figure and table annotations (will be considered as paragraphs)
String labeledAbstract = abstractProcessed.getLeft();
labeledAbstract = postProcessFullTextLabeledText(labeledAbstract);
resHeader.setLabeledAbstract(labeledAbstract);
resHeader.setLayoutTokensForLabel(abstractProcessed.getRight(), TaggingLabels.HEADER_ABSTRACT);
}
}
}
// citation processing
// consolidation, if selected, is not done individually for each citation but
// in a second stage for all citations which is much faster
List<BibDataSet> resCitations = parsers.getCitationParser().
processingReferenceSection(doc, parsers.getReferenceSegmenterParser(), 0);
// consolidate the set
if (config.getConsolidateCitations() != 0 && resCitations != null) {
Consolidation consolidator = Consolidation.getInstance();
if (consolidator.getCntManager() == null)
consolidator.setCntManager(Engine.getCntManager());
try {
Map<Integer,BiblioItem> resConsolidation = consolidator.consolidate(resCitations);
for(int i=0; i<resCitations.size(); i++) {
BiblioItem resCitation = resCitations.get(i).getResBib();
BiblioItem bibo = resConsolidation.get(i);
if (bibo != null) {
if (config.getConsolidateCitations() == 1)
BiblioItem.correct(resCitation, bibo);
else if (config.getConsolidateCitations() == 2)
BiblioItem.injectIdentifiers(resCitation, bibo);
}
}
} catch(Exception e) {
throw new GrobidException(
"An exception occured while running consolidation on bibliographical references.", e);
}
}
doc.setBibDataSets(resCitations);
// full text processing
featSeg = getBodyTextFeatured(doc, documentBodyParts);
String resultBody = null;
LayoutTokenization layoutTokenization = null;
List<Figure> figures = null;
List<Table> tables = null;
List<Equation> equations = null;
if (featSeg != null && isNotBlank(featSeg.getLeft())) {
// if featSeg is null, it usually means that no body segment is found in the
// document segmentation
String bodytext = featSeg.getLeft();
layoutTokenization = featSeg.getRight();
//tokenizationsBody = featSeg.getB().getTokenization();
//layoutTokensBody = featSeg.getB().getLayoutTokens();
resultBody = label(bodytext);
// we apply now the figure and table models based on the fulltext labeled output
figures = processFigures(resultBody, layoutTokenization.getTokenization(), doc);
// further parse the caption
for(Figure figure : figures) {
if (CollectionUtils.isNotEmpty(figure.getCaptionLayoutTokens()) ) {
Pair<String, List<LayoutToken>> captionProcess = processShort(figure.getCaptionLayoutTokens(), doc);
figure.setLabeledCaption(captionProcess.getLeft());
figure.setCaptionLayoutTokens(captionProcess.getRight());
}
}
tables = processTables(resultBody, layoutTokenization.getTokenization(), doc);
// further parse the caption
for(Table table : tables) {
if ( CollectionUtils.isNotEmpty(table.getCaptionLayoutTokens()) ) {
Pair<String, List<LayoutToken>> captionProcess = processShort(table.getCaptionLayoutTokens(), doc);
table.setLabeledCaption(captionProcess.getLeft());
table.setCaptionLayoutTokens(captionProcess.getRight());
}
if ( CollectionUtils.isNotEmpty(table.getNoteLayoutTokens())) {
Pair<String, List<LayoutToken>> noteProcess = processShort(table.getNoteLayoutTokens(), doc);
table.setLabeledNote(noteProcess.getLeft());
table.setNoteLayoutTokens(noteProcess.getRight());
}
}
equations = processEquations(resultBody, layoutTokenization.getTokenization(), doc);
} else {
LOGGER.debug("Fulltext model: The featured body is empty");
}
// possible annexes (view as a piece of full text similar to the body)
documentBodyParts = doc.getDocumentPart(SegmentationLabels.ANNEX);
featSeg = getBodyTextFeatured(doc, documentBodyParts);
String resultAnnex = null;
List<LayoutToken> tokenizationsBody2 = null;
if (featSeg != null && isNotEmpty(trim(featSeg.getLeft()))) {
// if featSeg is null, it usually means that no body segment is found in the
// document segmentation
String bodytext = featSeg.getLeft();
tokenizationsBody2 = featSeg.getRight().getTokenization();
resultAnnex = label(bodytext);
//System.out.println(rese);
}
// post-process reference and footnote callouts to keep them consistent (e.g. avoid a footnote
// callout in superscript being labeled by error as a numerical reference callout)
List<MarkerType> markerTypes = null;
if (resultBody != null)
markerTypes = postProcessCallout(resultBody, layoutTokenization);
// final combination
toTEI(doc, // document
resultBody, resultAnnex, // labeled data for body and annex
layoutTokenization, tokenizationsBody2, // tokenization for body and annex
resHeader, // header
figures, tables, equations, markerTypes,
config);
return doc;
} catch (GrobidException e) {
throw e;
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
/**
* Process a simple segment of layout tokens with the full text model.
* Return null if the provided layout tokens are empty or if the structuring failed.
*/
public Pair<String, List<LayoutToken>> processShortNew(List<LayoutToken> tokens, Document doc) {
if (CollectionUtils.isEmpty(tokens))
return null;
SortedSet<DocumentPiece> documentParts = new TreeSet<DocumentPiece>();
// identify continuous sequences of layout tokens in the provided list
int posStartPiece = -1;
int currentOffset = -1;
int startBlockPtr = -1;
LayoutToken previousToken = null;
for(LayoutToken token : tokens) {
if (currentOffset == -1) {
posStartPiece = getDocIndexToken(doc, token);
startBlockPtr = token.getBlockPtr();
} else if (token.getOffset() != currentOffset + previousToken.getText().length()) {
// new DocumentPiece to be added
DocumentPointer dp1 = new DocumentPointer(doc, startBlockPtr, posStartPiece);
DocumentPointer dp2 = new DocumentPointer(doc,
previousToken.getBlockPtr(),
getDocIndexToken(doc, previousToken));
DocumentPiece piece = new DocumentPiece(dp1, dp2);
documentParts.add(piece);
// set index for the next DocumentPiece
posStartPiece = getDocIndexToken(doc, token);
startBlockPtr = token.getBlockPtr();
}
currentOffset = token.getOffset();
previousToken = token;
}
// we still need to add the last document piece
// the conditional below should always be true because the token list is not empty if we reach this point, but paranoia is good when programming
if (posStartPiece != -1) {
DocumentPointer dp1 = new DocumentPointer(doc, startBlockPtr, posStartPiece);
DocumentPointer dp2 = new DocumentPointer(doc,
previousToken.getBlockPtr(),
getDocIndexToken(doc, previousToken));
DocumentPiece piece = new DocumentPiece(dp1, dp2);
documentParts.add(piece);
}
Pair<String, LayoutTokenization> featSeg = getBodyTextFeatured(doc, documentParts);
String res = "";
List<LayoutToken> layoutTokenization = new ArrayList<>();
if (featSeg != null) {
String featuredText = featSeg.getLeft();
LayoutTokenization layouts = featSeg.getRight();
if (layouts != null)
layoutTokenization = layouts.getTokenization();
if (isNotBlank(featuredText)) {
res = label(featuredText);
}
} else
return null;
return Pair.of(res, layoutTokenization);
}
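// Illustrative example (not part of the original code): given tokens whose offsets run
// contiguously as [100..105][106..110] and then jump to [240..244], the loop above emits
// two DocumentPiece ranges, one per contiguous run, because a token whose offset is not
// previousOffset + previousToken.length starts a new piece.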
public Pair<String, List<LayoutToken>> processShort(List<LayoutToken> tokens, Document doc) {
if (CollectionUtils.isEmpty(tokens))
return null;
SortedSet<DocumentPiece> documentParts = new TreeSet<>();
// we need to identify all the continuous chunks of tokens, and ignore the others
List<List<LayoutToken>> tokenChunks = new ArrayList<>();
List<LayoutToken> currentChunk = new ArrayList<>();
int currentPos = 0;
for(LayoutToken token : tokens) {
if (currentChunk.size() != 0) {
int tokenPos = token.getOffset();
if (currentPos != tokenPos) {
// new chunk
tokenChunks.add(currentChunk);
currentChunk = new ArrayList<LayoutToken>();
}
}
currentChunk.add(token);
currentPos = token.getOffset() + token.getText().length();
}
// add last chunk
tokenChunks.add(currentChunk);
for(List<LayoutToken> chunk : tokenChunks) {
int endInd = chunk.size()-1;
int posStartChunk = getDocIndexToken(doc, chunk.get(0));
int posEndChunk = getDocIndexToken(doc, chunk.get(endInd));
DocumentPointer dp1 = new DocumentPointer(doc, chunk.get(0).getBlockPtr(), posStartChunk);
DocumentPointer dp2 = new DocumentPointer(doc, chunk.get(endInd).getBlockPtr(), posEndChunk);
DocumentPiece piece = new DocumentPiece(dp1, dp2);
documentParts.add(piece);
}
Pair<String, LayoutTokenization> featSeg = getBodyTextFeatured(doc, documentParts);
String res = null;
List<LayoutToken> layoutTokenization = null;
if (featSeg != null) {
String featuredText = featSeg.getLeft();
LayoutTokenization layouts = featSeg.getRight();
if (layouts != null)
layoutTokenization = layouts.getTokenization();
if ( (featuredText != null) && (featuredText.trim().length() > 0) ) {
res = label(featuredText);
res = postProcessFullTextLabeledText(res);
}
}
return Pair.of(res, layoutTokenization);
}
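// Illustrative usage (mirrors the caption handling earlier in this class), assuming a
// Figure instance with its caption tokens already attached:
//
// Pair<String, List<LayoutToken>> capt = processShort(figure.getCaptionLayoutTokens(), doc);
// if (capt != null && capt.getLeft() != null) {
//     figure.setLabeledCaption(capt.getLeft());
//     figure.setCaptionLayoutTokens(capt.getRight());
// }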
/**
* Post-process text labeled by the fulltext model on chunks that are known to be text (no table or figure).
* It converts table and figure labels to paragraph labels.
*/
protected static String postProcessFullTextLabeledText(String fulltextLabeledText) {
if (fulltextLabeledText == null)
return null;
StringBuilder result = new StringBuilder();
String[] lines = fulltextLabeledText.split("\n");
String previousLabel = null;
for(int i=0; i<lines.length; i++) {
String line = lines[i];
if (line == null || line.trim().length() == 0)
continue;
String[] pieces = line.split("\t");
String label = pieces[pieces.length-1];
if (label.equals("I-"+TaggingLabels.FIGURE.getLabel()) || label.equals("I-"+TaggingLabels.TABLE.getLabel())) {
if (previousLabel == null || !previousLabel.endsWith(TaggingLabels.PARAGRAPH.getLabel())) {
pieces[pieces.length-1] = "I-"+TaggingLabels.PARAGRAPH.getLabel();
} else {
pieces[pieces.length-1] = TaggingLabels.PARAGRAPH.getLabel();
}
} else if (label.equals(TaggingLabels.FIGURE.getLabel()) || label.equals(TaggingLabels.TABLE.getLabel())) {
pieces[pieces.length-1] = TaggingLabels.PARAGRAPH.getLabel();
}
for(int j=0; j<pieces.length; j++) {
if (j != 0)
result.append("\t");
result.append(pieces[j]);
}
previousLabel = label;
result.append("\n");
}
return result.toString();
}
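// Example of the relabeling above (illustrative; rows are tab-separated
// "token<TAB>...features...<TAB>label", features elided here):
//
// "Figure\tI-<figure>" -> "Figure\tI-<paragraph>" (no paragraph open before)
// "1\t<figure>" -> "1\t<paragraph>"
//
// If the previous label already ends with the paragraph label, the "I-" prefix is
// dropped so the relabeled tokens merge into the open paragraph instead of starting
// a new one.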
static public Pair<String, LayoutTokenization> getBodyTextFeatured(Document doc,
SortedSet<DocumentPiece> documentBodyParts) {
if ((documentBodyParts == null) || (documentBodyParts.size() == 0)) {
return null;
}
FeatureFactory featureFactory = FeatureFactory.getInstance();
StringBuilder fulltext = new StringBuilder();
String currentFont = null;
int currentFontSize = -1;
List<Block> blocks = doc.getBlocks();
if ( (blocks == null) || blocks.size() == 0) {
return null;
}
// vector for features
FeaturesVectorFulltext features;
FeaturesVectorFulltext previousFeatures = null;
ReferenceMarkerMatcher referenceMarkerMatcher = null;
// if bibliographical references are available from the bibliographical reference section, we check whether
// a numbering is associated with the bibliographical references (bib. ref. callouts will then likely be numerical)
String bibRefCalloutType = "UNKNOWN";
List<BibDataSet> bibDataSets = doc.getBibDataSets();
if (bibDataSets != null) {
try {
referenceMarkerMatcher = doc.getReferenceMarkerMatcher();
// we look at the existing extracted labels in the bibliographical section (if available and if any) and set
// the value based on the majority of labels
int nbNumbType = 0;
int nbAuthorType = 0;
for(BibDataSet bibDataSet : bibDataSets) {
if ((bibDataSet == null) || (bibDataSet.getRefSymbol() == null))
continue;
boolean isNumb = referenceMarkerMatcher.isNumberedCitationReference(bibDataSet.getRefSymbol());
if (isNumb) {
nbNumbType++;
continue;
}
boolean isAuthor = referenceMarkerMatcher.isAuthorCitationStyle(bibDataSet.getRefSymbol());
if (isAuthor)
nbAuthorType++;
}
if (nbNumbType > (bibDataSets.size() / 2))
bibRefCalloutType = "NUMBER";
else if (nbAuthorType > (bibDataSets.size() / 2))
bibRefCalloutType = "AUTHOR";
} catch(EntityMatcherException e) {
LOGGER.info("Could not build the bibliographical matcher", e);
}
}
boolean endblock;
boolean endPage = true;
boolean newPage = true;
//boolean start = true;
int mm = 0; // page position
int nn = 0; // document position
double lineStartX = Double.NaN;
boolean indented = false;
int fulltextLength = 0;
int pageLength = 0; // length of the current page
double lowestPos = 0.0;
double spacingPreviousBlock = 0.0;
int currentPage = 0;
List<LayoutToken> layoutTokens = new ArrayList<LayoutToken>();
fulltextLength = getFulltextLength(doc, documentBodyParts, fulltextLength);
//System.out.println("fulltextLength: " + fulltextLength);
for(DocumentPiece docPiece : documentBodyParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
//int blockPos = dp1.getBlockPtr();
for(int blockIndex = dp1.getBlockPtr(); blockIndex <= dp2.getBlockPtr(); blockIndex++) {
//System.out.println("blockIndex: " + blockIndex);
boolean graphicVector = false;
boolean graphicBitmap = false;
Block block = blocks.get(blockIndex);
// length of the page where the current block is
double pageHeight = block.getPage().getHeight();
int localPage = block.getPage().getNumber();
if (localPage != currentPage) {
newPage = true;
currentPage = localPage;
mm = 0;
lowestPos = 0.0;
spacingPreviousBlock = 0.0;
}
/*if (start) {
newPage = true;
start = false;
}*/
boolean newline;
boolean previousNewline = false;
endblock = false;
/*if (endPage) {
newPage = true;
mm = 0;
lowestPos = 0.0;
}*/
if (lowestPos > block.getY()) {
// we have a vertical shift, which can be due to a change of column or other particular layout formatting
spacingPreviousBlock = doc.getMaxBlockSpacing() / 5.0; // default
}
else
spacingPreviousBlock = block.getY() - lowestPos;
String localText = block.getText();
if (TextUtilities.filterLine(localText)) {
continue;
}
/*if (localText != null) {
if (localText.contains("@PAGE")) {
mm = 0;
// pageLength = 0;
endPage = true;
newPage = false;
} else {
endPage = false;
}
}*/
// character density of the block
double density = 0.0;
if ( (block.getHeight() != 0.0) && (block.getWidth() != 0.0) &&
(localText != null) && (!localText.contains("@PAGE")) &&
(!localText.contains("@IMAGE")) )
density = (double)localText.length() / (block.getHeight() * block.getWidth());
// check if we have a graphical object connected to the current block
List<GraphicObject> localImages = Document.getConnectedGraphics(block, doc);
if (localImages != null) {
for(GraphicObject localImage : localImages) {
if (localImage.getType() == GraphicObjectType.BITMAP)
graphicBitmap = true;
if (localImage.getType() == GraphicObjectType.VECTOR || localImage.getType() == GraphicObjectType.VECTOR_BOX)
graphicVector = true;
}
}
List<LayoutToken> tokens = block.getTokens();
if (tokens == null) {
continue;
}
int n = 0;// token position in current block
if (blockIndex == dp1.getBlockPtr()) {
// n = dp1.getTokenDocPos() - block.getStartToken();
n = dp1.getTokenBlockPos();
}
int lastPos = tokens.size();
// if it's the last block of a document piece, it may end earlier
if (blockIndex == dp2.getBlockPtr()) {
lastPos = dp2.getTokenBlockPos()+1;
if (lastPos > tokens.size()) {
LOGGER.warn("DocumentPointer for block " + blockIndex + " points to " +
dp2.getTokenBlockPos() + " token, but block token size is " +
tokens.size());
lastPos = tokens.size();
}
}
while (n < lastPos) {
if (blockIndex == dp2.getBlockPtr()) {
//if (n > block.getEndToken()) {
if (n > dp2.getTokenDocPos() - block.getStartToken()) {
break;
}
}
LayoutToken token = tokens.get(n);
layoutTokens.add(token);
features = new FeaturesVectorFulltext();
features.token = token;
double coordinateLineY = token.getY();
String text = token.getText();
if ( (text == null) || (text.length() == 0)) {
n++;
//mm++;
//nn++;
continue;
}
//text = text.replaceAll("\\s+", "");
text = text.replace(" ", "");
if (text.length() == 0) {
n++;
mm++;
nn++;
continue;
}
if (text.equals("\n")) {
newline = true;
previousNewline = true;
n++;
mm++;
nn++;
continue;
} else
newline = false;
// final sanitisation and filtering
text = text.replaceAll("[ \n]", "");
if (TextUtilities.filterLine(text)) {
n++;
continue;
}
if (previousNewline) {
newline = true;
previousNewline = false;
if ((token != null) && (previousFeatures != null)) {
double previousLineStartX = lineStartX;
lineStartX = token.getX();
double characterWidth = token.width / text.length();
if (!Double.isNaN(previousLineStartX)) {
if (previousLineStartX - lineStartX > characterWidth)
indented = false;
else if (lineStartX - previousLineStartX > characterWidth)
indented = true;
// Indentation ends if line start is > 1 character width to the left of previous line start
// Indentation starts if line start is > 1 character width to the right of previous line start
// Otherwise indentation is unchanged
}
}
}
features.string = text;
if (graphicBitmap) {
features.bitmapAround = true;
}
if (graphicVector) {
features.vectorAround = true;
}
if (newline) {
features.lineStatus = "LINESTART";
if (token != null)
lineStartX = token.getX();
// make sure the previous token closes a line, unless it is itself starting a line
if (previousFeatures != null) {
if (!previousFeatures.lineStatus.equals("LINESTART"))
previousFeatures.lineStatus = "LINEEND";
}
}
Matcher m0 = featureFactory.isPunct.matcher(text);
if (m0.find()) {
features.punctType = "PUNCT";
}
if (text.equals("(") || text.equals("[")) {
features.punctType = "OPENBRACKET";
} else if (text.equals(")") || text.equals("]")) {
features.punctType = "ENDBRACKET";
} else if (text.equals(".")) {
features.punctType = "DOT";
} else if (text.equals(",")) {
features.punctType = "COMMA";
} else if (text.equals("-")) {
features.punctType = "HYPHEN";
} else if (text.equals("\"") || text.equals("\'") || text.equals("`")) {
features.punctType = "QUOTE";
}
if (indented) {
features.alignmentStatus = "LINEINDENT";
}
else {
features.alignmentStatus = "ALIGNEDLEFT";
}
if (n == 0) {
features.lineStatus = "LINESTART";
// make sure the previous token closes a line, unless it is itself starting a line
if (previousFeatures != null) {
if (!previousFeatures.lineStatus.equals("LINESTART"))
previousFeatures.lineStatus = "LINEEND";
}
if (token != null)
lineStartX = token.getX();
features.blockStatus = "BLOCKSTART";
} else if (n == tokens.size() - 1) {
features.lineStatus = "LINEEND";
previousNewline = true;
features.blockStatus = "BLOCKEND";
endblock = true;
} else {
// look ahead...
boolean endline = false;
int ii = 1;
boolean endloop = false;
while ((n + ii < tokens.size()) && (!endloop)) {
LayoutToken tok = tokens.get(n + ii);
if (tok != null) {
String nextText = tok.getText();
if (nextText != null) {
if (nextText.equals("\n")) {
endline = true;
endloop = true;
} else {
// only a real lookahead token (not an image/page placeholder) ends the scan
if ((nextText.length() != 0)
&& (!(nextText.startsWith("@IMAGE")))
&& (!(nextText.startsWith("@PAGE")))
&& (!nextText.contains(".pbm"))
&& (!nextText.contains(".svg"))
&& (!nextText.contains(".png"))
&& (!nextText.contains(".jpg"))) {
endloop = true;
}
}
}
}
if (n + ii == tokens.size() - 1) {
endblock = true;
endline = true;
}
ii++;
}
if ((!endline) && !(newline)) {
features.lineStatus = "LINEIN";
}
else if (!newline) {
features.lineStatus = "LINEEND";
previousNewline = true;
}
if ((!endblock) && (features.blockStatus == null))
features.blockStatus = "BLOCKIN";
else if (features.blockStatus == null) {
features.blockStatus = "BLOCKEND";
//endblock = true;
}
}
if (text.length() == 1) {
features.singleChar = true;
}
if (Character.isUpperCase(text.charAt(0))) {
features.capitalisation = "INITCAP";
}
if (featureFactory.test_all_capital(text)) {
features.capitalisation = "ALLCAP";
}
if (featureFactory.test_digit(text)) {
features.digit = "CONTAINSDIGITS";
}
Matcher m = featureFactory.isDigit.matcher(text);
if (m.find()) {
features.digit = "ALLDIGIT";
}
if (currentFont == null) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else if (!currentFont.equals(token.getFont())) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else
features.fontStatus = "SAMEFONT";
int newFontSize = (int) token.getFontSize();
if (currentFontSize == -1) {
currentFontSize = newFontSize;
features.fontSize = "HIGHERFONT";
} else if (currentFontSize == newFontSize) {
features.fontSize = "SAMEFONTSIZE";
} else if (currentFontSize < newFontSize) {
features.fontSize = "HIGHERFONT";
currentFontSize = newFontSize;
} else if (currentFontSize > newFontSize) {
features.fontSize = "LOWERFONT";
currentFontSize = newFontSize;
}
if (token.getBold())
features.bold = true;
if (token.getItalic())
features.italic = true;
if (features.capitalisation == null)
features.capitalisation = "NOCAPS";
if (features.digit == null)
features.digit = "NODIGIT";
if (features.punctType == null)
features.punctType = "NOPUNCT";
features.relativeDocumentPosition = featureFactory
.linearScaling(nn, fulltextLength, NBBINS_POSITION);
// System.out.println(mm + " / " + pageLength);
features.relativePagePositionChar = featureFactory
.linearScaling(mm, pageLength, NBBINS_POSITION);
int pagePos = featureFactory
.linearScaling(coordinateLineY, pageHeight, NBBINS_POSITION);
if (pagePos > NBBINS_POSITION)
pagePos = NBBINS_POSITION;
features.relativePagePosition = pagePos;
//System.out.println((coordinateLineY) + " " + (pageHeight) + " " + NBBINS_POSITION + " " + pagePos);
if (spacingPreviousBlock != 0.0) {
features.spacingWithPreviousBlock = featureFactory
.linearScaling(spacingPreviousBlock - doc.getMinBlockSpacing(),
doc.getMaxBlockSpacing() - doc.getMinBlockSpacing(), NBBINS_SPACE);
}
if (density != -1.0) {
features.characterDensity = featureFactory
.linearScaling(density - doc.getMinCharacterDensity(), doc.getMaxCharacterDensity() - doc.getMinCharacterDensity(), NBBINS_DENSITY);
//System.out.println((density-doc.getMinCharacterDensity()) + " " + (doc.getMaxCharacterDensity()-doc.getMinCharacterDensity()) + " " + NBBINS_DENSITY + " " + features.characterDensity);
}
features.calloutType = bibRefCalloutType;
// check if the token is a known bib ref label
if ((referenceMarkerMatcher != null) && ( referenceMarkerMatcher.isKnownLabel(text) || referenceMarkerMatcher.isKnownFirstAuthor(text) ))
features.calloutKnown = true;
if (token.isSuperscript()) {
features.superscript = true;
}
// fulltext.append(features.printVector());
if (previousFeatures != null) {
if (features.blockStatus.equals("BLOCKSTART") &&
previousFeatures.blockStatus.equals("BLOCKIN")) {
// this is a post-correction due to the fact that the last character of a block
// can be a space or EOL character
previousFeatures.blockStatus = "BLOCKEND";
previousFeatures.lineStatus = "LINEEND";
}
fulltext.append(previousFeatures.printVector());
}
n++;
mm += text.length();
nn += text.length();
previousFeatures = features;
}
// lowest position of the block
lowestPos = block.getY() + block.getHeight();
//blockPos++;
}
}
if (previousFeatures != null) {
fulltext.append(previousFeatures.printVector());
}
return Pair.of(fulltext.toString(),
new LayoutTokenization(layoutTokens));
}
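// Note on the relative-position features above (illustrative, assuming
// FeatureFactory.linearScaling(value, total, nbBins) bins value/total linearly into
// [0, nbBins]): a token located 3000 characters into a 12000-character body with
// NBBINS_POSITION = 12 would get bin 3 for relativeDocumentPosition.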
/**
* Evaluate the length of the fulltext
*/
private static int getFulltextLength(Document doc, SortedSet<DocumentPiece> documentBodyParts, int fulltextLength) {
for(DocumentPiece docPiece : documentBodyParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
int tokenStart = dp1.getTokenDocPos();
int tokenEnd = dp2.getTokenDocPos();
for (int i = tokenStart; i <= tokenEnd && i < doc.getTokenizations().size(); i++) {
//tokenizationsBody.add(tokenizations.get(i));
fulltextLength += doc.getTokenizations().get(i).getText().length();
}
}
return fulltextLength;
}
/**
* Return the index of a token in a document tokenization
*/
private static int getDocIndexToken(Document doc, LayoutToken token) {
int blockPtr = token.getBlockPtr();
Block block = doc.getBlocks().get(blockPtr);
int startTokenBlockPos = block.getStartToken();
List<LayoutToken> tokens = doc.getTokenizations();
int i = startTokenBlockPos;
for(; i < tokens.size(); i++) {
int offset = tokens.get(i).getOffset();
if (offset >= token.getOffset())
break;
}
return i;
}
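// Illustrative example (not part of the original code): if the token's block starts at
// document token index 1543 and the offsets align four tokens later, the method returns
// 1547, i.e. the absolute index in doc.getTokenizations(), found by scanning forward
// from the block's first token until the offsets match.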
/**
* Process the specified pdf and format the result as training data for all the models.
*
* @param inputFile input PDF file
* @param pathFullText output path for the raw feature (training) files
* @param pathTEI output path for the TEI training files
* @param id document id used in the generated training files, or -1 for none
*/
public Document createTraining(File inputFile,
String pathFullText,
String pathTEI,
int id) {
if (tmpPath == null)
throw new GrobidResourceException("Cannot process pdf file, because temp path is null.");
if (!tmpPath.exists()) {
throw new GrobidResourceException("Cannot process pdf file, because temp path '" +
tmpPath.getAbsolutePath() + "' does not exist.");
}
DocumentSource documentSource = null;
try {
if (!inputFile.exists()) {
throw new GrobidResourceException("Cannot train for fulltext, becuase file '" +
inputFile.getAbsolutePath() + "' does not exists.");
}
String pdfFileName = inputFile.getName();
// SEGMENTATION MODEL
documentSource = DocumentSource.fromPdf(inputFile, -1, -1, false, true, true);
Document doc = new Document(documentSource);
doc.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
if (doc.getBlocks() == null) {
throw new Exception("PDF parsing resulted in empty content");
}
doc.produceStatistics();
String fulltext = //getAllTextFeatured(doc, false);
parsers.getSegmentationParser().getAllLinesFeatured(doc);
//List<LayoutToken> tokenizations = doc.getTokenizationsFulltext();
List<LayoutToken> tokenizations = doc.getTokenizations();
// we first write the full text untagged (but featurized with the segmentation features)
String outPathFulltext = pathFullText + File.separator +
pdfFileName.replace(".pdf", ".training.segmentation");
Writer writer = new OutputStreamWriter(new FileOutputStream(new File(outPathFulltext), false), StandardCharsets.UTF_8);
writer.write(fulltext + "\n");
writer.close();
// also write the raw text as seen before segmentation
StringBuffer rawtxt = new StringBuffer();
for(LayoutToken txtline : tokenizations) {
rawtxt.append(txtline.getText());
}
String outPathRawtext = pathFullText + File.separator +
pdfFileName.replace(".pdf", ".training.segmentation.rawtxt");
FileUtils.writeStringToFile(new File(outPathRawtext), rawtxt.toString(), StandardCharsets.UTF_8);
if (isNotBlank(fulltext)) {
String rese = parsers.getSegmentationParser().label(fulltext);
StringBuffer bufferFulltext = parsers.getSegmentationParser().trainingExtraction(rese, tokenizations, doc);
// write the TEI file to reflect the exact layout of the text as extracted from the pdf
writer = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator +
pdfFileName.replace(".pdf", ".training.segmentation.tei.xml")), false), StandardCharsets.UTF_8);
writer.write("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader>\n\t\t<fileDesc xml:id=\"" + id +
"\"/>\n\t</teiHeader>\n\t<text xml:lang=\"en\">\n");
writer.write(bufferFulltext.toString());
writer.write("\n\t</text>\n</tei>\n");
writer.close();
}
doc = parsers.getSegmentationParser().processing(documentSource,
GrobidAnalysisConfig.defaultInstance());
// REFERENCE SEGMENTER MODEL
String referencesStr = doc.getDocumentPartText(SegmentationLabels.REFERENCES);
if (!referencesStr.isEmpty()) {
//String tei = parsers.getReferenceSegmenterParser().createTrainingData2(referencesStr, id);
Pair<String,String> result =
parsers.getReferenceSegmenterParser().createTrainingData(doc, id);
String tei = result.getLeft();
String raw = result.getRight();
if (tei != null) {
String outPath = pathTEI + "/" +
pdfFileName.replace(".pdf", ".training.references.referenceSegmenter.tei.xml");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPath), false), StandardCharsets.UTF_8);
writer.write(tei + "\n");
writer.close();
// generate also the raw vector file with the features
outPath = pathTEI + "/" + pdfFileName.replace(".pdf", ".training.references.referenceSegmenter");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPath), false), StandardCharsets.UTF_8);
writer.write(raw + "\n");
writer.close();
// also write the raw text as it is before reference segmentation
outPathRawtext = pathTEI + "/" + pdfFileName
.replace(".pdf", ".training.references.referenceSegmenter.rawtxt");
Writer strWriter = new OutputStreamWriter(
new FileOutputStream(new File(outPathRawtext), false), StandardCharsets.UTF_8);
strWriter.write(referencesStr + "\n");
strWriter.close();
}
}
// BIBLIO REFERENCE MODEL
StringBuilder allBufferReference = new StringBuilder();
if (!referencesStr.isEmpty()) {
cntManager.i(CitationParserCounters.NOT_EMPTY_REFERENCES_BLOCKS);
}
ReferenceSegmenter referenceSegmenter = parsers.getReferenceSegmenterParser();
List<LabeledReferenceResult> references = referenceSegmenter.extract(doc);
List<BibDataSet> resCitations = parsers.getCitationParser().
processingReferenceSection(doc, referenceSegmenter, 0);
doc.setBibDataSets(resCitations);
if (references == null) {
cntManager.i(CitationParserCounters.NULL_SEGMENTED_REFERENCES_LIST);
} else {
cntManager.i(CitationParserCounters.SEGMENTED_REFERENCES, references.size());
List<String> allInput = new ArrayList<String>();
for (LabeledReferenceResult ref : references) {
allInput.add(ref.getReferenceText());
}
StringBuilder bufferReference = parsers.getCitationParser().trainingExtraction(allInput);
if (bufferReference != null) {
bufferReference.append("\n");
Writer writerReference = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator +
pdfFileName.replace(".pdf", ".training.references.tei.xml")), false), StandardCharsets.UTF_8);
writerReference.write("<?xml version=\"1.0\" ?>\n<TEI xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\" " +
"xmlns:xlink=\"http://www.w3.org/1999/xlink\" " +
"\n xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">\n");
if (id == -1) {
writerReference.write("\t<teiHeader/>\n\t<text>\n\t\t<front/>\n\t\t<body/>\n\t\t<back>\n");
}
else {
writerReference.write("\t<teiHeader>\n\t\t<fileDesc xml:id=\"" + id +
"\"/>\n\t</teiHeader>\n\t<text>\n\t\t<front/>\n\t\t<body/>\n\t\t<back>\n");
}
writerReference.write("<listBibl>\n");
writerReference.write(bufferReference.toString());
writerReference.write("\t\t</listBibl>\n\t</back>\n\t</text>\n</TEI>\n");
writerReference.close();
// BIBLIO REFERENCE AUTHOR NAMES
Writer writerName = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator +
pdfFileName.replace(".pdf", ".training.references.authors.tei.xml")), false), StandardCharsets.UTF_8);
writerName.write("<?xml version=\"1.0\" ?>\n<TEI xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\" " +
"xmlns:xlink=\"http://www.w3.org/1999/xlink\" " +
"\n xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">\n");
writerName.write("\t<teiHeader>\n\t\t<fileDesc>\n\t\t\t<sourceDesc>\n" +
"\t\t\t\t<biblStruct>\n\t\t\t\t\t<analytic>\n\n");
for (LabeledReferenceResult ref : references) {
if ( (ref.getReferenceText() != null) && (ref.getReferenceText().trim().length() > 0) ) {
BiblioItem bib = parsers.getCitationParser().processingString(ref.getReferenceText(), 0);
String authorSequence = bib.getAuthors();
if ((authorSequence != null) && (authorSequence.trim().length() > 0) ) {
/*List<String> inputs = new ArrayList<String>();
inputs.add(authorSequence);*/
StringBuilder bufferName = parsers.getAuthorParser().trainingExtraction(authorSequence, false);
if ( (bufferName != null) && (bufferName.length()>0) ) {
writerName.write("\n\t\t\t\t\t\t<author>");
writerName.write(bufferName.toString());
writerName.write("</author>\n");
}
}
}
}
writerName.write("\n\t\t\t\t\t</analytic>");
writerName.write("\n\t\t\t\t</biblStruct>\n\t\t\t</sourceDesc>\n\t\t</fileDesc>");
writerName.write("\n\t</teiHeader>\n</TEI>\n");
writerName.close();
}
}
// FULLTEXT MODEL (body)
SortedSet<DocumentPiece> documentBodyParts = doc.getDocumentPart(SegmentationLabels.BODY);
if (documentBodyParts != null) {
Pair<String, LayoutTokenization> featSeg = getBodyTextFeatured(doc, documentBodyParts);
if (featSeg != null) {
// if no textual body part found, nothing to generate
String bodytext = featSeg.getLeft();
List<LayoutToken> tokenizationsBody = featSeg.getRight().getTokenization();
// we write the full text untagged
outPathFulltext = pathFullText + File.separator
+ pdfFileName.replace(".pdf", ".training.fulltext");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPathFulltext), false), StandardCharsets.UTF_8);
writer.write(bodytext + "\n");
writer.close();
// StringTokenizer st = new StringTokenizer(fulltext, "\n");
String rese = label(bodytext);
//System.out.println(rese);
StringBuilder bufferFulltext = trainingExtraction(rese, tokenizationsBody);
// write the TEI file to reflect the exact layout of the text as extracted from the pdf
writer = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator +
pdfFileName.replace(".pdf", ".training.fulltext.tei.xml")), false), StandardCharsets.UTF_8);
if (id == -1) {
writer.write("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader/>\n\t<text xml:lang=\"en\">\n");
}
else {
writer.write("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader>\n\t\t<fileDesc xml:id=\"" + id +
"\"/>\n\t</teiHeader>\n\t<text xml:lang=\"en\">\n");
}
writer.write(bufferFulltext.toString());
writer.write("\n\t</text>\n</tei>\n");
writer.close();
// training data for FIGURES
Pair<String,String> trainingFigure = processTrainingDataFigures(rese, tokenizationsBody, inputFile.getName());
if (trainingFigure.getLeft().trim().length() > 0) {
String outPathFigures = pathFullText + File.separator
+ pdfFileName.replace(".pdf", ".training.figure");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPathFigures), false), StandardCharsets.UTF_8);
writer.write(trainingFigure.getRight() + "\n\n");
writer.close();
String outPathFiguresTEI = pathTEI + File.separator
+ pdfFileName.replace(".pdf", ".training.figure.tei.xml");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPathFiguresTEI), false), StandardCharsets.UTF_8);
writer.write(trainingFigure.getLeft() + "\n");
writer.close();
}
// training data for TABLES
Pair<String,String> trainingTable = processTrainingDataTables(rese, tokenizationsBody, inputFile.getName());
if (trainingTable.getLeft().trim().length() > 0) {
String outPathTables = pathFullText + File.separator
+ pdfFileName.replace(".pdf", ".training.table");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPathTables), false), StandardCharsets.UTF_8);
writer.write(trainingTable.getRight() + "\n\n");
writer.close();
String outPathTablesTEI = pathTEI + File.separator
+ pdfFileName.replace(".pdf", ".training.table.tei.xml");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPathTablesTEI), false), StandardCharsets.UTF_8);
writer.write(trainingTable.getLeft() + "\n");
writer.close();
}
}
}
// HEADER MODEL
SortedSet<DocumentPiece> documentHeaderParts = doc.getDocumentPart(SegmentationLabels.HEADER);
List<LayoutToken> tokenizationsFull = doc.getTokenizations();
if (documentHeaderParts != null) {
List<LayoutToken> headerTokenizations = new ArrayList<LayoutToken>();
for (DocumentPiece docPiece : documentHeaderParts) {
DocumentPointer dp1 = docPiece.getLeft();
DocumentPointer dp2 = docPiece.getRight();
int tokens = dp1.getTokenDocPos();
int tokene = dp2.getTokenDocPos();
for (int i = tokens; i < tokene; i++) {
headerTokenizations.add(tokenizationsFull.get(i));
}
}
Pair<String, List<LayoutToken>> featuredHeader = parsers.getHeaderParser().getSectionHeaderFeatured(doc, documentHeaderParts);
String header = featuredHeader.getLeft();
if ((header != null) && (header.trim().length() > 0)) {
// we write the header untagged
String outPathHeader = pathTEI + File.separator + pdfFileName.replace(".pdf", ".training.header");
writer = new OutputStreamWriter(new FileOutputStream(new File(outPathHeader), false), StandardCharsets.UTF_8);
writer.write(header + "\n");
writer.close();
String rese = parsers.getHeaderParser().label(header);
// buffer for the header block
StringBuilder bufferHeader = parsers.getHeaderParser().trainingExtraction(rese, headerTokenizations);
Language lang = LanguageUtilities.getInstance().runLanguageId(bufferHeader.toString());
if (lang != null) {
doc.setLanguage(lang.getLang());
}
// buffer for the affiliation+address block
StringBuilder bufferAffiliation =
parsers.getAffiliationAddressParser().trainingExtraction(rese, headerTokenizations);
// buffer for the date block
StringBuilder bufferDate = null;
// we need to rebuild the found date string as it appears
String input = "";
int q = 0;
StringTokenizer st = new StringTokenizer(rese, "\n");
while (st.hasMoreTokens() && (q < headerTokenizations.size())) {
String line = st.nextToken();
String theTotalTok = headerTokenizations.get(q).getText();
String theTok = headerTokenizations.get(q).getText();
while (theTok.equals(" ") || theTok.equals("\t") || theTok.equals("\n") || theTok.equals("\r")) {
q++;
if ((q > 0) && (q < headerTokenizations.size())) {
theTok = headerTokenizations.get(q).getText();
theTotalTok += theTok;
}
}
if (line.endsWith("<date>")) {
input += theTotalTok;
}
q++;
}
if (input.trim().length() > 1) {
List<String> inputs = new ArrayList<String>();
inputs.add(input.trim());
bufferDate = parsers.getDateParser().trainingExtraction(inputs);
}
// buffer for the name block
StringBuilder bufferName = null;
// we need to rebuild the found author string as it appears
input = "";
q = 0;
st = new StringTokenizer(rese, "\n");
while (st.hasMoreTokens() && (q < headerTokenizations.size())) {
String line = st.nextToken();
String theTotalTok = headerTokenizations.get(q).getText();
String theTok = headerTokenizations.get(q).getText();
while (theTok.equals(" ") || theTok.equals("\t") || theTok.equals("\n") || theTok.equals("\r")) {
q++;
if ((q > 0) && (q < headerTokenizations.size())) {
theTok = headerTokenizations.get(q).getText();
theTotalTok += theTok;
}
}
if (line.endsWith("<author>")) {
input += theTotalTok;
}
q++;
}
if (input.length() > 1) {
bufferName = parsers.getAuthorParser().trainingExtraction(input, true);
}
// buffer for the reference block
StringBuilder bufferReference = null;
// we need to rebuild the found citation string as it appears
input = "";
q = 0;
st = new StringTokenizer(rese, "\n");
while (st.hasMoreTokens() && (q < headerTokenizations.size())) {
String line = st.nextToken();
String theTotalTok = headerTokenizations.get(q).getText();
String theTok = headerTokenizations.get(q).getText();
while (theTok.equals(" ") || theTok.equals("\t") || theTok.equals("\n") || theTok.equals("\r")) {
q++;
if ((q > 0) && (q < headerTokenizations.size())) {
theTok = headerTokenizations.get(q).getText();
theTotalTok += theTok;
}
}
if (line.endsWith("<reference>")) {
input += theTotalTok;
}
q++;
}
if (input.length() > 1) {
List<String> inputs = new ArrayList<String>();
inputs.add(input.trim());
bufferReference = parsers.getCitationParser().trainingExtraction(inputs);
}
// write the training TEI file for the header, which reflects the exact layout of the text as
// extracted from the pdf
writer = new OutputStreamWriter(new FileOutputStream(new File(pathTEI + File.separator
+ pdfFileName.replace(".pdf", ".training.header.tei.xml")), false), StandardCharsets.UTF_8);
writer.write("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader>\n\t\t<fileDesc xml:id=\""
+ pdfFileName.replace(".pdf", "")
+ "\"/>\n\t</teiHeader>\n\t<text");
if (lang != null) {
writer.write(" xml:lang=\"" + lang.getLang() + "\"");
}
writer.write(">\n\t\t<front>\n");
writer.write(bufferHeader.toString());
writer.write("\n\t\t</front>\n\t</text>\n</tei>\n");
writer.close();
// AFFILIATION-ADDRESS model
if (bufferAffiliation != null) {
if (bufferAffiliation.length() > 0) {
Writer writerAffiliation = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator
+ pdfFileName.replace(".pdf", ".training.header.affiliation.tei.xml")), false), StandardCharsets.UTF_8);
writerAffiliation.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
writerAffiliation.write("\n<tei xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\""
+ " xmlns:xlink=\"http://www.w3.org/1999/xlink\" " + "xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">");
writerAffiliation.write("\n\t<teiHeader>\n\t\t<fileDesc>\n\t\t\t<sourceDesc>");
writerAffiliation.write("\n\t\t\t\t<biblStruct>\n\t\t\t\t\t<analytic>\n\t\t\t\t\t\t<author>\n\n");
writerAffiliation.write(bufferAffiliation.toString());
writerAffiliation.write("\n\t\t\t\t\t\t</author>\n\t\t\t\t\t</analytic>");
writerAffiliation.write("\n\t\t\t\t</biblStruct>\n\t\t\t</sourceDesc>\n\t\t</fileDesc>");
writerAffiliation.write("\n\t</teiHeader>\n</tei>\n");
writerAffiliation.close();
}
}
// DATE MODEL (for dates in header)
if (bufferDate != null) {
if (bufferDate.length() > 0) {
Writer writerDate = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator
+ pdfFileName.replace(".pdf", ".training.header.date.xml")), false), StandardCharsets.UTF_8);
writerDate.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
writerDate.write("<dates>\n");
writerDate.write(bufferDate.toString());
writerDate.write("</dates>\n");
writerDate.close();
}
}
// HEADER AUTHOR NAME model
if (bufferName != null) {
if (bufferName.length() > 0) {
Writer writerName = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator
+ pdfFileName.replace(".pdf", ".training.header.authors.tei.xml")), false), StandardCharsets.UTF_8);
writerName.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
writerName.write("\n<tei xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\"" + " xmlns:xlink=\"http://www.w3.org/1999/xlink\" "
+ "xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">");
writerName.write("\n\t<teiHeader>\n\t\t<fileDesc>\n\t\t\t<sourceDesc>");
writerName.write("\n\t\t\t\t<biblStruct>\n\t\t\t\t\t<analytic>\n\n\t\t\t\t\t\t<author>");
writerName.write("\n\t\t\t\t\t\t\t<persName>\n");
writerName.write(bufferName.toString());
writerName.write("\t\t\t\t\t\t\t</persName>\n");
writerName.write("\t\t\t\t\t\t</author>\n\n\t\t\t\t\t</analytic>");
writerName.write("\n\t\t\t\t</biblStruct>\n\t\t\t</sourceDesc>\n\t\t</fileDesc>");
writerName.write("\n\t</teiHeader>\n</tei>\n");
writerName.close();
}
}
// CITATION MODEL (for bibliographical reference in header)
if (bufferReference != null) {
if (bufferReference.length() > 0) {
Writer writerReference = new OutputStreamWriter(new FileOutputStream(new File(pathTEI +
File.separator
+ pdfFileName.replace(".pdf", ".training.header.reference.xml")), false), StandardCharsets.UTF_8);
writerReference.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
writerReference.write("<citations>\n");
writerReference.write(bufferReference.toString());
writerReference.write("</citations>\n");
writerReference.close();
}
}
}
}
return doc;
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid training" +
" data generation for full text.", e);
} finally {
DocumentSource.close(documentSource, true, true, true);
}
}
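// Illustrative usage (not part of the original code), assuming an initialized instance
// of this parser named "parser":
//
// Document d = parser.createTraining(new File("/path/to/article.pdf"),
//     "/tmp/training/fulltext", "/tmp/training/tei", 1);
//
// This writes one pre-labeled training file per model (segmentation, fulltext, header,
// references, figure, table, ...) under the two output directories.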
/**
* Extract results from a labelled full text in the training format without any string modification.
*
* @param result labeled sequence produced by the full text model
* @param tokenizations layout tokens aligned with the labeled sequence
* @return the TEI training extraction
*/
private StringBuilder trainingExtraction(String result,
List<LayoutToken> tokenizations) {
// this is the main buffer for the whole full text
StringBuilder buffer = new StringBuilder();
try {
StringTokenizer st = new StringTokenizer(result, "\n");
String s1 = null;
String s2 = null;
String lastTag = null;
//System.out.println(tokenizations.toString());
//System.out.println(result);
// current token position
int p = 0;
boolean start = true;
while (st.hasMoreTokens()) {
boolean addSpace = false;
String tok = st.nextToken().trim();
if (tok.length() == 0) {
continue;
}
StringTokenizer stt = new StringTokenizer(tok, " \t");
List<String> localFeatures = new ArrayList<String>();
int i = 0;
boolean newLine = false;
int ll = stt.countTokens();
while (stt.hasMoreTokens()) {
String s = stt.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s); // lexical token
int p0 = p;
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p).t();
if (tokOriginal.equals(" ")
|| tokOriginal.equals("\u00A0")) {
addSpace = true;
}
else if (tokOriginal.equals("\n")) {
newLine = true;
}
else if (tokOriginal.equals(s)) {
strop = true;
}
p++;
}
if (p == tokenizations.size()) {
// either we are at the end of the sequence, or we might have
// a problematic token in the tokenization for some reason
if ((p - p0) > 2) {
// we lose synchronicity, so we reset p for the next token
p = p0;
}
}
} else if (i == ll - 1) {
s1 = s; // current tag
} else {
if (s.equals("LINESTART"))
newLine = true;
localFeatures.add(s);
}
i++;
}
if (newLine && !start) {
buffer.append("<lb/>");
}
String lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
String currentTag0 = null;
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
boolean closeParagraph = false;
if (lastTag != null) {
closeParagraph = testClosingTag(buffer, currentTag0, lastTag0, s1);
}
boolean output;
//output = writeField(buffer, s1, lastTag0, s2, "<header>", "<front>", addSpace, 3);
//if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<other>",
"<note type=\"other\">", addSpace, 3, false);
//}
// for paragraph we must distinguish starting and closing tags
if (!output) {
if (closeParagraph) {
output = writeFieldBeginEnd(buffer, s1, "", s2, "<paragraph>",
"<p>", addSpace, 3, false);
} else {
output = writeFieldBeginEnd(buffer, s1, lastTag, s2, "<paragraph>",
"<p>", addSpace, 3, false);
}
}
/*if (!output) {
if (closeParagraph) {
output = writeField(buffer, s1, "", s2, "<reference_marker>", "<label>", addSpace, 3);
} else
output = writeField(buffer, s1, lastTag0, s2, "<reference_marker>", "<label>", addSpace, 3);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<citation_marker>", "<ref type=\"biblio\">",
addSpace, 3, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<table_marker>", "<ref type=\"table\">",
addSpace, 3, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<equation_marker>", "<ref type=\"formula\">",
addSpace, 3, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<section>",
"<head>", addSpace, 3, false);
}
/*if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<subsection>",
"<head>", addSpace, 3, false);
}*/
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<equation>",
"<formula>", addSpace, 4, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<equation_label>",
"<label>", addSpace, 4, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<figure_marker>",
"<ref type=\"figure\">", addSpace, 3, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<figure>",
"<figure>", addSpace, 3, false);
}
if (!output) {
output = writeField(buffer, s1, lastTag0, s2, "<table>",
"<figure type=\"table\">", addSpace, 3, false);
}
// for item we must distinguish starting and closing tags
if (!output) {
output = writeFieldBeginEnd(buffer, s1, lastTag, s2, "<item>",
"<item>", addSpace, 3, false);
}
lastTag = s1;
if (!st.hasMoreTokens()) {
if (lastTag != null) {
testClosingTag(buffer, "", currentTag0, s1);
}
}
if (start) {
start = false;
}
}
return buffer;
} catch (Exception e) {
e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
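// Illustrative input row handled above (fields separated by spaces or tabs, features
// elided): "Methods ... LINESTART ... I-<section>"
// The first field is the token, the last field the label, and the fields in between the
// feature values; a LINESTART feature triggers a <lb/> in the generated TEI.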
/**
* Write a token to the buffer, opening the corresponding TEI element when the label
* starts a new field and appending inline otherwise.
*
* @param buffer output buffer
* @param s1 current label
* @param lastTag0 previous label, without its "I-" prefix
* @param s2 current token, already HTML-encoded
* @param field label to match
* @param outField TEI opening tag to emit for this field
* @param addSpace true if a space precedes the token
* @param nbIndent indentation depth used when opening a new element
* @param generateIDs true to inject a generated xml:id into the opening tag
* @return true if the token was written as part of this field
*/
public static boolean writeField(StringBuilder buffer,
String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent,
boolean generateIDs) {
boolean result = false;
if (s1 == null) {
return result;
}
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
result = true;
String divID = null;
if (generateIDs) {
divID = KeyGen.getKey().substring(0,7);
if (outField.charAt(outField.length()-2) == '>')
outField = outField.substring(0, outField.length()-2) + " xml:id=\"_"+ divID + "\">";
}
if (s1.equals(lastTag0) || s1.equals("I-" + lastTag0)) {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
}
/*else if (lastTag0 == null) {
for(int i=0; i<nbIndent; i++) {
buffer.append("\t");
}
buffer.append(outField+s2);
}*/
else if (field.equals("<citation_marker>")) {
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
} else if (field.equals("<figure_marker>")) {
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
} else if (field.equals("<table_marker>")) {
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
} else if (field.equals("<equation_marker>")) {
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
} /*else if (field.equals("<label>")) {
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
} */ /*else if (field.equals("<reference_marker>")) {
if (!lastTag0.equals("<reference>") && !lastTag0.equals("<reference_marker>")) {
for (int i = 0; i < nbIndent; i++) {
buffer.append("\t");
}
buffer.append("<bibl>");
}
if (addSpace)
buffer.append(" ").append(outField).append(s2);
else
buffer.append(outField).append(s2);
}*/ else if (lastTag0 == null) {
for (int i = 0; i < nbIndent; i++) {
buffer.append("\t");
}
buffer.append(outField).append(s2);
} else if (!lastTag0.equals("<citation_marker>")
&& !lastTag0.equals("<figure_marker>")
&& !lastTag0.equals("<equation_marker>")
//&& !lastTag0.equals("<figure>")
) {
for (int i = 0; i < nbIndent; i++) {
buffer.append("\t");
}
buffer.append(outField).append(s2);
} else {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
}
}
return result;
}
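// Illustrative behavior (not from the original comments): labeling the token "[12]" as
// <citation_marker> inside an open <paragraph> appends
//
// <ref type="biblio">[12]
//
// inline, without indentation and without closing the paragraph; the matching </ref> is
// emitted later by testClosingTag when the label changes.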
/**
* Write a token to the buffer for fields where the begin and end of the field matter,
* like paragraph or item.
*
* @param buffer output buffer
* @param s1 current label
* @param lastTag0 previous label
* @param s2 current token, already HTML-encoded
* @param field label to match
* @param outField TEI opening tag to emit for this field
* @param addSpace true if a space precedes the token
* @param nbIndent indentation depth used when opening a new element
* @param generateIDs true to inject a generated xml:id into the opening tag
* @return true if the token was written as part of this field
*/
public static boolean writeFieldBeginEnd(StringBuilder buffer,
String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent,
boolean generateIDs) {
boolean result = false;
if (s1 == null) {
return false;
}
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
result = true;
if (lastTag0 == null) {
lastTag0 = "";
}
String divID;
if (generateIDs) {
divID = KeyGen.getKey().substring(0,7);
if (outField.charAt(outField.length()-2) == '>')
outField = outField.substring(0, outField.length()-2) + " xml:id=\"_"+ divID + "\">";
}
if (lastTag0.equals("I-" + field)) {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
} else if (lastTag0.equals(field) && s1.equals(field)) {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
} else if (!lastTag0.endsWith("<citation_marker>") && !lastTag0.endsWith("<figure_marker>")
&& !lastTag0.endsWith("<table_marker>") && !lastTag0.endsWith("<equation_marker>")) {
for (int i = 0; i < nbIndent; i++) {
buffer.append("\t");
}
buffer.append(outField).append(s2);
} else {
if (addSpace)
buffer.append(" ").append(s2);
else
buffer.append(s2);
}
}
return result;
}
/**
* Close the TEI element opened for the previous label when the current label differs.
*
* @param buffer output buffer
* @param currentTag0 current label, without its "I-" prefix
* @param lastTag0 previous label, without its "I-" prefix
* @param currentTag current label, with its "I-" prefix if any
* @return true if a paragraph was closed
*/
private static boolean testClosingTag(StringBuilder buffer,
String currentTag0,
String lastTag0,
String currentTag) {
boolean res = false;
// reference_marker and citation_marker are two exceptions because they can be embedded
if (!currentTag0.equals(lastTag0) || currentTag.equals("I-<paragraph>") || currentTag.equals("I-<item>")) {
if (currentTag0.equals("<citation_marker>") || currentTag0.equals("<equation_marker>") ||
currentTag0.equals("<figure_marker>") || currentTag0.equals("<table_marker>")) {
return res;
}
res = false;
// we close the current tag
if (lastTag0.equals("<other>")) {
buffer.append("</note>\n\n");
} else if (lastTag0.equals("<paragraph>") &&
!currentTag0.equals("<citation_marker>") &&
!currentTag0.equals("<table_marker>") &&
!currentTag0.equals("<equation_marker>") &&
!currentTag0.equals("<figure_marker>")
) {
buffer.append("</p>\n\n");
res = true;
} else if (lastTag0.equals("<section>")) {
buffer.append("</head>\n\n");
} else if (lastTag0.equals("<subsection>")) {
buffer.append("</head>\n\n");
} else if (lastTag0.equals("<equation>")) {
buffer.append("</formula>\n\n");
} else if (lastTag0.equals("<equation_label>")) {
buffer.append("</label>\n\n");
} else if (lastTag0.equals("<table>")) {
buffer.append("</figure>\n\n");
} else if (lastTag0.equals("<figure>")) {
buffer.append("</figure>\n\n");
} else if (lastTag0.equals("<item>")) {
buffer.append("</item>\n\n");
} else if (lastTag0.equals("<citation_marker>") ||
lastTag0.equals("<figure_marker>") ||
lastTag0.equals("<table_marker>") ||
lastTag0.equals("<equation_marker>")) {
buffer.append("</ref>");
// Make sure that paragraph is closed when markers are at the end of it
if (!currentTag0.equals("<paragraph>") &&
(!currentTag0.equals("<citation_marker>") ||
!currentTag0.equals("<figure_marker>") ||
!currentTag0.equals("<table_marker>") ||
!currentTag0.equals("<equation_marker>")
)
) {
buffer.append("</p>\n\n");
}
} else {
res = false;
}
}
return res;
}
/**
* Process figures identified by the full text model
*/
protected List<Figure> processFigures(String rese, List<LayoutToken> layoutTokens, Document doc) {
List<Figure> results = new ArrayList<>();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, rese, layoutTokens, true);
for (TaggingTokenCluster cluster : Iterables.filter(clusteror.cluster(),
new TaggingTokenClusteror.LabelTypePredicate(TaggingLabels.FIGURE))) {
List<LayoutToken> tokenizationFigure = cluster.concatTokens();
Figure result = parsers.getFigureParser().processing(
tokenizationFigure,
cluster.getFeatureBlock()
);
SortedSet<Integer> blockPtrs = new TreeSet<>();
for (LayoutToken lt : tokenizationFigure) {
if (!LayoutTokensUtil.spaceyToken(lt.t()) && !LayoutTokensUtil.newLineToken(lt.t())) {
blockPtrs.add(lt.getBlockPtr());
}
}
result.setBlockPtrs(blockPtrs);
result.setLayoutTokens(tokenizationFigure);
// the first token could be a space from previous page
for (LayoutToken lt : tokenizationFigure) {
if (!LayoutTokensUtil.spaceyToken(lt.t()) && !LayoutTokensUtil.newLineToken(lt.t())) {
result.setPage(lt.getPage());
break;
}
}
results.add(result);
result.setId("" + (results.size() - 1));
}
doc.setFigures(results);
doc.assignGraphicObjectsToFigures();
return results;
}
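// Illustrative sketch (not part of the original code) of the same clustering pattern
// applied to another label, e.g. equations:
//
// TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, rese, layoutTokens, true);
// for (TaggingTokenCluster cluster : Iterables.filter(clusteror.cluster(),
//         new TaggingTokenClusteror.LabelTypePredicate(TaggingLabels.EQUATION))) {
//     List<LayoutToken> toks = cluster.concatTokens();
//     // ... hand toks to the corresponding sub-model
// }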
/**
* Create training data for the figures as identified by the full text model.
* Return the pair (TEI fragment, CRF raw data).
*/
protected Pair<String,String> processTrainingDataFigures(String rese,
List<LayoutToken> tokenizations, String id) {
StringBuilder tei = new StringBuilder();
StringBuilder featureVector = new StringBuilder();
int nb = 0;
StringTokenizer st1 = new StringTokenizer(rese, "\n");
boolean openFigure = false;
StringBuilder figureBlock = new StringBuilder();
List<LayoutToken> tokenizationsFigure = new ArrayList<>();
List<LayoutToken> tokenizationsBuffer = null;
int p = 0; // position in tokenizations
int i = 0;
while(st1.hasMoreTokens()) {
String row = st1.nextToken();
String[] s = row.split("\t");
String token = s[0].trim();
int p0 = p;
boolean strop = false;
tokenizationsBuffer = new ArrayList<>();
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p).getText().trim();
if (openFigure)
tokenizationsFigure.add(tokenizations.get(p));
tokenizationsBuffer.add(tokenizations.get(p));
if (tokOriginal.equals(token)) {
strop = true;
}
p++;
}
if (p == tokenizations.size()) {
// either we are at the end of the sequence, or we might have
// a problematic token in the tokenization for some reason
if ((p - p0) > 2) {
// we lose synchronicity, so we reset p for the next token
p = p0;
continue;
}
}
int ll = s.length;
String label = s[ll-1];
String plainLabel = GenericTaggerUtils.getPlainLabel(label);
if (label.equals("<figure>") || ((label.equals("I-<figure>") && !openFigure))) {
if (!openFigure) {
openFigure = true;
tokenizationsFigure.addAll(tokenizationsBuffer);
}
// we remove the label in the CRF row
int ind = row.lastIndexOf("\t");
figureBlock.append(row, 0, ind).append("\n");
} else if (label.equals("I-<figure>") || openFigure) {
// remove last tokens
if (tokenizationsFigure.size() > 0) {
int nbToRemove = tokenizationsBuffer.size();
for(int q = 0; q < nbToRemove; q++)
tokenizationsFigure.remove(tokenizationsFigure.size()-1);
}
// parse the recognized figure area
//System.out.println(tokenizationsFigure.toString());
//System.out.println(figureBlock.toString());
//adjustment
if ((p != tokenizations.size()) && (tokenizations.get(p).getText().equals("\n") ||
tokenizations.get(p).getText().equals("\r") ||
tokenizations.get(p).getText().equals(" ")) ) {
tokenizationsFigure.add(tokenizations.get(p));
p++;
}
while((tokenizationsFigure.size() > 0) &&
(tokenizationsFigure.get(0).getText().equals("\n") ||
tokenizationsFigure.get(0).getText().equals(" ")) )
tokenizationsFigure.remove(0);
// process the "accumulated" figure
Pair<String,String> trainingData = parsers.getFigureParser()
.createTrainingData(tokenizationsFigure, figureBlock.toString(), "Fig" + nb);
tokenizationsFigure = new ArrayList<>();
figureBlock = new StringBuilder();
if (trainingData!= null) {
if (tei.length() == 0) {
tei.append(parsers.getFigureParser().getTEIHeader(id)).append("\n\n");
}
if (trainingData.getLeft() != null)
tei.append(trainingData.getLeft()).append("\n\n");
if (trainingData.getRight() != null)
featureVector.append(trainingData.getRight()).append("\n\n");
}
if (label.equals("I-<figure>")) {
tokenizationsFigure.addAll(tokenizationsBuffer);
int ind = row.lastIndexOf("\t");
figureBlock.append(row.substring(0, ind)).append("\n");
} else {
openFigure = false;
}
nb++;
} else
openFigure = false;
}
// If there is still an open figure
if (openFigure) {
while((tokenizationsFigure.size() > 0) &&
(tokenizationsFigure.get(0).getText().equals("\n") ||
tokenizationsFigure.get(0).getText().equals(" ")) )
tokenizationsFigure.remove(0);
// process the "accumulated" figure
Pair<String,String> trainingData = parsers.getFigureParser()
.createTrainingData(tokenizationsFigure, figureBlock.toString(), "Fig" + nb);
if (tei.length() == 0) {
tei.append(parsers.getFigureParser().getTEIHeader(id)).append("\n\n");
}
if (trainingData.getLeft() != null)
tei.append(trainingData.getLeft()).append("\n\n");
if (trainingData.getRight() != null)
featureVector.append(trainingData.getRight()).append("\n\n");
}
if (tei.length() != 0) {
tei.append("\n </text>\n" +
"</tei>\n");
}
return Pair.of(tei.toString(), featureVector.toString());
}
/**
* Process tables identified by the full text model
*/
protected List<Table> processTables(String rese,
List<LayoutToken> tokenizations,
Document doc) {
List<Table> results = new ArrayList<>();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, rese, tokenizations, true);
for (TaggingTokenCluster cluster : Iterables.filter(clusteror.cluster(),
new TaggingTokenClusteror.LabelTypePredicate(TaggingLabels.TABLE))) {
List<LayoutToken> tokenizationTable = cluster.concatTokens();
List<Table> localResults = parsers.getTableParser().processing(
tokenizationTable,
cluster.getFeatureBlock()
);
for (Table result : localResults) {
List<LayoutToken> localTokenizationTable = result.getLayoutTokens();
//result.setLayoutTokens(tokenizationTable);
// block setting: we restrict to the tokenization of this particular table
SortedSet<Integer> blockPtrs = new TreeSet<>();
for (LayoutToken lt : localTokenizationTable) {
if (!LayoutTokensUtil.spaceyToken(lt.t()) && !LayoutTokensUtil.newLineToken(lt.t())) {
blockPtrs.add(lt.getBlockPtr());
}
}
result.setBlockPtrs(blockPtrs);
// page setting: the first token could be a space from previous page
for (LayoutToken lt : localTokenizationTable) {
if (!LayoutTokensUtil.spaceyToken(lt.t()) && !LayoutTokensUtil.newLineToken(lt.t())) {
result.setPage(lt.getPage());
break;
}
}
results.add(result);
result.setId("" + (results.size() - 1));
}
}
doc.setTables(results);
doc.postProcessTables();
return results;
}
/**
* Create training data for the table as identified by the full text model.
* Return the pair (TEI fragment, CRF raw data).
*/
protected Pair<String,String> processTrainingDataTables(String rese,
List<LayoutToken> tokenizations, String id) {
StringBuilder tei = new StringBuilder();
StringBuilder featureVector = new StringBuilder();
int nb = 0;
StringTokenizer st1 = new StringTokenizer(rese, "\n");
boolean openTable = false;
StringBuilder tableBlock = new StringBuilder();
List<LayoutToken> tokenizationsTable = new ArrayList<LayoutToken>();
List<LayoutToken> tokenizationsBuffer = null;
int p = 0; // position in tokenizations
int i = 0;
while(st1.hasMoreTokens()) {
String row = st1.nextToken();
String[] s = row.split("\t");
String token = s[0].trim();
//System.out.println(s0 + "\t" + tokenizations.get(p).getText().trim());
int p0 = p;
boolean strop = false;
tokenizationsBuffer = new ArrayList<LayoutToken>();
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p).getText().trim();
if (openTable)
tokenizationsTable.add(tokenizations.get(p));
tokenizationsBuffer.add(tokenizations.get(p));
if (tokOriginal.equals(token)) {
strop = true;
}
p++;
}
if (p == tokenizations.size()) {
                // either we are at the end of the token sequence, or we might have
                // a problematic token in the tokenization for some reason
if ((p - p0) > 2) {
// we loose the synchronicity, so we reinit p for the next token
p = p0;
continue;
}
}
int ll = s.length;
String label = s[ll-1];
String plainLabel = GenericTaggerUtils.getPlainLabel(label);
if (label.equals("<table>") || ((label.equals("I-<table>") && !openTable) )) {
                if (!openTable) {
                    openTable = true;
                    tokenizationsTable.addAll(tokenizationsBuffer);
                }
// we remove the label in the CRF row
int ind = row.lastIndexOf("\t");
tableBlock.append(row.substring(0, ind)).append("\n");
} else if (label.equals("I-<table>") || openTable) {
// remove last tokens
if (tokenizationsTable.size() > 0) {
int nbToRemove = tokenizationsBuffer.size();
for(int q=0; q<nbToRemove; q++)
tokenizationsTable.remove(tokenizationsTable.size()-1);
}
// parse the recognized table area
//System.out.println(tokenizationsTable.toString());
//System.out.println(tableBlock.toString());
//adjustment
if ((p != tokenizations.size()) && (tokenizations.get(p).getText().equals("\n") ||
tokenizations.get(p).getText().equals("\r") ||
tokenizations.get(p).getText().equals(" ")) ) {
tokenizationsTable.add(tokenizations.get(p));
p++;
}
while( (tokenizationsTable.size() > 0) &&
(tokenizationsTable.get(0).getText().equals("\n") ||
tokenizationsTable.get(0).getText().equals(" ")) )
tokenizationsTable.remove(0);
// process the "accumulated" table
                Pair<String,String> trainingData = parsers.getTableParser()
                    .createTrainingData(tokenizationsTable, tableBlock.toString(), "Fig" + nb);
tokenizationsTable = new ArrayList<>();
tableBlock = new StringBuilder();
if (trainingData!= null) {
if (tei.length() == 0) {
tei.append(parsers.getTableParser().getTEIHeader(id)).append("\n\n");
}
if (trainingData.getLeft() != null)
tei.append(trainingData.getLeft()).append("\n\n");
if (trainingData.getRight() != null)
featureVector.append(trainingData.getRight()).append("\n\n");
}
                if (label.equals("I-<table>")) {
                    tokenizationsTable.addAll(tokenizationsBuffer);
                    int ind = row.lastIndexOf("\t");
                    tableBlock.append(row.substring(0, ind)).append("\n");
                } else {
                    openTable = false;
                }
                nb++;
            } else
                openTable = false;
}
        // If there is still an open table
if (openTable) {
while((tokenizationsTable.size() > 0) &&
(tokenizationsTable.get(0).getText().equals("\n") ||
tokenizationsTable.get(0).getText().equals(" ")) )
tokenizationsTable.remove(0);
            // process the "accumulated" table
Pair<String,String> trainingData = parsers.getTableParser()
.createTrainingData(tokenizationsTable, tableBlock.toString(), "Fig" + nb);
if (tei.length() == 0) {
tei.append(parsers.getTableParser().getTEIHeader(id)).append("\n\n");
}
if (trainingData.getLeft() != null)
tei.append(trainingData.getLeft()).append("\n\n");
if (trainingData.getRight() != null)
featureVector.append(trainingData.getRight()).append("\n\n");
}
if (tei.length() != 0) {
tei.append("\n </text>\n" +
"</tei>\n");
}
return Pair.of(tei.toString(), featureVector.toString());
}
/**
* Process equations identified by the full text model
*/
protected List<Equation> processEquations(String rese,
List<LayoutToken> tokenizations,
Document doc) {
List<Equation> results = new ArrayList<>();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, rese, tokenizations, true);
List<TaggingTokenCluster> clusters = clusteror.cluster();
Equation currentResult = null;
TaggingLabel lastLabel = null;
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
if ( (clusterLabel != TaggingLabels.EQUATION) && (clusterLabel != TaggingLabels.EQUATION_LABEL) ) {
lastLabel = clusterLabel;
if (currentResult != null) {
results.add(currentResult);
currentResult.setId("" + (results.size() - 1));
currentResult = null;
}
continue;
}
List<LayoutToken> tokenizationEquation = cluster.concatTokens();
String clusterContent = LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(cluster.concatTokens()));
if (currentResult == null)
currentResult = new Equation();
if ( (!currentResult.getContent().isEmpty()) && (!currentResult.getLabel().isEmpty()) ) {
results.add(currentResult);
currentResult.setId("" + (results.size() - 1));
currentResult = new Equation();
}
if (clusterLabel.equals(TaggingLabels.EQUATION)) {
if (!currentResult.getContent().isEmpty()) {
results.add(currentResult);
currentResult.setId("" + (results.size() - 1));
currentResult = new Equation();
}
currentResult.appendContent(clusterContent);
currentResult.addLayoutTokens(cluster.concatTokens());
} else if (clusterLabel.equals(TaggingLabels.EQUATION_LABEL)) {
currentResult.appendLabel(clusterContent);
currentResult.addLayoutTokens(cluster.concatTokens());
}
lastLabel = clusterLabel;
}
// add last open result
if (currentResult != null) {
results.add(currentResult);
currentResult.setId("" + (results.size() - 1));
}
doc.setEquations(results);
return results;
}
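    /*
     * Worked example of the grouping rule above. For the label sequence
     * <equation> <equation_label> <equation> <equation>:
     * - the first <equation> opens equation 0 and the following <equation_label>
     *   attaches its label to it;
     * - the second <equation> finds equation 0 complete (content and label both
     *   set), flushes it and opens equation 1;
     * - the third <equation> finds equation 1 with non-empty content, flushes it
     *   and opens equation 2, which is added after the loop as the last open result.
     * The method therefore returns three Equation objects.
     */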
/**
* Ensure consistent use of callouts in the entire document body
*/
private List<MarkerType> postProcessCallout(String result, LayoutTokenization layoutTokenization) {
if (layoutTokenization == null)
return null;
List<LayoutToken> tokenizations = layoutTokenization.getTokenization();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, result, tokenizations);
String tokenLabel = null;
List<TaggingTokenCluster> clusters = clusteror.cluster();
MarkerType majorityReferenceMarkerType = MarkerType.UNKNOWN;
MarkerType majorityFigureMarkerType = MarkerType.UNKNOWN;
MarkerType majorityTableMarkerType = MarkerType.UNKNOWN;
        MarkerType majorityEquationMarkerType = MarkerType.UNKNOWN;
Map<MarkerType,Integer> referenceMarkerTypeCounts = new HashMap<>();
Map<MarkerType,Integer> figureMarkerTypeCounts = new HashMap<>();
Map<MarkerType,Integer> tableMarkerTypeCounts = new HashMap<>();
Map<MarkerType,Integer> equationMarkerTypeCounts = new HashMap<>();
List<String> referenceMarkerSeen = new ArrayList<>();
List<String> figureMarkerSeen = new ArrayList<>();
List<String> tableMarkerSeen = new ArrayList<>();
List<String> equationMarkerSeen = new ArrayList<>();
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
if (TEIFormatter.MARKER_LABELS.contains(clusterLabel)) {
List<LayoutToken> refTokens = cluster.concatTokens();
refTokens = LayoutTokensUtil.dehyphenize(refTokens);
String refText = LayoutTokensUtil.toText(refTokens);
refText = refText.replace("\n", "");
refText = refText.replace(" ", "");
if (refText.trim().length() == 0)
continue;
if (clusterLabel.equals(TaggingLabels.CITATION_MARKER)) {
if (referenceMarkerSeen.contains(refText)) {
// already seen reference marker sequence, we skip it
continue;
}
MarkerType localMarkerType = CalloutAnalyzer.getCalloutType(refTokens);
//System.out.println(LayoutTokensUtil.toText(refTokens) + " -> " + localMarkerType);
if (referenceMarkerTypeCounts.get(localMarkerType) == null)
referenceMarkerTypeCounts.put(localMarkerType, 1);
else
referenceMarkerTypeCounts.put(localMarkerType, referenceMarkerTypeCounts.get(localMarkerType)+1);
if (!referenceMarkerSeen.contains(refText))
referenceMarkerSeen.add(refText);
} else if (clusterLabel.equals(TaggingLabels.FIGURE_MARKER)) {
if (figureMarkerSeen.contains(refText)) {
                        // already seen figure marker sequence, we skip it
continue;
}
MarkerType localMarkerType = CalloutAnalyzer.getCalloutType(refTokens);
if (figureMarkerTypeCounts.get(localMarkerType) == null)
figureMarkerTypeCounts.put(localMarkerType, 1);
else
figureMarkerTypeCounts.put(localMarkerType, figureMarkerTypeCounts.get(localMarkerType)+1);
if (!figureMarkerSeen.contains(refText))
figureMarkerSeen.add(refText);
} else if (clusterLabel.equals(TaggingLabels.TABLE_MARKER)) {
if (tableMarkerSeen.contains(refText)) {
                        // already seen table marker sequence, we skip it
continue;
}
MarkerType localMarkerType = CalloutAnalyzer.getCalloutType(refTokens);
if (tableMarkerTypeCounts.get(localMarkerType) == null)
tableMarkerTypeCounts.put(localMarkerType, 1);
else
tableMarkerTypeCounts.put(localMarkerType, tableMarkerTypeCounts.get(localMarkerType)+1);
if (!tableMarkerSeen.contains(refText))
tableMarkerSeen.add(refText);
} else if (clusterLabel.equals(TaggingLabels.EQUATION_MARKER)) {
if (equationMarkerSeen.contains(refText)) {
                        // already seen equation marker sequence, we skip it
continue;
}
MarkerType localMarkerType = CalloutAnalyzer.getCalloutType(refTokens);
if (equationMarkerTypeCounts.get(localMarkerType) == null)
equationMarkerTypeCounts.put(localMarkerType, 1);
else
equationMarkerTypeCounts.put(localMarkerType, equationMarkerTypeCounts.get(localMarkerType)+1);
if (!equationMarkerSeen.contains(refText))
equationMarkerSeen.add(refText);
}
}
}
majorityReferenceMarkerType = getBestType(referenceMarkerTypeCounts);
majorityFigureMarkerType = getBestType(figureMarkerTypeCounts);
majorityTableMarkerType = getBestType(tableMarkerTypeCounts);
        majorityEquationMarkerType = getBestType(equationMarkerTypeCounts);
/*System.out.println("majorityReferenceMarkerType: " + majorityReferenceMarkerType);
System.out.println("majorityFigureMarkerType: " + majorityFigureMarkerType);
System.out.println("majorityTableMarkerType: " + majorityTableMarkerType);
System.out.println("majorityEquationarkerType: " + majorityEquationarkerType);*/
        return Arrays.asList(majorityReferenceMarkerType, majorityFigureMarkerType, majorityTableMarkerType, majorityEquationMarkerType);
}
private static MarkerType getBestType(Map<MarkerType,Integer> markerTypeCount) {
MarkerType bestType = MarkerType.UNKNOWN;
int maxCount = 0;
for(Map.Entry<MarkerType,Integer> entry : markerTypeCount.entrySet()) {
if (entry.getValue() > maxCount) {
bestType = entry.getKey();
maxCount = entry.getValue();
}
}
return bestType;
}
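    /*
     * Illustration (not part of the parser): the majority vote performed by
     * getBestType(). Apart from MarkerType.UNKNOWN, the enum constants used
     * below are hypothetical placeholders.
     *
     *   Map<MarkerType, Integer> counts = new HashMap<>();
     *   counts.put(MarkerType.NUMBER, 12);      // e.g. callouts like "[3]"
     *   counts.put(MarkerType.UNKNOWN, 2);
     *   MarkerType best = getBestType(counts);  // -> MarkerType.NUMBER
     */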
/**
* Create the TEI representation for a document based on the parsed header, references
* and body sections.
*/
private void toTEI(Document doc,
String reseBody,
String reseAnnex,
LayoutTokenization layoutTokenization,
List<LayoutToken> tokenizationsAnnex,
BiblioItem resHeader,
List<Figure> figures,
List<Table> tables,
List<Equation> equations,
List<MarkerType> markerTypes,
GrobidAnalysisConfig config) {
if (doc.getBlocks() == null) {
return;
}
List<BibDataSet> resCitations = doc.getBibDataSets();
TEIFormatter teiFormatter = new TEIFormatter(doc, this);
StringBuilder tei;
try {
tei = teiFormatter.toTEIHeader(resHeader, null, resCitations, markerTypes, config);
//System.out.println(rese);
//int mode = config.getFulltextProcessingMode();
tei = teiFormatter.toTEIBody(tei, reseBody, resHeader, resCitations,
layoutTokenization, figures, tables, equations, markerTypes, doc, config);
tei.append("\t\t<back>\n");
// acknowledgement is in the back
StringBuilder acknowledgmentStmt = getSectionAsTEI("acknowledgement", "\t\t\t", doc, SegmentationLabels.ACKNOWLEDGEMENT,
teiFormatter, resCitations, config);
if (acknowledgmentStmt.length() > 0) {
tei.append(acknowledgmentStmt);
}
// availability statements in header
StringBuilder availabilityStmt = new StringBuilder();
if (StringUtils.isNotBlank(resHeader.getAvailabilityStmt())) {
List<LayoutToken> headerAvailabilityStatementTokens = resHeader.getLayoutTokens(TaggingLabels.HEADER_AVAILABILITY);
Pair<String, List<LayoutToken>> headerAvailabilityProcessed = processShort(headerAvailabilityStatementTokens, doc);
if (headerAvailabilityProcessed != null) {
availabilityStmt = teiFormatter.processTEIDivSection("availability",
"\t\t\t",
headerAvailabilityProcessed.getLeft(),
headerAvailabilityProcessed.getRight(),
resCitations,
config);
}
if (availabilityStmt.length() > 0) {
tei.append(availabilityStmt.toString());
}
}
// availability statements in non-header part
availabilityStmt = getSectionAsTEI("availability",
"\t\t\t",
doc,
SegmentationLabels.AVAILABILITY,
teiFormatter,
resCitations,
config);
if (availabilityStmt.length() > 0) {
tei.append(availabilityStmt.toString());
}
// funding in header
StringBuilder fundingStmt = new StringBuilder();
if (StringUtils.isNotBlank(resHeader.getFunding())) {
List<LayoutToken> headerFundingTokens = resHeader.getLayoutTokens(TaggingLabels.HEADER_FUNDING);
Pair<String, List<LayoutToken>> headerFundingProcessed = processShort(headerFundingTokens, doc);
if (headerFundingProcessed != null) {
fundingStmt = teiFormatter.processTEIDivSection("funding",
"\t\t\t",
headerFundingProcessed.getLeft(),
headerFundingProcessed.getRight(),
resCitations,
config);
}
if (fundingStmt.length() > 0) {
tei.append(fundingStmt.toString());
}
}
// funding statements in non-header part
fundingStmt = getSectionAsTEI("funding",
"\t\t\t",
doc,
SegmentationLabels.FUNDING,
teiFormatter,
resCitations,
config);
if (fundingStmt.length() > 0) {
tei.append(fundingStmt);
}
tei = teiFormatter.toTEIAnnex(tei, reseAnnex, resHeader, resCitations,
tokenizationsAnnex, markerTypes, doc, config);
tei = teiFormatter.toTEIReferences(tei, resCitations, config);
doc.calculateTeiIdToBibDataSets();
tei.append("\t\t</back>\n");
tei.append("\t</text>\n");
tei.append("</TEI>\n");
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
doc.setTei(tei.toString());
//TODO: reevaluate
// doc.setTei(
// XmlBuilderUtils.toPrettyXml(
// XmlBuilderUtils.fromString(tei.toString())
// )
// );
}
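    /*
     * Illustration (not part of the pipeline): building the GrobidAnalysisConfig
     * passed down to toTEI(). Only builder methods already used elsewhere in this
     * code base are shown; the coordinate element list is an example.
     *
     *   GrobidAnalysisConfig config = GrobidAnalysisConfig.builder()
     *       .consolidateCitations(1)
     *       .generateTeiCoordinates(Arrays.asList("ref", "biblStruct"))
     *       .withSentenceSegmentation(true)
     *       .build();
     */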
private StringBuilder getSectionAsTEI(String xmlType,
String indentation,
Document doc,
TaggingLabel taggingLabel,
TEIFormatter teiFormatter,
List<BibDataSet> resCitations,
GrobidAnalysisConfig config) throws Exception {
StringBuilder output = new StringBuilder();
SortedSet<DocumentPiece> sectionPart = doc.getDocumentPart(taggingLabel);
if (sectionPart != null && sectionPart.size() > 0) {
Pair<String, LayoutTokenization> sectionTokenisation = getBodyTextFeatured(doc, sectionPart);
if (sectionTokenisation != null) {
                // if sectionTokenisation is null, it usually means that no segment corresponding
                // to this section was found by the document segmentation
String text = sectionTokenisation.getLeft();
List<LayoutToken> tokens = sectionTokenisation.getRight().getTokenization();
String resultLabelling = null;
if (StringUtils.isNotBlank(text) ) {
resultLabelling = label(text);
resultLabelling = postProcessFullTextLabeledText(resultLabelling);
}
output = teiFormatter.processTEIDivSection(xmlType, indentation, resultLabelling, tokens, resCitations, config);
}
}
return output;
}
private static List<TaggingLabel> inlineFullTextLabels = Arrays.asList(TaggingLabels.CITATION_MARKER, TaggingLabels.TABLE_MARKER,
TaggingLabels.FIGURE_MARKER, TaggingLabels.EQUATION_LABEL);
public static List<LayoutTokenization> getDocumentFullTextTokens(List<TaggingLabel> labels, String labeledResult, List<LayoutToken> tokenizations) {
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.FULLTEXT, labeledResult, tokenizations);
List<TaggingTokenCluster> clusters = clusteror.cluster();
List<LayoutTokenization> labeledTokenSequences = new ArrayList<LayoutTokenization>();
LayoutTokenization currentTokenization = null;
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
List<LayoutToken> clusterTokens = cluster.concatTokens();
if (inlineFullTextLabels.contains(clusterLabel)) {
// sequence is not interrupted
if (currentTokenization == null)
currentTokenization = new LayoutTokenization();
} else {
// we have an independent sequence
if ( (currentTokenization != null) && (currentTokenization.size() > 0) ) {
labeledTokenSequences.add(currentTokenization);
currentTokenization = new LayoutTokenization();
}
}
if (labels.contains(clusterLabel)) {
if (currentTokenization == null)
currentTokenization = new LayoutTokenization();
currentTokenization.addTokens(clusterTokens);
}
}
if ( (currentTokenization != null) && (currentTokenization.size() > 0) )
labeledTokenSequences.add(currentTokenization);
return labeledTokenSequences;
}
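    /*
     * Illustration (not part of the parser): extracting all table token sequences
     * from a labeled full-text result with the helper above.
     *
     *   List<LayoutTokenization> tableSequences = getDocumentFullTextTokens(
     *       Arrays.asList(TaggingLabels.TABLE), labeledResult, tokenizations);
     *   for (LayoutTokenization sequence : tableSequences)
     *       System.out.println(LayoutTokensUtil.toText(sequence.getTokenization()));
     */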
@Override
public void close() throws IOException {
super.close();
}
}
| 118,064 | 43.772469 | 186 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/ProcessEngine.java
|
package org.grobid.core.engines;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.grobid.core.data.*;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.main.batch.GrobidMainArgs;
import org.grobid.core.utilities.IOUtilities;
import org.grobid.core.utilities.KeyGen;
import org.grobid.core.visualization.CitationsVisualizer;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.apache.commons.collections4.CollectionUtils.isEmpty;
public class ProcessEngine implements Closeable {
/**
* The logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(ProcessEngine.class);
/**
* The engine.
*/
private static Engine engine;
/**
* @return the engine instance.
*/
protected Engine getEngine() {
if (engine == null) {
engine = GrobidFactory.getInstance().createEngine();
}
return engine;
}
/**
* Close engine resources.
*/
@Override
public void close() throws IOException {
if (engine != null) {
engine.close();
}
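        // note: the exit below also terminates the JVM, presumably because
        // ProcessEngine is only used from command-line batch runs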
System.exit(0);
}
/**
* Process the headers using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processHeader(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File pdfDirectory = new File(pGbdArgs.getPath2Input());
File[] files = pdfDirectory.listFiles();
if (files == null) {
LOGGER.warn("No files in directory: " + pdfDirectory);
} else {
processHeaderDirectory(files, pGbdArgs, pGbdArgs.getPath2Output());
}
}
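    /*
     * Illustration (not part of the class): a minimal batch invocation, assuming
     * GrobidMainArgs offers a default constructor next to the setters used below.
     * The paths are hypothetical. Note that close() ends the JVM (see above),
     * which suits a command-line run.
     *
     *   GrobidMainArgs args = new GrobidMainArgs();
     *   args.setPath2Input("/tmp/pdfs");
     *   args.setPath2Output("/tmp/out");
     *   try (ProcessEngine processEngine = new ProcessEngine()) {
     *       processEngine.processHeader(args);
     *   }
     */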
/**
* Process the header recursively or not using pGbdArgs parameters.
*
* @param files list of files to be processed
* @param pGbdArgs The parameters.
* @throws Exception
*/
private void processHeaderDirectory(File[] files, final GrobidMainArgs pGbdArgs, String outputPath) {
if (files != null) {
boolean recurse = pGbdArgs.isRecursive();
String result;
for (final File currPdf : files) {
try {
if (currPdf.getName().toLowerCase().endsWith(".pdf")) {
result = getEngine().processHeader(currPdf.getAbsolutePath(), 0, null);
File outputPathFile = new File(outputPath);
if (!outputPathFile.exists()) {
outputPathFile.mkdirs();
}
if (currPdf.getName().endsWith(".pdf")) {
IOUtilities.writeInFile(outputPath + File.separator
+ new File(currPdf.getAbsolutePath())
.getName().replace(".pdf", ".tei.xml"), result.toString());
} else if (currPdf.getName().endsWith(".PDF")) {
IOUtilities.writeInFile(outputPath + File.separator
+ new File(currPdf.getAbsolutePath())
.getName().replace(".PDF", ".tei.xml"), result.toString());
}
} else if (recurse && currPdf.isDirectory()) {
File[] newFiles = currPdf.listFiles();
if (newFiles != null) {
String newLevel = currPdf.getName();
processHeaderDirectory(newFiles, pGbdArgs, outputPath +
File.separator + newLevel);
}
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currPdf.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
}
}
}
/**
* Process the full text using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processFullText(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File pdfDirectory = new File(pGbdArgs.getPath2Input());
File[] files = pdfDirectory.listFiles();
if (files == null) {
LOGGER.warn("No files in directory: " + pdfDirectory);
} else {
List<String> elementCoordinates = null;
if (pGbdArgs.getTeiCoordinates()) {
elementCoordinates = Arrays.asList("figure", "persName", "ref", "biblStruct", "formula", "s", "note");
}
processFullTextDirectory(files, pGbdArgs, pGbdArgs.getPath2Output(), pGbdArgs.getSaveAssets(),
elementCoordinates, pGbdArgs.getSegmentSentences(), pGbdArgs.getAddElementId());
System.out.println(Engine.getCntManager());
}
}
/**
* Process the full text recursively or not using pGbdArgs parameters.
*
* @param files list of files to be processed
* @param pGbdArgs The parameters.
* @throws Exception
*/
private void processFullTextDirectory(File[] files,
final GrobidMainArgs pGbdArgs,
String outputPath,
boolean saveAssets,
List<String> elementCoordinates,
boolean segmentSentences,
boolean addElementId) {
if (files != null) {
boolean recurse = pGbdArgs.isRecursive();
String result;
for (final File currPdf : files) {
try {
if (currPdf.getName().toLowerCase().endsWith(".pdf")) {
System.out.println("Processing: " + currPdf.getPath());
GrobidAnalysisConfig config = null;
// path for saving assets
if (saveAssets) {
String baseName = currPdf.getName().replace(".pdf", "").replace(".PDF", "");
String assetPath = outputPath + File.separator + baseName + "_assets";
config = GrobidAnalysisConfig.builder()
.pdfAssetPath(new File(assetPath))
.generateTeiCoordinates(elementCoordinates)
.withSentenceSegmentation(segmentSentences)
.generateTeiIds(addElementId)
.build();
} else
config = GrobidAnalysisConfig.builder()
.generateTeiCoordinates(elementCoordinates)
.withSentenceSegmentation(segmentSentences)
.generateTeiIds(addElementId)
.build();
result = getEngine().fullTextToTEI(currPdf, config);
File outputPathFile = new File(outputPath);
if (!outputPathFile.exists()) {
outputPathFile.mkdir();
}
if (currPdf.getName().endsWith(".pdf")) {
IOUtilities.writeInFile(outputPath + File.separator
+ new File(currPdf.getAbsolutePath())
.getName().replace(".pdf", ".tei.xml"), result.toString());
} else if (currPdf.getName().endsWith(".PDF")) {
IOUtilities.writeInFile(outputPath + File.separator
+ new File(currPdf.getAbsolutePath())
.getName().replace(".PDF", ".tei.xml"), result.toString());
}
} else if (recurse && currPdf.isDirectory()) {
File[] newFiles = currPdf.listFiles();
if (newFiles != null) {
String newLevel = currPdf.getName();
processFullTextDirectory(newFiles, pGbdArgs, outputPath +
File.separator + newLevel, saveAssets, elementCoordinates, segmentSentences, addElementId);
}
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currPdf.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
}
}
}
/**
* Process the date using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processDate(final GrobidMainArgs pGbdArgs) throws Exception {
inferOutputPath(pGbdArgs);
final List<Date> result = getEngine().processDate(pGbdArgs.getInput());
if (isEmpty(result)) {
throw new GrobidResourceException("Cannot read the input data for date. Check the documentation. ");
}
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator + "result", result.get(0).toTEI());
}
/**
* Process the author header using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processAuthorsHeader(final GrobidMainArgs pGbdArgs) throws Exception {
inferOutputPath(pGbdArgs);
final List<Person> result = getEngine().processAuthorsHeader(pGbdArgs.getInput());
if (isEmpty(result)) {
throw new GrobidResourceException("Cannot read the input data for processAuthorHeader. Check the documentation. ");
}
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator + "result", result.get(0).toTEI(false));
}
/**
* Process the author citation using pGbdArgs parameters.
*
* @param pGbdArgs The parameters
* @throws Exception
*/
public void processAuthorsCitation(final GrobidMainArgs pGbdArgs) throws Exception {
inferOutputPath(pGbdArgs);
final List<Person> result = getEngine().processAuthorsCitation(pGbdArgs.getInput());
if (isEmpty(result)) {
throw new GrobidResourceException("Cannot read the input data for authorsCitation. Check the documentation. ");
}
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator + "result", result.get(0).toTEI(false));
}
/**
* Process the affiliation using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processAffiliation(final GrobidMainArgs pGbdArgs) throws Exception {
inferOutputPath(pGbdArgs);
final List<Affiliation> result = getEngine().processAffiliation(pGbdArgs.getInput());
if (isEmpty(result)) {
throw new GrobidResourceException("Cannot read the input data for affiliations. Check the documentation. ");
}
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator + "result", result.get(0).toTEI());
}
/**
* Process the raw reference using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processRawReference(final GrobidMainArgs pGbdArgs) throws Exception {
inferOutputPath(pGbdArgs);
final BiblioItem result = getEngine().processRawReference(pGbdArgs.getInput(), 0);
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator + "result", result.toTEI(-1));
}
/**
* Process all the references using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processReferences(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File pdfDirectory = new File(pGbdArgs.getPath2Input());
File[] files = pdfDirectory.listFiles();
if (files == null) {
LOGGER.warn("No files in directory: " + pdfDirectory);
} else {
processReferencesDirectory(files, pGbdArgs, pGbdArgs.getPath2Output());
}
}
/**
* Process the references recursively or not using pGbdArgs parameters.
*
* @param files list of files to be processed
* @param pGbdArgs The parameters.
*/
private void processReferencesDirectory(File[] files, final GrobidMainArgs pGbdArgs, String outputPath) {
if (files != null) {
boolean recurse = pGbdArgs.isRecursive();
int id = 0;
for (final File currPdf : files) {
try {
if (currPdf.getName().toLowerCase().endsWith(".pdf")) {
final List<BibDataSet> results =
getEngine().processReferences(currPdf, 0);
File outputPathFile = new File(outputPath);
if (!outputPathFile.exists()) {
outputPathFile.mkdir();
}
StringBuilder result = new StringBuilder();
// dummy header
result.append("<?xml version=\"1.0\" ?>\n<TEI xmlns=\"http://www.tei-c.org/ns/1.0\" " +
"xmlns:xlink=\"http://www.w3.org/1999/xlink\" " +
"\n xmlns:mml=\"http://www.w3.org/1998/Math/MathML\">\n");
result.append("\t<teiHeader>\n\t\t<fileDesc xml:id=\"f_" + id +
"\"/>\n\t</teiHeader>\n");
result.append("\t<text>\n\t\t<front/>\n\t\t<body/>\n\t\t<back>\n\t\t\t<listBibl>\n");
for (BibDataSet res : results) {
result.append(res.toTEI());
result.append("\n");
}
result.append("\t\t\t</listBibl>\n\t\t</back>\n\t</text>\n</TEI>\n");
if (currPdf.getName().endsWith(".pdf")) {
IOUtilities.writeInFile(outputPath + File.separator
+ new File(currPdf.getAbsolutePath()).getName().replace(".pdf", ".references.tei.xml"),
result.toString());
} else if (currPdf.getName().endsWith(".PDF")) {
IOUtilities.writeInFile(outputPath + File.separator
+ new File(currPdf.getAbsolutePath()).getName().replace(".PDF", ".references.tei.xml"),
result.toString());
}
} else if (recurse && currPdf.isDirectory()) {
File[] newFiles = currPdf.listFiles();
if (newFiles != null) {
String newLevel = currPdf.getName();
processReferencesDirectory(newFiles, pGbdArgs, outputPath +
File.separator + newLevel);
}
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currPdf.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
id++;
}
}
}
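    /*
     * For reference, the skeleton written above for each processed PDF has the
     * following shape (one entry per extracted reference inside <listBibl>):
     *
     *   <?xml version="1.0" ?>
     *   <TEI xmlns="http://www.tei-c.org/ns/1.0" xmlns:xlink="http://www.w3.org/1999/xlink"
     *        xmlns:mml="http://www.w3.org/1998/Math/MathML">
     *     <teiHeader><fileDesc xml:id="f_0"/></teiHeader>
     *     <text>
     *       <front/><body/>
     *       <back><listBibl> ... </listBibl></back>
     *     </text>
     *   </TEI>
     */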
/**
* Generate training data for all models
*
* @param pGbdArgs The parameters.
*/
public void createTraining(final GrobidMainArgs pGbdArgs) {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
int result = getEngine().batchCreateTraining(pGbdArgs.getPath2Input(), pGbdArgs.getPath2Output(), -1);
LOGGER.info(result + " files processed.");
}
/**
* Generate training data for the monograph model from provided directory of PDF documents.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void createTrainingMonograph(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
int result = getEngine().batchCreateTrainingMonograph(pGbdArgs.getPath2Input(), pGbdArgs.getPath2Output(), -1);
LOGGER.info(result + " files processed.");
}
/**
* Generate blank training data from provided directory of PDF documents, i.e. where TEI files are text only
     * without tags. This can be used to start any new model from scratch.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void createTrainingBlank(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
int result = getEngine().batchCreateTrainingBlank(pGbdArgs.getPath2Input(), pGbdArgs.getPath2Output(), -1);
LOGGER.info(result + " files processed.");
}
/**
* Generate training data for citation extraction from patent documents.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void createTrainingCitationPatent(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
int result = getEngine().batchCreateTrainingPatentcitations(pGbdArgs.getPath2Input(), pGbdArgs.getPath2Output());
LOGGER.info(result + " files processed.");
}
/**
* Generate training data from raw reference strings.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void createTrainingCitation(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
int result = getEngine().batchCreateTrainingCitation(pGbdArgs.getPath2Input(), pGbdArgs.getPath2Output());
LOGGER.info(result + " files processed.");
}
/**
* Process a patent encoded in TEI using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
/*public void processCitationPatentTEI(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File teiDirectory = new File(pGbdArgs.getPath2Input());
String result = StringUtils.EMPTY;
for (final File currTEI : teiDirectory.listFiles()) {
try {
if (currTEI.getName().toLowerCase().endsWith(".tei") ||
currTEI.getName().toLowerCase().endsWith(".tei.xml")) {
getEngine().processCitationPatentTEI(pGbdArgs.getPath2Input() + File.separator + currTEI.getName(),
pGbdArgs.getPath2Output() + File.separator + currTEI.getName(), 0);
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currTEI.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
}
}*/
/**
* Process a patent encoded in ST.36 using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processCitationPatentST36(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File xmlDirectory = new File(pGbdArgs.getPath2Input());
String result = StringUtils.EMPTY;
for (final File currXML : xmlDirectory.listFiles()) {
try {
if (currXML.getName().toLowerCase().endsWith(".xml") ||
currXML.getName().toLowerCase().endsWith(".xml.gz")) {
List<BibDataSet> articles = new ArrayList<BibDataSet>();
List<PatentItem> patents = new ArrayList<PatentItem>();
result = getEngine().processAllCitationsInXMLPatent(pGbdArgs.getPath2Input() + File.separator + currXML.getName(),
articles, patents, 0, false);
if (currXML.getName().endsWith(".gz")) {
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator
+ new File(currXML.getAbsolutePath()).getName().replace(".xml.gz", ".tei.xml"), result);
} else {
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator
+ new File(currXML.getAbsolutePath()).getName().replace(".xml", ".tei.xml"), result);
}
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currXML.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
}
}
/**
* Process a patent in utf-8 text using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processCitationPatentTXT(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File txtDirectory = new File(pGbdArgs.getPath2Input());
String result = StringUtils.EMPTY;
for (final File currTXT : txtDirectory.listFiles()) {
try {
if (currTXT.getName().toLowerCase().endsWith(".txt")) {
String inputStr = FileUtils.readFileToString(currTXT, "UTF-8");
List<BibDataSet> articles = new ArrayList<BibDataSet>();
List<PatentItem> patents = new ArrayList<PatentItem>();
result = getEngine().processAllCitationsInPatent(inputStr, articles, patents, 0, false);
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator
+ new File(currTXT.getAbsolutePath()).getName().replace(".txt", ".tei.xml"), result);
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currTXT.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
}
}
/**
* Process a patent available in PDF using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processCitationPatentPDF(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File pdfDirectory = new File(pGbdArgs.getPath2Input());
String result = StringUtils.EMPTY;
for (final File currPDF : pdfDirectory.listFiles()) {
try {
if (currPDF.getName().toLowerCase().endsWith(".pdf")) {
List<BibDataSet> articles = new ArrayList<BibDataSet>();
List<PatentItem> patents = new ArrayList<PatentItem>();
result = getEngine().processAllCitationsInPDFPatent(pGbdArgs.getPath2Input() +
File.separator + currPDF.getName(), articles, patents, 0, false);
if (currPDF.getName().endsWith(".pdf")) {
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator
+ new File(currPDF.getAbsolutePath()).getName().replace(".pdf", ".tei.xml"), result);
} else if (currPDF.getName().endsWith(".PDF")) {
IOUtilities.writeInFile(pGbdArgs.getPath2Output() + File.separator
+ new File(currPDF.getAbsolutePath()).getName().replace(".PDF", ".tei.xml"), result);
}
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currPDF.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
}
}
}
/**
* Process a patent available in PDF using pGbdArgs parameters.
*
* @param pGbdArgs The parameters.
* @throws Exception
*/
public void processPDFAnnotation(final GrobidMainArgs pGbdArgs) throws Exception {
inferPdfInputPath(pGbdArgs);
inferOutputPath(pGbdArgs);
final File pdfDirectory = new File(pGbdArgs.getPath2Input());
final File outDirectory = new File(pGbdArgs.getPath2Output());
PDDocument out = null;
PDDocument document = null;
for (final File currPDF : pdfDirectory.listFiles()) {
try {
if (currPDF.getName().toLowerCase().endsWith(".pdf")) {
System.out.println("Processing: " + currPDF.getName());
                    List<String> elementWithCoords = new ArrayList<>();
elementWithCoords.add("ref");
elementWithCoords.add("biblStruct");
GrobidAnalysisConfig config = new GrobidAnalysisConfig
.GrobidAnalysisConfigBuilder()
.consolidateCitations(1)
.generateTeiCoordinates(elementWithCoords)
.build();
Document teiDoc = getEngine().fullTextToTEIDoc(currPDF, null, config);
document = PDDocument.load(currPDF);
                    // If the document has no pages we cannot annotate it, so raise an error
if (document.getNumberOfPages() > 0) {
DocumentSource documentSource = teiDoc.getDocumentSource();
out = CitationsVisualizer.annotatePdfWithCitations(document, teiDoc, null);
} else {
throw new RuntimeException("Cannot identify any pages in the input document. " +
"The document cannot be annotated. Please check whether the document is valid or the logs.");
}
if (out != null) {
File outputFile = null;
if (outDirectory.getPath().equals(pdfDirectory.getPath()))
outputFile = new File(outDirectory.getPath() + "/" + currPDF.getName().replace(".pdf", ".grobid.pdf"));
else
outputFile = new File(outDirectory.getPath() + "/" + currPDF.getName());
out.save(outputFile);
System.out.println("Saved: " + outputFile.getPath());
}
}
} catch (final Exception exp) {
LOGGER.error("An error occured while processing the file " + currPDF.getAbsolutePath()
+ ". Continuing the process for the other files", exp);
} finally {
if (document != null)
document.close();
if (out != null) {
out.close();
}
}
}
}
/**
* List the engine methods that can be called.
*
* @return List<String> containing the list of the methods.
*/
public final static List<String> getUsableMethods() {
        final Class<?> pClass = ProcessEngine.class;
final List<String> availableMethods = new ArrayList<String>();
for (final Method method : pClass.getMethods()) {
if (isUsableMethod(method.getName())) {
availableMethods.add(method.getName());
}
}
return availableMethods;
}
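    /*
     * Illustration (not part of the class): dispatching one of the usable methods
     * by name via reflection, e.g. from a command-line option. The methodName and
     * gbdArgs variables are hypothetical, and all dispatchable methods are assumed
     * to take a single GrobidMainArgs parameter, as they do in this class.
     *
     *   if (ProcessEngine.getUsableMethods().contains(methodName)) {
     *       ProcessEngine processEngine = new ProcessEngine();
     *       Method method = ProcessEngine.class.getMethod(methodName, GrobidMainArgs.class);
     *       method.invoke(processEngine, gbdArgs);
     *   }
     */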
/**
* Check if the method is usable.
*
* @param pMethod method name.
* @return if it is usable
*/
protected final static boolean isUsableMethod(final String pMethod) {
        // exclude Object's built-in methods and this class' internal helpers
        boolean isExcluded = StringUtils.equals("wait", pMethod);
        isExcluded |= StringUtils.equals("equals", pMethod);
        isExcluded |= StringUtils.equals("toString", pMethod);
        isExcluded |= StringUtils.equals("hashCode", pMethod);
        isExcluded |= StringUtils.equals("getClass", pMethod);
        isExcluded |= StringUtils.equals("notify", pMethod);
        isExcluded |= StringUtils.equals("notifyAll", pMethod);
        isExcluded |= StringUtils.equals("isUsableMethod", pMethod);
        isExcluded |= StringUtils.equals("getUsableMethods", pMethod);
        isExcluded |= StringUtils.equals("inferPdfInputPath", pMethod);
        isExcluded |= StringUtils.equals("inferOutputPath", pMethod);
        isExcluded |= StringUtils.equals("close", pMethod);
        return !isExcluded;
}
/**
* Infer the input path for pdfs if not given in arguments.
*
* @param pGbdArgs The GrobidArgs.
*/
protected final static void inferPdfInputPath(final GrobidMainArgs pGbdArgs) {
String tmpFilePath;
if (pGbdArgs.getPath2Input() == null) {
tmpFilePath = new File(".").getAbsolutePath();
LOGGER.info("No path set for the input directory. Using: " + tmpFilePath);
pGbdArgs.setPath2Input(tmpFilePath);
}
}
/**
* Infer the output path if not given in arguments.
*
* @param pGbdArgs The GrobidArgs.
*/
protected final static void inferOutputPath(final GrobidMainArgs pGbdArgs) {
String tmpFilePath;
if (pGbdArgs.getPath2Output() == null) {
tmpFilePath = new File(".").getAbsolutePath();
LOGGER.info("No path set for the output directory. Using: " + tmpFilePath);
pGbdArgs.setPath2Output(tmpFilePath);
}
}
}
| 30,600 | 43.607872 | 134 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/TableParser.java
|
package org.grobid.core.engines;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.Table;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.BoundingBoxCalculator;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.TextUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.StringUtils;
import java.util.Collections;
import java.util.List;
import java.util.ArrayList;
import static org.grobid.core.engines.label.TaggingLabels.*;
public class TableParser extends AbstractParser {
private static final Logger LOGGER = LoggerFactory.getLogger(TableParser.class);
protected TableParser() {
super(GrobidModels.TABLE);
}
/**
* The processing here is called from the full text parser in cascade.
* Normally we should find only one table in the sequence to be labelled.
     * But for robustness, and to recover from errors at the higher level, we allow
     * sub-segmenting several tables that appear one after the other.
*/
public List<Table> processing(List<LayoutToken> tokenizationTable, String featureVector) {
String res;
try {
res = label(featureVector);
} catch (Exception e) {
throw new GrobidException("CRF labeling with table model fails.", e);
}
if (res == null) {
return null;
}
// List<Pair<String, String>> labeled = GenericTaggerUtils.getTokensAndLabels(res);
return getExtractionResult(tokenizationTable, res);
}
private List<Table> getExtractionResult(List<LayoutToken> tokenizations, String result) {
List<Table> tables = new ArrayList<>();
// first table
Table table = new Table();
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(GrobidModels.TABLE, result, tokenizations);
List<TaggingTokenCluster> clusters = clusteror.cluster();
TaggingLabel previousLabel = null;
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
List<LayoutToken> tokens = cluster.concatTokens();
String clusterContent = LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(tokens));
if (clusterLabel.equals(TBL_DESC)) {
table.appendCaption(clusterContent);
table.appendCaptionLayoutTokens(tokens);
table.getFullDescriptionTokens().addAll(tokens);
table.addLayoutTokens(tokens);
} else if (clusterLabel.equals(TBL_HEAD)) {
// if we already have a header (it could be via label) and we are not continuing some header/label
// we consider the non-connected header field as the introduction of a new table
                // TBD: this works fine for a header located before the table content, but is not guaranteed otherwise
if (!StringUtils.isEmpty(table.getHeader()) &&
previousLabel != null &&
(previousLabel.equals(TBL_CONTENT) || previousLabel.equals(TBL_NOTE) || previousLabel.equals(TBL_DESC) )) {
// we already have a table header, this means that we have a distinct table starting now
tables.add(table);
table.setTextArea(Collections.singletonList(BoundingBoxCalculator.calculateOneBox(table.getLayoutTokens(), true)));
table = new Table();
}
table.appendHeader(clusterContent);
table.getFullDescriptionTokens().addAll(tokens);
table.addLayoutTokens(tokens);
} else if (clusterLabel.equals(TBL_LABEL)) {
//label should also go to head
table.appendHeader(" " + clusterContent + " ");
table.appendLabel(clusterContent);
table.getFullDescriptionTokens().addAll(tokens);
table.addLayoutTokens(tokens);
} else if (clusterLabel.equals(TBL_NOTE)) {
table.appendNote(clusterContent);
table.getFullDescriptionTokens().addAll(tokens);
table.addAllNoteLayoutTokens(tokens);
table.addLayoutTokens(tokens);
} else if (clusterLabel.equals(TBL_OTHER)) {
table.addLayoutTokens(tokens);
} else if (clusterLabel.equals(TBL_CONTENT)) {
table.appendContent(clusterContent);
table.getContentTokens().addAll(tokens);
table.addLayoutTokens(tokens);
} else {
LOGGER.warn("Unexpected table model label - " + clusterLabel.getLabel() + " for " + clusterContent);
}
previousLabel = clusterLabel;
}
// last table
table.setTextArea(Collections.singletonList(BoundingBoxCalculator.calculateOneBox(table.getLayoutTokens(), true)));
tables.add(table);
return tables;
}
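    /*
     * Worked example of the table-splitting rule above. For the label sequence
     * <label> <figure_head> <content> <figure_head> <content>, the second
     * <figure_head> arrives while a header is already set and the previous label
     * is table content, so the current table is closed and a new one is opened:
     * the method returns two Table objects.
     */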
/**
* The training data creation is called from the full text training creation in cascade.
*/
public Pair<String, String> createTrainingData(List<LayoutToken> tokenizations,
String featureVector, String id) {
String res = null;
try {
res = label(featureVector);
} catch (Exception e) {
LOGGER.error("CRF labeling in TableParser fails.", e);
}
if (res == null) {
return Pair.of(null, featureVector);
}
List<Pair<String, String>> labeled = GenericTaggerUtils.getTokensAndLabels(res);
StringBuilder sb = new StringBuilder();
int tokPtr = 0;
boolean addSpace = false;
boolean addEOL = false;
String lastTag = null;
boolean figOpen = false;
for (Pair<String, String> l : labeled) {
String tok = l.getLeft();
String label = l.getRight();
int tokPtr2 = tokPtr;
for (; tokPtr2 < tokenizations.size(); tokPtr2++) {
if (tokenizations.get(tokPtr2).getText().equals(" ")) {
addSpace = true;
                } else if (tokenizations.get(tokPtr2).getText().equals("\n") ||
                        tokenizations.get(tokPtr2).getText().equals("\r")) {
addEOL = true;
} else {
break;
}
}
tokPtr = tokPtr2;
if (tokPtr >= tokenizations.size()) {
LOGGER.error("Implementation error: Reached the end of tokenizations, but current token is " + tok);
// we add a space to avoid concatenated text
addSpace = true;
} else {
String tokenizationToken = tokenizations.get(tokPtr).getText();
if ((tokPtr != tokenizations.size()) && !tokenizationToken.equals(tok)) {
// and we add a space by default to avoid concatenated text
addSpace = true;
if (!tok.startsWith(tokenizationToken)) {
                        // this is a very exceptional case due to a sequence of accents/diacritics, in this case we skip
// a shift in the tokenizations list and continue on the basis of the labeled token
// we check one ahead
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we try another position forward (second hope!)
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we try another position forward (last hope!)
tokPtr++;
tokenizationToken = tokenizations.get(tokPtr).getText();
if (!tok.equals(tokenizationToken)) {
// we return to the initial position
tokPtr = tokPtr - 3;
tokenizationToken = tokenizations.get(tokPtr).getText();
LOGGER.error("Implementation error, tokens out of sync: " +
tokenizationToken + " != " + tok + ", at position " + tokPtr);
}
}
}
}
// note: if the above condition is true, this is an exceptional case due to a
                    // sequence of accents/diacritics and we can go on as a full string match
}
}
String plainLabel = GenericTaggerUtils.getPlainLabel(label);
String output = null;
if (lastTag != null) {
testClosingTag(sb, plainLabel, lastTag, addSpace, addEOL);
}
output = writeField(label, lastTag, tok, "<figure_head>", "<head>", addSpace, addEOL, 3);
String tableOpening = "\t\t<figure type=\"table\">\n";
if (output != null) {
if (!figOpen) {
sb.append(tableOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<figDesc>", "<figDesc>", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(tableOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<label>", "<label>", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(tableOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<content>", "<table>", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(tableOpening);
figOpen = true;
}
sb.append(output);
//continue;
}
output = writeField(label, lastTag, tok, "<note>", "<note>", addSpace, addEOL, 3);
if (output != null) {
if (!figOpen) {
sb.append(tableOpening);
figOpen = true;
}
sb.append(output);
}
output = writeField(label, lastTag, tok, "<other>", "<other>", addSpace, addEOL, 2);
if (output != null) {
sb.append(output);
}
lastTag = plainLabel;
addSpace = false;
addEOL = false;
tokPtr++;
}
if (figOpen) {
testClosingTag(sb, "", lastTag, addSpace, addEOL);
sb.append("\t\t</figure>\n");
}
return Pair.of(sb.toString(), featureVector);
}
public String getTEIHeader(String id) {
StringBuilder sb = new StringBuilder();
sb.append("<tei>\n" +
" <teiHeader>\n" +
" <fileDesc xml:id=\"_" + id + "\"/>\n" +
" </teiHeader>\n" +
" <text xml:lang=\"en\">\n");
return sb.toString();
}
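    /*
     * For id = "42" the fragment produced above is:
     *
     *   <tei>
     *       <teiHeader>
     *           <fileDesc xml:id="_42"/>
     *       </teiHeader>
     *       <text xml:lang="en">
     */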
private boolean testClosingTag(StringBuilder buffer,
String currentTag,
String lastTag,
boolean addSpace,
boolean addEOL) {
boolean res = false;
if (!currentTag.equals(lastTag)) {
res = true;
// we close the current tag
if (lastTag.equals("<other>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("<other>\n");
} else if (lastTag.equals("<figure_head>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</head>\n");
} else if (lastTag.equals("<content>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</table>\n");
} else if (lastTag.equals("<figDesc>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</figDesc>\n");
} else if (lastTag.equals("<label>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</label>\n");
} else if (lastTag.equals("<note>")) {
if (addEOL)
buffer.append("<lb/>");
if (addSpace)
buffer.append(" ");
buffer.append("</note>\n");
            } else {
res = false;
}
}
return res;
}
private String writeField(String currentTag,
String lastTag,
String token,
String field,
String outField,
boolean addSpace,
boolean addEOL,
int nbIndent) {
String result = null;
if (currentTag.endsWith(field)) {
/*if (currentTag.endsWith("<other>") || currentTag.endsWith("<content>")) {
result = "";
if (currentTag.startsWith("I-") || (lastTag == null)) {
result += "\n";
for (int i = 0; i < nbIndent; i++) {
result += " ";
}
}
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
result += TextUtilities.HTMLEncode(token);
}
else*/
if ((lastTag != null) && currentTag.endsWith(lastTag)) {
result = "";
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
if (currentTag.startsWith("I-"))
result += outField;
result += TextUtilities.HTMLEncode(token);
} else {
result = "";
if (addEOL)
result += "<lb/>";
if (addSpace)
result += " ";
result += "\n";
if (outField.length() > 0) {
for (int i = 0; i < nbIndent; i++) {
result += " ";
}
}
result += outField + TextUtilities.HTMLEncode(token);
}
}
return result;
}
}
| 15,933 | 40.06701 | 135 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/AuthorParser.java
|
package org.grobid.core.engines;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.Person;
import org.grobid.core.engines.tagging.GenericTagger;
import org.grobid.core.engines.tagging.TaggerFactory;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorName;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.layout.PDFAnnotation;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.tokenization.TaggingTokenCluster;
import org.grobid.core.tokenization.TaggingTokenClusteror;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.lang.Language;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.label.TaggingLabels;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class AuthorParser {
private static Logger LOGGER = LoggerFactory.getLogger(AuthorParser.class);
private final GenericTagger namesHeaderParser;
private final GenericTagger namesCitationParser;
private static final Pattern ET_AL_REGEX_PATTERN = Pattern.compile("et\\.? al\\.?.*$");
public AuthorParser() {
namesHeaderParser = TaggerFactory.getTagger(GrobidModels.NAMES_HEADER);
namesCitationParser = TaggerFactory.getTagger(GrobidModels.NAMES_CITATION);
}
/**
* Processing of authors in citations
*/
public List<Person> processingCitation(String input) throws Exception {
if (StringUtils.isEmpty(input)) {
return null;
}
input = ET_AL_REGEX_PATTERN.matcher(input.trim()).replaceAll(" ");
// set the language to English for the analyser to avoid any bad surprises
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input, new Language("en", 1.0));
return processing(tokens, null, false);
}
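    /*
     * Illustration (not part of the parser): a typical call on a raw author string
     * from a bibliographical reference. The input string is made up, and the
     * declared Exception must be handled by the caller.
     *
     *   AuthorParser parser = new AuthorParser();
     *   List<Person> persons = parser.processingCitation("J. A. Smith, B. Jones et al.");
     *   // "et al." is stripped by ET_AL_REGEX_PATTERN before labeling
     *   if (persons != null)
     *       for (Person person : persons)
     *           System.out.println(person.getLastName());
     */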
public List<Person> processingCitationLayoutTokens(List<LayoutToken> tokens) throws Exception {
if (CollectionUtils.isEmpty(tokens)) {
return null;
}
return processing(tokens, null, false);
}
/**
* Processing of authors in authors
*/
public List<Person> processingHeader(String input) throws Exception {
if (StringUtils.isEmpty(input)) {
return null;
}
input = ET_AL_REGEX_PATTERN.matcher(input.trim()).replaceAll(" ");
// set the language to English for the analyser to avoid any bad surprises
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input, new Language("en", 1.0));
return processing(tokens, null, true);
}
public List<Person> processingHeaderWithLayoutTokens(List<LayoutToken> inputs, List<PDFAnnotation> pdfAnnotations) {
return processing(inputs, pdfAnnotations, true);
}
/**
* Common processing of authors in header or citation
*
* @param tokens list of LayoutToken object to process
* @param head - if true use the model for header's name, otherwise the model for names in citation
     * @return List of identified Person entities as POJOs.
*/
public List<Person> processing(List<LayoutToken> tokens, List<PDFAnnotation> pdfAnnotations, boolean head) {
if (CollectionUtils.isEmpty(tokens)) {
return null;
}
List<Person> fullAuthors = null;
try {
List<OffsetPosition> titlePositions = Lexicon.getInstance().tokenPositionsPersonTitle(tokens);
List<OffsetPosition> suffixPositions = Lexicon.getInstance().tokenPositionsPersonSuffix(tokens);
String sequence = FeaturesVectorName.addFeaturesName(tokens, null,
titlePositions, suffixPositions);
if (StringUtils.isEmpty(sequence))
return null;
GenericTagger tagger = head ? namesHeaderParser : namesCitationParser;
String res = tagger.label(sequence);
//System.out.println(res);
TaggingTokenClusteror clusteror = new TaggingTokenClusteror(head ? GrobidModels.NAMES_HEADER : GrobidModels.NAMES_CITATION, res, tokens);
org.grobid.core.data.Person aut = new Person();
boolean newMarker = false;
String currentMarker = null;
List<TaggingTokenCluster> clusters = clusteror.cluster();
for (TaggingTokenCluster cluster : clusters) {
if (cluster == null) {
continue;
}
if(pdfAnnotations != null) {
for (LayoutToken authorsToken : cluster.concatTokens()) {
for (PDFAnnotation pdfAnnotation : pdfAnnotations) {
BoundingBox intersectBox = pdfAnnotation.getIntersectionBox(authorsToken);
if (intersectBox != null) {
BoundingBox authorsBox = BoundingBox.fromLayoutToken(authorsToken);
                                    // apply the heuristic only when the annotation covers the token partially
                                    if (!intersectBox.equals(authorsBox)) {
double pixPerChar = authorsToken.getWidth() / authorsToken.getText().length();
int charsCovered = (int) ((intersectBox.getWidth() / pixPerChar) + 0.5);
if (pdfAnnotation.getDestination() != null && pdfAnnotation.getDestination().length() > 0) {
Matcher orcidMatcher = TextUtilities.ORCIDPattern.matcher(pdfAnnotation.getDestination());
if (orcidMatcher.find()) {
                                                // !! here we assume the annotation sits at the tail (end) of the name
String newToken = authorsToken.getText().substring(0, authorsToken.getText().length() - charsCovered);
aut.setORCID(orcidMatcher.group(1) + "-"
+ orcidMatcher.group(2) + "-" + orcidMatcher.group(3)+ "-" + orcidMatcher.group(4));
authorsToken.setText(newToken);
}
}
}
}
}
}
}
TaggingLabel clusterLabel = cluster.getTaggingLabel();
Engine.getCntManager().i(clusterLabel);
//String clusterContent = LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(cluster.concatTokens()));
String clusterContent = StringUtils.normalizeSpace(LayoutTokensUtil.toText(cluster.concatTokens()));
if (clusterContent.trim().length() == 0)
continue;
if (clusterLabel.equals(TaggingLabels.NAMES_HEADER_MARKER)) {
// a marker introduces a new author, and the marker could be attached to the previous (usual)
// or following author (rare)
currentMarker = clusterContent;
newMarker = true;
boolean markerAssigned = false;
if (aut.notNull()) {
if (fullAuthors == null) {
fullAuthors = new ArrayList<Person>();
}
aut.addMarker(currentMarker);
markerAssigned = true;
if (!fullAuthors.contains(aut)) {
fullAuthors.add(aut);
aut = new Person();
}
}
if (!markerAssigned) {
aut.addMarker(currentMarker);
}
} else if (clusterLabel.equals(TaggingLabels.NAMES_HEADER_TITLE) ||
clusterLabel.equals(TaggingLabels.NAMES_CITATION_TITLE)) {
if (newMarker) {
aut.setTitle(clusterContent);
newMarker = false;
} else if (aut.getTitle() != null) {
if (aut.notNull()) {
if (fullAuthors == null)
fullAuthors = new ArrayList<Person>();
fullAuthors.add(aut);
}
aut = new Person();
aut.setTitle(clusterContent);
} else {
aut.setTitle(clusterContent);
}
aut.addLayoutTokens(cluster.concatTokens());
} else if (clusterLabel.equals(TaggingLabels.NAMES_HEADER_FORENAME) ||
clusterLabel.equals(TaggingLabels.NAMES_CITATION_FORENAME)) {
if (newMarker) {
aut.setFirstName(clusterContent);
newMarker = false;
} else if (aut.getFirstName() != null) {
// new author
if (aut.notNull()) {
if (fullAuthors == null)
fullAuthors = new ArrayList<Person>();
fullAuthors.add(aut);
}
aut = new Person();
aut.setFirstName(clusterContent);
} else {
aut.setFirstName(clusterContent);
}
aut.addLayoutTokens(cluster.concatTokens());
} else if (clusterLabel.equals(TaggingLabels.NAMES_HEADER_MIDDLENAME) ||
clusterLabel.equals(TaggingLabels.NAMES_CITATION_MIDDLENAME)) {
if (newMarker) {
aut.setMiddleName(clusterContent);
newMarker = false;
} else if (aut.getMiddleName() != null) {
aut.setMiddleName(aut.getMiddleName() + " " + clusterContent);
} else {
aut.setMiddleName(clusterContent);
}
aut.addLayoutTokens(cluster.concatTokens());
} else if (clusterLabel.equals(TaggingLabels.NAMES_HEADER_SURNAME) ||
clusterLabel.equals(TaggingLabels.NAMES_CITATION_SURNAME)) {
if (newMarker) {
aut.setLastName(clusterContent);
newMarker = false;
} else if (aut.getLastName() != null) {
// new author
if (aut.notNull()) {
if (fullAuthors == null)
fullAuthors = new ArrayList<Person>();
fullAuthors.add(aut);
}
aut = new Person();
aut.setLastName(clusterContent);
} else {
aut.setLastName(clusterContent);
}
aut.addLayoutTokens(cluster.concatTokens());
} else if (clusterLabel.equals(TaggingLabels.NAMES_HEADER_SUFFIX) ||
clusterLabel.equals(TaggingLabels.NAMES_CITATION_SUFFIX)) {
/*if (newMarker) {
aut.setSuffix(clusterContent);
newMarker = false;
} else*/
if (aut.getSuffix() != null) {
aut.setSuffix(aut.getSuffix() + " " + clusterContent);
} else {
aut.setSuffix(clusterContent);
}
aut.addLayoutTokens(cluster.concatTokens());
}
}
// add last built author
if (aut.notNull()) {
if (fullAuthors == null) {
fullAuthors = new ArrayList<Person>();
}
fullAuthors.add(aut);
}
// some more person name normalisation
if (fullAuthors != null) {
for(Person author : fullAuthors) {
author.normalizeName();
}
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return fullAuthors;
}
private boolean nameLabel(String label) {
return label.endsWith("<surname>") || label.endsWith("<forename>") || label.endsWith("<middlename>");
}
/**
* Extract results from a list of name strings in the training format without any string modification.
*
* @param input - the sequence of author names to be processed as a string.
* @param head - if true use the model for header's name, otherwise the model for names in citation
* @return the pseudo-TEI training data
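     * For example (illustrative), with head=true an input such as "John Smith" typically
     * produces indented training lines like <forename>John</forename> and
     * <surname>Smith</surname>.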
*/
public StringBuilder trainingExtraction(String input,
boolean head) {
if (StringUtils.isEmpty(input))
return null;
// force analyser with English, to avoid bad surprise
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input, new Language("en", 1.0));
StringBuilder buffer = new StringBuilder();
try {
if (CollectionUtils.isEmpty(tokens)) {
return null;
}
List<OffsetPosition> titlePositions = Lexicon.getInstance().tokenPositionsPersonTitle(tokens);
List<OffsetPosition> suffixPositions = Lexicon.getInstance().tokenPositionsPersonSuffix(tokens);
String sequence = FeaturesVectorName.addFeaturesName(tokens, null, titlePositions, suffixPositions);
if (StringUtils.isEmpty(sequence))
return null;
GenericTagger tagger = head ? namesHeaderParser : namesCitationParser;
String res = tagger.label(sequence);
// extract results from the processed file
StringTokenizer st2 = new StringTokenizer(res, "\n");
String lastTag = null;
boolean start = true;
boolean hasMarker = false;
boolean hasSurname = false;
boolean hasForename = false;
boolean tagClosed;
int q = 0;
boolean addSpace;
String lastTag0;
String currentTag0;
while (st2.hasMoreTokens()) {
String line = st2.nextToken();
addSpace = false;
if ((line.trim().length() == 0)) {
// new author
if (head)
buffer.append("/t<author>\n");
else {
//buffer.append("<author>");
}
continue;
} else {
String theTok = tokens.get(q).getText();
while (theTok.equals(" ") || theTok.equals("\n")) {
addSpace = true;
q++;
theTok = tokens.get(q).getText();
}
q++;
}
StringTokenizer st3 = new StringTokenizer(line, "\t");
int ll = st3.countTokens();
int i = 0;
String s1 = null;
String s2 = null;
boolean newLine = false;
List<String> localFeatures = new ArrayList<String>();
while (st3.hasMoreTokens()) {
String s = st3.nextToken().trim();
if (i == 0) {
s2 = TextUtilities.HTMLEncode(s); // string
} else if (i == ll - 2) {
} else if (i == ll - 1) {
s1 = s; // label
} else {
localFeatures.add(s);
if (s.equals("LINESTART") && !start) {
newLine = true;
start = false;
} else if (s.equals("LINESTART")) {
start = false;
}
}
i++;
}
lastTag0 = null;
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
}
currentTag0 = null;
if (s1 != null) {
if (s1.startsWith("I-")) {
currentTag0 = s1.substring(2, s1.length());
} else {
currentTag0 = s1;
}
}
tagClosed = lastTag0 != null && testClosingTag(buffer, currentTag0, lastTag0, head);
if (newLine) {
if (tagClosed) {
buffer.append("\t\t\t\t\t\t\t<lb/>\n");
} else {
buffer.append("<lb/>");
}
}
String output = writeField(s1, lastTag0, s2, "<marker>", "<marker>", addSpace, 8, head);
if (output != null) {
if (hasMarker) {
if (head) {
buffer.append("\t\t\t\t\t\t\t</persName>\n");
} else {
//buffer.append("</author>\n");
}
hasForename = false;
hasSurname = false;
if (head) {
buffer.append("\t\t\t\t\t\t\t<persName>\n");
} else {
//buffer.append("<author>\n");
}
                    }
                    // remember that a marker was seen, so the next marker closes the current persName
                    hasMarker = true;
buffer.append(output);
lastTag = s1;
continue;
} else {
output = writeField(s1, lastTag0, s2, "<other>", "<other>", addSpace, 8, head);
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<forename>", "<forename>", addSpace, 8, head);
} else {
if (buffer.length() > 0) {
if (buffer.charAt(buffer.length() - 1) == '\n') {
buffer.deleteCharAt(buffer.length() - 1);
}
}
buffer.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<middlename>", "<middlename>", addSpace, 8, head);
} else {
if (hasForename && !currentTag0.equals(lastTag0)) {
if (head) {
buffer.append("\t\t\t\t\t\t\t</persName>\n");
} else {
//buffer.append("</author>\n");
}
hasMarker = false;
hasSurname = false;
if (head) {
buffer.append("\t\t\t\t\t\t\t<persName>\n");
} else {
//buffer.append("<author>\n");
}
}
hasForename = true;
buffer.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<surname>", "<surname>", addSpace, 8, head);
} else {
buffer.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<title>", "<roleName>", addSpace, 8, head);
} else {
if (hasSurname && !currentTag0.equals(lastTag0)) {
if (head) {
buffer.append("\t\t\t\t\t\t\t</persName>\n");
} else {
//buffer.append("</author>\n");
}
hasMarker = false;
hasForename = false;
if (head) {
buffer.append("\t\t\t\t\t\t\t<persName>\n");
} else {
//buffer.append("<author>\n");
}
}
hasSurname = true;
buffer.append(output);
lastTag = s1;
continue;
}
if (output == null) {
output = writeField(s1, lastTag0, s2, "<suffix>", "<suffix>", addSpace, 8, head);
} else {
buffer.append(output);
lastTag = s1;
continue;
}
if (output != null) {
buffer.append(output);
lastTag = s1;
continue;
}
lastTag = s1;
}
if (lastTag != null) {
if (lastTag.startsWith("I-")) {
lastTag0 = lastTag.substring(2, lastTag.length());
} else {
lastTag0 = lastTag;
}
currentTag0 = "";
testClosingTag(buffer, currentTag0, lastTag0, head);
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
return buffer;
}
private String writeField(String s1,
String lastTag0,
String s2,
String field,
String outField,
boolean addSpace,
int nbIndent,
boolean head) {
String result = null;
if ((s1.equals(field)) || (s1.equals("I-" + field))) {
if ((s1.equals("<other>") || s1.equals("I-<other>"))) {
if (addSpace)
result = " " + s2;
else
result = s2;
} else if ((s1.equals(lastTag0) || s1.equals("I-" + lastTag0))) {
if (addSpace)
result = " " + s2;
else
result = s2;
} else {
result = "";
if (head) {
for (int i = 0; i < nbIndent; i++) {
result += "\t";
}
}
if (addSpace)
result += " " + outField + s2;
else
result += outField + s2;
}
}
return result;
}
private boolean testClosingTag(StringBuilder buffer,
String currentTag0,
String lastTag0,
boolean head) {
boolean res = false;
if (!currentTag0.equals(lastTag0)) {
res = true;
// we close the current tag
if (lastTag0.equals("<other>")) {
if (head)
buffer.append("\n");
} else if (lastTag0.equals("<forename>")) {
buffer.append("</forename>");
if (head)
buffer.append("\n");
} else if (lastTag0.equals("<middlename>")) {
buffer.append("</middlename>");
if (head)
buffer.append("\n");
} else if (lastTag0.equals("<surname>")) {
buffer.append("</surname>");
if (head)
buffer.append("\n");
} else if (lastTag0.equals("<title>")) {
buffer.append("</roleName>");
if (head)
buffer.append("\n");
} else if (lastTag0.equals("<suffix>")) {
buffer.append("</suffix>");
if (head)
buffer.append("\n");
} else if (lastTag0.equals("<marker>")) {
buffer.append("</marker>");
if (head)
buffer.append("\n");
} else {
res = false;
}
}
return res;
}
public void close() throws IOException {
}
}
| 25,193 | 41.485666 | 154 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/MonographParser.java
|
package org.grobid.core.engines;
import eugfc.imageio.plugins.PNMRegistry;
import org.apache.commons.io.FileUtils;
import org.grobid.core.GrobidModels;
import org.grobid.core.document.BasicStructureBuilder;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentNode;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidExceptionStatus;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.features.FeatureFactory;
import org.grobid.core.features.FeaturesVectorMonograph;
import org.grobid.core.layout.*;
import org.grobid.core.utilities.LanguageUtilities;
import org.grobid.core.utilities.TextUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.*;
import java.util.*;
import java.util.regex.Matcher;
import static org.apache.commons.lang3.StringUtils.*;
/**
 * Realise a high level segmentation of a monograph. Monograph is to be understood here in the context of library cataloging,
* basically as a standalone book. The monograph could be an ebook (novels), a conference proceedings volume, a book
* collection volume, a phd/msc thesis, a standalone report (with toc, etc.), a manual (with multiple chapters).
* Monographs, here, are NOT magazine volumes, journal issues, newspapers, standalone chapters, standalone scholar articles,
* tables of content, reference works, dictionaries, encyclopedia volumes, graphic novels.
*
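 * An illustrative call, assuming a PDF file on disk (file name is hypothetical):
 * <pre>
 *   MonographParser parser = new MonographParser();
 *   DocumentSource source = DocumentSource.fromPdf(new File("monograph.pdf"));
 *   Document doc = parser.processing(source, GrobidAnalysisConfig.defaultInstance());
 * </pre>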
*/
public class MonographParser extends AbstractParser {
/**
* 16 labels for this model:
* cover page (front of the book)
* title page (secondary title page)
* publisher page (publication information, including usually the copyrights info)
* summary (include executive summary)
* biography
* advertising (other works by the author/publisher)
* table of content
* table/list of figures
* preface (foreword)
* dedication (I dedicate this label to my family and my thesis director ;)
* unit (chapter or standalone article)
* reference (a full chapter of references, not to be confused with references attached to an article)
* annex
* index
* glossary (also abbreviations and acronyms)
* back cover page
* other
*/
private static final Logger LOGGER = LoggerFactory.getLogger(MonographParser.class);
// default bins for relative position
private static final int NBBINS_POSITION = 12;
// default bins for inter-block spacing
private static final int NBBINS_SPACE = 5;
// default bins for block character density
private static final int NBBINS_DENSITY = 5;
// projection scale for line length
private static final int LINESCALE = 10;
// projection scale for block length
private static final int BLOCKSCALE = 10;
private LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
private FeatureFactory featureFactory = FeatureFactory.getInstance();
private File tmpPath = null;
/**
* TODO some documentation...
*/
public MonographParser() {
super(GrobidModels.MONOGRAPH);
tmpPath = GrobidProperties.getTempPath();
}
/**
* Segment a PDF document into high level subdocuments.
*
* @param documentSource document source
* @return Document object with segmentation information
*/
public Document processing(DocumentSource documentSource, GrobidAnalysisConfig config) {
try {
Document doc = new Document(documentSource);
if (config.getAnalyzer() != null)
doc.setAnalyzer(config.getAnalyzer());
doc.addTokenizedDocument(config);
doc = prepareDocument(doc);
// if assets is true, the images are still there under directory pathXML+"_data"
// we copy them to the assetPath directory
            /*File assetFile = config.getPdfAssetPath();
if (assetFile != null) {
dealWithImages(documentSource, doc, assetFile, config);
}*/
return doc;
} finally {
// keep it clean when leaving...
if (config.getPdfAssetPath() == null) {
// remove the pdfalto tmp file
DocumentSource.close(documentSource, false, true, true);
} else {
// remove the pdfalto tmp files, including the sub-directories
DocumentSource.close(documentSource, true, true, true);
}
}
}
public Document prepareDocument(Document doc) {
List<LayoutToken> tokenizations = doc.getTokenizations();
if (tokenizations.size() > GrobidProperties.getPdfTokensMax()) {
throw new GrobidException("The document has " + tokenizations.size() + " tokens, but the limit is " + GrobidProperties.getPdfTokensMax(),
GrobidExceptionStatus.TOO_MANY_TOKENS);
}
doc.produceStatistics();
//String content = getAllLinesFeatured(doc);
String content = getAllBlocksFeatured(doc);
if (isNotEmpty(trim(content))) {
String labelledResult = label(content);
// set the different sections of the Document object
doc = BasicStructureBuilder.generalResultSegmentation(doc, labelledResult, tokenizations);
}
return doc;
}
/**
* Addition of the features at line level for the complete document.
* <p/>
* This is an alternative to the token level, where the unit for labeling is the line - so allowing faster
* processing and involving less features.
* Lexical features becomes line prefix and suffix, the feature text unit is the first 10 characters of the
* line without space.
* The dictionary flags are at line level (i.e. the line contains a name mention, a place mention, a year, etc.)
* Regarding layout features: font, size and style are the one associated to the first token of the line.
*/
public String getAllLinesFeatured(Document doc) {
List<Block> blocks = doc.getBlocks();
if ((blocks == null) || blocks.size() == 0) {
return null;
}
        //guaranteeing quality of service. Otherwise, some PDFs may contain 300k blocks and thousands of extracted "images", which ruins performance
if (blocks.size() > GrobidProperties.getPdfBlocksMax()) {
throw new GrobidException("Postprocessed document is too big, contains: " + blocks.size(), GrobidExceptionStatus.TOO_MANY_BLOCKS);
}
//boolean graphicVector = false;
//boolean graphicBitmap = false;
        // list of textual patterns at the head and foot of pages which can recur on several pages
        // (typically indicating publisher head or foot notes)
Map<String, Integer> patterns = new TreeMap<String, Integer>();
Map<String, Boolean> firstTimePattern = new TreeMap<String, Boolean>();
for (Page page : doc.getPages()) {
// we just look at the two first and last blocks of the page
if ((page.getBlocks() != null) && (page.getBlocks().size() > 0)) {
for(int blockIndex=0; blockIndex < page.getBlocks().size(); blockIndex++) {
if ( (blockIndex < 2) || (blockIndex > page.getBlocks().size()-2)) {
Block block = page.getBlocks().get(blockIndex);
String localText = block.getText();
if ((localText != null) && (localText.length() > 0)) {
String[] lines = localText.split("[\\n\\r]");
if (lines.length > 0) {
String line = lines[0];
String pattern = featureFactory.getPattern(line);
if (pattern.length() > 8) {
Integer nb = patterns.get(pattern);
if (nb == null) {
patterns.put(pattern, Integer.valueOf("1"));
firstTimePattern.put(pattern, false);
}
else
patterns.put(pattern, Integer.valueOf(nb+1));
}
}
}
}
}
}
}
String featuresAsString = getFeatureVectorsAsString(doc,
patterns, firstTimePattern);
return featuresAsString;
}
/**
* Addition of the features at block level for the complete document.
* <p/>
* This is an alternative to the token and line level, where the unit for labeling is the block - so allowing even
* faster processing and involving less features.
* Lexical features becomes block prefix and suffix, the feature text unit is the first 10 characters of the
* block without space.
* The dictionary flags are at block level (i.e. the block contains a name mention, a place mention, a year, etc.)
* Regarding layout features: font, size and style are the one associated to the first token of the block.
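     * The returned string contains one feature vector per block and is intended to be
     * fed to the sequence labelling model via label(), as done in prepareDocument() above.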
*/
public String getAllBlocksFeatured(Document doc) {
List<Block> blocks = doc.getBlocks();
if ((blocks == null) || blocks.size() == 0) {
return null;
}
        //guaranteeing quality of service. Otherwise, some PDFs may contain 300k blocks and thousands of extracted "images", which ruins performance
if (blocks.size() > GrobidProperties.getPdfBlocksMax()) {
throw new GrobidException("Postprocessed document is too big, contains: " + blocks.size(), GrobidExceptionStatus.TOO_MANY_BLOCKS);
}
//boolean graphicVector = false;
//boolean graphicBitmap = false;
        // list of textual patterns at the head and foot of pages which can recur on several pages
        // (typically indicating publisher head or foot notes)
Map<String, Integer> patterns = new TreeMap<String, Integer>();
Map<String, Boolean> firstTimePattern = new TreeMap<String, Boolean>();
for (Page page : doc.getPages()) {
// we just look at the two first and last blocks of the page
if ((page.getBlocks() != null) && (page.getBlocks().size() > 0)) {
for(int blockIndex=0; blockIndex < page.getBlocks().size(); blockIndex++) {
if ( (blockIndex < 2) || (blockIndex > page.getBlocks().size()-2)) {
Block block = page.getBlocks().get(blockIndex);
String localText = block.getText();
if ((localText != null) && (localText.length() > 0)) {
String pattern = featureFactory.getPattern(localText);
if (pattern.length() > 8) {
Integer nb = patterns.get(pattern);
if (nb == null) {
patterns.put(pattern, Integer.valueOf("1"));
firstTimePattern.put(pattern, false);
}
else
patterns.put(pattern, Integer.valueOf(nb+1));
}
}
}
}
}
}
String featuresAsString = getFeatureVectorsAsString(doc,
patterns, firstTimePattern);
return featuresAsString;
}
private String getFeatureVectorsAsString(Document doc, Map<String, Integer> patterns,
Map<String, Boolean> firstTimePattern) {
StringBuilder fulltext = new StringBuilder();
int documentLength = doc.getDocumentLenghtChar();
String currentFont = null;
int currentFontSize = -1;
boolean newPage;
boolean start = true;
int mm = 0; // page position
int nn = 0; // document position
int pageLength = 0; // length of the current page
double pageHeight = 0.0;
// vector for features
FeaturesVectorMonograph features;
FeaturesVectorMonograph previousFeatures = null;
for (Page page : doc.getPages()) {
pageHeight = page.getHeight();
newPage = true;
double spacingPreviousBlock = 0.0; // discretized
double lowestPos = 0.0;
pageLength = page.getPageLengthChar();
BoundingBox pageBoundingBox = page.getMainArea();
mm = 0;
//endPage = true;
if ((page.getBlocks() == null) || (page.getBlocks().size() == 0))
continue;
for(int blockIndex=0; blockIndex < page.getBlocks().size(); blockIndex++) {
Block block = page.getBlocks().get(blockIndex);
/*if (start) {
newPage = true;
start = false;
}*/
boolean graphicVector = false;
boolean graphicBitmap = false;
boolean lastPageBlock = false;
boolean firstPageBlock = false;
if (blockIndex == page.getBlocks().size()-1) {
lastPageBlock = true;
}
if (blockIndex == 0) {
firstPageBlock = true;
}
//endblock = false;
/*if (endPage) {
newPage = true;
mm = 0;
}*/
// check if we have a graphical object connected to the current block
List<GraphicObject> localImages = Document.getConnectedGraphics(block, doc);
if (localImages != null) {
for(GraphicObject localImage : localImages) {
if (localImage.getType() == GraphicObjectType.BITMAP)
graphicBitmap = true;
if (localImage.getType() == GraphicObjectType.VECTOR)
graphicVector = true;
}
}
if (lowestPos > block.getY()) {
// we have a vertical shift, which can be due to a change of column or other particular layout formatting
spacingPreviousBlock = doc.getMaxBlockSpacing() / 5.0; // default
} else
spacingPreviousBlock = block.getY() - lowestPos;
String localText = block.getText();
if (localText == null)
continue;
// character density of the block
double density = 0.0;
if ( (block.getHeight() != 0.0) && (block.getWidth() != 0.0) &&
(block.getText() != null) && (!block.getText().contains("@PAGE")) &&
(!block.getText().contains("@IMAGE")) )
density = (double)block.getText().length() / (block.getHeight() * block.getWidth());
// is the current block in the main area of the page or not?
boolean inPageMainArea = true;
BoundingBox blockBoundingBox = BoundingBox.fromPointAndDimensions(page.getNumber(),
block.getX(), block.getY(), block.getWidth(), block.getHeight());
if (pageBoundingBox == null || (!pageBoundingBox.contains(blockBoundingBox) && !pageBoundingBox.intersect(blockBoundingBox)))
inPageMainArea = false;
String[] lines = localText.split("[\\n\\r]");
// set the max length of the lines in the block, in number of characters
int maxLineLength = 0;
for(int p=0; p<lines.length; p++) {
if (lines[p].length() > maxLineLength)
maxLineLength = lines[p].length();
}
List<LayoutToken> tokens = block.getTokens();
if ((tokens == null) || (tokens.size() == 0)) {
continue;
}
for (int li = 0; li < lines.length; li++) {
String line = lines[li];
/*boolean firstPageBlock = false;
boolean lastPageBlock = false;
if (newPage)
firstPageBlock = true;
if (endPage)
lastPageBlock = true;
*/
// for the layout information of the block, we take simply the first layout token
LayoutToken token = null;
if (tokens.size() > 0)
token = tokens.get(0);
double coordinateLineY = token.getY();
features = new FeaturesVectorMonograph();
features.token = token;
features.line = line;
if ( (blockIndex < 2) || (blockIndex > page.getBlocks().size()-2)) {
String pattern = featureFactory.getPattern(line);
Integer nb = patterns.get(pattern);
if ((nb != null) && (nb > 1)) {
features.repetitivePattern = true;
Boolean firstTimeDone = firstTimePattern.get(pattern);
if ((firstTimeDone != null) && !firstTimeDone) {
features.firstRepetitivePattern = true;
firstTimePattern.put(pattern, true);
}
}
}
                    // we consider the first token of the line as the usual lexical CRF token
                    // and the second token of the line as a feature
StringTokenizer st2 = new StringTokenizer(line, " \t");
// alternatively, use a grobid analyser
String text = null;
String text2 = null;
if (st2.hasMoreTokens())
text = st2.nextToken();
if (st2.hasMoreTokens())
text2 = st2.nextToken();
if (text == null)
continue;
// final sanitisation and filtering
text = text.replaceAll("[ \n]", "");
text = text.trim();
if ( (text.length() == 0) ||
// (text.equals("\n")) ||
// (text.equals("\r")) ||
// (text.equals("\n\r")) ||
(TextUtilities.filterLine(line))) {
continue;
}
features.string = text;
features.secondString = text2;
features.firstPageBlock = firstPageBlock;
features.lastPageBlock = lastPageBlock;
//features.lineLength = line.length() / LINESCALE;
features.lineLength = featureFactory
.linearScaling(line.length(), maxLineLength, LINESCALE);
features.punctuationProfile = TextUtilities.punctuationProfile(line);
if (graphicBitmap) {
features.bitmapAround = true;
}
if (graphicVector) {
features.vectorAround = true;
}
features.lineStatus = null;
features.punctType = null;
if ((li == 0) ||
((previousFeatures != null) && previousFeatures.blockStatus.equals("BLOCKEND"))) {
features.blockStatus = "BLOCKSTART";
} else if (li == lines.length - 1) {
features.blockStatus = "BLOCKEND";
//endblock = true;
} else if (features.blockStatus == null) {
features.blockStatus = "BLOCKIN";
}
if (newPage) {
features.pageStatus = "PAGESTART";
newPage = false;
//endPage = false;
if (previousFeatures != null)
previousFeatures.pageStatus = "PAGEEND";
} else {
features.pageStatus = "PAGEIN";
newPage = false;
//endPage = false;
}
if (text.length() == 1) {
features.singleChar = true;
}
if (Character.isUpperCase(text.charAt(0))) {
features.capitalisation = "INITCAP";
}
if (featureFactory.test_all_capital(text)) {
features.capitalisation = "ALLCAP";
}
if (featureFactory.test_digit(text)) {
features.digit = "CONTAINSDIGITS";
}
if (featureFactory.test_common(text)) {
features.commonName = true;
}
if (featureFactory.test_names(text)) {
features.properName = true;
}
if (featureFactory.test_month(text)) {
features.month = true;
}
Matcher m = featureFactory.isDigit.matcher(text);
if (m.find()) {
features.digit = "ALLDIGIT";
}
Matcher m2 = featureFactory.year.matcher(text);
if (m2.find()) {
features.year = true;
}
Matcher m3 = featureFactory.email.matcher(text);
if (m3.find()) {
features.email = true;
}
Matcher m4 = featureFactory.http.matcher(text);
if (m4.find()) {
features.http = true;
}
if (currentFont == null) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else if (!currentFont.equals(token.getFont())) {
currentFont = token.getFont();
features.fontStatus = "NEWFONT";
} else
features.fontStatus = "SAMEFONT";
int newFontSize = (int) token.getFontSize();
if (currentFontSize == -1) {
currentFontSize = newFontSize;
features.fontSize = "HIGHERFONT";
} else if (currentFontSize == newFontSize) {
features.fontSize = "SAMEFONTSIZE";
} else if (currentFontSize < newFontSize) {
features.fontSize = "HIGHERFONT";
currentFontSize = newFontSize;
} else if (currentFontSize > newFontSize) {
features.fontSize = "LOWERFONT";
currentFontSize = newFontSize;
}
if (token.getBold())
features.bold = true;
if (token.getItalic())
features.italic = true;
                    // TODO: horizontal alignment information (e.g. CENTERED, LEFTADJUSTED)
if (features.capitalisation == null)
features.capitalisation = "NOCAPS";
if (features.digit == null)
features.digit = "NODIGIT";
//if (features.punctType == null)
// features.punctType = "NOPUNCT";
features.relativeDocumentPosition = featureFactory
.linearScaling(nn, documentLength, NBBINS_POSITION);
//System.out.println(nn + " " + documentLength + " " + NBBINS_POSITION + " " + features.relativeDocumentPosition);
features.relativePagePositionChar = featureFactory
.linearScaling(mm, pageLength, NBBINS_POSITION);
//System.out.println(mm + " " + pageLength + " " + NBBINS_POSITION + " " + features.relativePagePositionChar);
int pagePos = featureFactory
.linearScaling(coordinateLineY, pageHeight, NBBINS_POSITION);
//System.out.println(coordinateLineY + " " + pageHeight + " " + NBBINS_POSITION + " " + pagePos);
if (pagePos > NBBINS_POSITION)
pagePos = NBBINS_POSITION;
features.relativePagePosition = pagePos;
//System.out.println(coordinateLineY + "\t" + pageHeight);
if (spacingPreviousBlock != 0.0) {
features.spacingWithPreviousBlock = featureFactory
.linearScaling(spacingPreviousBlock-doc.getMinBlockSpacing(), doc.getMaxBlockSpacing()-doc.getMinBlockSpacing(), NBBINS_SPACE);
}
features.inMainArea = inPageMainArea;
if (density != -1.0) {
features.characterDensity = featureFactory
.linearScaling(density-doc.getMinCharacterDensity(), doc.getMaxCharacterDensity()-doc.getMinCharacterDensity(), NBBINS_DENSITY);
//System.out.println((density-doc.getMinCharacterDensity()) + " " + (doc.getMaxCharacterDensity()-doc.getMinCharacterDensity()) + " " + NBBINS_DENSITY + " " + features.characterDensity);
}
if (previousFeatures != null) {
String vector = previousFeatures.printVector();
fulltext.append(vector);
}
previousFeatures = features;
}
//System.out.println((spacingPreviousBlock-doc.getMinBlockSpacing()) + " " + (doc.getMaxBlockSpacing()-doc.getMinBlockSpacing()) + " " + NBBINS_SPACE + " "
// + featureFactory.linearScaling(spacingPreviousBlock-doc.getMinBlockSpacing(), doc.getMaxBlockSpacing()-doc.getMinBlockSpacing(), NBBINS_SPACE));
// lowest position of the block
lowestPos = block.getY() + block.getHeight();
// update page-level and document-level positions
if (tokens != null) {
mm += tokens.size();
nn += tokens.size();
}
}
}
if (previousFeatures != null)
fulltext.append(previousFeatures.printVector());
return fulltext.toString();
}
/**
* Process the specified pdf and format the result as training data for the monograph model.
*
* @param inputFile input PDF file
     * @param pathRaw   path to the raw monograph featured sequence
     * @param pathTEI   path to the TEI output
* @param id id
*/
public Document createTrainingFromPDF(File inputFile,
String pathRaw,
String pathTEI,
int id) {
if (tmpPath == null)
throw new GrobidResourceException("Cannot process pdf file, because temp path is null.");
if (!tmpPath.exists()) {
throw new GrobidResourceException("Cannot process pdf file, because temp path '" +
                    tmpPath.getAbsolutePath() + "' does not exist.");
}
DocumentSource documentSource = null;
Document doc = null;
try {
if (!inputFile.exists()) {
throw new GrobidResourceException("Cannot train for monograph, because file '" +
                    inputFile.getAbsolutePath() + "' does not exist.");
}
String pdfFileName = inputFile.getName();
            File outputTEIFile = new File(pathTEI+"/"+pdfFileName.replace(".pdf", ".training.monograph.tei.xml"));
/* // commented out because it was making a test of the existence of a file before it was even created
if (!outputTEIFile.exists()) {
throw new GrobidResourceException("Cannot train for monograph, because directory '" +
pathTEI + "' is not valid.");
}*/
File outputRawFile = new File(pathRaw+"/"+pdfFileName.replace(".pdf", ".monograph.raw"));
/*if (!outputRawFile.exists()) {
throw new GrobidResourceException("Cannot train for monograph, because directory '" +
pathRaw + "' is not valid.");
}*/
documentSource = DocumentSource.fromPdf(inputFile, -1, -1, true, true, true);
doc = new Document(documentSource);
doc.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
if (doc.getBlocks() == null) {
throw new Exception("PDF parsing resulted in empty content");
}
// TBD: language identifier here on content text sample
String lang = "en";
doc.produceStatistics();
StringBuilder builder = new StringBuilder();
builder.append("<?xml version=\"1.0\" ?>\n<tei xml:space=\"preserve\">\n\t<teiHeader>\n\t\t<fileDesc xml:id=\"" + id +
"\"/>\n\t</teiHeader>\n\t<text xml:lang=\""+ lang + "\">\n");
// get the document outline
DocumentNode outlineRoot = doc.getOutlineRoot();
// output an XML document based on the provided outline and the tokenization
List<LayoutToken> tokens = doc.getTokenizations();
DocumentNode currentNode = outlineRoot;
// get the first node
while(currentNode.getChildren() != null) {
List<DocumentNode> children = currentNode.getChildren();
if (children.size() == 0)
break;
currentNode = children.get(0);
}
for(LayoutToken token : tokens) {
builder.append(token.getText());
}
builder.append("\t</text>\n</tei>");
// write the TEI file
Writer writer = new OutputStreamWriter(new FileOutputStream(outputTEIFile, false), "UTF-8");
writer.write(builder.toString());
writer.close();
} catch (Exception e) {
e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid training" +
" data generation for monograph.", e);
} finally {
DocumentSource.close(documentSource, true, true, true);
}
return doc;
}
}
| 31,627 | 43.050139 | 199 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/config/GrobidAnalysisConfig.java
|
package org.grobid.core.engines.config;
import java.io.File;
import java.util.List;
import org.grobid.core.analyzers.Analyzer;
/**
* A class representing the runtime configuration values needed in the analysis chain
* TODO: clean up the docs
* consolidateHeader - the consolidation option allows GROBID to exploit Crossref
* web services for improving header information
* consolidateCitations - the consolidation option allows GROBID to exploit Crossref
* web services for improving citations information
* includeRawCitations - the raw bibliographical string is added to parsed results
* assetPath if not null, the PDF assets (embedded images) will be extracted and
* saved under the indicated repository path
* startPage give the starting page to consider in case of segmentation of the
* PDF, -1 for the first page (default)
* endPage give the end page to consider in case of segmentation of the
* PDF, -1 for the last page (default)
* generateIDs if true, generate random attribute id on the textual elements of
* the resulting TEI
* generateTeiCoordinates give the list of TEI elements for which the coordinates
* of the corresponding element in the original PDF should be included in the
* resulting TEI
* analyzer in case a particular Grobid Analyzer to be used for
* tokenizing/filtering text
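 * <p>
 * A typical construction via the builder (illustrative values):
 * <pre>
 *   GrobidAnalysisConfig config = GrobidAnalysisConfig.builder()
 *       .consolidateHeader(1)
 *       .includeRawCitations(true)
 *       .startPage(-1)
 *       .endPage(-1)
 *       .build();
 * </pre>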
*/
public class GrobidAnalysisConfig {
private GrobidAnalysisConfig() {
}
// give the starting page to consider in case of segmentation of the
// PDF, -1 for the first page (default)
private int startPage = -1;
// give the end page to consider in case of segmentation of the
// PDF, -1 for the last page (default)
private int endPage = -1;
    // citation consolidation option (0, 1 or 2)
    private int consolidateCitations = 0;
    // header consolidation option (0, 1 or 2)
    private int consolidateHeader = 0;
// if the raw affiliation string should be included in the parsed results
private boolean includeRawAffiliations = false;
// if the raw bibliographical string should be included in the parsed results
private boolean includeRawCitations = false;
/// === TEI-specific settings ==
// if true, generate random attribute id on the textual elements of
// the resulting TEI
private boolean generateTeiIds = false;
// generates the coordinates in the PDF corresponding
// to the TEI full text substructures (e.g. reference markers)
// for the given list of TEI elements
private List<String> generateTeiCoordinates = null;
// if true, include image references into TEI
private boolean generateImageReferences = false;
private boolean withXslStylesheet = false;
// if not null, the PDF assets (embedded images) will be extracted
// and saved under the indicated repository path
private File pdfAssetPath = null;
// transform images to PNGs
private boolean preprocessImages = true;
private boolean processVectorGraphics = false;
// a particular Grobid Analyzer to be used for tokenizing/filtering text
private Analyzer analyzer = null;
// if true, the TEI text will be segmented into sentences
private boolean withSentenceSegmentation = false;
// BUILDER
public static class GrobidAnalysisConfigBuilder {
GrobidAnalysisConfig config = new GrobidAnalysisConfig();
public GrobidAnalysisConfigBuilder() {
}
public GrobidAnalysisConfigBuilder(GrobidAnalysisConfig config) {
// TODO add more properties
this.config.includeRawAffiliations = config.getIncludeRawAffiliations();
this.config.includeRawCitations = config.getIncludeRawCitations();
}
public GrobidAnalysisConfigBuilder consolidateHeader(int consolidate) {
config.consolidateHeader = consolidate;
return this;
}
/**
* @param consolidate the consolidation option allows GROBID to exploit Crossref web services for improving header
* information. 0 (no consolidation, default value), 1 (consolidate the citation and inject extra
* metadata) or 2 (consolidate the citation and inject DOI only)
*/
public GrobidAnalysisConfigBuilder consolidateCitations(int consolidate) {
config.consolidateCitations = consolidate;
return this;
}
public GrobidAnalysisConfigBuilder includeRawAffiliations(boolean rawAffiliations) {
config.includeRawAffiliations = rawAffiliations;
return this;
}
public GrobidAnalysisConfigBuilder includeRawCitations(boolean rawCitations) {
config.includeRawCitations = rawCitations;
return this;
}
public GrobidAnalysisConfigBuilder startPage(int p) {
config.startPage = p;
return this;
}
public GrobidAnalysisConfigBuilder endPage(int p) {
config.endPage = p;
return this;
}
public GrobidAnalysisConfigBuilder generateTeiIds(boolean b) {
config.generateTeiIds = b;
return this;
}
public GrobidAnalysisConfigBuilder pdfAssetPath(File path) {
config.pdfAssetPath = path;
return this;
}
public GrobidAnalysisConfigBuilder generateTeiCoordinates(List<String> elements) {
config.generateTeiCoordinates = elements;
return this;
}
public GrobidAnalysisConfigBuilder withXslStylesheet(boolean b) {
config.withXslStylesheet = b;
return this;
}
public GrobidAnalysisConfigBuilder withPreprocessImages(boolean b) {
config.preprocessImages = b;
return this;
}
public GrobidAnalysisConfigBuilder withProcessVectorGraphics(boolean b) {
config.processVectorGraphics = b;
return this;
}
public GrobidAnalysisConfigBuilder withSentenceSegmentation(boolean b) {
config.withSentenceSegmentation = b;
return this;
}
public GrobidAnalysisConfigBuilder analyzer(Analyzer a) {
config.analyzer = a;
return this;
}
public GrobidAnalysisConfig build() {
postProcessAndValidate();
return config;
}
private void postProcessAndValidate() {
if (config.getPdfAssetPath() != null) {
config.generateImageReferences = true;
}
if (config.generateImageReferences && config.getPdfAssetPath() == null) {
throw new InvalidGrobidAnalysisConfig("Generating image references is switched on, but no pdf asset path is provided");
}
}
}
public static GrobidAnalysisConfigBuilder builder() {
return new GrobidAnalysisConfigBuilder();
}
public static GrobidAnalysisConfigBuilder builder(GrobidAnalysisConfig config) {
return new GrobidAnalysisConfigBuilder(config);
}
public static GrobidAnalysisConfig defaultInstance() {
return new GrobidAnalysisConfig();
}
public int getStartPage() {
return startPage;
}
public int getEndPage() {
return endPage;
}
public int getConsolidateCitations() {
return consolidateCitations;
}
public int getConsolidateHeader() {
return consolidateHeader;
}
public boolean getIncludeRawAffiliations() {
return includeRawAffiliations;
}
public boolean getIncludeRawCitations() {
return includeRawCitations;
}
public boolean isGenerateTeiIds() {
return generateTeiIds;
}
public List<String> getGenerateTeiCoordinates() {
return generateTeiCoordinates;
}
public boolean isGenerateTeiCoordinates() {
return getGenerateTeiCoordinates() != null && getGenerateTeiCoordinates().size()>0;
}
public boolean isGenerateTeiCoordinates(String type) {
return getGenerateTeiCoordinates() != null && getGenerateTeiCoordinates().contains(type);
}
public File getPdfAssetPath() {
return pdfAssetPath;
}
public boolean isWithXslStylesheet() {
return withXslStylesheet;
}
public boolean isGenerateImageReferences() {
return generateImageReferences;
}
public boolean isPreprocessImages() {
return preprocessImages;
}
public boolean isProcessVectorGraphics() {
return processVectorGraphics;
}
public Analyzer getAnalyzer() {
return analyzer;
}
public boolean isWithSentenceSegmentation() {
return withSentenceSegmentation;
}
}
| 8,791 | 31.442804 | 135 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/config/InvalidGrobidAnalysisConfig.java
|
package org.grobid.core.engines.config;
/**
* Exception for invalid configs
*/
public class InvalidGrobidAnalysisConfig extends RuntimeException {
public InvalidGrobidAnalysisConfig(String message) {
super(message);
}
}
| 239 | 20.818182 | 67 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/WapitiTagger.java
|
package org.grobid.core.engines.tagging;
import com.google.common.base.Joiner;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.jni.WapitiModel;
import java.io.IOException;
public class WapitiTagger implements GenericTagger {
private final WapitiModel wapitiModel;
public WapitiTagger(GrobidModel model) {
wapitiModel = new WapitiModel(model);
}
@Override
public String label(Iterable<String> data) {
return label(Joiner.on('\n').join(data));
}
@Override
public String label(String data) {
return wapitiModel.label(data);
}
@Override
public void close() throws IOException {
wapitiModel.close();
}
}
| 735 | 21.30303 | 52 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/GenericTagger.java
|
package org.grobid.core.engines.tagging;
import java.io.Closeable;
public interface GenericTagger extends Closeable {
String label(Iterable<String> data);
String label(String data);
}
| 194 | 20.666667 | 50 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/CRFPPTagger.java
|
package org.grobid.core.engines.tagging;
import com.google.common.base.Splitter;
import org.chasen.crfpp.Model;
import org.chasen.crfpp.Tagger;
import org.grobid.core.GrobidModel;
import org.grobid.core.engines.ModelMap;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidExceptionStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
public class CRFPPTagger implements GenericTagger {
public static final Logger LOGGER = LoggerFactory.getLogger(CRFPPTagger.class);
private final Model model;
public CRFPPTagger(GrobidModel model) {
this.model = ModelMap.getModel(model);
}
@Override
public String label(Iterable<String> data) {
return getTaggerResult(data, null);
}
protected String getTaggerResult(Iterable<String> st, String type) {
Tagger tagger = null;
StringBuilder res;
try {
tagger = feedTaggerAndParse(st);
res = new StringBuilder();
for (int i = 0; i < tagger.size(); i++) {
for (int j = 0; j < tagger.xsize(); j++) {
res.append(tagger.x(i, j)).append("\t");
}
if (type != null) {
res.append(type).append("\t");
}
res.append(tagger.y2(i));
res.append("\n");
}
} finally {
if (tagger != null) {
tagger.delete();
}
}
return res.toString();
}
@Override
public String label(String data) {
return label(Splitter.on("\n").split(data));
}
@Override
public void close() throws IOException {
}
private Tagger feedTaggerAndParse(Iterable<String> st) {
Tagger tagger = getNewTagger();
feedTaggerAndParse(tagger, st);
return tagger;
}
// protected void feedTagger(StringTokenizer st) {
// Tagger tagger = getNewTagger();
// feedTagger(tagger, st);
// tagger.delete();
// }
public Tagger getNewTagger() {
return model.createTagger();
}
public static void feedTaggerAndParse(Tagger tagger, Iterable<String> st) {
tagger.clear();
feedTagger(tagger, st);
if (!tagger.parse()) {
throw new GrobidException("CRF++ tagging failed!", GrobidExceptionStatus.TAGGING_ERROR);
}
if (!tagger.what().isEmpty()) {
LOGGER.warn("CRF++ Tagger Warnings: " + tagger.what());
}
}
private static void feedTagger(Tagger tagger, Iterable<String> st) {
for (String piece : st) {
if (piece.trim().isEmpty()) {
continue;
}
if (!tagger.add(piece)) {
LOGGER.warn("CRF++ Tagger Warnings: " + tagger.what());
throw new GrobidException("Cannot add a feature row: " + piece
+ "\n Reason: " + tagger.what());
}
}
}
}
| 3,024 | 26.5 | 100 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/DeLFTTagger.java
|
package org.grobid.core.engines.tagging;
import com.google.common.base.Joiner;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.jni.DeLFTModel;
import java.io.IOException;
public class DeLFTTagger implements GenericTagger {
private final DeLFTModel delftModel;
public DeLFTTagger(GrobidModel model) {
delftModel = new DeLFTModel(model, null);
}
public DeLFTTagger(GrobidModel model, String architecture) {
delftModel = new DeLFTModel(model, architecture);
}
@Override
public String label(Iterable<String> data) {
return label(Joiner.on('\n').join(data));
}
@Override
public String label(String data) {
return delftModel.label(data);
}
@Override
public void close() throws IOException {
delftModel.close();
}
}
| 862 | 22.324324 | 64 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/TaggerFactory.java
|
package org.grobid.core.engines.tagging;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.main.LibraryLoader;
import java.util.HashMap;
import java.util.Map;
import java.io.File;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Factory for sequence labelling (tagger) instances.
* Supported implementations are CRF (CRFPP, Wapiti) and Deep Learning (DeLFT)
*
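 * Illustrative use (instances are cached per model; 'featureLines' stands for
 * previously generated feature vectors):
 * <pre>
 *   GenericTagger tagger = TaggerFactory.getTagger(GrobidModels.NAMES_HEADER);
 *   String labelled = tagger.label(featureLines);
 * </pre>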
*/
public class TaggerFactory {
public static final Logger LOGGER = LoggerFactory.getLogger(TaggerFactory.class);
private static Map<GrobidModel, GenericTagger> cache = new HashMap<>();
private TaggerFactory() {}
public static synchronized GenericTagger getTagger(GrobidModel model) {
return getTagger(model, GrobidProperties.getGrobidCRFEngine(model), GrobidProperties.getDelftArchitecture(model));
}
public static synchronized GenericTagger getTagger(GrobidModel model, GrobidCRFEngine engine) {
return getTagger(model, engine, GrobidProperties.getDelftArchitecture(model));
}
public static synchronized GenericTagger getTagger(GrobidModel model, GrobidCRFEngine engine, String architecture) {
GenericTagger t = cache.get(model);
if (t == null) {
if(model.equals(GrobidModels.DUMMY)) {
return new DummyTagger(model);
}
if(engine != null) {
switch (engine) {
case CRFPP:
t = new CRFPPTagger(model);
break;
case WAPITI:
t = new WapitiTagger(model);
break;
case DELFT:
t = new DeLFTTagger(model, architecture);
break;
default:
throw new IllegalStateException("Unsupported Grobid sequence labelling engine: " + engine.getExt());
}
cache.put(model, t);
} else {
throw new IllegalStateException("Unsupported or null Grobid sequence labelling engine: " + engine.getExt());
}
}
return t;
}
}
| 2,252 | 34.203125 | 124 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/GenericTaggerUtils.java
|
package org.grobid.core.engines.tagging;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.utilities.Triple;
import org.wipo.analyzers.wipokr.utils.StringUtil;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import java.util.regex.Pattern;
public class GenericTaggerUtils {
// Deprecated, please use the constants from TaggingLabels
@Deprecated
public static final String START_ENTITY_LABEL_PREFIX = "I-";
@Deprecated
public static final String START_ENTITY_LABEL_PREFIX_ALTERNATIVE = "B-";
@Deprecated
public static final String START_ENTITY_LABEL_PREFIX_ALTERNATIVE_2 = "E-";
public static final Pattern SEPARATOR_PATTERN = Pattern.compile("[\t ]");
/**
* @param labeledResult labeled result from a tagger
* @return a list of pairs - first element in a pair is a token itself, the second is a label (e.g. <footnote> or I-<footnote>)
* Note an empty line in the result will be transformed to a 'null' pointer of a pair
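     * For instance (illustrative), a result line "Smith ... I-<surname>" is turned into
     * Pair.of("Smith", "I-<surname>"): only the first and last columns are kept.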
*/
public static List<Pair<String, String>> getTokensAndLabels(String labeledResult) {
return processLabeledResult(labeledResult, splits -> Pair.of(splits.get(0), splits.get(splits.size() - 1)));
}
/**
* @param labeledResult labeled result from a tagger
* @return a list of triples - first element in a pair is a token itself, the second is a label (e.g. <footnote> or I-<footnote>)
* and the third element is a string with the features
* Note an empty line in the result will be transformed to a 'null' pointer of a pair
*/
public static List<Triple<String, String, String>> getTokensWithLabelsAndFeatures(String labeledResult,
final boolean addFeatureString) {
Function<List<String>, Triple<String, String, String>> fromSplits = splits -> {
String featureString = addFeatureString ? Joiner.on("\t").join(splits.subList(0, splits.size() - 1)) : null;
return new Triple<>(
splits.get(0),
splits.get(splits.size() - 1),
featureString);
};
return processLabeledResult(labeledResult, fromSplits);
}
private static <T> List<T> processLabeledResult(String labeledResult, Function<List<String>, T> fromSplits) {
String[] lines = labeledResult.split("\n");
List<T> res = new ArrayList<>(lines.length);
for (String line : lines) {
line = line.trim();
if (line.isEmpty()) {
res.add(null);
continue;
}
List<String> splits = Splitter.on(SEPARATOR_PATTERN).splitToList(line);
res.add(fromSplits.apply(splits));
}
return res;
}
public static String getPlainIOBLabel(String label) {
return isBeginningOfIOBEntity(label) ? StringUtil.substring(label, 2) : label;
}
public static boolean isBeginningOfIOBEntity(String label) {
return StringUtil.startsWith(label, TaggingLabels.IOB_START_ENTITY_LABEL_PREFIX)
|| StringUtil.startsWith(label, TaggingLabels.ENAMEX_START_ENTITY_LABEL_PREFIX);
}
// I-<citation> --> <citation>
// <citation> --> <citation>
public static String getPlainLabel(String label) {
return isBeginningOfEntity(label) ? StringUtil.substring(label, 2) : label;
}
public static boolean isBeginningOfEntity(String label) {
return StringUtils.startsWith(label, TaggingLabels.GROBID_START_ENTITY_LABEL_PREFIX)
|| StringUtil.startsWith(label, TaggingLabels.ENAMEX_START_ENTITY_LABEL_PREFIX);
}
}
| 3,882 | 42.144444 | 133 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/DummyTagger.java
|
package org.grobid.core.engines.tagging;
import com.google.common.base.Joiner;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
 * This tagger just returns the single label <dummy>
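 * e.g. (illustrative) label(Arrays.asList("tok1", "tok2")) returns "tok1\t<dummy>\ntok2\t<dummy>"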
*/
public class DummyTagger implements GenericTagger {
public static final String DUMMY_LABEL = "<dummy>";
public DummyTagger(GrobidModel model) {
if(!model.equals(GrobidModels.DUMMY)) {
throw new GrobidException("Cannot use a non-dummy model with the dummy tagger. All dummies or no dummies. ");
}
}
@Override
public String label(Iterable<String> data) {
final List<String> output = new ArrayList<>();
data.forEach(d -> output.add(d + "\t" + DUMMY_LABEL));
return Joiner.on('\n').join(output);
}
@Override
public String label(String data) {
return "<dummy>";
}
@Override
public void close() throws IOException {
}
}
| 1,066 | 24.404762 | 121 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/tagging/GrobidCRFEngine.java
|
package org.grobid.core.engines.tagging;
import java.util.Arrays;
/**
* Sequence labeling engine in GROBID
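 * For example (illustrative), {@code GrobidCRFEngine.get("wapiti")} returns {@code WAPITI};
 * matching is case-insensitive on the enum name.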
*/
public enum GrobidCRFEngine {
WAPITI("wapiti"),
CRFPP("crf"),
DELFT("delft"),
DUMMY("dummy");
private final String ext;
GrobidCRFEngine(String ext) {
this.ext = ext;
}
public String getExt() {
return ext;
}
public static GrobidCRFEngine get(String name) {
if (name == null) {
throw new IllegalArgumentException("Name of a Grobid sequence labeling engine must not be null");
}
String n = name.toLowerCase();
for (GrobidCRFEngine e : values()) {
if (e.name().toLowerCase().equals(n)) {
return e;
}
}
throw new IllegalArgumentException("No Grobid sequence labeling engine with name '" + name +
"', possible values are: " + Arrays.toString(values()));
}
}
| 950 | 22.195122 | 109 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/patent/ReferenceExtractor.java
|
package org.grobid.core.engines.patent;
import org.chasen.crfpp.Tagger;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.PatentItem;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.document.OPSService;
import org.grobid.core.document.PatentDocument;
import org.grobid.core.engines.CitationParser;
import org.grobid.core.engines.EngineParsers;
import org.grobid.core.engines.tagging.GenericTagger;
import org.grobid.core.engines.tagging.TaggerFactory;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.features.FeaturesVectorReference;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.sax.PatentAnnotationSaxParser;
import org.grobid.core.sax.TextSaxParser;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.utilities.Consolidation;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.KeyGen;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.LanguageUtilities;
import org.grobid.core.utilities.BoundingBoxCalculator;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.EntityResolver;
import org.xml.sax.InputSource;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.XMLReaderFactory;
import javax.xml.parsers.SAXParserFactory;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.zip.GZIPInputStream;
/**
 * Extraction of patent and NPL references from the content body of a patent document with sequence labeling.
*
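 * Illustrative use on a patent PDF (file name is hypothetical):
 * <pre>
 *   ReferenceExtractor extractor = new ReferenceExtractor();
 *   List<PatentItem> patents = new ArrayList<>();
 *   List<BibDataSet> articles = new ArrayList<>();
 *   extractor.extractAllReferencesPDFFile("patent.pdf", true, 0, false, patents, articles);
 * </pre>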
*/
public class ReferenceExtractor implements Closeable {
private static final Logger LOGGER = LoggerFactory.getLogger(ReferenceExtractor.class);
private GenericTagger taggerAll = null;
private PatentRefParser patentParser = null;
private Consolidation consolidator = null;
private String tmpPath = null;
public boolean debug = false;
public Lexicon lexicon = Lexicon.getInstance();
public String currentPatentNumber = null;
public OPSService ops = null;
private String description = null;
public ArrayList<org.grobid.core.data.BibDataSet> resBib = null; // identified current parsed
// bibliographical items and related information
private String path = null; // path where the patent file is stored
private EngineParsers parsers;
private GrobidAnalyzer analyzer = null;
private LanguageUtilities languageUtilities = LanguageUtilities.getInstance();
public void setDocumentPath(String dirName) {
path = dirName;
}
public ReferenceExtractor() {
this(new EngineParsers());
}
// constructors
public ReferenceExtractor(EngineParsers parsers) {
this.parsers = parsers;
taggerAll = TaggerFactory.getTagger(GrobidModels.PATENT_CITATION);
analyzer = GrobidAnalyzer.getInstance();
}
/**
* Extract all reference from the full text retrieve via OPS.
*/
public String extractAllReferencesOPS(boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents,
List<BibDataSet> articles) {
try {
if (description != null) {
return extractAllReferencesString(description,
filterDuplicate,
consolidate,
includeRawCitations,
patents,
articles);
}
} catch (Exception e) {
throw new GrobidException(e);
}
return null;
}
/**
* Extract all reference from a patent in XML ST.36 like.
*/
public String extractPatentReferencesXMLFile(String pathXML,
boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents) {
return extractAllReferencesXMLFile(pathXML,
filterDuplicate,
consolidate,
includeRawCitations,
patents,
null);
}
/**
     * Extract all references from an XML file in ST.36 or MAREC format.
*/
public String extractAllReferencesXMLFile(String pathXML,
boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents,
List<BibDataSet> articles) {
try {
            if (patents == null) {
                LOGGER.warn("Patents list is null!");
            }
TextSaxParser sax = new TextSaxParser();
sax.setFilter("description");
// get a factory
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setValidating(false);
spf.setFeature("http://xml.org/sax/features/namespaces", false);
spf.setFeature("http://xml.org/sax/features/validation", false);
//get a new instance of parser
XMLReader reader = XMLReaderFactory.createXMLReader();
reader.setEntityResolver(new EntityResolver() {
public InputSource resolveEntity(String publicId, String systemId) {
return new InputSource(
new ByteArrayInputStream("<?xml version=\"1.0\" encoding=\"UTF-8\"?>".getBytes()));
}
});
reader.setContentHandler(sax);
InputSource input = null;
if (pathXML.endsWith(".gz")) {
InputStream dataInputStream = new FileInputStream(pathXML);
GZIPInputStream gzip = new GZIPInputStream(dataInputStream);
DataInputStream tmp = new DataInputStream(gzip);
dataInputStream = tmp;
input = new InputSource(dataInputStream);
}
else {
input = new InputSource(pathXML);
}
input.setEncoding("UTF-8");
reader.parse(input);
description = sax.getText();
currentPatentNumber = sax.currentPatentNumber;
            // force local settings for XML input: no consolidation, duplicate filtering on,
            // whatever the caller requested
            consolidate = 0;
            filterDuplicate = true;
if (description != null) {
return extractAllReferencesString(description,
filterDuplicate,
consolidate,
includeRawCitations,
patents,
articles);
} else
return null;
        } catch (Exception e) {
            LOGGER.error("Cannot parse the XML patent document", e);
        }
return null;
}
/**
     * Extract all references from the PDF file of a patent publication.
*/
public String extractAllReferencesPDFFile(String inputFile,
boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents,
List<BibDataSet> articles) {
DocumentSource documentSource = null;
String result = null;
try {
documentSource = DocumentSource.fromPdf(new File(inputFile));
PatentDocument doc = new PatentDocument(documentSource);
doc.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
if (doc.getBlocks() == null) {
return result;
}
description = doc.getAllBlocksClean(25, -1);
if (description != null) {
result = extractAllReferencesString(description,
filterDuplicate,
consolidate,
includeRawCitations,
patents,
articles);
}
return result;
} finally {
DocumentSource.close(documentSource, true, true, true);
}
}
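    /*
     * Usage sketch (editor's addition; the input path is hypothetical). Since the
     * class implements Closeable, try-with-resources releases the tagger:
     *
     *   List<PatentItem> patents = new ArrayList<>();
     *   List<BibDataSet> articles = new ArrayList<>();
     *   try (ReferenceExtractor extractor = new ReferenceExtractor()) {
     *       String tei = extractor.extractAllReferencesPDFFile(
     *               "/tmp/patent.pdf", true, 0, true, patents, articles);
     *   }
     */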
/**
     * Produce JSON annotations for all references from the PDF file of a patent publication.
*/
public String annotateAllReferencesPDFFile(String inputFile,
boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents,
List<BibDataSet> articles) {
DocumentSource documentSource = null;
try {
documentSource = DocumentSource.fromPdf(new File(inputFile));
PatentDocument doc = new PatentDocument(documentSource);
List<LayoutToken> tokenizations = doc.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
if (doc.getBlocks() == null) {
throw new GrobidException("PDF parsing resulted in empty content");
}
if ( (tokenizations != null) && (tokenizations.size() > 0) ) {
return annotateAllReferences(doc, tokenizations,
filterDuplicate,
consolidate,
includeRawCitations,
patents,
articles);
} else {
return null;
}
} catch (Exception e) {
LOGGER.error("Error in extractAllReferencesPDFFile", e);
} finally {
DocumentSource.close(documentSource, true, true, true);
}
return null;
}
/**
     * Extract all references from a simple piece of text and return the results as a TEI XML document.
*/
public String extractAllReferencesString(String text,
boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents,
List<BibDataSet> articles) {
try {
// if parameters are null, these lists will only be valid in the method
if (patents == null) {
patents = new ArrayList<PatentItem>();
}
if (articles == null) {
articles = new ArrayList<BibDataSet>();
}
// parser for patent references
if (patentParser == null) {
patentParser = new PatentRefParser();
}
// parser for non patent references
// tokenisation for the parser (with punctuation as tokens)
ArrayList<String> patentBlocks = new ArrayList<String>();
//text = TextUtilities.dehyphenize(text); // to be reviewed!
text = text.replace("\n", " ").replace("\t", " ");
//text = text.replace(" ", " ");
            // identify the language of the patent document; we use only the first 500 characters,
            // which is normally enough for a reliable language prediction
// the text here is the patent description, so strictly monolingual
Language lang = languageUtilities.runLanguageId(text, 500);
List<String> tokenizations = analyzer.tokenize(text, lang);
int offset = 0;
if (tokenizations.size() == 0) {
return null;
}
List<OffsetPosition> journalPositions = null;
List<OffsetPosition> abbrevJournalPositions = null;
List<OffsetPosition> conferencePositions = null;
List<OffsetPosition> publisherPositions = null;
//if (articles != null)
{
journalPositions = lexicon.tokenPositionsJournalNames(text);
abbrevJournalPositions = lexicon.tokenPositionsAbbrevJournalNames(text);
conferencePositions = lexicon.tokenPositionsConferenceNames(text);
publisherPositions = lexicon.tokenPositionsPublisherNames(text);
}
boolean isJournalToken = false;
boolean isAbbrevJournalToken = false;
boolean isConferenceToken = false;
boolean isPublisherToken = false;
int currentJournalPositions = 0;
int currentAbbrevJournalPositions = 0;
int currentConferencePositions = 0;
int currentPublisherPositions = 0;
boolean skipTest = false;
//st = new StringTokenizer(text, " (["+ TextUtilities.punctuations, true);
//st = new StringTokenizer(text, delimiters, true);
int posit = 0;
//while (st.hasMoreTokens()) {
for(String tok : tokenizations) {
isJournalToken = false;
isAbbrevJournalToken = false;
isConferenceToken = false;
isPublisherToken = false;
skipTest = false;
//String tok = st.nextToken();
if ( (tok.trim().length() == 0) ||
(tok.equals(" ")) ||
(tok.equals("\t")) ||
(tok.equals("\n")) ||
(tok.equals("\r"))
) {
continue;
}
// check the position of matches for journals
if (journalPositions != null) {
if (currentJournalPositions == journalPositions.size() - 1) {
if (journalPositions.get(currentJournalPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentJournalPositions; i < journalPositions.size(); i++) {
if ((journalPositions.get(i).start <= posit) &&
(journalPositions.get(i).end >= posit)) {
isJournalToken = true;
currentJournalPositions = i;
break;
} else if (journalPositions.get(i).start > posit) {
isJournalToken = false;
currentJournalPositions = i;
break;
}
}
}
}
// check the position of matches for abbreviated journals
skipTest = false;
if (abbrevJournalPositions != null) {
if (currentAbbrevJournalPositions == abbrevJournalPositions.size() - 1) {
if (abbrevJournalPositions.get(currentAbbrevJournalPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentAbbrevJournalPositions; i < abbrevJournalPositions.size(); i++) {
if ((abbrevJournalPositions.get(i).start <= posit) &&
(abbrevJournalPositions.get(i).end >= posit)) {
isAbbrevJournalToken = true;
currentAbbrevJournalPositions = i;
break;
} else if (abbrevJournalPositions.get(i).start > posit) {
isAbbrevJournalToken = false;
currentAbbrevJournalPositions = i;
break;
}
}
}
}
// check the position of matches for conference names
skipTest = false;
if (conferencePositions != null) {
if (currentConferencePositions == conferencePositions.size() - 1) {
if (conferencePositions.get(currentConferencePositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentConferencePositions; i < conferencePositions.size(); i++) {
if ((conferencePositions.get(i).start <= posit) &&
(conferencePositions.get(i).end >= posit)) {
isConferenceToken = true;
currentConferencePositions = i;
break;
} else if (conferencePositions.get(i).start > posit) {
isConferenceToken = false;
currentConferencePositions = i;
break;
}
}
}
}
// check the position of matches for publisher names
skipTest = false;
if (publisherPositions != null) {
if (currentPublisherPositions == publisherPositions.size() - 1) {
if (publisherPositions.get(currentPublisherPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentPublisherPositions; i < publisherPositions.size(); i++) {
if ((publisherPositions.get(i).start <= posit) &&
(publisherPositions.get(i).end >= posit)) {
isPublisherToken = true;
currentPublisherPositions = i;
break;
} else if (publisherPositions.get(i).start > posit) {
isPublisherToken = false;
currentPublisherPositions = i;
break;
}
}
}
}
FeaturesVectorReference featureVector =
FeaturesVectorReference.addFeaturesPatentReferences(tok,
tokenizations.size(),
posit,
isJournalToken,
isAbbrevJournalToken,
isConferenceToken,
isPublisherToken);
patentBlocks.add(featureVector.printVector());
posit++;
}
patentBlocks.add("\n");
            String theResult = taggerAll.label(patentBlocks);
//System.out.println(theResult);
StringTokenizer stt = new StringTokenizer(theResult, "\n");
List<String> referencesPatent = new ArrayList<String>();
List<String> referencesNPL = new ArrayList<String>();
List<Integer> offsets_patent = new ArrayList<Integer>();
List<Integer> offsets_NPL = new ArrayList<Integer>();
List<Double> probPatent = new ArrayList<Double>();
List<Double> probNPL = new ArrayList<Double>();
boolean currentPatent = true; // type of current reference
String reference = null;
double currentProb = 0.0;
offset = 0;
int currentOffset = 0;
int addedOffset = 0;
String label = null; // label
String actual = null; // token
            int p = 0; // iterator for the tokenizations, used for restoring the original
            // tokenization with respect to spaces
while (stt.hasMoreTokens()) {
String line = stt.nextToken();
if (line.trim().length() == 0) {
continue;
}
StringTokenizer st2 = new StringTokenizer(line, "\t ");
boolean start = true;
String separator = "";
label = null;
actual = null;
while (st2.hasMoreTokens()) {
if (start) {
actual = st2.nextToken().trim();
start = false;
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p);
addedOffset += tokOriginal.length();
if (tokOriginal.equals(" ")) {
separator += tokOriginal;
} else if (tokOriginal.equals(actual)) {
strop = true;
}
p++;
}
} else {
label = st2.nextToken().trim();
}
}
if (label == null) {
offset += addedOffset;
addedOffset = 0;
continue;
}
double prob = 0.0;
int segProb = label.lastIndexOf("/");
if (segProb != -1) {
String probString = label.substring(segProb+1, label.length());
//System.out.println("given prob: " + probString);
try {
prob = Double.parseDouble(probString);
//System.out.println("given prob: " + probString + ", parsed: " + prob);
}
catch(Exception e) {
LOGGER.debug(probString + " cannot be parsed.");
}
label = label.substring(0,segProb);
}
// TBD: use TaggingTokenClusteror and TaggingLabel as for the other parsers
if (actual != null) {
if (label.endsWith("<refPatent>")) {
if (reference == null) {
reference = separator + actual;
currentOffset = offset;
currentPatent = true;
currentProb = prob;
} else {
if (currentPatent) {
if (label.equals("I-<refPatent>")) {
referencesPatent.add(reference);
offsets_patent.add(currentOffset);
probPatent.add(Double.valueOf(currentProb));
currentPatent = true;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
} else {
reference += separator + actual;
if (prob > currentProb) {
currentProb = prob;
}
}
} else {
referencesNPL.add(reference);
offsets_NPL.add(currentOffset);
probNPL.add(Double.valueOf(currentProb));
currentPatent = true;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
}
}
} else if (label.endsWith("<refNPL>")) {
if (reference == null) {
reference = separator + actual;
currentOffset = offset;
currentPatent = false;
currentProb = prob;
} else {
if (currentPatent) {
referencesPatent.add(reference);
offsets_patent.add(currentOffset);
probPatent.add(Double.valueOf(currentProb));
currentPatent = false;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
} else {
if (label.equals("I-<refNPL>")) {
referencesNPL.add(reference);
offsets_NPL.add(currentOffset);
probNPL.add(Double.valueOf(currentProb));
currentPatent = false;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
} else {
reference += separator + actual;
if (prob > currentProb) {
currentProb = prob;
}
}
}
}
} else if (label.equals("<other>")) {
if (reference != null) {
if (currentPatent) {
referencesPatent.add(reference);
offsets_patent.add(currentOffset);
probPatent.add(Double.valueOf(currentProb));
} else {
referencesNPL.add(reference);
offsets_NPL.add(currentOffset);
probNPL.add(Double.valueOf(currentProb));
}
currentPatent = false;
}
reference = null;
currentProb = 0.0;
}
}
offset += addedOffset;
addedOffset = 0;
}
// run reference patent parser in isolation, and produce some traces
int j = 0;
for (String ref : referencesPatent) {
patentParser.setRawRefText(ref);
patentParser.setRawRefTextOffset(offsets_patent.get(j).intValue());
List<PatentItem> patents0 = patentParser.processRawRefText();
for (PatentItem pat : patents0) {
pat.setContext(ref);
pat.setConf(probPatent.get(j).doubleValue());
patents.add(pat);
/*if (pat.getApplication()) {
if (pat.getProvisional()) {
if (debug) {
System.out.println(pat.getAuthority() + " " + pat.getNumber()
+ " P application " + pat.getOffsetBegin()
+ ":" + pat.getOffsetEnd() + "\n");
}
} else {
if (debug) {
System.out.println(pat.getAuthority() + " " + pat.getNumber()
+ " application " + pat.getOffsetBegin()
+ ":" + pat.getOffsetEnd() + "\n");
}
}
} else if (pat.getReissued()) {
if (pat.getAuthority().equals("US")) {
if (debug) {
System.out.println(pat.getAuthority() + "RE" + pat.getNumber() + " E "
+ pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
}
}
} else if (pat.getPlant()) {
if (pat.getAuthority().equals("US")) {
if (debug)
System.out.println(pat.getAuthority() + "PP" + pat.getNumber() + " " +
pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
}
} else {
if (debug) {
if (pat.getKindCode() != null) {
System.out.println(pat.getAuthority() + " " + pat.getNumber() + " "
+ pat.getKindCode() + " "
+ pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
} else {
System.out.println(pat.getAuthority() + " " + pat.getNumber() + " " +
pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
}
System.out.println(pat.getContext());
}
}*/
}
j++;
}
// list for filtering duplicates, if we want to ignore the duplicate numbers
List<String> numberListe = new ArrayList<String>();
if (filterDuplicate) {
// list for filtering duplicates, if we want to ignore the duplicate numbers
List<PatentItem> toRemove = new ArrayList<PatentItem>();
for (PatentItem pat : patents) {
if (!numberListe.contains(pat.getNumberEpoDoc())) {
numberListe.add(pat.getNumberEpoDoc());
} else {
toRemove.add(pat);
}
}
for (PatentItem pat : toRemove) {
patents.remove(pat);
}
}
if (articles != null) {
int k = 0;
List<BiblioItem> bibResults = parsers.getCitationParser().processingStringMultiple(referencesNPL, consolidate);
for (String ref : referencesNPL) {
BiblioItem result = bibResults.get(k);
if (result == null) {
k++;
continue;
}
BibDataSet bds = new BibDataSet();
result.setReference(ref);
bds.setResBib(result);
bds.setRawBib(ref);
bds.addOffset(offsets_NPL.get(k).intValue());
//bds.setConfidence(probNPL.get(k).doubleValue());
articles.add(bds);
k++;
}
}
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
int nbs = 0;
if (patents != null) {
nbs = patents.size();
}
if (articles != null)
nbs += articles.size();
String resultTEI = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
"<TEI xml:space=\"preserve\" xmlns=\"http://www.tei-c.org/ns/1.0\" " +
"xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n";
String divID = KeyGen.getKey().substring(0,7);
resultTEI += "<teiHeader />\n";
resultTEI += "<text>\n";
resultTEI += "<div id=\"_"+ divID +"\">\n";
resultTEI += TextUtilities.HTMLEncode(text);
resultTEI += "</div>\n";
resultTEI += "<div type=\"references\">\n";
if ( (patents != null) || (articles != null) ) {
resultTEI += "<listBibl>\n";
}
if (patents != null) {
for(PatentItem patentCitation : patents) {
resultTEI += patentCitation.toTEI(true, divID) + "\n"; // with offsets
}
}
if (articles != null) {
for(BibDataSet articleCitation : articles) {
resultTEI += articleCitation.toTEI(includeRawCitations) + "\n";
}
}
if ( (patents != null) || (articles != null) ) {
resultTEI += "</listBibl>\n";
}
resultTEI += "</div>\n";
resultTEI += "</text>\n";
resultTEI += "</TEI>";
return resultTEI;
}
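    /*
     * For reference, the TEI produced above has roughly the following shape
     * (abridged sketch reconstructed from the string-building code; the div id
     * value is a hypothetical example):
     *
     *   <?xml version="1.0" encoding="UTF-8"?>
     *   <TEI xml:space="preserve" xmlns="http://www.tei-c.org/ns/1.0"
     *        xmlns:xlink="http://www.w3.org/1999/xlink">
     *     <teiHeader />
     *     <text>
     *       <div id="_ab12cd3">...HTML-escaped input text...</div>
     *       <div type="references">
     *         <listBibl>
     *           ...one entry per PatentItem and per BibDataSet...
     *         </listBibl>
     *       </div>
     *     </text>
     *   </TEI>
     */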
/**
     * Annotate all references from a list of layout tokens and return the annotation results as a JSON document.
*/
public String annotateAllReferences(Document doc,
List<LayoutToken> tokenizations,
boolean filterDuplicate,
int consolidate,
boolean includeRawCitations,
List<PatentItem> patents,
List<BibDataSet> articles) {
try {
if (tokenizations.size() == 0) {
return null;
}
// if parameters are null, these lists will only be valid in the method
if (patents == null) {
patents = new ArrayList<PatentItem>();
}
if (articles == null) {
articles = new ArrayList<BibDataSet>();
}
// parser for patent references
if (patentParser == null) {
patentParser = new PatentRefParser();
}
// parser for non patent references
// tokenisation for the CRF parser (with punctuation as tokens)
ArrayList<String> patentBlocks = new ArrayList<String>();
            // identify the language of the patent document; we use only the last 500 characters,
            // which is normally enough for a reliable language prediction
            // (the text here is the patent description, so strictly monolingual)
StringBuilder textBuffer = new StringBuilder();
int accumulated = 0;
for(int n=tokenizations.size()-1; n > 0; n--) {
LayoutToken token = tokenizations.get(n);
if ( (token != null) && (token.getText() != null) ) {
textBuffer.insert(0, token.getText());
accumulated += token.getText().length();
}
if (accumulated > 500)
break;
}
String text = textBuffer.toString();
text = text.replace("\n", " ").replace("\t", " ");
Language lang = languageUtilities.runLanguageId(text);
//List<String> tokenizations = analyzer.tokenize(lang, text);
int offset = 0;
List<OffsetPosition> journalPositions = null;
List<OffsetPosition> abbrevJournalPositions = null;
List<OffsetPosition> conferencePositions = null;
List<OffsetPosition> publisherPositions = null;
//if (articles != null)
{
journalPositions = lexicon.tokenPositionsJournalNames(text);
abbrevJournalPositions = lexicon.tokenPositionsAbbrevJournalNames(text);
conferencePositions = lexicon.tokenPositionsConferenceNames(text);
publisherPositions = lexicon.tokenPositionsPublisherNames(text);
}
boolean isJournalToken = false;
boolean isAbbrevJournalToken = false;
boolean isConferenceToken = false;
boolean isPublisherToken = false;
int currentJournalPositions = 0;
int currentAbbrevJournalPositions = 0;
int currentConferencePositions = 0;
int currentPublisherPositions = 0;
boolean skipTest = false;
//st = new StringTokenizer(text, " (["+ TextUtilities.punctuations, true);
//st = new StringTokenizer(text, delimiters, true);
int posit = 0;
//while (st.hasMoreTokens()) {
for(LayoutToken token : tokenizations) {
String tok = token.getText();
isJournalToken = false;
isAbbrevJournalToken = false;
isConferenceToken = false;
isPublisherToken = false;
skipTest = false;
//String tok = st.nextToken();
if ( (tok.trim().length() == 0) ||
(tok.equals(" ")) ||
(tok.equals("\t")) ||
(tok.equals("\n")) ||
(tok.equals("\r"))
) {
continue;
}
// check the position of matches for journals
if (journalPositions != null) {
if (currentJournalPositions == journalPositions.size() - 1) {
if (journalPositions.get(currentJournalPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentJournalPositions; i < journalPositions.size(); i++) {
if ((journalPositions.get(i).start <= posit) &&
(journalPositions.get(i).end >= posit)) {
isJournalToken = true;
currentJournalPositions = i;
break;
} else if (journalPositions.get(i).start > posit) {
isJournalToken = false;
currentJournalPositions = i;
break;
}
}
}
}
// check the position of matches for abbreviated journals
skipTest = false;
if (abbrevJournalPositions != null) {
if (currentAbbrevJournalPositions == abbrevJournalPositions.size() - 1) {
if (abbrevJournalPositions.get(currentAbbrevJournalPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentAbbrevJournalPositions; i < abbrevJournalPositions.size(); i++) {
if ((abbrevJournalPositions.get(i).start <= posit) &&
(abbrevJournalPositions.get(i).end >= posit)) {
isAbbrevJournalToken = true;
currentAbbrevJournalPositions = i;
break;
} else if (abbrevJournalPositions.get(i).start > posit) {
isAbbrevJournalToken = false;
currentAbbrevJournalPositions = i;
break;
}
}
}
}
// check the position of matches for conference names
skipTest = false;
if (conferencePositions != null) {
if (currentConferencePositions == conferencePositions.size() - 1) {
if (conferencePositions.get(currentConferencePositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentConferencePositions; i < conferencePositions.size(); i++) {
if ((conferencePositions.get(i).start <= posit) &&
(conferencePositions.get(i).end >= posit)) {
isConferenceToken = true;
currentConferencePositions = i;
break;
} else if (conferencePositions.get(i).start > posit) {
isConferenceToken = false;
currentConferencePositions = i;
break;
}
}
}
}
// check the position of matches for publisher names
skipTest = false;
if (publisherPositions != null) {
if (currentPublisherPositions == publisherPositions.size() - 1) {
if (publisherPositions.get(currentPublisherPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentPublisherPositions; i < publisherPositions.size(); i++) {
if ((publisherPositions.get(i).start <= posit) &&
(publisherPositions.get(i).end >= posit)) {
isPublisherToken = true;
currentPublisherPositions = i;
break;
} else if (publisherPositions.get(i).start > posit) {
isPublisherToken = false;
currentPublisherPositions = i;
break;
}
}
}
}
FeaturesVectorReference featureVector =
FeaturesVectorReference.addFeaturesPatentReferences(tok,
tokenizations.size(),
posit,
isJournalToken,
isAbbrevJournalToken,
isConferenceToken,
isPublisherToken);
patentBlocks.add(featureVector.printVector());
posit++;
}
patentBlocks.add("\n");
            String theResult = taggerAll.label(patentBlocks);
//System.out.println(theResult);
StringTokenizer stt = new StringTokenizer(theResult, "\n");
List<String> referencesPatent = new ArrayList<String>();
List<String> referencesNPL = new ArrayList<String>();
List<Integer> offsets_patent = new ArrayList<Integer>();
List<Integer> offsets_NPL = new ArrayList<Integer>();
List<Double> probPatent = new ArrayList<Double>();
List<Double> probNPL = new ArrayList<Double>();
boolean currentPatent = true; // type of current reference
String reference = null;
double currentProb = 0.0;
offset = 0;
int currentOffset = 0;
int addedOffset = 0;
String label = null; // label
String actual = null; // token
            int p = 0; // iterator for the tokenizations, used for restoring the original
            // tokenization with respect to spaces
while (stt.hasMoreTokens()) {
String line = stt.nextToken();
if (line.trim().length() == 0) {
continue;
}
StringTokenizer st2 = new StringTokenizer(line, "\t ");
boolean start = true;
String separator = "";
label = null;
actual = null;
while (st2.hasMoreTokens()) {
if (start) {
actual = st2.nextToken().trim();
start = false;
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
                                LayoutToken tokenOriginal = tokenizations.get(p);
                                if ( (tokenOriginal == null) || (tokenOriginal.getText() == null) ) {
                                    // skip degenerate tokens, advancing p to avoid an infinite loop
                                    p++;
                                    continue;
                                }
String tokOriginal = tokenOriginal.getText();
addedOffset += tokOriginal.length();
if (tokOriginal.equals(" ")) {
separator += tokOriginal;
} else if (tokOriginal.equals(actual)) {
strop = true;
}
p++;
}
} else {
label = st2.nextToken().trim();
}
}
if (label == null) {
offset += addedOffset;
addedOffset = 0;
continue;
}
double prob = 0.0;
int segProb = label.lastIndexOf("/");
if (segProb != -1) {
String probString = label.substring(segProb+1, label.length());
//System.out.println("given prob: " + probString);
try {
prob = Double.parseDouble(probString);
//System.out.println("given prob: " + probString + ", parsed: " + prob);
}
catch(Exception e) {
LOGGER.debug(probString + " cannot be parsed.");
}
label = label.substring(0,segProb);
}
if (actual != null) {
if (label.endsWith("<refPatent>")) {
if (reference == null) {
reference = separator + actual;
currentOffset = offset;
currentPatent = true;
currentProb = prob;
} else {
if (currentPatent) {
if (label.equals("I-<refPatent>")) {
referencesPatent.add(reference);
offsets_patent.add(currentOffset);
probPatent.add(Double.valueOf(currentProb));
currentPatent = true;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
} else {
reference += separator + actual;
if (prob > currentProb) {
currentProb = prob;
}
}
} else {
referencesNPL.add(reference);
offsets_NPL.add(currentOffset);
probNPL.add(Double.valueOf(currentProb));
currentPatent = true;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
}
}
} else if (label.endsWith("<refNPL>")) {
if (reference == null) {
reference = separator + actual;
currentOffset = offset;
currentPatent = false;
currentProb = prob;
} else {
if (currentPatent) {
referencesPatent.add(reference);
offsets_patent.add(currentOffset);
probPatent.add(Double.valueOf(currentProb));
currentPatent = false;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
} else {
if (label.equals("I-<refNPL>")) {
referencesNPL.add(reference);
offsets_NPL.add(currentOffset);
probNPL.add(Double.valueOf(currentProb));
currentPatent = false;
reference = separator + actual;
currentOffset = offset;
currentProb = prob;
} else {
reference += separator + actual;
if (prob > currentProb) {
currentProb = prob;
}
}
}
}
} else if (label.equals("<other>")) {
if (reference != null) {
if (currentPatent) {
referencesPatent.add(reference);
offsets_patent.add(currentOffset);
probPatent.add(Double.valueOf(currentProb));
} else {
referencesNPL.add(reference);
offsets_NPL.add(currentOffset);
probNPL.add(Double.valueOf(currentProb));
}
currentPatent = false;
}
reference = null;
currentProb = 0.0;
}
}
offset += addedOffset;
addedOffset = 0;
}
// run reference patent parser in isolation, and produce some traces
int j = 0;
for (String ref : referencesPatent) {
patentParser.setRawRefText(ref);
patentParser.setRawRefTextOffset(offsets_patent.get(j).intValue());
List<PatentItem> patents0 = patentParser.processRawRefText();
for (PatentItem pat : patents0) {
pat.setContext(ref);
pat.setConf(probPatent.get(j).doubleValue());
patents.add(pat);
// get the list of LayoutToken corresponding to the offset positions
List<LayoutToken> localTokens = Document.getTokens(tokenizations,
pat.getOffsetBegin(),
pat.getOffsetEnd());
// associate the corresponding bounding box
if ( (localTokens != null) && (localTokens.size() > 0) )
pat.setCoordinates(BoundingBoxCalculator.calculate(localTokens));
/*if (pat.getApplication()) {
if (pat.getProvisional()) {
if (debug) {
System.out.println(pat.getAuthority() + " " + pat.getNumber()
+ " P application " + pat.getOffsetBegin()
+ ":" + pat.getOffsetEnd() + "\n");
}
} else {
if (debug) {
System.out.println(pat.getAuthority() + " " + pat.getNumber()
+ " application " + pat.getOffsetBegin()
+ ":" + pat.getOffsetEnd() + "\n");
}
}
} else if (pat.getReissued()) {
if (pat.getAuthority().equals("US")) {
if (debug) {
System.out.println(pat.getAuthority() + "RE" + pat.getNumber() + " E "
+ pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
}
}
} else if (pat.getPlant()) {
if (pat.getAuthority().equals("US")) {
if (debug)
System.out.println(pat.getAuthority() + "PP" + pat.getNumber() + " " +
pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
}
} else {
if (debug) {
if (pat.getKindCode() != null) {
System.out.println(pat.getAuthority() + " " + pat.getNumber() + " "
+ pat.getKindCode() + " "
+ pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
} else {
System.out.println(pat.getAuthority() + " " + pat.getNumber() + " " +
pat.getOffsetBegin() + ":" + pat.getOffsetEnd() + "\n");
}
System.out.println(pat.getContext());
}
}*/
}
j++;
}
// list for filtering duplicates, if we want to ignore the duplicate numbers
List<String> numberListe = new ArrayList<String>();
if (filterDuplicate) {
// list for filtering duplicates, if we want to ignore the duplicate numbers
List<PatentItem> toRemove = new ArrayList<PatentItem>();
for (PatentItem pat : patents) {
if (!numberListe.contains(pat.getNumberEpoDoc())) {
numberListe.add(pat.getNumberEpoDoc());
} else {
toRemove.add(pat);
}
}
for (PatentItem pat : toRemove) {
patents.remove(pat);
}
}
if (articles != null) {
int k = 0;
List<BiblioItem> bibResults = parsers.getCitationParser().processingStringMultiple(referencesNPL, consolidate);
for (String ref : referencesNPL) {
BiblioItem result = bibResults.get(k);
if (result == null) {
k++;
continue;
}
BibDataSet bds = new BibDataSet();
result.setReference(ref);
bds.setResBib(result);
bds.setRawBib(ref);
bds.addOffset(offsets_NPL.get(k).intValue());
//bds.setConfidence(probNPL.get(k).doubleValue());
articles.add(bds);
k++;
}
}
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
int nbs = 0;
if (patents != null) {
nbs = patents.size();
}
if (articles != null)
nbs += articles.size();
StringBuilder resultJson = new StringBuilder();
resultJson.append("{");
// page height and width
List<Page> pages = doc.getPages();
int pageNumber = 1;
resultJson.append("\"pages\": [");
for(Page page : pages) {
if (pageNumber > 1)
resultJson.append(", ");
resultJson.append("{\"page_height\":" + page.getHeight());
resultJson.append(", \"page_width\":" + page.getWidth() + "}");
pageNumber++;
}
resultJson.append("]");
if (patents != null) {
resultJson.append(", \"patents\": [");
boolean first = true;
for(PatentItem patentCitation : patents) {
if (first)
first = false;
else
resultJson.append(", ");
resultJson.append(patentCitation.toJson(null, true)); // with coordinates
}
resultJson.append("]");
}
if (articles != null) {
resultJson.append(", \"articles\": [");
boolean first = true;
for(BibDataSet articleCitation : articles) {
                /* article JSON serialization not yet implemented:
                if (first)
                    first = false;
                else
                    resultJson.append(", ");
                resultJson.append(articleCitation.toJson()); */
}
resultJson.append("]");
}
resultJson.append("}");
return resultJson.toString();
}
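    /*
     * The JSON assembled above has roughly this shape (sketch derived from the
     * StringBuilder code; the numeric values are hypothetical):
     *
     *   {"pages": [{"page_height":842.0, "page_width":595.0}, ...],
     *    "patents": [ ...one object per PatentItem, with coordinates... ],
     *    "articles": []}
     *
     * The "articles" array is currently always empty because its serialization
     * loop is commented out.
     */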
/**
* Get the TEI XML string corresponding to the recognized citation section for
* a particular citation
*/
public String reference2TEI(int i) {
String result = "";
if (resBib != null) {
            if (i < resBib.size()) {
BibDataSet bib = resBib.get(i);
BiblioItem bit = bib.getResBib();
if (path != null) {
bit.setPath(path);
}
result += bit.toTEI(i);
}
}
return result;
}
/**
* Get the BibTeX string corresponding to the recognized citation section
*/
public String references2BibTeX() {
String result = "";
for (BibDataSet bib : resBib) {
BiblioItem bit = bib.getResBib();
if (path != null) {
bit.setPath(path);
}
result += "\n" + bit.toBibTeX();
}
return result;
}
/**
* Get the TEI XML string corresponding to the recognized citation section,
* with pointers and advanced structuring
*/
public String references2TEI() {
String result = "<listbibl>\n";
int p = 0;
for (BibDataSet bib : resBib) {
BiblioItem bit = bib.getResBib();
            if (path != null) {
bit.setPath(path);
}
result += "\n" + bit.toTEI(p);
p++;
}
result += "\n</listbibl>\n";
return result;
}
/**
* Get the BibTeX string corresponding to the recognized citation section
* for a given citation
*/
public String reference2BibTeX(int i) {
String result = "";
if (resBib != null) {
            if (i < resBib.size()) {
BibDataSet bib = resBib.get(i);
BiblioItem bit = bib.getResBib();
                if (path != null) {
bit.setPath(path);
}
result += bit.toBibTeX();
}
}
return result;
}
/**
* Annotate XML files with extracted reference results. Not used.
*/
private void annotate(File file,
ArrayList<PatentItem> patents,
ArrayList<BibDataSet> articles) {
try {
// we simply rewrite lines based on identified reference strings without parsing
// special care for line breaks in the middle of a reference
ArrayList<String> sources = new ArrayList<String>();
ArrayList<String> targets = new ArrayList<String>();
for (PatentItem pi : patents) {
String context = pi.getContext();
String source = context;
sources.add(source);
String target = " <patcit>" + context + "</patcit> ";
targets.add(target);
System.out.println(source + " -> " + target);
}
for (BibDataSet bi : articles) {
String context = bi.getRawBib();
// we compile the corresponding regular expression
String source = context; //.replace(" ", "( |\\n)");
sources.add(source);
String target = " <nplcit>" + context + "</nplcit> ";
targets.add(target);
System.out.println(source + " -> " + target);
}
FileInputStream fileIn = new FileInputStream(file);
InputStreamReader reader = new InputStreamReader(fileIn, "UTF-8");
BufferedReader bufReader = new BufferedReader(reader);
String line;
            StringBuffer content = new StringBuffer();
while ((line = bufReader.readLine()) != null) {
content.append(line);
content.append("\n");
}
bufReader.close();
reader.close();
int i = 0;
String contentString = content.toString();
for (String source : sources) {
String target = targets.get(i);
contentString = contentString.replace(source, target);
i++;
}
System.out.println(contentString);
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
/**
     * Annotate a new XML patent document with the current model and generate the corresponding training data.
*
* @param documentPath is the path to the file to be processed
* @param newTrainingPath new training path
*/
public void generateTrainingData(String documentPath, String newTrainingPath) {
if (documentPath == null) {
throw new GrobidResourceException("Cannot process the patent file, because the document path is null.");
}
if (!documentPath.endsWith(".xml") && !documentPath.endsWith(".xml.gz")) {
throw new GrobidResourceException("Only patent XML files (ST.36 or Marec) can be processed to " +
"generate traning data.");
}
File documentFile = new File(documentPath);
if (!documentFile.exists()) {
throw new GrobidResourceException("Cannot process the patent file, because path '" +
documentFile.getAbsolutePath() + "' does not exists.");
}
if (newTrainingPath == null) {
GrobidProperties.getInstance();
newTrainingPath = GrobidProperties.getTempPath().getAbsolutePath();
}
File newTrainingFile = new File(newTrainingPath);
if (!newTrainingFile.exists()) {
throw new GrobidResourceException("Cannot process the patent file, because path '" +
newTrainingFile.getAbsolutePath() + "' does not exists.");
}
try {
// first pass: we get the text to be processed
TextSaxParser sax = new TextSaxParser();
sax.setFilter("description");
// get a factory
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setValidating(false);
spf.setFeature("http://xml.org/sax/features/namespaces", false);
spf.setFeature("http://xml.org/sax/features/validation", false);
//get a new instance of parser
XMLReader reader = XMLReaderFactory.createXMLReader();
reader.setEntityResolver(new EntityResolver() {
public InputSource resolveEntity(String publicId, String systemId) {
return new InputSource(
new ByteArrayInputStream("<?xml version=\"1.0\" encoding=\"UTF-8\"?>".getBytes()));
}
});
reader.setContentHandler(sax);
InputSource input = null;
if (documentPath.endsWith(".gz")) {
InputStream dataInputStream = new FileInputStream(documentPath);
GZIPInputStream gzip = new GZIPInputStream(dataInputStream);
DataInputStream tmp = new DataInputStream(gzip);
dataInputStream = tmp;
input = new InputSource(dataInputStream);
}
else {
input = new InputSource(documentPath);
}
input.setEncoding("UTF-8");
reader.parse(input);
String description = sax.getText();
String currentPatentNumber = sax.currentPatentNumber;
ArrayList<PatentItem> patents = new ArrayList<PatentItem>();
ArrayList<BibDataSet> articles = new ArrayList<BibDataSet>();
// we process the patent description
if (description != null) {
extractAllReferencesString(description, false, 0, false, patents, articles);
// second pass: we add annotations corresponding to identified citation chunks based on
// stored offsets
Writer writer = new OutputStreamWriter(
new FileOutputStream(new File(newTrainingPath + "/" + currentPatentNumber + ".training.xml"),
false), "UTF-8");
PatentAnnotationSaxParser saxx = new PatentAnnotationSaxParser();
saxx.setWriter(writer);
saxx.setPatents(patents);
saxx.setArticles(articles);
spf = SAXParserFactory.newInstance();
spf.setValidating(false);
spf.setFeature("http://xml.org/sax/features/namespaces", false);
spf.setFeature("http://xml.org/sax/features/validation", false);
//get a new instance of parser
reader = XMLReaderFactory.createXMLReader();
reader.setEntityResolver(new EntityResolver() {
public InputSource resolveEntity(String publicId, String systemId) {
return new InputSource(
new ByteArrayInputStream("<?xml version=\"1.0\" encoding=\"UTF-8\"?>".getBytes()));
}
});
reader.setContentHandler(saxx);
if (documentPath.endsWith(".gz")) {
InputStream dataInputStream = new FileInputStream(documentPath);
GZIPInputStream gzip = new GZIPInputStream(dataInputStream);
DataInputStream tmp = new DataInputStream(gzip);
dataInputStream = tmp;
input = new InputSource(dataInputStream);
}
else {
input = new InputSource(documentPath);
}
input.setEncoding("UTF-8");
reader.parse(input);
writer.close();
// last, we generate the training data corresponding to the parsing of the identified NPL citations
// buffer for the reference block
StringBuffer allBufferReference = new StringBuffer();
ArrayList<String> inputs = new ArrayList<String>();
for (BibDataSet article : articles) {
String refString = article.getRawBib();
if (refString.trim().length() > 1) {
inputs.add(refString.trim());
}
}
if (inputs.size() > 0) {
for (String inpu : inputs) {
ArrayList<String> inpus = new ArrayList<String>();
inpus.add(inpu);
StringBuilder bufferReference = parsers.getCitationParser().trainingExtraction(inpus);
if (bufferReference != null) {
allBufferReference.append(bufferReference.toString() + "\n");
}
}
}
                // allBufferReference is always non-null here, so only check for content
                if (allBufferReference.length() > 0) {
Writer writerReference = new OutputStreamWriter(new FileOutputStream(
new File(newTrainingPath + "/" + currentPatentNumber +
".training.references.xml"), false), "UTF-8");
writerReference.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
writerReference.write("<citations>\n");
writerReference.write(allBufferReference.toString());
writerReference.write("</citations>\n");
writerReference.close();
                }
}
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
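    /*
     * Illustrative call (editor's addition; the paths are hypothetical):
     *
     *   extractor.generateTrainingData("/data/EP1234567.xml.gz", "/data/training");
     *
     * This writes <patent-number>.training.xml and, when NPL citations are found,
     * <patent-number>.training.references.xml into the target directory.
     */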
/**
     * Retrieve a patent description by its publication number via the OPS service.
*/
public boolean getDocOPS(String number) {
try {
if (ops == null)
ops = new OPSService();
description = ops.descriptionRetrieval(number);
if (description == null)
return false;
else if (description.length() < 600)
return false;
else
return true;
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
/**
* Write the list of extracted references in an XML file
*/
public void generateXMLReport(File file,
ArrayList<PatentItem> patents,
ArrayList<BibDataSet> articles) {
try {
OutputStream tos = new FileOutputStream(file, false);
Writer writer = new OutputStreamWriter(tos, "UTF-8");
StringBuffer content = new StringBuffer();
// header
content.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
if ((patents.size() > 0) || (articles.size() > 0))
content.append("<citations>\n");
if (patents.size() > 0)
content.append("<patent-citations>\n");
int i = 0;
for (PatentItem pi : patents) {
String dnum = pi.getAuthority() + pi.getNumberEpoDoc();
if (pi.getKindCode() != null)
dnum += pi.getKindCode();
content.append("<patcit if=\"pcit" + i + " dnum=\"" + dnum + "\">" +
"<text>" + pi.getContext() + "</text></patcit>");
content.append("\n");
i++;
}
if (patents.size() > 0)
content.append("</patent-citations>\n");
if (articles.size() > 0)
content.append("<npl-citations>\n");
i = 0;
for (BibDataSet bds : articles) {
content.append("<nplcit if=\"ncit" + i + "\">");
content.append(bds.getResBib().toTEI(i));
content.append("<text>" + bds.getRawBib() + "</text></nplcit>");
content.append("\n");
i++;
}
if (articles.size() > 0)
content.append("</npl-citations>\n");
if ((patents.size() > 0) || (articles.size() > 0))
content.append("</citations>\n");
writer.write(content.toString());
writer.close();
tos.close();
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
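    /*
     * Abridged sketch of the report written by generateXMLReport() (structure per
     * the string-building code above; the attribute values are hypothetical):
     *
     *   <?xml version="1.0" encoding="UTF-8"?>
     *   <citations>
     *     <patent-citations>
     *       <patcit id="pcit0" dnum="EP1234567A1"><text>...</text></patcit>
     *     </patent-citations>
     *     <npl-citations>
     *       <nplcit id="ncit0">...TEI of the parsed citation...<text>...</text></nplcit>
     *     </npl-citations>
     *   </citations>
     */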
/**
* not used...
*/
private static boolean checkPositionRange(int currentPosition,
int posit,
List<OffsetPosition> positions) {
boolean isInRange = false;
boolean skipTest = false;
if (currentPosition == positions.size() - 1) {
if (positions.get(currentPosition).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentPosition; i < positions.size(); i++) {
if ((positions.get(i).start <= posit) &&
(positions.get(i).end >= posit)) {
isInRange = true;
currentPosition = i;
break;
} else if (positions.get(i).start > posit) {
isInRange = false;
currentPosition = i;
break;
}
}
}
return isInRange;
}
@Override
public void close() throws IOException {
taggerAll.close();
taggerAll = null;
}
}
| 70,983 | 40.031214 | 127 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/patent/PatentRefParser.java
|
package org.grobid.core.engines.patent;
import java.util.*;
import java.util.regex.*;
import java.lang.NumberFormatException;
import org.apache.commons.io.IOUtils;
import org.grobid.core.data.PatentItem;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidResourceException;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Parser for patent references based on regular language rewriting.
 * Input raw references are WYSIWYG references (i.e. the reference strings as
 * they appear in the text). The expected output is the patent reference in
 * the EPO Epoque format.
*
*/
public class PatentRefParser {
    private static final Logger LOGGER = LoggerFactory.getLogger(PatentRefParser.class);
private String rawText = null;
private int rawTextOffset = 0; // starting offset of the current raw text
private Pattern patent_pattern = null;
private Pattern number_pattern = null;
    // this is the complete list of existing authorities observed in practice, always
    // two upper-case letter codes
static public List<String> authorities = Arrays.asList("AP", "AL", "DZ", "AR", "AU", "AT", "BE", "BX",
"BR", "BG", "CA", "CL", "CN", "CO",
"HR", "CU", "CY", "CZ", "CS", "DK", "EG", "EA", "EP", "DE", "DD", "FI", "FR", "GB", "GR", "HK", "HU",
"IS", "IN", "ID", "IB", "TP", "IR", "IQ", "IE", "IL", "IT", "JP", "JO", "KE", "KP", "KR", "LV", "LT",
"LU", "MW", "MY", "MX", "MD", "MC", "MN", "MA", "NL", "NZ", "NG", "NO", "OA", "WO", "PE", "PH",
"PL", "PT", "RD", "RO", "RU", "SA", "SG", "SK", "SI", "ZA", "SU", "ES", "LK", "SE", "CH", "TW", "TH",
"TT", "TN", "TR", "UA", "GB", "US", "UY", "VE", "VN", "YU", "ZM", "ZW");
    // this is the list of supported languages, given as ISO 639-1 two-letter codes
static public List<String> languages = Arrays.asList("en", "de", "fr", "es", "it", "ja", "kr", "pt", "zh", "ar");
// list of regular expressions for identifying the authority in the raw reference string
private List<Pattern> autority_patterns = new ArrayList<Pattern>();
// map giving for a language and an authority name the list of language specific expressions
// this uses the language resource files *.local under grobid-home/lexicon/patent/
private Map<String, List<String> > languageResources = null;
private Pattern application_pattern = null;
private Pattern publication_pattern = null;
private Pattern pct_application_pattern = null;
private Pattern provisional_pattern = null;
private Pattern non_provisional_pattern = null;
private Pattern us_serial_pattern = null;
private Pattern translation_pattern = null;
private Pattern utility_pattern = null;
private Pattern kindcode_pattern1 = null;
private Pattern kindcode_pattern2 = null;
private Pattern kindcode_pattern3 = null;
private Pattern jp_kokai_pattern = null;
private Pattern jp_heisei_pattern = null;
private Pattern standardText = null;
public PatentRefParser() {
patent_pattern = Pattern.compile("([UEWDJFA])[\\.\\s]?([SPOERKU])[\\.\\s]?-?(A|B|C)?\\s?-?([\\s,0-9/-]+(A|B|C)?[\\s,0-9/-]?)");
//number_pattern = Pattern.compile("[ABC]?([\\s,0-9/-]*)+[ABC]?([\\s,0-9/-])*[ABC]?");
number_pattern = Pattern.compile("((RE|PP)[\\s,0-9/\\-\\.\\\\]*)|(PCT(/|\\\\)[A-Z][A-Z]([\\s,0-9/\\-\\.\\\\]*))|([ABC]?([0-9][\\s,0-9/\\-\\.\\\\]*)+[ABCUT]?([\\s,0-9/\\-\\.\\\\])*[ABCUT]?)");
//number_pattern = Pattern.compile("((RE|PP)[\\s,0-9/\\-\\.\\\\]*)|(PCT(/|\\\\)[A-Z][A-Z]([\\s,0-9/\\-\\.\\\\]*))|([ABC]?([0-9][\\s,0-9/\\-\\.\\\\]*)+[ABCUT][0-9])|([ABC]?([0-9][\\s,0-9/\\-\\.\\\\]*)+[ABCUT])|([ABC]?([0-9][\\s,0-9/\\-\\.\\\\]*)+)");
//number_pattern = Pattern.compile("((RE|PP)[\\s,0-9/\\-\\.\\\\]*)|(PCT(/|\\\\)[A-Z][A-Z]([\\s,0-9/\\-\\.\\\\]*))|([ABC]?([\\s,0-9/\\-\\.\\\\ABC])+[ABCUT]?)");
kindcode_pattern1 = Pattern.compile("([ABC][0-9]?)"); // before number
kindcode_pattern2 = Pattern.compile("([ABCUT][0-9]?)"); // after number
kindcode_pattern3 = Pattern.compile("^([ABC][0-9]?)-"); // as prefix of the number
standardText = Pattern.compile("[a-z][A-Z]");
//application_pattern = Pattern.compile("((A|a)pplicat)|((a|A)ppln)");
//publication_pattern = Pattern.compile("((P|p)ublicat)|((p|P)ub)");
pct_application_pattern = Pattern.compile("(PCT/(GB|EP|US|JP|DE|FR|UK|BE|CA|CH|AT|AU|KR|RU|FI|NL|SE|ES|DK|DD)/?([0-9][0-9]([0-9][0-9])?))");
//provisional_pattern = Pattern.compile("((P|p)rovisional)");
non_provisional_pattern = Pattern.compile("((n|N)on.(P|p)rovisional)");
translation_pattern = Pattern.compile("((T|t)ranslation)");
//utility_pattern = Pattern.compile("((U|u)tility)");
us_serial_pattern = Pattern.compile("((S|s)erial(\\s|-)+((n|N)o(\\.)?)(\\s|-)*[0-9]*/)");
jp_kokai_pattern = Pattern.compile("(k|K)oka(l|i)");
jp_heisei_pattern = Pattern.compile("(H|h)(E|e)(I|i)");
initLanguageResources();
// we compile the different authority regular expression patterns based on the language resource files
for(String authorityName : authorities) {
autority_patterns.add(compilePattern(authorityName));
}
// compiling additional non-authority patterns: application, publication, provisional, utility
application_pattern = compilePattern("application");
publication_pattern = compilePattern("publication");
provisional_pattern = compilePattern("provisional");
utility_pattern = compilePattern("utility");
// these patterns are now expressed in external resource files under grobid-home/lexicon/patent/
/*EP_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)EPO?)|(E\\.(\\s)?P)|((E|e)uropean)|(européen)|(europ)");
DE_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)DE)|(D\\.(\\s)?E)|((G|g)erman)|((D|d)eutsch)|(allemand)");
US_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)US)|(U\\.(\\s)?S)|((U|u)nited(\\s|-)*(S|s)tate)|(USA)");
FR_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)FR)|(F\\.(\\s)?R)|((F|f)rench)|((F|f)rance)|(français)|(F|f)ranz");
UK_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)UK)|(U\\.(\\s)?K)|(GB)|(G\\.B)|((b|B)ritish)|((e|E)nglish)|((U|u)nited(\\s|-)*(K|k)ingdom)|((g|G)reat(\\s|-)(B|b)ritain)");
BE_pattern =
Pattern.compile("((\\s|,|\\.|^|\\-)BE)|(B\\.(\\s)?E)|((B|b)elgian)|((B|b)elge)|((B|b)elgique)|((B|b)elgium)|((B|b)elgisch)|((B|b)elgie)");
WO_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)W(O|0))|(W\\.(\\s)?O)|(PCT)|(WIPO)|((w|W)orld(\\s|-)(p|P)atent)");
JP_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)JP)|(J\\.(\\s)?P)|((J|j)apan)|((J|j)apon)|(Nippon)|(HEI)");
CA_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)CA)|(C\\.(\\s)?A)|((C|c)anadian)|((C|c)anada)|((c|C)anadien)|((K|k)anad)");
CH_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)CH)|(C\\.(\\s)?H)|((S|w)iss)|((S|s)wizerland)|((s|S)uisse)|((H|h)elveti)|((S|s)chweiz)");
AT_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)AT)|(A\\.(\\s)?T)|((A|a)ustria)|((A|a)utrich)|((Ö|ö)sterreich)");
AU_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)AU)|(A\\.(\\s)?U)|((A|a)ustralia)|((A|a)ustrali)");
KR_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)KR)|(K\\.(\\s)?R)|((K|k)orean)|((K|k)orea)|((C|c)orée)|((S|s)üdkorea)|Sud(\\.|-)?Korea");
RU_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)RU)|(R\\.(\\s)?U)|((R|r)ussia)|((R|r)usse)|((R|r)usse)|((R|r)ussisch)");
FI_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)FI)|(R\\.(\\s)?U)|((R|r)ussia)|((R|r)usse)|((R|r)usse)|((R|r)ussisch)");
NL_pattern =
Pattern.compile("((\\s|,|\\.|^|\\-)NL)|(N\\.(\\s)?L)|((H|h)olland)|((N|n)etherland)|((P|p)ays(\\.|-)bas)|((D|d)utch)|((h|H)olländisch)");
SE_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)SE)|(S\\.(\\s)?E)|((S|s)weden)|((S|s)wedish)|((S|s)wedisch)|((S|s)u\\.de)|((S|s)u\\.dois)");
IT_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)IT)|(I\\.(\\s)?T)|((I|i)taly)|((I|i)tali(a|e)n)|((I|i)talie)");
ES_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)ES)|(E\\.(\\s)?S)|((S|s)panish)|((S|s)panie)|((E|e)spagnol)|((S|s)pain)");
DK_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)DK)|(D\\.(\\s)?K)|((D|d)anish)|((D|d)anois)|((d|D)(a|ä)nemark)|(dänisch)");
DD_pattern = Pattern.compile("((\\s|,|\\.|^|\\-)DD)|(D\\.(\\s)?D)|(DDR)");*/
}
private final void initLanguageResources() {
languageResources = new TreeMap<String, List<String>>();
for(String language : languages) {
// opening the corresponding language resource file
String path = GrobidProperties.getGrobidHomePath() + "/lexicon/patent/" + language + ".local";
File localFile = new File(path);
if (!localFile.exists()) {
throw new GrobidResourceException(
"Cannot add language resources for patent processing (language '" + language +
"'), because file '" + localFile.getAbsolutePath() + "' does not exists.");
}
if (!localFile.canRead()) {
throw new GrobidResourceException(
"Cannot add language resources for patent processing (language '" + language +
"'), because cannot read file '" + localFile.getAbsolutePath() + "'.");
}
InputStream ist = null;
InputStreamReader isr = null;
BufferedReader dis = null;
try {
ist = new FileInputStream(localFile);
isr = new InputStreamReader(ist, "UTF8");
dis = new BufferedReader(isr);
String l = null;
while ((l = dis.readLine()) != null) {
if (l.length() == 0) continue;
// the first token, separated by a '=', gives the authority name
String[] parts = l.split("=");
String authority = parts[0].trim();
// this will cover authority as well as some other patterns such as publication, application, ...
String expressions = parts[1].trim();
if (expressions.trim().length() > 0) {
String[] subparts = expressions.split(",");
List<String> listExpressions = new ArrayList<String>();
for(int i=0; i < subparts.length; i++) {
listExpressions.add(subparts[i].trim());
}
languageResources.put(language+authority, listExpressions);
}
}
}
catch (FileNotFoundException e) {
                throw new GrobidException("An exception occurred while running Grobid.", e);
}
catch (IOException e) {
                throw new GrobidException("An exception occurred while running Grobid.", e);
}
finally {
IOUtils.closeQuietly(ist, isr, dis);
}
}
}
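    /*
     * Sketch of the expected *.local resource file format (reconstructed from the
     * parsing code above; the concrete entries are hypothetical examples):
     *
     *   EP=european,europ
     *   US=united-state
     *   application=appln
     *
     * Each line maps an authority code (or a keyword such as "application") to a
     * comma-separated list of language-specific trigger expressions.
     */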
private Pattern compilePattern(String authorityName) {
// default authority two character name
String er = "((\\s|,|\\.|^|\\-)";
er += authorityName + ")";
if (authorityName.length() == 2) {
// authority name with dots
er += "|(" + authorityName.charAt(0) + "\\.(\\s)?" + authorityName.charAt(1) + ")";
}
        // using language resources for authority patterns
for(String language : languages) {
List<String> expressions = languageResources.get(language+authorityName);
if (expressions != null) {
for(String expression : expressions) {
if ( (expression != null) && (expression.trim().length()>1) ) {
expression = expression.trim();
if (!expression.contains("-") && !expression.contains(".")) {
if (TextUtilities.isAllLowerCase(expression)) {
expression =
"(" + expression.charAt(0) + "|" + Character.toUpperCase(expression.charAt(0)) + ")"
+ expression.substring(1,expression.length());
}
}
else {
if (expression.contains("-")) {
String[] parts = expression.split("-");
expression = "";
for(int j=0; j<parts.length; j++) {
String part = parts[j];
if (j >0) {
expression += "(\\s|-)*";
}
                                    if (TextUtilities.isAllLowerCase(part)) {
                                        expression +=
                                                "(" + part.charAt(0) + "|" + Character.toUpperCase(part.charAt(0)) + ")"
                                                + part.substring(1,part.length());
                                    } else {
                                        // keep parts that are not all lower-case as-is instead of dropping them
                                        expression += part;
                                    }
}
}
                            if (expression.contains(".")) {
                                // split() takes a regular expression, so the dot must be escaped
                                String[] parts = expression.split("\\.");
expression = "";
for(int j=0; j<parts.length; j++) {
String part = parts[j];
if (j >0) {
expression += "(\\s)?\\.(\\s)?";
}
                                    if (TextUtilities.isAllLowerCase(part)) {
                                        expression +=
                                                "(" + part.charAt(0) + "|" + Character.toUpperCase(part.charAt(0)) + ")"
                                                + part.substring(1,part.length());
                                    } else {
                                        // keep parts that are not all lower-case as-is instead of dropping them
                                        expression += part;
                                    }
}
}
}
er += "|(" + expression + ")";
}
}
}
}
return Pattern.compile(er);
}
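    /*
     * For illustration (editor's addition): assuming a hypothetical resource entry
     * "european" for the authority "EP", compilePattern("EP") would yield a regular
     * expression along the lines of:
     *
     *   ((\s|,|\.|^|\-)EP)|(E\.(\s)?P)|((e|E)uropean)
     */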
public void setRawRefText(String s) {
rawText = s;
}
public void setRawRefTextOffset(int s) {
rawTextOffset = s;
}
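    /*
     * Minimal usage sketch (editor's addition; the reference string is a
     * hypothetical example):
     *
     *   PatentRefParser parser = new PatentRefParser();
     *   parser.setRawRefText("European patent EP 1 234 567 A1");
     *   parser.setRawRefTextOffset(0);
     *   List<PatentItem> items = parser.processRawRefText();
     */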
public List<PatentItem> processRawRefText() {
List<PatentItem> res = new ArrayList<PatentItem>();
//System.out.println("processRawRefText: " + rawText);
String country = null;
int country_position = -1;
        // scan the authority patterns in order; stop at the first one that matches
        int i = 0;
        for(String authority : authorities) {
            Pattern thePattern = autority_patterns.get(i);
            Matcher fitCountry = thePattern.matcher(rawText);
            if (fitCountry.find()) {
                country = authority;
                country_position = fitCountry.end();
                break;
            }
            i++;
        }
if (country != null) {
List<String> numbers = new ArrayList<String>();
List<Integer> offsets_begin = new ArrayList<Integer>();
List<Integer> offsets_end = new ArrayList<Integer>();
Matcher fitNumber = number_pattern.matcher(rawText);
while (fitNumber.find()) {
                String toto = fitNumber.group(0);
                // use the match position rather than indexOf(), which would return the
                // offset of the first occurrence of a repeated number
                int inde_begin = fitNumber.start() + rawTextOffset;
                int inde_end = inde_begin + toto.length() - 1;
                //toto = toto.replaceAll("(A|B|C|\\s|-|/)", "");
                //toto = toto.replaceAll("(-)", "");
if (toto.length() > 0) {
boolean notPieces = true;
// additional tests are necessary for , and .
                    if (toto.length() > 14) {
                        // most likely we have two patent numbers separated by a comma;
// count the number of ,
int countComma = 0;
String[] pieces = null;
pieces = toto.split(",");
countComma = pieces.length - 1;
if (countComma > 0) {
// we split depending on number of comma
double ratio = (double) toto.length() / countComma;
if (ratio < 10)
pieces = toto.split(", ");
if (pieces.length == 2) {
if (((pieces[0].length() > pieces[1].length()) &&
(pieces[0].length() - pieces[1].length() < 4)) ||
((pieces[0].length() <= pieces[1].length()) &&
(pieces[1].length() - pieces[0].length() < 4))
) {
for (int i = 0; i < 2; i++) {
String piece = pieces[i];
addNumber(numbers, offsets_begin, offsets_end, piece, inde_begin, inde_end, toto);
}
notPieces = false;
}
} else if ((toto.length() > (6 * pieces.length))) {
for (int i = 0; i < pieces.length; i++) {
String piece = pieces[i];
addNumber(numbers, offsets_begin, offsets_end, piece, inde_begin, inde_end, toto);
}
notPieces = false;
}
}
}
if (notPieces) {
addNumber(numbers, offsets_begin, offsets_end, toto, inde_begin, inde_end, null);
}
}
}
List<Boolean> applications = new ArrayList<Boolean>();
List<Boolean> provisionals = new ArrayList<Boolean>();
List<Boolean> pctapps = new ArrayList<Boolean>();
List<Boolean> designs = new ArrayList<Boolean>();
List<Boolean> reissueds = new ArrayList<Boolean>();
List<Boolean> plants = new ArrayList<Boolean>();
List<String> kindcodes = new ArrayList<String>();
for (String number : numbers) {
applications.add(Boolean.valueOf(false));
provisionals.add(Boolean.valueOf(false));
pctapps.add(Boolean.valueOf(false));
designs.add(Boolean.valueOf(false));
reissueds.add(Boolean.valueOf(false));
plants.add(Boolean.valueOf(false));
kindcodes.add(null);
}
List<String> newNumbers = new ArrayList<String>();
List<String> originalNumbers = new ArrayList<String>();
int i = 0;
int lastPositionVisited = country_position;
for (String number : numbers) {
String originalNumber = number;
// try to get the kind code
boolean kindCodeFound = false;
// do we have the kind code directly in the number prefix?
Matcher fitKindCode = kindcode_pattern3.matcher(number);
if (fitKindCode.find()) {
String tata = fitKindCode.group(0);
int posKind = fitKindCode.end();
// strip separator characters so that only the kind code itself remains
tata = tata.replaceAll("[- ]", "");
kindcodes.set(i, tata);
lastPositionVisited = offsets_end.get(i) - rawTextOffset;
kindCodeFound = true;
int ind = number.indexOf("-");
if (ind != -1)
number = number.substring(ind, number.length());
}
if (!kindCodeFound && (offsets_begin.get(i)-rawTextOffset >= lastPositionVisited)) {
// is there a kind code between the last position and position of this number?
String interChunk = rawText.substring(lastPositionVisited, (offsets_begin.get(i)-rawTextOffset));
fitKindCode = kindcode_pattern1.matcher(interChunk);
if (fitKindCode.find()) {
String tata = fitKindCode.group(0);
int posKind = fitKindCode.end();
// if we have standard text between the kind code and the number, the kind code is not valid
String subChunk = interChunk.substring(posKind, interChunk.length());
Matcher m = standardText.matcher(subChunk);
// just try to find a match
if (!m.find()) {
// if the distance between the kind code and the number is too large,
// the kind code is not valid
if (interChunk.length() - posKind <= 4) {
// otherwise, we validated the kind code for this patent reference
kindcodes.set(i, tata);
if (offsets_end.get(i) < rawTextOffset) {
offsets_end.set(i, offsets_end.get(i) + rawTextOffset);
}
lastPositionVisited = offsets_end.get(i) - rawTextOffset;
kindCodeFound = true;
}
}
}
}
if (!kindCodeFound) {
// is there a kind code immediately after the number?
int postLength = 0;
if (rawText.length() - (offsets_end.get(i)-rawTextOffset) >= 3)
postLength = 3;
else
postLength = rawText.length() - (offsets_end.get(i)-rawTextOffset);
if (postLength>0) {
String postChunk = rawText.substring((offsets_end.get(i)-rawTextOffset-1),
(offsets_end.get(i)-rawTextOffset) + postLength);
fitKindCode = kindcode_pattern2.matcher(postChunk);
if (fitKindCode.find()) {
String tata = fitKindCode.group(0);
kindcodes.set(i, tata);
kindCodeFound = true;
lastPositionVisited = (offsets_end.get(i)+postLength)-rawTextOffset;
}
}
}
number = number.replace("-", "");
// do we have an application or a patent publication?
if (country.equals("WO") || country.equals("W0")) {
// ("W0" covers the usual OCR typo for "WO")
number = number.replaceAll("[\\.\\s]", "");
String numm = number.replaceAll("[/,\\.]", "").trim();
originalNumber = numm;
if ((numm.startsWith("0")) && (numm.length() == 11)) {
// a useless zero has been inserted
number = number.substring(1, number.length());
} else if (((numm.startsWith("09")) && (numm.length() == 8)) ||
((numm.startsWith("00")) && (numm.length() == 8))
) {
// a useless zero has been inserted (WO format before July 2002!)
number = number.substring(1, number.length());
}
// PCT application checking
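// illustrative example (assuming the PCT pattern matches the "PCT/CCyy" prefix):
// "PCT/EP99" yields country code "EP" and two-digit year "99", expanded with the
// pivot 7x/8x/9x -> 19xx, otherwise 20xx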
Matcher fitApplication = pct_application_pattern.matcher(number);
if (fitApplication.find()) {
String titi = fitApplication.group(0);
int move = titi.length();
boolean application = true;
titi = titi.replace("PCT/", "");
if (titi.length() > 2) {
String countr = titi.substring(0, 2);
String year = null;
if (titi.charAt(2) != '/')
year = titi.substring(2, titi.length());
else
year = titi.substring(3, titi.length());
if (year.length() == 2) {
if ((year.charAt(0) == '7') || (year.charAt(0) == '8') || (year.charAt(0) == '9')) {
year = "19" + year;
} else {
year = "20" + year;
}
} else if ((year.length() != 4) && (year.length() > 1)) {
year = year.substring(0, 2);
if ((year.charAt(0) == '7') || (year.charAt(0) == '8') || (year.charAt(0) == '9')) {
year = "19" + year;
} else {
year = "20" + year;
}
} else if ((titi.charAt(2) == '/') && (year.length() == 4)) {
year = year.substring(0, 2);
move = move - 1;
if ((year.charAt(0) == '7') || (year.charAt(0) == '8') || (year.charAt(0) == '9')) {
year = "19" + year;
} else {
year = "20" + year;
}
}
number = year + countr + number.substring(move, number.length());
number = number.replaceAll("[/,\\.]", "").trim();
if (number.length() == 12) {
if (number.charAt(6) == '0')
number = number.substring(0, 6) + number.substring(7, 12);
}
}
applications.set(i, Boolean.valueOf(true));
pctapps.set(i, Boolean.valueOf(true));
}
} else {
Matcher fitApplication = application_pattern.matcher(rawText);
Matcher fitPublication = publication_pattern.matcher(rawText);
boolean appli = fitApplication.find();
boolean publi = fitPublication.find();
if (appli && !publi) {
applications.set(i, Boolean.valueOf(true));
}
if (publi) {
applications.set(i, Boolean.valueOf(false));
}
if (country.equals("EP")) {
String numm = number.replaceAll("[ABCU,\\.\\s/]", "").trim();
originalNumber = numm;
if ((numm.length() == 8)) {
applications.set(i, Boolean.valueOf(true));
// epodoc format with the full year as prefix
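// illustrative example: "04123456" -> "20" + "04" + "0" + "123456" = "20040123456"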
if (numm.startsWith("0") || numm.startsWith("1") ) {
number = "20" + numm.substring(0, 2) + "0" + numm.substring(2, numm.length());
}
else {
// note: this 0x/1x -> 20xx pivot will be a problem from 2020 on
number = "19" + numm.substring(0, 2) + "0" + numm.substring(2, numm.length());
}
}
else if (numm.length() <= 7) {
applications.set(i, Boolean.valueOf(false));
}
}
if (country.equals("US")) {
// do we have a provisional?
Matcher fitProvisional = provisional_pattern.matcher(rawText);
Matcher fitNonProvisional = non_provisional_pattern.matcher(rawText);
if ((fitProvisional.find()) && (!fitNonProvisional.find())) {
provisionals.set(i, Boolean.valueOf(true));
}
// interpretation of prefix "serial code" is given here:
// http://www.uspto.gov/patents/process/search/filingyr.jsp
// we need to identify the year based on the serial number range
// provisional starts with 60 or 61
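// illustrative example: provisional "60/123,456" keeps serial 123456, which falls
// in the 1999 range below, giving the epodoc form "19990123456"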
if (number.startsWith("60") && (appli || number.startsWith("60/"))) {
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(true));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 9474)
year = "1995";
else if (numb < 34487)
year = "1996";
else if (numb < 70310)
year = "1997";
else if (numb < 113787)
year = "1998";
else if (numb < 173038)
year = "1999";
else if (numb < 256730)
year = "2000";
else if (numb < 343564)
year = "2001";
else if (numb < 437173)
year = "2002";
else if (numb < 532638)
year = "2003";
else if (numb < 639450)
year = "2004";
else if (numb < 754464)
year = "2005";
else if (numb < 877460)
year = "2006";
else if (numb < 999999)
year = "2007";
number = year + "0" + number;
} else if (number.startsWith("61") && (appli || number.startsWith("61/"))) {
// same as for 60 but the ranges are different
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(true));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 9389)
year = "2007";
else if (numb < 203947)
year = "2008";
else if (numb < 335046)
year = "2009";
else if (numb < 460301)
year = "2010";
else if (numb < 631245)
year = "2011";
else if (numb < 848274)
year = "2012";
else if (numb < 964276)
year = "2013";
else if (numb < 999999)
year = "2014";
number = year + "0" + number;
}
else if (number.startsWith("62") && (appli || number.startsWith("62/"))) {
// same as for 60 but the ranges are different
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(true));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 124715)
year = "2014";
else if (numb < 387330)
year = "2015";
else
year = "2016";
number = year + "0" + number;
}
else if (number.startsWith("29") && (appli || number.startsWith("29/"))) {
// design patent application starts with 29
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
designs.set(i, Boolean.valueOf(true));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 3180)
year = "1992";
else if (numb < 16976)
year = "1993";
else if (numb < 32919)
year = "1994";
else if (numb < 48507)
year = "1995";
else if (numb < 64454)
year = "1996";
else if (numb < 81426)
year = "1997";
else if (numb < 98302)
year = "1998";
else if (numb < 116135)
year = "1999";
else if (numb < 134406)
year = "2000";
else if (numb < 152739)
year = "2001";
else if (numb < 173499)
year = "2002";
else if (numb < 196307)
year = "2003";
else if (numb < 220177)
year = "2004";
else if (numb < 245663)
year = "2005";
else if (numb < 270581)
year = "2006";
else if (numb < 294213)
year = "2007";
else if (numb < 313375)
year = "2008";
else if (numb < 348400)
year = "2009";
else if (numb < 372670)
year = "2010";
else if (numb < 395318)
year = "2011";
else if (numb < 442191)
year = "2012";
else if (numb < 463549)
year = "2013";
else if (numb < 474693)
year = "2014";
else if (numb < 505607)
year = "2015";
else
year = "2016";
number = year + "0" + number;
}
else if (number.startsWith("14") && (appli || number.startsWith("14/"))) {
// standard patent application, most recent serial code
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 544379)
year = "2014";
else if (numb < 757791)
year = "2015";
else
year = "2016";
number = year + "0" + number;
}
else if (number.startsWith("13") && (appli || number.startsWith("13/"))) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 374487)
year = "2011";
else if (numb < 694748)
year = "2012";
else if (numb < 998975)
year = "2013";
else
year = "2014";
number = year + "0" + number;
} else if (number.startsWith("12") && (appli || number.startsWith("12/"))) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 5841)
year = "2007";
else if (numb < 317884)
year = "2008";
else if (numb < 655475)
year = "2009";
else if (numb < 930166)
year = "2010";
else
year = "2011";
number = year + "0" + number;
} else if (number.startsWith("11") && (appli || number.startsWith("11/"))) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 23305) // no leading zero: 023305 would be an octal literal (= 9925)
year = "2004";
else if (numb < 320178)
year = "2005";
else if (numb < 646743)
year = "2006";
else
year = "2007";
number = year + "0" + number;
} else if (number.startsWith("10") && (appli || number.startsWith("10/"))) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 32443)
year = "2001";
else if (numb < 334164)
year = "2002";
else if (numb < 746297)
year = "2003";
else
year = "2004";
number = year + "0" + number;
} else if (number.startsWith("9/") || number.startsWith("09/")) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("9/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 219723)
year = "1998";
else if (numb < 471932)
year = "1999";
else if (numb < 740756)
year = "2000";
else
year = "2001";
number = year + "0" + number;
} else if (number.startsWith("8/") || number.startsWith("08/")) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("8/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 176047)
year = "1993";
else if (numb < 367542)
year = "1994";
else if (numb < 581739)
year = "1995";
else if (numb < 777991)
year = "1996";
else
year = "1997";
number = year + "0" + number;
} else if (number.startsWith("7/") || number.startsWith("07/")) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("7/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 140321)
year = "1987";
else if (numb < 292671)
year = "1988";
else if (numb < 459413)
year = "1989";
else if (numb < 636609)
year = "1990";
else if (numb < 815501)
year = "1991";
else
year = "1992";
number = year + "0" + number;
} else if (number.startsWith("6/") || number.startsWith("06/")) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("6/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 108971)
year = "1979";
else if (numb < 221957)
year = "1980";
else if (numb < 336510)
year = "1981";
else if (numb < 454954)
year = "1982";
else if (numb < 567457)
year = "1983";
else if (numb < 688174)
year = "1984";
else if (numb < 815454)
year = "1985";
else
year = "1986";
number = year + "0" + number;
} else if (number.startsWith("5/") || number.startsWith("05/")) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("5/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 103000)
year = "1970";
else if (numb < 214538)
year = "1971";
else if (numb < 319971)
year = "1972";
else if (numb < 429701)
year = "1973";
else if (numb < 537821)
year = "1974";
else if (numb < 645931)
year = "1975";
else if (numb < 756051)
year = "1976";
else if (numb < 866211)
year = "1977";
else
year = "1978";
number = year + "0" + number;
} else if (number.startsWith("4/") || number.startsWith("04/")) {
// standard patent application
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("4/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 80000)
year = "1960";
else if (numb < 163000)
year = "1961";
else if (numb < 248000)
year = "1962";
else if (numb < 335000)
year = "1963";
else if (numb < 423000)
year = "1964";
else if (numb < 518000)
year = "1965";
else if (numb < 606000)
year = "1966";
else if (numb < 695000)
year = "1967";
else if (numb < 788000)
year = "1968";
else
year = "1969";
number = year + "0" + number;
} else if (number.startsWith("3/") || number.startsWith("03/")) {
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("3/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 68000)
year = "1948";
else if (numb < 136000)
year = "1949";
else if (numb < 204000)
year = "1950";
else if (numb < 264000)
year = "1951";
else if (numb < 329000)
year = "1952";
else if (numb < 401000)
year = "1953";
else if (numb < 479000)
year = "1954";
else if (numb < 557000)
year = "1955";
else if (numb < 632000)
year = "1956";
else if (numb < 706000)
year = "1957";
else if (numb < 784000)
year = "1958";
else
year = "1959";
} else if (number.startsWith("2/") || number.startsWith("02/")) {
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("2/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
if (numb < 57000)
year = "1935";
else if (numb < 119000)
year = "1936";
else if (numb < 183000)
year = "1937";
else if (numb < 249000)
year = "1938";
else if (numb < 312000)
year = "1939";
else if (numb < 372000)
year = "1940";
else if (numb < 425000)
year = "1941";
else if (numb < 471000)
year = "1942";
else if (numb < 516000)
year = "1943";
else if (numb < 570000)
year = "1944";
else if (numb < 638000)
year = "1945";
else if (numb < 719000)
year = "1946";
else
year = "1947";
} else if (number.startsWith("1/") || number.startsWith("01/")) {
applications.set(i, Boolean.valueOf(true));
provisionals.set(i, Boolean.valueOf(false));
originalNumber = number;
if (number.startsWith("1/"))
number = number.substring(2, number.length());
else
number = number.substring(3, number.length());
number = number.replaceAll("[\\.\\s/,]", "");
// we check the range of the number for deciding about a year
int numb = -1;
try {
numb = Integer.parseInt(number);
} catch(NumberFormatException e) {
LOGGER.warn("Cannot parse extracted patent number: " + number);
}
if (numb == -1)
continue;
String year = null;
/*if (numb < 70000)
year = "1915";
else if (numb < 140000)
year = "1916";
else if (numb < 210000)
year = "1917";
else if (numb < 270000)
year = "1918";
else if (numb < 349000)
year = "1919";
else if (numb < 435000)
year = "1920";
else if (numb < 526000)
year = "1921";
else if (numb < 610000)
year = "1922";
else if (numb < 684000)
year = "1923";
else if (numb < )
year = "1924";
else */
if (numb < 78000)
year = "1925";
else if (numb < 158000)
year = "1926";
else if (numb < 244000)
year = "1927";
else if (numb < 330000)
year = "1928";
else if (numb < 418000)
year = "1929";
else if (numb < 506000)
year = "1930";
else if (numb < 584000)
year = "1931";
else if (numb < 650000)
year = "1932";
else if (numb < 705000)
year = "1933";
else
year = "1934";
} else if (number.startsWith("RE")) {
// we have a reissued patent USRE, normally with a 5-digit number
reissueds.set(i, Boolean.valueOf(true));
applications.set(i, Boolean.valueOf(false));
provisionals.set(i, Boolean.valueOf(false));
} else if (number.startsWith("PP")) {
// we have a plant patent USPP
plants.set(i, Boolean.valueOf(true));
applications.set(i, Boolean.valueOf(false));
provisionals.set(i, Boolean.valueOf(false));
} else {
// even if it is indicated as an application, the serial coding indicates
// that it may not be one!
// access to OPS would be necessary to decide, but heuristics can help!
String numm = number.replaceAll("[ABCU,\\.\\s/\\\\]", "").trim();
originalNumber = numm;
if ((numm.length() == 10) || (numm.length() == 11)) {
applications.set(i, Boolean.valueOf(false));
provisionals.set(i, Boolean.valueOf(false));
//if (publi && (numm.length() == 11)) {
if ((!applications.get(i).booleanValue()) && (numm.length() == 11)) {
if (numm.charAt(4) == '0') {
number = numm.substring(0, 4) + numm.substring(5, numm.length());
}
}
} else if ((number.indexOf("/") != -1) && !publi) {
applications.set(i, Boolean.valueOf(true));
}
}
} else if (country.equals("JP")) {
String numm = number.replaceAll("[ABCU,\\.\\s/]", "").trim();
originalNumber = numm;
if ((numm.length() == 10)) {
applications.set(i, Boolean.valueOf(false));
provisionals.set(i, Boolean.valueOf(false));
}
// first do we have a modern numbering
if ((numm.length() == 9) && (numm.startsWith("20") || numm.startsWith("19"))) {
// publication, we need to add a 0 after the 4 digit year
number = numm.substring(0, 4) + "0" + numm.substring(4, numm.length());
}
//else if ((numm.length() == 7)) {
//}
else if (applications.get(i)
&& ((numm.length() == 7) || (numm.length() == 8))) {
// for an application: emperor reign post-processing;
// we need to get the prefix in the original number
String prefix = "" + number.charAt(0);
int move = 0;
if ((prefix.equals("A")) || (prefix.equals("B")) || (prefix.equals("C"))) {
// kind code
kindcodes.set(i, prefix);
prefix = null;
move = 1;
applications.set(i, Boolean.valueOf(false));
// it was not an application number but a publication!
} else if (Character.isDigit(prefix.charAt(0))) {
if ((originalNumber.charAt(1) == '-') ||
(originalNumber.charAt(1) == '/')) {
move = 1;
} else if (Character.isDigit(number.charAt(1))) {
prefix += number.charAt(1);
move = 2;
} else
move = 1;
} else {
if (Character.isDigit(number.charAt(1))) {
prefix = "" + number.charAt(1);
move = 2;
} else
prefix = null;
}
if (prefix != null) {
String year = null;
// this is a heuristic: for small numbers (<=25) we assume the Heisei reign,
// for higher ones (<=63) the Showa reign... this works from 1950 on
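// illustrative arithmetic: Heisei 7 -> 7 + 1988 = 1995; Showa 60 -> 60 + 1925 = 1985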
int emperorYear = Integer.parseInt(prefix);
if (emperorYear <= 25) {
year = "" + (emperorYear + 1988);
} else if (emperorYear <= 63) {
year = "" + (emperorYear + 1925);
}
number = year + number.substring(move, number.length());
}
}
} else if (country.equals("DE")) {
// Application numbering format up to 2003. The first digit indicates the type of
// application (1 for patent). The next 2 digits are the filing year. The remaining
// digits are the serial number.
// ex: 195 00 002.1 -> DE19951000002
// Numbering format as of 1st January 2004: the first two digits indicate the application
// type (10 for patent). The 4-digit year of filing is next, followed by a 6-digit
// serial number, and an optional check digit.
// ex: 102004005106.7 -> DE200410005106
// otherwise it is a publication
} else if (country.equals("GB")) {
if (applications.get(i).booleanValue()) {
String numm = number.replaceAll("[ABCU,\\.\\s/]", "").trim();
originalNumber = numm;
if (numm.length() == 7) {
String year = numm.substring(0, 2);
if ((year.charAt(0) == '7') || (year.charAt(0) == '8') || (year.charAt(0) == '9')) {
year = "19" + year;
} else {
year = "20" + year;
}
number = year + "00" + numm.substring(2, numm.length());
}
}
}
else if (country.equals("FR")) {
// A 2-digit year followed by a 5-digit serial number, assigned sequentially within the
// year
// ex: 96 03098 -> FR19960003098
}
}
newNumbers.add(number);
if (originalNumber == null)
originalNumber = number;
originalNumbers.add(originalNumber);
i++;
}
numbers = newNumbers;
i = 0;
for (String number : numbers) {
if (number != null) {
PatentItem res0 = new PatentItem();
res0.setAuthority(country);
res0.setApplication(applications.get(i).booleanValue());
res0.setProvisional(provisionals.get(i).booleanValue());
res0.setReissued(reissueds.get(i).booleanValue());
res0.setPlant(plants.get(i).booleanValue());
if (pctapps.get(i).booleanValue())
res0.setNumberEpoDoc(number.replaceAll("[\\.\\s/\\\\]", ""));
else
res0.setNumberEpoDoc(number.replaceAll("[ABCU,\\.\\s/\\\\]", ""));
if (i<originalNumbers.size()) {
res0.setNumberWysiwyg(originalNumbers.get(i).replaceAll("[ABCU,\\.\\s/\\\\]", ""));
}
// number completion
if (country.equals("EP")) {
if (!res0.getApplication()) {
while (res0.getNumberEpoDoc().length() < 7) {
res0.setNumberEpoDoc("0" + res0.getNumberEpoDoc());
}
}
}
res0.setKindCode(kindcodes.get(i));
res0.setOffsetBegin(offsets_begin.get(i).intValue());
res0.setOffsetEnd(offsets_end.get(i).intValue());
res.add(res0);
}
i++;
}
}
return res;
}
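/**
 * Add a candidate number and its character offsets to the given lists, after
 * stripping a trailing check digit or kind code and filtering out implausible
 * lengths; when a non-null enumeration sequence is given, the offsets are
 * re-anchored on the position of the number inside that sequence.
 */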
private void addNumber(List<String> numbers, List<Integer> offsets_begin, List<Integer> offsets_end,
String toto, int offset_begin, int offset_end, String sequence) {
// we have to check if we have a check code at the end of the number
toto = toto.trim();
if (toto.length() > 2) {
if ((toto.charAt(toto.length() - 2) == '.') && (Character.isDigit(toto.charAt(toto.length() - 1)))) {
toto = toto.substring(0, toto.length() - 2);
}
if ( (toto.length() > 2) &&
((toto.charAt(toto.length() - 2) == 'A') ||
(toto.charAt(toto.length() - 2) == 'B') ||
(toto.charAt(toto.length() - 2) == 'C')) &&
(Character.isDigit(toto.charAt(toto.length() - 1)))
) {
toto = toto.substring(0, toto.length() - 2);
}
}
if ((toto.length() > 4) && toto.length() < 20) {
if (sequence == null) {
numbers.add(toto.trim());
offsets_begin.add(Integer.valueOf(offset_begin));
offsets_end.add(Integer.valueOf(offset_end));
} else {
// we might have an enumeration and we need to match the target number in it
int localStart = sequence.indexOf(toto);
int localEnd = localStart + toto.length();
numbers.add(toto.trim());
if (localStart != -1) {
offsets_begin.add(Integer.valueOf(localStart+offset_begin));
offsets_end.add(Integer.valueOf(localEnd+offset_begin));
} else {
offsets_begin.add(Integer.valueOf(offset_begin));
offsets_end.add(Integer.valueOf(offset_end));
}
}
}
}
}
| 73,436 | 50.426471 | 257 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/label/TaggingLabelImpl.java
|
package org.grobid.core.engines.label;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.grobid.core.GrobidModel;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
/**
* Representing label that can be tagged
*/
public class TaggingLabelImpl implements TaggingLabel {
public static final long serialVersionUID = 1L;
private final GrobidModel grobidModel;
private final String label;
TaggingLabelImpl(GrobidModel grobidModel, String label) {
this.grobidModel = grobidModel;
this.label = label;
}
public GrobidModel getGrobidModel() {
return grobidModel;
}
public String getLabel() {
return label;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof TaggingLabelImpl)) return false;
TaggingLabelImpl that = (TaggingLabelImpl) o;
return new EqualsBuilder()
.append(getGrobidModel(), that.getGrobidModel())
.append(getLabel(), that.getLabel())
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(getGrobidModel())
.append(getLabel())
.toHashCode();
}
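// derives a counter-friendly name from the model and label,
// e.g. (illustrative) model "header" + label "I-<title>" -> "HEADER_TITLE"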
@Override
public String getName() {
final String tmp = getLabel().replaceAll("[<>]", "");
return StringUtils.upperCase(getGrobidModel().getModelName() + "_" + tmp.replace(GenericTaggerUtils.START_ENTITY_LABEL_PREFIX, ""));
}
}
| 1,648 | 26.483333 | 140 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/label/TaggingLabel.java
|
package org.grobid.core.engines.label;
import java.io.Serializable;
import org.grobid.core.GrobidModel;
import org.grobid.core.engines.counters.Countable;
public interface TaggingLabel extends Countable, Serializable {
GrobidModel getGrobidModel();
String getLabel();
}
| 283 | 19.285714 | 63 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/label/SegmentationLabels.java
|
package org.grobid.core.engines.label;
import org.grobid.core.GrobidModels;
public class SegmentationLabels extends TaggingLabels{
/**
* cover page <cover>,
* document header <header>,
* page footer <footnote>,
* page header <headnote>,
* note in margin <marginnote>,
* document body <body>,
* bibliographical section <references>,
* page number <page>,
* annexes <annex>,
* acknowledgement <acknowledgement>,
* availability <availability>,
* funding <funding>,
* other <other>,
* toc <toc> -> not yet used because not yet training data for this
*/
public final static String COVER_LABEL = "<cover>";
public final static String HEADER_LABEL = "<header>";
public final static String FOOTNOTE_LABEL = "<footnote>";
public final static String HEADNOTE_LABEL = "<headnote>";
public final static String MARGINNOTE_LABEL = "<marginnote>";
public final static String BODY_LABEL = "<body>";
public final static String PAGE_NUMBER_LABEL = "<page>";
public final static String ANNEX_LABEL = "<annex>";
public final static String REFERENCES_LABEL = "<references>";
public final static String ACKNOWLEDGEMENT_LABEL = "<acknowledgement>";
public final static String TOC_LABEL = "<toc>";
SegmentationLabels() {
super();
}
public static final TaggingLabel COVER = new TaggingLabelImpl(GrobidModels.SEGMENTATION, COVER_LABEL);
public static final TaggingLabel HEADER = new TaggingLabelImpl(GrobidModels.SEGMENTATION, HEADER_LABEL);
public static final TaggingLabel FOOTNOTE = new TaggingLabelImpl(GrobidModels.SEGMENTATION, FOOTNOTE_LABEL);
public static final TaggingLabel HEADNOTE = new TaggingLabelImpl(GrobidModels.SEGMENTATION, HEADNOTE_LABEL);
public static final TaggingLabel MARGINNOTE = new TaggingLabelImpl(GrobidModels.SEGMENTATION, MARGINNOTE_LABEL);
public static final TaggingLabel BODY = new TaggingLabelImpl(GrobidModels.SEGMENTATION, BODY_LABEL);
public static final TaggingLabel PAGE_NUMBER = new TaggingLabelImpl(GrobidModels.SEGMENTATION, PAGE_NUMBER_LABEL);
public static final TaggingLabel ANNEX = new TaggingLabelImpl(GrobidModels.SEGMENTATION, ANNEX_LABEL);
public static final TaggingLabel REFERENCES = new TaggingLabelImpl(GrobidModels.SEGMENTATION, REFERENCES_LABEL);
public static final TaggingLabel ACKNOWLEDGEMENT = new TaggingLabelImpl(GrobidModels.SEGMENTATION, ACKNOWLEDGEMENT_LABEL);
public static final TaggingLabel AVAILABILITY = new TaggingLabelImpl(GrobidModels.SEGMENTATION, AVAILABILITY_LABEL);
public static final TaggingLabel FUNDING = new TaggingLabelImpl(GrobidModels.SEGMENTATION, FUNDING_LABEL);
public static final TaggingLabel TOC = new TaggingLabelImpl(GrobidModels.SEGMENTATION, TOC_LABEL);
static {
register(COVER);
register(HEADER);
register(FOOTNOTE);
register(HEADNOTE);
register(MARGINNOTE);
register(BODY);
register(PAGE_NUMBER);
register(ANNEX);
register(REFERENCES);
register(ACKNOWLEDGEMENT);
register(AVAILABILITY);
register(FUNDING);
register(TOC);
}
}
| 3,197 | 44.042254 | 126 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/label/TaggingLabels.java
|
package org.grobid.core.engines.label;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.engines.tagging.GenericTaggerUtils;
import org.grobid.core.utilities.Pair;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public class TaggingLabels {
protected static final ConcurrentMap<Pair<GrobidModel, String>, TaggingLabel> cache = new ConcurrentHashMap<>();
//IOB labels and prefixes
public static final String IOB_START_ENTITY_LABEL_PREFIX = "B-";
public static final String IOB_INSIDE_LABEL_PREFIX = "I-";
public static final String IOB_OTHER_LABEL = "O";
//ENAMEX NER label and prefixes
public static final String ENAMEX_START_ENTITY_LABEL_PREFIX = "E-";
//Grobid generic labels
public static final String GROBID_START_ENTITY_LABEL_PREFIX = "I-";
public static final String GROBID_INSIDE_ENTITY_LABEL_PREFIX = "";
public final static String OTHER_LABEL = "<other>";
//Grobid specific labels
public final static String AVAILABILITY_LABEL = "<availability>";
public final static String FUNDING_LABEL = "<funding>";
public static final String CITATION_MARKER_LABEL = "<citation_marker>";
public static final String TABLE_MARKER_LABEL = "<table_marker>";
public static final String FIGURE_MARKER_LABEL = "<figure_marker>";
public static final String EQUATION_MARKER_LABEL = "<equation_marker>";
public final static String PARAGRAPH_LABEL = "<paragraph>";
public final static String ITEM_LABEL = "<item>";
public final static String SECTION_LABEL = "<section>";
public final static String FIGURE_LABEL = "<figure>";
public final static String TABLE_LABEL = "<table>";
public final static String EQUATION_LAB = "<equation>";
public final static String EQUATION_ID_LABEL = "<equation_label>";
public final static String DESCRIPTION_LABEL = "<figDesc>";
public final static String HEADER_LABEL = "<figure_head>";
public final static String CONTENT_LABEL = "<content>";
public final static String LABEL_LABEL = "<label>";
public final static String DATE_LABEL = "<date>";
public final static String DATE_YEAR_LABEL = "<year>";
public final static String DATE_MONTH_LABEL = "<month>";
public final static String DATE_DAY_LABEL = "<day>";
public final static String TITLE_LABEL = "<title>";
public final static String ABSTRACT_LABEL = "<abstract>";
public final static String AUTHOR_LABEL = "<author>";
public final static String TECH_LABEL = "<tech>";
public final static String LOCATION_LABEL = "<location>";
public final static String DATESUB_LABEL = "<date-submission>";
public final static String PAGE_LABEL = "<page>";
public final static String EDITOR_LABEL = "<editor>";
public final static String INSTITUTION_LABEL = "<institution>";
public final static String NOTE_LABEL = "<note>";
public final static String REFERENCE_LABEL = "<reference>";
public final static String COPYRIGHT_LABEL = "<copyright>";
public final static String AFFILIATION_LABEL = "<affiliation>";
public final static String ADDRESS_LABEL = "<address>";
public final static String EMAIL_LABEL = "<email>";
public final static String PUBNUM_LABEL = "<pubnum>";
public final static String KEYWORD_LABEL = "<keyword>";
public final static String PHONE_LABEL = "<phone>";
public final static String DEGREE_LABEL = "<degree>";
public final static String WEB_LABEL = "<web>";
public final static String DEDICATION_LABEL = "<dedication>";
public final static String SUBMISSION_LABEL = "<submission>";
public final static String ENTITLE_LABEL = "<entitle>";
//public final static String INTRO_LABEL = "<intro>";
public final static String VERSION_LABEL = "<version>";
public final static String DOCTYPE_LABEL = "<doctype>";
public final static String DOWNLOAD_LABEL = "<date-download>";
public final static String WORKINGGROUP_LABEL = "<group>";
public final static String MEETING_LABEL = "<meeting>";
public final static String COLLABORATION_LABEL = "<collaboration>";
public final static String JOURNAL_LABEL = "<journal>";
public final static String BOOKTITLE_LABEL = "<booktitle>";
public final static String SERIES_LABEL = "<series>";
public final static String VOLUME_LABEL = "<volume>";
public final static String ISSUE_LABEL = "<issue>";
public final static String PAGES_LABEL = "<pages>";
public final static String PUBLISHER_LABEL = "<publisher>";
public final static String MARKER_LABEL = "<marker>";
public final static String FORENAME_LABEL = "<forename>";
public final static String MIDDLENAME_LABEL = "<middlename>";
public final static String SURNAME_LABEL = "<surname>";
public final static String SUFFIX_LABEL = "<suffix>";
public final static String COVER_LABEL = "<cover>";
public final static String SUMMARY_LABEL = "<summary>";
public final static String BIOGRAPHY_LABEL = "<biography>";
public final static String ADVERTISEMENT_LABEL = "<advertisement>";
public final static String TOC_LABEL = "<toc>";
public final static String TOF_LABEL = "<tof>";
public final static String PREFACE_LABEL = "<preface>";
public final static String UNIT_LABEL = "<unit>";
public final static String ANNEX_LABEL = "<annex>";
public final static String INDEX_LABEL = "<index>";
public final static String GLOSSARY_LABEL = "<glossary>";
public final static String BACK_LABEL = "<back>";
public final static String PATENT_CITATION_PL_LABEL = "<refPatent>";
public final static String PATENT_CITATION_NPL_LABEL = "<refNPL>";
/* title page (secondary title page)
* publisher page (publication information, including usually the copyrights info)
* summary (include executive summary)
* biography
* advertising (other works by the author/publisher)
* table of content
* preface (foreword)
* dedication (I dedicate this label to my family and my thesis director ;)
* unit (chapter or standalone article)
* reference (a full chapter of references, not to be confused with references attached to an article)
* annex
* index
* glossary (also abbreviations and acronyms)
* back cover page
* other
*/
public static final TaggingLabel CITATION_MARKER = new TaggingLabelImpl(GrobidModels.FULLTEXT, CITATION_MARKER_LABEL);
public static final TaggingLabel TABLE_MARKER = new TaggingLabelImpl(GrobidModels.FULLTEXT, TABLE_MARKER_LABEL);
public static final TaggingLabel FIGURE_MARKER = new TaggingLabelImpl(GrobidModels.FULLTEXT, FIGURE_MARKER_LABEL);
public static final TaggingLabel EQUATION_MARKER = new TaggingLabelImpl(GrobidModels.FULLTEXT, EQUATION_MARKER_LABEL);
public static final TaggingLabel PARAGRAPH = new TaggingLabelImpl(GrobidModels.FULLTEXT, PARAGRAPH_LABEL);
public static final TaggingLabel ITEM = new TaggingLabelImpl(GrobidModels.FULLTEXT, ITEM_LABEL);
public static final TaggingLabel OTHER = new TaggingLabelImpl(GrobidModels.FULLTEXT, OTHER_LABEL);
public static final TaggingLabel SECTION = new TaggingLabelImpl(GrobidModels.FULLTEXT, SECTION_LABEL);
public static final TaggingLabel FIGURE = new TaggingLabelImpl(GrobidModels.FULLTEXT, FIGURE_LABEL);
public static final TaggingLabel TABLE = new TaggingLabelImpl(GrobidModels.FULLTEXT, TABLE_LABEL);
public static final TaggingLabel EQUATION = new TaggingLabelImpl(GrobidModels.FULLTEXT, EQUATION_LAB);
public static final TaggingLabel EQUATION_LABEL = new TaggingLabelImpl(GrobidModels.FULLTEXT, EQUATION_ID_LABEL);
public static final TaggingLabel HEADER_DATE = new TaggingLabelImpl(GrobidModels.HEADER, DATE_LABEL);
public static final TaggingLabel HEADER_TITLE = new TaggingLabelImpl(GrobidModels.HEADER, TITLE_LABEL);
public static final TaggingLabel HEADER_ABSTRACT = new TaggingLabelImpl(GrobidModels.HEADER, ABSTRACT_LABEL);
public static final TaggingLabel HEADER_AUTHOR = new TaggingLabelImpl(GrobidModels.HEADER, AUTHOR_LABEL);
public static final TaggingLabel HEADER_TECH = new TaggingLabelImpl(GrobidModels.HEADER, TECH_LABEL);
public static final TaggingLabel HEADER_LOCATION = new TaggingLabelImpl(GrobidModels.HEADER, LOCATION_LABEL);
public static final TaggingLabel HEADER_DATESUB = new TaggingLabelImpl(GrobidModels.HEADER, DATESUB_LABEL);
public static final TaggingLabel HEADER_PAGE = new TaggingLabelImpl(GrobidModels.HEADER, PAGE_LABEL);
public static final TaggingLabel HEADER_EDITOR = new TaggingLabelImpl(GrobidModels.HEADER, EDITOR_LABEL);
public static final TaggingLabel HEADER_INSTITUTION = new TaggingLabelImpl(GrobidModels.HEADER, INSTITUTION_LABEL);
public static final TaggingLabel HEADER_NOTE = new TaggingLabelImpl(GrobidModels.HEADER, NOTE_LABEL);
public static final TaggingLabel HEADER_OTHER = new TaggingLabelImpl(GrobidModels.HEADER, OTHER_LABEL);
public static final TaggingLabel HEADER_REFERENCE = new TaggingLabelImpl(GrobidModels.HEADER, REFERENCE_LABEL);
public static final TaggingLabel HEADER_FUNDING = new TaggingLabelImpl(GrobidModels.HEADER, FUNDING_LABEL);
public static final TaggingLabel HEADER_COPYRIGHT = new TaggingLabelImpl(GrobidModels.HEADER, COPYRIGHT_LABEL);
public static final TaggingLabel HEADER_AFFILIATION = new TaggingLabelImpl(GrobidModels.HEADER, AFFILIATION_LABEL);
public static final TaggingLabel HEADER_ADDRESS = new TaggingLabelImpl(GrobidModels.HEADER, ADDRESS_LABEL);
public static final TaggingLabel HEADER_EMAIL = new TaggingLabelImpl(GrobidModels.HEADER, EMAIL_LABEL);
public static final TaggingLabel HEADER_PUBNUM = new TaggingLabelImpl(GrobidModels.HEADER, PUBNUM_LABEL);
public static final TaggingLabel HEADER_KEYWORD = new TaggingLabelImpl(GrobidModels.HEADER, KEYWORD_LABEL);
public static final TaggingLabel HEADER_PHONE = new TaggingLabelImpl(GrobidModels.HEADER, PHONE_LABEL);
public static final TaggingLabel HEADER_DEGREE = new TaggingLabelImpl(GrobidModels.HEADER, DEGREE_LABEL);
public static final TaggingLabel HEADER_WEB = new TaggingLabelImpl(GrobidModels.HEADER, WEB_LABEL);
public static final TaggingLabel HEADER_DEDICATION = new TaggingLabelImpl(GrobidModels.HEADER, DEDICATION_LABEL);
public static final TaggingLabel HEADER_SUBMISSION = new TaggingLabelImpl(GrobidModels.HEADER, SUBMISSION_LABEL);
public static final TaggingLabel HEADER_ENTITLE = new TaggingLabelImpl(GrobidModels.HEADER, ENTITLE_LABEL);
//public static final TaggingLabel HEADER_INTRO = new TaggingLabelImpl(GrobidModels.HEADER, INTRO_LABEL);
public static final TaggingLabel HEADER_COLLABORATION = new TaggingLabelImpl(GrobidModels.HEADER, COLLABORATION_LABEL);
public static final TaggingLabel HEADER_VERSION = new TaggingLabelImpl(GrobidModels.HEADER, VERSION_LABEL);
public static final TaggingLabel HEADER_DOCTYPE = new TaggingLabelImpl(GrobidModels.HEADER, DOCTYPE_LABEL);
public static final TaggingLabel HEADER_DOWNLOAD = new TaggingLabelImpl(GrobidModels.HEADER, DOWNLOAD_LABEL);
public static final TaggingLabel HEADER_WORKINGGROUP = new TaggingLabelImpl(GrobidModels.HEADER, WORKINGGROUP_LABEL);
public static final TaggingLabel HEADER_MEETING = new TaggingLabelImpl(GrobidModels.HEADER, MEETING_LABEL);
public static final TaggingLabel HEADER_PUBLISHER = new TaggingLabelImpl(GrobidModels.HEADER, PUBLISHER_LABEL);
public static final TaggingLabel HEADER_JOURNAL = new TaggingLabelImpl(GrobidModels.HEADER, JOURNAL_LABEL);
public static final TaggingLabel HEADER_AVAILABILITY = new TaggingLabelImpl(GrobidModels.HEADER, AVAILABILITY_LABEL);
public static final TaggingLabel DATE_YEAR = new TaggingLabelImpl(GrobidModels.DATE, DATE_YEAR_LABEL);
public static final TaggingLabel DATE_MONTH = new TaggingLabelImpl(GrobidModels.DATE, DATE_MONTH_LABEL);
public static final TaggingLabel DATE_DAY = new TaggingLabelImpl(GrobidModels.DATE, DATE_DAY_LABEL);
public static final TaggingLabel FIG_DESC = new TaggingLabelImpl(GrobidModels.FIGURE, DESCRIPTION_LABEL);
public static final TaggingLabel FIG_HEAD = new TaggingLabelImpl(GrobidModels.FIGURE, HEADER_LABEL);
public static final TaggingLabel FIG_CONTENT = new TaggingLabelImpl(GrobidModels.FIGURE, CONTENT_LABEL);
public static final TaggingLabel FIG_LABEL = new TaggingLabelImpl(GrobidModels.FIGURE, LABEL_LABEL);
public static final TaggingLabel FIG_OTHER = new TaggingLabelImpl(GrobidModels.FIGURE, OTHER_LABEL);
public static final TaggingLabel TBL_DESC = new TaggingLabelImpl(GrobidModels.TABLE, DESCRIPTION_LABEL);
public static final TaggingLabel TBL_HEAD = new TaggingLabelImpl(GrobidModels.TABLE, HEADER_LABEL);
public static final TaggingLabel TBL_CONTENT = new TaggingLabelImpl(GrobidModels.TABLE, CONTENT_LABEL);
public static final TaggingLabel TBL_LABEL = new TaggingLabelImpl(GrobidModels.TABLE, LABEL_LABEL);
public static final TaggingLabel TBL_OTHER = new TaggingLabelImpl(GrobidModels.TABLE, OTHER_LABEL);
public static final TaggingLabel TBL_NOTE = new TaggingLabelImpl(GrobidModels.TABLE, NOTE_LABEL);
public static final TaggingLabel CITATION_TITLE = new TaggingLabelImpl(GrobidModels.CITATION, TITLE_LABEL);
public static final TaggingLabel CITATION_JOURNAL = new TaggingLabelImpl(GrobidModels.CITATION, JOURNAL_LABEL);
public static final TaggingLabel CITATION_BOOKTITLE = new TaggingLabelImpl(GrobidModels.CITATION, BOOKTITLE_LABEL);
public static final TaggingLabel CITATION_COLLABORATION = new TaggingLabelImpl(GrobidModels.CITATION, COLLABORATION_LABEL);
public static final TaggingLabel CITATION_AUTHOR = new TaggingLabelImpl(GrobidModels.CITATION, AUTHOR_LABEL);
public static final TaggingLabel CITATION_EDITOR = new TaggingLabelImpl(GrobidModels.CITATION, EDITOR_LABEL);
public static final TaggingLabel CITATION_DATE = new TaggingLabelImpl(GrobidModels.CITATION, DATE_LABEL);
public static final TaggingLabel CITATION_INSTITUTION = new TaggingLabelImpl(GrobidModels.CITATION, INSTITUTION_LABEL);
public static final TaggingLabel CITATION_NOTE = new TaggingLabelImpl(GrobidModels.CITATION, NOTE_LABEL);
public static final TaggingLabel CITATION_TECH = new TaggingLabelImpl(GrobidModels.CITATION, TECH_LABEL);
public static final TaggingLabel CITATION_VOLUME = new TaggingLabelImpl(GrobidModels.CITATION, VOLUME_LABEL);
public static final TaggingLabel CITATION_ISSUE = new TaggingLabelImpl(GrobidModels.CITATION, ISSUE_LABEL);
public static final TaggingLabel CITATION_PAGES = new TaggingLabelImpl(GrobidModels.CITATION, PAGES_LABEL);
public static final TaggingLabel CITATION_LOCATION = new TaggingLabelImpl(GrobidModels.CITATION, LOCATION_LABEL);
public static final TaggingLabel CITATION_PUBLISHER = new TaggingLabelImpl(GrobidModels.CITATION, PUBLISHER_LABEL);
public static final TaggingLabel CITATION_WEB = new TaggingLabelImpl(GrobidModels.CITATION, WEB_LABEL);
public static final TaggingLabel CITATION_PUBNUM = new TaggingLabelImpl(GrobidModels.CITATION, PUBNUM_LABEL);
public static final TaggingLabel CITATION_SERIES = new TaggingLabelImpl(GrobidModels.CITATION, SERIES_LABEL);
public static final TaggingLabel CITATION_OTHER = new TaggingLabelImpl(GrobidModels.CITATION, OTHER_LABEL);
public static final TaggingLabel NAMES_HEADER_MARKER = new TaggingLabelImpl(GrobidModels.NAMES_HEADER, MARKER_LABEL);
public static final TaggingLabel NAMES_HEADER_TITLE = new TaggingLabelImpl(GrobidModels.NAMES_HEADER, TITLE_LABEL);
public static final TaggingLabel NAMES_HEADER_FORENAME = new TaggingLabelImpl(GrobidModels.NAMES_HEADER, FORENAME_LABEL);
public static final TaggingLabel NAMES_HEADER_MIDDLENAME = new TaggingLabelImpl(GrobidModels.NAMES_HEADER, MIDDLENAME_LABEL);
public static final TaggingLabel NAMES_HEADER_SURNAME = new TaggingLabelImpl(GrobidModels.NAMES_HEADER, SURNAME_LABEL);
public static final TaggingLabel NAMES_HEADER_SUFFIX = new TaggingLabelImpl(GrobidModels.NAMES_HEADER, SUFFIX_LABEL);
public static final TaggingLabel NAMES_CITATION_TITLE = new TaggingLabelImpl(GrobidModels.NAMES_CITATION, TITLE_LABEL);
public static final TaggingLabel NAMES_CITATION_FORENAME = new TaggingLabelImpl(GrobidModels.NAMES_CITATION, FORENAME_LABEL);
public static final TaggingLabel NAMES_CITATION_MIDDLENAME = new TaggingLabelImpl(GrobidModels.NAMES_CITATION, MIDDLENAME_LABEL);
public static final TaggingLabel NAMES_CITATION_SURNAME = new TaggingLabelImpl(GrobidModels.NAMES_CITATION, SURNAME_LABEL);
public static final TaggingLabel NAMES_CITATION_SUFFIX = new TaggingLabelImpl(GrobidModels.NAMES_CITATION, SUFFIX_LABEL);
public static final TaggingLabel PATENT_CITATION_PL = new TaggingLabelImpl(GrobidModels.PATENT_CITATION, PATENT_CITATION_PL_LABEL);
public static final TaggingLabel PATENT_CITATION_NPL = new TaggingLabelImpl(GrobidModels.PATENT_CITATION, PATENT_CITATION_NPL_LABEL);
public static final TaggingLabel MONOGRAPH_COVER = new TaggingLabelImpl(GrobidModels.MONOGRAPH, COVER_LABEL);
public static final TaggingLabel MONOGRAPH_TITLE = new TaggingLabelImpl(GrobidModels.MONOGRAPH, TITLE_LABEL);
public static final TaggingLabel MONOGRAPH_PUBLISHER = new TaggingLabelImpl(GrobidModels.MONOGRAPH, PUBLISHER_LABEL);
public static final TaggingLabel MONOGRAPH_SUMMARY = new TaggingLabelImpl(GrobidModels.MONOGRAPH, SUMMARY_LABEL);
public static final TaggingLabel MONOGRAPH_BIOGRAPHY = new TaggingLabelImpl(GrobidModels.MONOGRAPH, BIOGRAPHY_LABEL);
public static final TaggingLabel MONOGRAPH_ADVERTISEMENT = new TaggingLabelImpl(GrobidModels.MONOGRAPH, ADVERTISEMENT_LABEL);
public static final TaggingLabel MONOGRAPH_TOC = new TaggingLabelImpl(GrobidModels.MONOGRAPH, TOC_LABEL);
public static final TaggingLabel MONOGRAPH_TOF = new TaggingLabelImpl(GrobidModels.MONOGRAPH, TOF_LABEL);
public static final TaggingLabel MONOGRAPH_PREFACE = new TaggingLabelImpl(GrobidModels.MONOGRAPH, PREFACE_LABEL);
public static final TaggingLabel MONOGRAPH_DEDICATION = new TaggingLabelImpl(GrobidModels.MONOGRAPH, DEDICATION_LABEL);
public static final TaggingLabel MONOGRAPH_UNIT = new TaggingLabelImpl(GrobidModels.MONOGRAPH, UNIT_LABEL);
public static final TaggingLabel MONOGRAPH_REFERENCE = new TaggingLabelImpl(GrobidModels.MONOGRAPH, REFERENCE_LABEL);
public static final TaggingLabel MONOGRAPH_ANNEX = new TaggingLabelImpl(GrobidModels.MONOGRAPH, ANNEX_LABEL);
public static final TaggingLabel MONOGRAPH_INDEX = new TaggingLabelImpl(GrobidModels.MONOGRAPH, INDEX_LABEL);
public static final TaggingLabel MONOGRAPH_GLOSSARY = new TaggingLabelImpl(GrobidModels.MONOGRAPH, GLOSSARY_LABEL);
public static final TaggingLabel MONOGRAPH_BACK = new TaggingLabelImpl(GrobidModels.MONOGRAPH, BACK_LABEL);
public static final TaggingLabel MONOGRAPH_OTHER = new TaggingLabelImpl(GrobidModels.MONOGRAPH, OTHER_LABEL);
protected static void register(TaggingLabel label) {
cache.putIfAbsent(new Pair<>(label.getGrobidModel(), label.getLabel()), label);
}
static {
//fulltext
register(CITATION_MARKER);
register(TABLE_MARKER);
register(FIGURE_MARKER);
register(EQUATION_MARKER);
register(PARAGRAPH);
register(ITEM);
register(OTHER);
register(SECTION);
register(FIGURE);
register(TABLE);
register(EQUATION);
register(EQUATION_LABEL);
//header
register(HEADER_DATE);
register(HEADER_TITLE);
register(HEADER_ABSTRACT);
register(HEADER_AUTHOR);
//register(HEADER_LOCATION);
//register(HEADER_DATESUB);
register(HEADER_EDITOR);
//register(HEADER_INSTITUTION);
register(HEADER_NOTE);
register(HEADER_OTHER);
register(HEADER_REFERENCE);
register(HEADER_FUNDING);
register(HEADER_COPYRIGHT);
register(HEADER_AFFILIATION);
register(HEADER_ADDRESS);
register(HEADER_EMAIL);
register(HEADER_PUBNUM);
register(HEADER_KEYWORD);
register(HEADER_PHONE);
//register(HEADER_DEGREE);
register(HEADER_WEB);
//register(HEADER_DEDICATION);
register(HEADER_SUBMISSION);
//register(HEADER_ENTITLE);
//register(HEADER_INTRO);
//register(HEADER_COLLABORATION);
//register(HEADER_VERSION);
register(HEADER_DOCTYPE);
//register(HEADER_DOWNLOAD);
register(HEADER_WORKINGGROUP);
register(HEADER_MEETING);
register(HEADER_PUBLISHER);
register(HEADER_JOURNAL);
register(HEADER_PAGE);
register(HEADER_AVAILABILITY);
//date
register(DATE_YEAR);
register(DATE_MONTH);
register(DATE_DAY);
//figures
register(FIG_DESC);
register(FIG_HEAD);
register(FIG_CONTENT);
register(FIG_LABEL);
register(FIG_OTHER);
// table
register(TBL_DESC);
register(TBL_HEAD);
register(TBL_CONTENT);
register(TBL_LABEL);
register(TBL_OTHER);
// citation
register(CITATION_TITLE);
register(CITATION_JOURNAL);
register(CITATION_BOOKTITLE);
register(CITATION_COLLABORATION);
register(CITATION_AUTHOR);
register(CITATION_EDITOR);
register(CITATION_DATE);
register(CITATION_INSTITUTION);
register(CITATION_NOTE);
register(CITATION_TECH);
register(CITATION_VOLUME);
register(CITATION_ISSUE);
register(CITATION_PAGES);
register(CITATION_LOCATION);
register(CITATION_PUBLISHER);
register(CITATION_WEB);
register(CITATION_PUBNUM);
register(CITATION_OTHER);
register(CITATION_SERIES);
// person names
register(NAMES_HEADER_MARKER);
register(NAMES_HEADER_TITLE);
register(NAMES_HEADER_FORENAME);
register(NAMES_HEADER_MIDDLENAME);
register(NAMES_HEADER_SURNAME);
register(NAMES_HEADER_SUFFIX);
register(NAMES_CITATION_TITLE);
register(NAMES_CITATION_FORENAME);
register(NAMES_CITATION_MIDDLENAME);
register(NAMES_CITATION_SURNAME);
register(NAMES_CITATION_SUFFIX);
// citations in patent
register(PATENT_CITATION_PL);
register(PATENT_CITATION_NPL);
// monograph
register(MONOGRAPH_COVER);
register(MONOGRAPH_TITLE);
register(MONOGRAPH_PUBLISHER);
register(MONOGRAPH_BIOGRAPHY);
register(MONOGRAPH_SUMMARY);
register(MONOGRAPH_ADVERTISEMENT);
register(MONOGRAPH_TOC);
register(MONOGRAPH_TOF);
register(MONOGRAPH_PREFACE);
register(MONOGRAPH_DEDICATION);
register(MONOGRAPH_UNIT);
register(MONOGRAPH_REFERENCE);
register(MONOGRAPH_ANNEX);
register(MONOGRAPH_INDEX);
register(MONOGRAPH_GLOSSARY);
register(MONOGRAPH_BACK);
register(MONOGRAPH_OTHER);
}
protected TaggingLabels() {
}
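// resolves (or lazily creates and caches) the canonical label instance for a model
// and a raw tagger output; illustrative usage:
//   TaggingLabel l = TaggingLabels.labelFor(GrobidModels.HEADER, "I-<title>");
//   // l == TaggingLabels.HEADER_TITLE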
public static TaggingLabel labelFor(final GrobidModel model, final String label) {
final String plainLabel = GenericTaggerUtils.getPlainLabel(label);
if (plainLabel == null)
throw new NullPointerException("No plain label could be derived from: " + label);
cache.putIfAbsent(new Pair<>(model, plainLabel),
new TaggingLabelImpl(model, plainLabel));
return cache.get(new Pair<>(model, plainLabel));
}
}
| 23,746 | 58.3675 | 137 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/entities/NameToStructureResolver.java
|
package org.grobid.core.engines.entities;
import org.grobid.core.data.ChemicalEntity;
/**
* Chemical name-to-structure processing based on external Open Source libraries.
*
*/
public class NameToStructureResolver {
public static ChemicalEntity process(String name) {
ChemicalEntity result = new ChemicalEntity(name);
// placeholder: actual name-to-structure resolution would populate the result here
return result;
}
public static void depict(ChemicalEntity structure, String path) {
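// stub: depiction of the structure to the given path is not implemented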
}
}
| 458 | 18.956522 | 81 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/entities/ChemicalParser.java
|
package org.grobid.core.engines.entities;
import org.grobid.core.GrobidModels;
import org.grobid.core.data.ChemicalEntity;
import org.grobid.core.engines.AbstractParser;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorChemicalEntity;
import org.grobid.core.utilities.TextUtilities;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
/**
* Chemical entities extraction.
*
*/
public class ChemicalParser extends AbstractParser {
// private FeatureFactory featureFactory = null;
public ChemicalParser() {
super(GrobidModels.ENTITIES_CHEMISTRY);
// featureFactory = FeatureFactory.getInstance();
}
/**
 * Extract all chemical entities from a simple piece of text.
*/
public List<ChemicalEntity> extractChemicalEntities(String text) throws Exception {
// int nbRes = 0;
if (text == null)
return null;
if (text.length() == 0)
return null;
List<ChemicalEntity> entities;
try {
text = text.replace("\n", " ");
StringTokenizer st = new StringTokenizer(text, TextUtilities.fullPunctuations, true);
if (st.countTokens() == 0)
return null;
ArrayList<String> textBlocks = new ArrayList<String>();
ArrayList<String> tokenizations = new ArrayList<String>();
while (st.hasMoreTokens()) {
String tok = st.nextToken();
tokenizations.add(tok);
if (!tok.equals(" ")) {
textBlocks.add(tok + "\t<chemical>");
}
}
String ress = "";
int posit = 0;
for (String block : textBlocks) {
//System.out.println(block);
ress += FeaturesVectorChemicalEntity
.addFeaturesChemicalEntities(block, textBlocks.size(), posit, false, false)
.printVector();
posit++;
}
ress += "\n";
String res = label(ress);
entities = resultExtraction(res, tokenizations);
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
return entities;
}
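    // Illustrative usage sketch (hypothetical helper, not part of the original class):
    // runs the chemical entity tagger over a plain sentence; each returned entity
    // carries its raw name plus character offsets into the input text.
    private List<ChemicalEntity> demoExtraction() throws Exception {
        return extractChemicalEntities("The compound was dissolved in dimethyl sulfoxide.");
    }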
/**
 * Extract chemical entities from the labelled token sequence.
*/
public List<ChemicalEntity> resultExtraction(String result,
ArrayList<String> tokenizations) {
List<ChemicalEntity> entities = new ArrayList<ChemicalEntity>();
StringTokenizer stt = new StringTokenizer(result, "\n");
ArrayList<String> nameEntities = new ArrayList<String>();
ArrayList<Integer> offsets_entities = new ArrayList<Integer>();
String entity = null;
int offset = 0;
int currentOffset = 0;
String label; // label
String actual; // token
String lastTag = null; // previous label
        int p = 0; // iterator over the tokenizations for restoring the original tokenization with
        // respect to spaces
while (stt.hasMoreTokens()) {
String line = stt.nextToken();
if (line.trim().length() == 0) {
continue;
}
StringTokenizer st2 = new StringTokenizer(line, "\t");
boolean start = true;
boolean addSpace = false;
label = null;
actual = null;
int offset_addition = 0;
while (st2.hasMoreTokens()) {
if (start) {
actual = st2.nextToken();
start = false;
actual = actual.trim();
boolean strop = false;
while ((!strop) && (p < tokenizations.size())) {
String tokOriginal = tokenizations.get(p);
offset_addition += tokOriginal.length();
if (tokOriginal.equals(" ")) {
addSpace = true;
} else if (tokOriginal.equals(actual)) {
strop = true;
}
p++;
}
} else {
label = st2.nextToken().trim();
}
}
if (label == null) {
continue;
}
if (actual != null) {
if (label.endsWith("<chemName>")) {
if (entity == null) {
entity = actual;
currentOffset = offset;
} else {
if (label.equals("I-<chemName>")) {
if (entity != null) {
nameEntities.add(entity);
offsets_entities.add(currentOffset);
}
entity = actual;
currentOffset = offset;
} else {
if (addSpace) {
entity += " " + actual;
} else {
entity += actual;
}
}
}
} else if (label.equals("<other>")) {
if (entity != null) {
nameEntities.add(entity);
offsets_entities.add(currentOffset);
}
entity = null;
}
}
offset += offset_addition;
lastTag = label;
}
// call the name-to-structure processing
int j = 0;
if (nameEntities.size() == 0) {
return null;
}
for (String name : nameEntities) {
//ChemicalEntity structure = NameToStructureResolver.process(name);
ChemicalEntity structure = new ChemicalEntity();
structure.setRawName(name);
structure.setOffsetStart(offsets_entities.get(j));
structure.setOffsetEnd(offsets_entities.get(j) + name.length());
entities.add(structure);
j++;
}
return entities;
}
}
| 6,340 | 34.623596 | 99 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/counters/CitationParserCounters.java
|
package org.grobid.core.engines.counters;
public class CitationParserCounters {
public static final Countable SEGMENTED_REFERENCES = new Countable() {
@Override
public String getName() {
return "SEGMENTED_REFERENCES";
}
};
public static final Countable NULL_SEGMENTED_REFERENCES_LIST = new Countable() {
@Override
public String getName() {
return "NULL_SEGMENTED_REFERENCES_LIST";
}
};
public static final Countable EMPTY_REFERENCES_BLOCKS = new Countable() {
@Override
public String getName() {
return "EMPTY_REFERENCES_BLOCKS";
}
};
public static final Countable NOT_EMPTY_REFERENCES_BLOCKS = new Countable() {
@Override
public String getName() {
return "NOT_EMPTY_REFERENCES_BLOCKS";
}
};
}
| 871 | 29.068966 | 84 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/counters/ReferenceMarkerMatcherCounters.java
|
package org.grobid.core.engines.counters;
public class ReferenceMarkerMatcherCounters {
public static final Countable MATCHED_REF_MARKERS = new Countable() {
@Override
public String getName() {
return "MATCHED_REF_MARKERS";
}
};
public static final Countable UNMATCHED_REF_MARKERS = new Countable() {
@Override
public String getName() {
return "UNMATCHED_REF_MARKERS";
}
};
public static final Countable NO_CANDIDATES = new Countable() {
@Override
public String getName() {
return "NO_CANDIDATES";
}
};
public static final Countable MANY_CANDIDATES = new Countable() {
@Override
public String getName() {
return "MANY_CANDIDATES";
}
};
public static final Countable STYLE_AUTHORS = new Countable() {
@Override
public String getName() {
return "STYLE_AUTHORS";
}
};
public static final Countable STYLE_NUMBERED = new Countable() {
@Override
public String getName() {
return "STYLE_NUMBERED";
}
};
public static final Countable MATCHED_REF_MARKERS_AFTER_POST_FILTERING = new Countable() {
@Override
public String getName() {
return "MATCHED_REF_MARKERS_AFTER_POST_FILTERING";
}
};
public static final Countable MANY_CANDIDATES_AFTER_POST_FILTERING = new Countable() {
@Override
public String getName() {
return "MANY_CANDIDATES_AFTER_POST_FILTERING";
}
};
public static final Countable NO_CANDIDATES_AFTER_POST_FILTERING = new Countable() {
@Override
public String getName() {
return "NO_CANDIDATES_AFTER_POST_FILTERING";
}
};
public static final Countable STYLE_OTHER = new Countable() {
@Override
public String getName() {
return "STYLE_OTHER";
}
};
public static final Countable INPUT_REF_STRINGS_CNT = new Countable() {
@Override
public String getName() {
return "INPUT_REF_STRINGS_CNT";
}
};
}
| 2,192 | 26.074074 | 94 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/counters/FigureCounters.java
|
package org.grobid.core.engines.counters;
public class FigureCounters {
public static final Countable TOO_MANY_FIGURES_PER_PAGE = new Countable() {
@Override
public String getName() {
return "TOO_MANY_FIGURES_PER_PAGE";
}
};
public static final Countable STANDALONE_FIGURES = new Countable() {
@Override
public String getName() {
return "STANDALONE_FIGURES";
}
};
public static final Countable SKIPPED_BAD_STANDALONE_FIGURES = new Countable() {
@Override
public String getName() {
return "SKIPPED_BAD_STANDALONE_FIGURES";
}
};
public static final Countable SKIPPED_SMALL_STANDALONE_FIGURES = new Countable() {
@Override
public String getName() {
return "SKIPPED_SMALL_STANDALONE_FIGURES";
}
};
public static final Countable SKIPPED_BIG_STANDALONE_FIGURES = new Countable() {
@Override
public String getName() {
return "SKIPPED_BIG_STANDALONE_FIGURES";
}
};
public static final Countable ASSIGNED_GRAPHICS_TO_FIGURES = new Countable() {
@Override
public String getName() {
return "ASSIGNED_GRAPHICS_TO_FIGURES";
}
};
public static final Countable SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS = new Countable() {
@Override
public String getName() {
return "SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS";
}
};
}
| 1,566 | 29.72549 | 120 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/counters/TableRejectionCounters.java
|
package org.grobid.core.engines.counters;
public class TableRejectionCounters {
public static final Countable CANNOT_PARSE_LABEL_TO_INT = new Countable() {
@Override
public String getName() {
return "CANNOT_PARSE_LABEL_TO_INT";
}
};
public static final Countable HEADER_NOT_STARTS_WITH_TABLE_WORD = new Countable() {
@Override
public String getName() {
return "HEADER_NOT_STARTS_WITH_TABLE_WORD";
}
};
public static final Countable HEADER_AND_CONTENT_INTERSECT = new Countable() {
@Override
public String getName() {
return "HEADER_AND_CONTENT_INTERSECT";
}
};
public static final Countable HEADER_AREA_BIGGER_THAN_CONTENT = new Countable() {
@Override
public String getName() {
return "HEADER_NOT_STARTS_WITH_TABLE_WORD";
}
};
public static final Countable CONTENT_SIZE_TOO_SMALL = new Countable() {
@Override
public String getName() {
return "CONTENT_SIZE_TOO_SMALL";
}
};
public static final Countable CONTENT_WIDTH_TOO_SMALL = new Countable() {
@Override
public String getName() {
return "CONTENT_WIDTH_TOO_SMALL";
}
};
public static final Countable FEW_TOKENS_IN_HEADER = new Countable() {
@Override
public String getName() {
return "FEW_TOKENS_IN_HEADER";
}
};
public static final Countable FEW_TOKENS_IN_CONTENT = new Countable() {
@Override
public String getName() {
return "FEW_TOKENS_IN_CONTENT";
}
};
public static final Countable EMPTY_LABEL_OR_HEADER_OR_CONTENT = new Countable() {
@Override
public String getName() {
return "EMPTY_LABEL_OR_HEADER_OR_CONTENT";
}
};
public static final Countable HEADER_AND_CONTENT_DIFFERENT_PAGES = new Countable() {
@Override
public String getName() {
return "HEADER_AND_CONTENT_DIFFERENT_PAGES";
}
};
public static final Countable HEADER_NOT_CONSECUTIVE = new Countable() {
@Override
public String getName() {
return "HEADER_NOT_CONSECUTIVE";
}
};
}
| 2,296 | 26.674699 | 88 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/counters/Countable.java
|
package org.grobid.core.engines.counters;
public interface Countable {
String getName();
}
| 96 | 15.166667 | 41 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/citations/AdditionalRegexTextSegmenter.java
|
package org.grobid.core.engines.citations;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* DEPRECATED !
*
* A machine learning model is used now to segment references, see org.grobid.core.engines.ReferenceSegmenterParser
*
*/
public class AdditionalRegexTextSegmenter {
public static final Logger LOGGER = LoggerFactory.getLogger(AdditionalRegexTextSegmenter.class.getName());
private final static Pattern BRACKET_NUMBER_LOOKUP_PATTERN = Pattern.compile("(?s).{0,15}\\[\\d\\] .{10,701}\\n\\[\\d+\\] .*");
private final static Pattern BULLET_NUMBER_LOOKUP_PATTERN = Pattern.compile("(?s).{0,10}1\\. .{10,701}\\n[\\s0]*2\\. .*");
private final static Pattern BRACKET_SPLIT_PATTERN = Pattern.compile("\\[(\\d+)\\] ");
private final static Pattern BULLET_SPLIT_PATTERN = Pattern.compile("\\n(\\d+)\\. ");
private final static Pattern GENERIC_SPLIT_PATTERN = Pattern.compile("\\.[\\s]*\\n");
    private final static Pattern BROKEN_BRACKETS_PATTERN = Pattern.compile("(\\[\\d+\\]\\s*\\n){5,}");
private final static int MAX_CITATION_COUNT = 512;
private final static int MAXIMUM_SEGMENT_LENGTH = 700;
private static final int MINIMUM_SEGMENT_LENGTH = 15;
    // Letters that are followed (not necessarily directly) by uncommon letters.
private static List<Character> sparseLetters = Arrays.asList('O', 'P', 'T', 'U', 'V', 'W', 'X', 'Y');
    // Locate the References part of the fulltext by looking for keywords;
    // if found, the citation style is detected and the citations are split accordingly.
    // If no References block is found, an attempt is made to detect certain citation styles directly in the fulltext.
public List<String> extractCitationSegments(String referencesText) {
if (referencesText == null || referencesText.isEmpty()) {
return Collections.emptyList();
}
        Matcher brokenBracketMatcher = BROKEN_BRACKETS_PATTERN.matcher(referencesText);
if (brokenBracketMatcher.find()) {
return cleanCitations(splitGenerically(referencesText));
}
List<String> parts;
try {
if (BRACKET_NUMBER_LOOKUP_PATTERN.matcher(referencesText).find()) {
parts = splitAlongBracketedNumbers(referencesText);
} else if (BULLET_NUMBER_LOOKUP_PATTERN.matcher(referencesText).find()) {
parts = splitAlongBulletNumbers(referencesText);
} else {
parts = splitGenerically(referencesText);
}
} catch (StackOverflowError e) {
//TODO: FIX regexps properly
LOGGER.error("Stackoverflow");
throw new RuntimeException("Runtime exception with stackoverflow in AdditionalRegexTextSegmenter");
}
return cleanCitations(parts);
}
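    // Illustrative usage sketch (hypothetical helper, not part of this deprecated API):
    // a bracket-numbered block matches BRACKET_NUMBER_LOOKUP_PATTERN and is split
    // into one string per citation.
    private static List<String> demoSegmentation() {
        String block = "[1] A. Author. A first title. Some Journal, 2001.\n"
                + "[2] B. Author. A second title. Some Journal, 2002.\n";
        return new AdditionalRegexTextSegmenter().extractCitationSegments(block);
    }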
private List<String> cleanCitations(List<String> parts) {
if (parts.size() > MAX_CITATION_COUNT) {
parts = parts.subList(0, MAX_CITATION_COUNT);
}
if (parts.size() <= 1) {
// failed to do splitting
return Collections.emptyList();
}
List<String> citations = new ArrayList<String>(parts.size());
for (String part : parts) {
            // this check avoids including false-positive, overly long segments
if (part.length() >= MAXIMUM_SEGMENT_LENGTH) {
continue;
}
if (part.trim().length() != 0) {
citations.add(part);
}
}
return citations;
}
// splits along bracketed number citations: [1] [2].
// checks for consecutive numbering.
private List<String> splitAlongBracketedNumbers(String referencesText) {
List<String> parts = new ArrayList<String>();
Matcher matcher = BRACKET_SPLIT_PATTERN.matcher(referencesText);
Integer currentNumber;
Integer currentSegmentEndIndex;
if (!matcher.find()) {
return Collections.emptyList();
}
Integer currentSegmentStartIndex = matcher.end();
Integer previousNumber = Integer.valueOf(matcher.group(1));
while (matcher.find()) {
currentNumber = Integer.valueOf(matcher.group(1));
if (currentNumber == previousNumber + 1) {
currentSegmentEndIndex = matcher.start() - 1;
if (currentSegmentEndIndex - currentSegmentStartIndex < 0) {
continue;
}
parts.add(referencesText.substring(currentSegmentStartIndex, currentSegmentEndIndex));
currentSegmentStartIndex = matcher.end();
previousNumber++;
}
}
parts.add(referencesText.substring(currentSegmentStartIndex));
return parts;
}
// splits along numbered citations: 1. 2. 3.
// checks for consecutive numbering
private List<String> splitAlongBulletNumbers(String referencesText) {
List<String> parts = new ArrayList<String>();
Matcher matcher = BULLET_SPLIT_PATTERN.matcher(referencesText);
Integer currentNumber;
Integer currentSegmentEndIndex;
//init
if (!matcher.find()) {
return Collections.emptyList();
}
Integer currentSegmentStartIndex = matcher.end();
Integer previousNumber = Integer.valueOf(matcher.group(1));
// a workaround to add the first citation, where there might be no linebreak at the beginning of the referencesText.
if (previousNumber == 2) {
parts.add(referencesText.substring(2, matcher.start()));
}
while (matcher.find()) {
currentNumber = Integer.valueOf(matcher.group(1));
if (currentNumber == previousNumber + 1) {
currentSegmentEndIndex = matcher.start() - 1;
if (currentSegmentEndIndex - currentSegmentStartIndex < 0) {
continue;
}
parts.add(referencesText.substring(currentSegmentStartIndex, currentSegmentEndIndex));
currentSegmentStartIndex = matcher.end();
previousNumber++;
}
}
parts.add(referencesText.substring(currentSegmentStartIndex));
return parts;
}
    // splits along lines ending with dots.
    // checks that the first letter of each segment is plausible, i.e. not before, nor far after,
    // the previous first letter, unless the citations do not seem to be ordered.
private List<String> splitGenerically(String referencesText) {
List<String> parts = new ArrayList<String>();
Matcher matcher = GENERIC_SPLIT_PATTERN.matcher(referencesText);
// determine the gap size between two consecutive first letters.
// determine if the citations are lexicographically ordered.
boolean citationsAreOrdered = true;
int numSegments = 0;
int orderViolations = 0;
char lastFirstLetter = 'A';
while (matcher.find()) {
numSegments++;
if (matcher.end() >= referencesText.length()) {
break;
}
if (referencesText.charAt(matcher.end()) < lastFirstLetter) {
orderViolations++;
}
}
if (numSegments == 0) {
LOGGER.info("Single segment found!");
return Arrays.asList(referencesText);
}
if (orderViolations > .25 * numSegments) {
LOGGER.info("Citations not ordered.");
citationsAreOrdered = false;
}
        // maximum allowed first-letter jump between consecutive entries: the fewer
        // the segments, the larger the tolerated alphabetical gap
        int gapsize = ((26 / numSegments) + 1) * 2;
matcher.reset();
char previousFirstChar = referencesText.charAt(0);
Integer currentSegmentStartIndex = 0;
Integer currentSegmentEndIndex;
char currentFirstChar;
while (matcher.find()) {
if (matcher.end() >= referencesText.length()) {
break;
}
currentFirstChar = referencesText.charAt(matcher.end());
currentSegmentEndIndex = matcher.start();
if (currentSegmentEndIndex - currentSegmentStartIndex > MINIMUM_SEGMENT_LENGTH &&
(!citationsAreOrdered || isValidNextFirstLetter(previousFirstChar, currentFirstChar, gapsize))) {
parts.add(referencesText.substring(currentSegmentStartIndex, currentSegmentEndIndex));
previousFirstChar = currentFirstChar;
currentSegmentStartIndex = currentSegmentEndIndex + 2;
}
}
parts.add(referencesText.substring(currentSegmentStartIndex));
return parts;
}
/**
* @param maxGapsize the maximum number of letters that are allowed to skip.
* if previousFirstLetter is followed by uncommon letters (like Q), then
* gapsize is increased.
*/
private boolean isValidNextFirstLetter(char previousFirstLetter, char firstLetter, int maxGapsize) {
if (firstLetter < previousFirstLetter) {
return false;
}
if (sparseLetters.contains(previousFirstLetter)) {
maxGapsize = maxGapsize + 2;
}
return firstLetter - previousFirstLetter <= maxGapsize;
}
}
| 9,496 | 39.07173 | 131 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/citations/RegexReferenceSegmenter.java
|
package org.grobid.core.engines.citations;
import com.google.common.base.Function;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.document.Document;
import org.grobid.core.engines.label.SegmentationLabels;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* DEPRECATED !
*
* A machine learning model is used now to segment references, see org.grobid.core.engines.ReferenceSegmenterParser
*
*/
public class RegexReferenceSegmenter implements ReferenceSegmenter {
private static final Pattern m1 = Pattern.compile("((^|\\n)( )*\\[.+?\\])");
private static final Pattern m2 = Pattern.compile("((^|\\n)( )*\\(.+?\\))");
private static final Pattern m3 = Pattern.compile("((^|\\n)( )*\\d{1,3}\\.)");
// static private Pattern m4 = Pattern.compile("(\\d{1,3})");
private final static Pattern SPACE_DASH_PATTERN = Pattern.compile("[a-zA-Z]-\\s*[\\n\\r]+\\s*[a-zA-Z]");
private static final Pattern[] CITATION_MARKERS = {m1, m2, m3};
private static final AdditionalRegexTextSegmenter citationTextSegmenter = new AdditionalRegexTextSegmenter();
public static final Function<String,LabeledReferenceResult> LABELED_REFERENCE_RESULT_FUNCTION = new Function<String, LabeledReferenceResult>() {
@Override
public LabeledReferenceResult apply(String input) {
return new LabeledReferenceResult(input);
}
};
@Override
public List<LabeledReferenceResult> extract(String referenceBlock) {
return Lists.transform(segmentReferences(referenceBlock), LABELED_REFERENCE_RESULT_FUNCTION);
}
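    // Illustrative usage sketch (hypothetical helper, not part of this deprecated API):
    // each segmented citation string is wrapped into an unlabeled LabeledReferenceResult.
    private static List<LabeledReferenceResult> demoExtraction() {
        String block = "1. A. Author. A first title. Some Journal, 2001.\n"
                + "2. B. Author. A second title. Some Journal, 2002.\n";
        return new RegexReferenceSegmenter().extract(block);
    }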
@Override
//public List<LabeledReferenceResult> extract(String referenceBlock) {
public List<LabeledReferenceResult> extract(Document doc) {
String referencesStr = doc.getDocumentPartText(SegmentationLabels.REFERENCES);
return Lists.transform(segmentReferences(referencesStr), LABELED_REFERENCE_RESULT_FUNCTION);
}
private static class StringLengthPredicate implements Predicate<String> {
private int len;
private StringLengthPredicate(int len) {
this.len = len;
}
@Override
public boolean apply(String s) {
return s != null && s.length() >= len;
}
}
private static List<String> segmentReferences(String references) {
List<String> grobidResults = new ArrayList<String>();
int best = 0;
Matcher bestMatcher;
int bestIndex = -1;
for (int i = 0; i < CITATION_MARKERS.length; i++) {
Matcher ma = CITATION_MARKERS[i].matcher(references);
int count = 0;
while (ma.find()) {
count++;
}
if (count > best) {
bestIndex = i;
best = count;
}
}
List<String> diggitReferences = citationTextSegmenter.extractCitationSegments(references);
if (bestIndex == -1) {
return diggitReferences;
} else {
bestMatcher = CITATION_MARKERS[bestIndex].matcher(references);
}
int last = 0;
int i = 0;
while (bestMatcher.find()) {
if (i == 0) {
last = bestMatcher.end();
} else {
int newLast = bestMatcher.start();
String lastRef = references.substring(last, newLast);
if (testCitationProfile(lastRef)) {
grobidResults.add(lastRef);
}
last = bestMatcher.end();
}
i++;
}
// the last one - if at least one, has not been considered
if (i > 0) {
String lastRef = references.substring(last, references.length());
if (testCitationProfile(lastRef)) {
grobidResults.add(lastRef);
}
}
diggitReferences = sanitizeCitationReferenceList(diggitReferences);
grobidResults = sanitizeCitationReferenceList(grobidResults);
return grobidResults.size() > diggitReferences.size() ? grobidResults : diggitReferences;
}
private static List<String> sanitizeCitationReferenceList(List<String> references) {
List<String> res = new ArrayList<String>();
for (String r : references) {
res.add(TextUtilities.dehyphenizeHard(stripCitation(r)));
}
return Lists.newArrayList(Iterables.filter(res, new StringLengthPredicate(15)));
}
private static boolean testCitationProfile(String lastRef) {
        if (lastRef.length() < 400) {
            // we assume that a reference extracted from a full text cannot be more than 400 characters
StringTokenizer st = new StringTokenizer(lastRef, "\n");
if (st.countTokens() < 9) {
return true;
}
}
return false;
}
private static String stripCitation(String citation) {
        // repair words hyphenated across line breaks
citation = processSpaceDash(citation);
citation = citation
.replaceAll("\\r\\d* ", " ") // remove the page number
.replaceAll("\\n\\d\\. ", " ") // remove citation bullet number
.replaceAll("\\n", " ")
.replaceAll("\\\\", " ")
.replaceAll("\"", " ")
.replaceAll(",\\s*,", ",") // resolve double commas
.replaceAll("\\r", " ")
.replaceAll("\\s\\s+", " ")
.trim().replaceAll("^[\\d]+\\s", "");
return citation;
}
//TODO move these functions to a separate class and add test units
private static String processSpaceDash(String s) {
while (true) {
Matcher matcher = SPACE_DASH_PATTERN.matcher(s);
if (matcher.find()) {
s = s.substring(0, matcher.start() + 1) + "-" + s.substring(matcher.end() - 1);
} else {
break;
}
}
return s;
}
}
| 6,215 | 35.564706 | 148 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/citations/CalloutAnalyzer.java
|
package org.grobid.core.engines.citations;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.LayoutTokensUtil;
/**
* Identify the type of the marker callout with regex
*
*/
public class CalloutAnalyzer {
// callout/marker type, this is used to discard incorrect numerical reference marker candidates
// that do not follow the majority reference marker pattern
public enum MarkerType {
UNKNOWN, BRACKET_TEXT, BRACKET_NUMBER, PARENTHESIS_TEXT, PARENTHESIS_NUMBER, SUPERSCRIPT_NUMBER, NUMBER, ROMAN
}
// simple patterns just to capture the majority callout style
private final static Pattern BRACKET_TEXT_PATTERN = Pattern.compile("\\[(.)+\\]");
//private final static Pattern BRACKET_NUMBER_PATTERN = Pattern.compile("\\[((\\d{0,4}[a-f]?)|[,-;•])+\\]");
private final static Pattern BRACKET_NUMBER_PATTERN = Pattern.compile("\\[(?>[0-9]{1,4}[a-f]?[\\-;•,]?((and)|&|(et))?)+\\]");
private final static Pattern PARENTHESIS_TEXT_PATTERN = Pattern.compile("\\((.)+\\)");
//private final static Pattern PARENTHESIS_NUMBER_PATTERN = Pattern.compile("\\(((\\d+[a-f]?)|[,-;•])+\\)");
private final static Pattern PARENTHESIS_NUMBER_PATTERN = Pattern.compile("\\((?>[0-9]{1,4}[a-f]?[\\-;•,]?((and)|&|(et))?)+\\)");
private final static Pattern NUMBER_PATTERN = Pattern.compile("(?>\\d+)[a-f]?");
    // roman numerals I-IX; the pattern must not be able to match the empty string,
    // otherwise every remaining callout would be classified as ROMAN
    private final static Pattern ROMAN_PATTERN = Pattern.compile("(IX|IV|V|V?I{1,3})");
public static MarkerType getCalloutType(List<LayoutToken> callout) {
if (callout == null)
return MarkerType.UNKNOWN;
String calloutString = LayoutTokensUtil.toText(callout);
if (calloutString == null || calloutString.trim().length() == 0)
return MarkerType.UNKNOWN;
calloutString = calloutString.replace(" ", "");
boolean isSuperScript = true;
for(LayoutToken token : callout) {
if (token.getText().trim().length() == 0)
continue;
if (!token.isSuperscript()) {
isSuperScript = false;
break;
}
}
Matcher matcher = NUMBER_PATTERN.matcher(calloutString);
if (matcher.find()) {
if (isSuperScript) {
return MarkerType.SUPERSCRIPT_NUMBER;
}
}
matcher = BRACKET_NUMBER_PATTERN.matcher(calloutString);
if (matcher.find()) {
return MarkerType.BRACKET_NUMBER;
}
matcher = PARENTHESIS_NUMBER_PATTERN.matcher(calloutString);
if (matcher.find()) {
return MarkerType.PARENTHESIS_NUMBER;
}
matcher = BRACKET_TEXT_PATTERN.matcher(calloutString);
if (matcher.find()) {
return MarkerType.BRACKET_TEXT;
}
matcher = PARENTHESIS_TEXT_PATTERN.matcher(calloutString);
if (matcher.find()) {
return MarkerType.PARENTHESIS_TEXT;
}
matcher = NUMBER_PATTERN.matcher(calloutString);
if (matcher.find()) {
return MarkerType.NUMBER;
}
matcher = ROMAN_PATTERN.matcher(calloutString);
if (matcher.find()) {
return MarkerType.ROMAN;
}
return MarkerType.UNKNOWN;
}
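    // Illustrative usage sketch (hypothetical helper, not part of the original class;
    // assumes LayoutToken's single-string constructor): a bracketed numeric callout
    // such as "[12]" is classified as BRACKET_NUMBER.
    private static MarkerType demoCalloutType() {
        return getCalloutType(Arrays.asList(new LayoutToken("[12]")));
    }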
}
| 3,363 | 35.172043 | 133 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/citations/LabeledReferenceResult.java
|
package org.grobid.core.engines.citations;
import org.grobid.core.layout.BoundingBox;
import org.grobid.core.layout.LayoutToken;
import java.util.List;
public class LabeledReferenceResult {
private String label = null;
private final String referenceText;
    private String features; // optionally the features corresponding to the tokens of referenceText
private List<BoundingBox> coordinates = null;
private List<LayoutToken> tokens = null;
public LabeledReferenceResult(String referenceText) {
this.referenceText = referenceText;
}
public LabeledReferenceResult(String label, String referenceText,
List<LayoutToken> referenceTokens, String features,
List<BoundingBox> coordinates) {
this.label = label;
this.referenceText = referenceText;
this.tokens = referenceTokens;
this.features = features;
this.coordinates = coordinates;
}
public String getLabel() {
return label;
}
public String getReferenceText() {
return referenceText;
}
public String getFeatures() {
return features;
}
public List<BoundingBox> getCoordinates() {
return coordinates;
}
public List<LayoutToken> getTokens() {
return this.tokens;
}
@Override
public String toString() {
return "** " + (label == null ? "" : label) + " ** " + referenceText;
}
}
| 1,439 | 25.666667 | 103 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/engines/citations/ReferenceSegmenter.java
|
package org.grobid.core.engines.citations;
import java.util.List;
import org.grobid.core.document.Document;
public interface ReferenceSegmenter {
List<LabeledReferenceResult> extract(String referenceBlock);
List<LabeledReferenceResult> extract(Document document);
}
| 273 | 26.4 | 64 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/lexicon/FastMatcher.java
|
package org.grobid.core.lexicon;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.Pair;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.lang.Language;
import org.grobid.core.analyzers.GrobidAnalyzer;
import java.io.*;
import java.util.*;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.commons.lang3.StringUtils.isBlank;
/**
* Class for fast matching of word sequences over text stream.
*
*/
public final class FastMatcher {
    private Map terms = null;
    public FastMatcher() {
        terms = new HashMap();
    }
public FastMatcher(File file) {
if (!file.exists()) {
throw new GrobidResourceException("Cannot add term to matcher, because file '" +
file.getAbsolutePath() + "' does not exist.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add terms to matcher, because cannot read file '" +
file.getAbsolutePath() + "'.");
}
try {
loadTerms(file, GrobidAnalyzer.getInstance(), false);
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid FastMatcher.", e);
}
}
public FastMatcher(File file, org.grobid.core.analyzers.Analyzer analyzer) {
if (!file.exists()) {
throw new GrobidResourceException("Cannot add term to matcher, because file '" +
file.getAbsolutePath() + "' does not exist.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add terms to matcher, because cannot read file '" +
file.getAbsolutePath() + "'.");
}
try {
loadTerms(file, analyzer, false);
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid FastMatcher.", e);
}
}
public FastMatcher(File file, org.grobid.core.analyzers.Analyzer analyzer, boolean caseSensitive) {
if (!file.exists()) {
throw new GrobidResourceException("Cannot add term to matcher, because file '" +
file.getAbsolutePath() + "' does not exist.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add terms to matcher, because cannot read file '" +
file.getAbsolutePath() + "'.");
}
try {
loadTerms(file, analyzer, caseSensitive);
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid FastMatcher.", e);
}
}
public FastMatcher(InputStream is) {
try {
loadTerms(is, GrobidAnalyzer.getInstance(), false);
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid FastMatcher.", e);
}
}
public FastMatcher(InputStream is, org.grobid.core.analyzers.Analyzer analyzer) {
try {
loadTerms(is, analyzer, false);
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid FastMatcher.", e);
}
}
public FastMatcher(InputStream is, org.grobid.core.analyzers.Analyzer analyzer, boolean caseSensitive) {
try {
loadTerms(is, analyzer, caseSensitive);
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid FastMatcher.", e);
}
}
/**
* Load a set of terms to the fast matcher from a file listing terms one per line
*/
public int loadTerms(File file) throws IOException {
InputStream fileIn = new FileInputStream(file);
return loadTerms(fileIn, GrobidAnalyzer.getInstance(), false);
}
/**
* Load a set of terms to the fast matcher from a file listing terms one per line
*/
public int loadTerms(File file, boolean caseSensitive) throws IOException {
InputStream fileIn = new FileInputStream(file);
return loadTerms(fileIn, GrobidAnalyzer.getInstance(), caseSensitive);
}
/**
* Load a set of terms to the fast matcher from a file listing terms one per line
*/
public int loadTerms(File file, org.grobid.core.analyzers.Analyzer analyzer, boolean caseSensitive) throws IOException {
InputStream fileIn = new FileInputStream(file);
return loadTerms(fileIn, analyzer, caseSensitive);
}
/**
     * Load a set of terms to the fast matcher from an input stream
*/
public int loadTerms(InputStream is, org.grobid.core.analyzers.Analyzer analyzer, boolean caseSensitive) throws IOException {
InputStreamReader reader = new InputStreamReader(is, UTF_8);
BufferedReader bufReader = new BufferedReader(reader);
String line;
if (terms == null) {
terms = new HashMap();
}
int nbTerms = 0;
while ((line = bufReader.readLine()) != null) {
if (line.length() == 0) continue;
line = UnicodeUtil.normaliseText(line);
line = StringUtils.normalizeSpace(line);
if (!caseSensitive)
line = line.toLowerCase();
nbTerms += loadTerm(line, analyzer, true);
}
bufReader.close();
reader.close();
return nbTerms;
}
/**
     * Load a term to the fast matcher; by default the standard delimiters are ignored
*/
public int loadTerm(String term, org.grobid.core.analyzers.Analyzer analyzer) {
return loadTerm(term, analyzer, true);
}
/**
     * Load a term to the fast matcher; by default the loading is case sensitive
*/
public int loadTerm(String term, org.grobid.core.analyzers.Analyzer analyzer, boolean ignoreDelimiters) {
return loadTerm(term, analyzer, ignoreDelimiters, true);
}
/**
* Load a term to the fast matcher
*/
public int loadTerm(String term, org.grobid.core.analyzers.Analyzer analyzer, boolean ignoreDelimiters, boolean caseSensitive) {
int nbTerms = 0;
if (isBlank(term))
return 0;
Map t = terms;
List<String> tokens = analyzer.tokenize(term, new Language("en", 1.0));
for(String token : tokens) {
if (token.length() == 0) {
continue;
}
if (token.equals(" ") || token.equals("\n")) {
continue;
}
if ( ignoreDelimiters && (delimiters.indexOf(token) != -1) ) {
continue;
}
if (!caseSensitive) {
token = token.toLowerCase();
}
Map t2 = (Map) t.get(token);
if (t2 == null) {
t2 = new HashMap();
t.put(token, t2);
}
t = t2;
}
// end of the term
if (t != terms) {
Map t2 = (Map) t.get("#");
if (t2 == null) {
t2 = new HashMap();
t.put("#", t2);
}
nbTerms++;
t = terms;
}
return nbTerms;
}
private static String delimiters = TextUtilities.delimiters;
/**
* Identify terms in a piece of text and gives corresponding token positions.
* All the matches are returned.
*
* @param text: the text to be processed
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
public List<OffsetPosition> matchToken(String text) {
return matchToken(text, false);
}
/**
* Identify terms in a piece of text and gives corresponding token positions.
* All the matches are returned.
*
* @param text: the text to be processed
* @param caseSensitive: ensure case sensitive matching or not
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
public List<OffsetPosition> matchToken(String text, boolean caseSensitive) {
List<OffsetPosition> results = new ArrayList<OffsetPosition>();
List<Integer> startPos = new ArrayList<Integer>();
List<Integer> lastNonSeparatorPos = new ArrayList<Integer>();
List<Map> t = new ArrayList<Map>();
int currentPos = 0;
StringTokenizer st = new StringTokenizer(text, delimiters, true);
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (token.equals(" ") || token.equals("\n")) {
continue;
}
if (delimiters.indexOf(token) != -1) {
currentPos++;
continue;
}
if (!caseSensitive)
token = token.toLowerCase();
// we try to complete opened matching
int i = 0;
List<Map> new_t = new ArrayList<Map>();
List<Integer> new_startPos = new ArrayList<Integer>();
List<Integer> new_lastNonSeparatorPos = new ArrayList<Integer>();
// continuation of current opened matching
for (Map tt : t) {
Map t2 = (Map) tt.get(token);
if (t2 != null) {
new_t.add(t2);
new_startPos.add(startPos.get(i));
new_lastNonSeparatorPos.add(currentPos);
}
                t2 = (Map) tt.get("#");
                if (t2 != null) {
                    // a term ends at the previous token, matching successful
                    OffsetPosition ofp = new OffsetPosition();
                    ofp.start = startPos.get(i).intValue();
                    ofp.end = lastNonSeparatorPos.get(i).intValue();
                    results.add(ofp);
                }
i++;
}
// we start new matching starting at the current token
Map t2 = (Map) terms.get(token);
if (t2 != null) {
new_t.add(t2);
new_startPos.add(Integer.valueOf(currentPos));
new_lastNonSeparatorPos.add(currentPos);
}
t = new_t;
startPos = new_startPos;
lastNonSeparatorPos = new_lastNonSeparatorPos;
currentPos++;
}
        // test if the end of the string corresponds to the end of a term
int i = 0;
if (t != null) {
for (Map tt : t) {
Map t2 = (Map) tt.get("#");
if (t2 != null) {
                    // end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition();
ofp.start = startPos.get(i).intValue();
ofp.end = lastNonSeparatorPos.get(i).intValue();
results.add(ofp);
}
i++;
}
}
return results;
}
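    // Illustrative usage sketch (hypothetical helper, not part of the original class):
    // one multi-word term is loaded into the trie, then matched over a text stream;
    // the returned OffsetPosition values are token indices, not character offsets.
    private static List<OffsetPosition> demoTokenMatching() {
        FastMatcher matcher = new FastMatcher();
        matcher.loadTerm("new york", GrobidAnalyzer.getInstance(), true);
        return matcher.matchToken("We flew from New York to Boston.");
    }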
/**
* Identify terms in a piece of text and gives corresponding token positions.
* All the matches are returned. Here the input text is already tokenized.
*
* @param tokens: the text to be processed
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
/*public List<OffsetPosition> matcher(List<String> tokens) {
StringBuilder text = new StringBuilder();
for (String token : tokens) {
text.append(processToken(token));
}
return matcher(text.toString());
}*/
/**
* Identify terms in a piece of text and gives corresponding token positions.
* All the matches are returned. Here the input is a list of LayoutToken object.
*
* @param tokens the text to be processed as a list of LayoutToken objects
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
public List<OffsetPosition> matchLayoutToken(List<LayoutToken> tokens) {
return matchLayoutToken(tokens, true, false);
}
/**
* Identify terms in a piece of text and gives corresponding token positions.
* All the matches are returned. Here the input is a list of LayoutToken object.
*
* @param tokens the text to be processed as a list of LayoutToken objects
* @param ignoreDelimiters if true, ignore the delimiters in the matching process
* @param caseSensitive: ensure case sensitive matching or not
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
public List<OffsetPosition> matchLayoutToken(List<LayoutToken> tokens, boolean ignoreDelimiters, boolean caseSensitive) {
if (CollectionUtils.isEmpty(tokens)) {
return new ArrayList<OffsetPosition>();
}
List<OffsetPosition> results = new ArrayList<>();
List<Integer> startPosition = new ArrayList<>();
List<Integer> lastNonSeparatorPos = new ArrayList<>();
List<Map> currentMatches = new ArrayList<>();
int currentPos = 0;
for(LayoutToken token : tokens) {
if (token.getText().equals(" ") || token.getText().equals("\n")) {
currentPos++;
continue;
}
if ( ignoreDelimiters && (delimiters.indexOf(token.getText()) != -1)) {
currentPos++;
continue;
}
String tokenText = UnicodeUtil.normaliseText(token.getText());
if (!caseSensitive)
tokenText = tokenText.toLowerCase();
// we try to complete opened matching
int i = 0;
List<Map> matchesTreeList = new ArrayList<>();
List<Integer> matchesPosition = new ArrayList<>();
List<Integer> new_lastNonSeparatorPos = new ArrayList<>();
// we check whether the current token matches as continuation of a previous match.
for (Map currentMatch : currentMatches) {
Map childMatches = (Map) currentMatch.get(tokenText);
if (childMatches != null) {
matchesTreeList.add(childMatches);
matchesPosition.add(startPosition.get(i));
new_lastNonSeparatorPos.add(currentPos);
}
                // if the open match already forms a complete term, add it to the results
childMatches = (Map) currentMatch.get("#");
if (childMatches != null) {
// end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition(startPosition.get(i), lastNonSeparatorPos.get(i));
results.add(ofp);
}
i++;
}
// we start new matching starting at the current token
Map match = (Map) terms.get(tokenText);
if (match != null) {
matchesTreeList.add(match);
matchesPosition.add(currentPos);
new_lastNonSeparatorPos.add(currentPos);
}
currentMatches = matchesTreeList;
startPosition = matchesPosition;
lastNonSeparatorPos = new_lastNonSeparatorPos;
currentPos++;
}
        // test if the end of the string corresponds to the end of a term
int i = 0;
if (currentMatches != null) {
for (Map tt : currentMatches) {
Map t2 = (Map) tt.get("#");
if (t2 != null) {
// end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition(startPosition.get(i), lastNonSeparatorPos.get(i));
results.add(ofp);
}
i++;
}
}
return results;
}
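    // Illustrative usage sketch (hypothetical helper, not part of the original class;
    // assumes LayoutToken's single-string constructor): the LayoutToken variant
    // operates directly on the PDF token stream, with delimiters ignored and
    // case-insensitive matching.
    private List<OffsetPosition> demoLayoutTokenMatching() {
        List<LayoutToken> tokens = Arrays.asList(
                new LayoutToken("New"), new LayoutToken(" "), new LayoutToken("York"));
        return matchLayoutToken(tokens, true, false);
    }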
/**
*
* Gives the character positions within a text where matches occur.
* <p>
* By iterating over the OffsetPosition and applying substring, we get all the matches.
* <p>
* All the matches are returned.
*
     * @param text: the text to be processed
     * @return the list of offset positions of the matches referred to the input string, an empty
     * list if no match has been found
*/
public List<OffsetPosition> matchCharacter(String text) {
return matchCharacter(text, false);
}
/**
*
* Gives the character positions within a text where matches occur.
* <p>
* By iterating over the OffsetPosition and applying substring, we get all the matches.
* <p>
* All the matches are returned.
*
* @param text: the text to be processed
* @param caseSensitive: ensure case sensitive matching or not
* @return the list of offset positions of the matches referred to the input string, an empty
     * list if no match has been found
*/
public List<OffsetPosition> matchCharacter(String text, boolean caseSensitive) {
List<OffsetPosition> results = new ArrayList<>();
List<Integer> startPosition = new ArrayList<>();
List<Integer> lastNonSeparatorPos = new ArrayList<>();
List<Map> currentMatches = new ArrayList<>();
int currentPos = 0;
StringTokenizer st = new StringTokenizer(text, delimiters, true);
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (token.equals(" ")) {
currentPos++;
continue;
}
if (delimiters.indexOf(token) != -1) {
currentPos++;
continue;
}
if (!caseSensitive)
token = token.toLowerCase();
// we try to complete opened matching
int i = 0;
List<Map> matchesTreeList = new ArrayList<>();
List<Integer> matchesPosition = new ArrayList<>();
List<Integer> new_lastNonSeparatorPos = new ArrayList<>();
// we check whether the current token matches as continuation of a previous match.
for (Map currentMatch : currentMatches) {
Map childMatches = (Map) currentMatch.get(token);
if (childMatches != null) {
matchesTreeList.add(childMatches);
matchesPosition.add(startPosition.get(i));
new_lastNonSeparatorPos.add(currentPos + token.length());
}
                // if the open match already forms a complete term, add it to the results
childMatches = (Map) currentMatch.get("#");
if (childMatches != null) {
// end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition(startPosition.get(i), lastNonSeparatorPos.get(i));
results.add(ofp);
}
i++;
}
//TODO: e.g. The Bronx matches 'The Bronx' and 'Bronx' is this correct?
// we start new matching starting at the current token
Map match = (Map) terms.get(token);
if (match != null) {
matchesTreeList.add(match);
matchesPosition.add(currentPos);
new_lastNonSeparatorPos.add(currentPos + token.length());
}
currentMatches = matchesTreeList;
startPosition = matchesPosition;
lastNonSeparatorPos = new_lastNonSeparatorPos;
currentPos += token.length();
}
        // test if the end of the string corresponds to the end of a term
int i = 0;
if (currentMatches != null) {
for (Map tt : currentMatches) {
Map t2 = (Map) tt.get("#");
if (t2 != null) {
// end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition(startPosition.get(i), lastNonSeparatorPos.get(i));
results.add(ofp);
}
i++;
}
}
return results;
}
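    // Illustrative usage sketch (hypothetical helper, not part of the original class):
    // character-level offsets let callers recover the matched surface forms with
    // String.substring, unlike the token-level variants above.
    private static List<String> demoCharacterMatching(FastMatcher matcher, String text) {
        List<String> surfaces = new ArrayList<>();
        for (OffsetPosition position : matcher.matchCharacter(text)) {
            surfaces.add(text.substring(position.start, position.end));
        }
        return surfaces;
    }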
/**
*
* Gives the character positions within a tokenized text where matches occur.
* <p>
* All the matches are returned.
*
* @param tokens the text to be processed as a list of LayoutToken objects
* @return the list of offset positions of the matches referred to the input string, an empty
     * list if no match has been found
*/
public List<OffsetPosition> matchCharacterLayoutToken(List<LayoutToken> tokens) {
return matchCharacterLayoutToken(tokens, false);
}
/**
*
* Gives the character positions within a tokenized text where matches occur.
* <p>
* All the matches are returned.
*
* @param tokens the text to be processed as a list of LayoutToken objects
* @param caseSensitive ensure case sensitive matching or not
* @return the list of offset positions of the matches referred to the input string, an empty
     * list if no match has been found
*/
public List<OffsetPosition> matchCharacterLayoutToken(List<LayoutToken> tokens, boolean caseSensitive) {
List<OffsetPosition> results = new ArrayList<>();
List<Integer> startPosition = new ArrayList<>();
List<Integer> lastNonSeparatorPos = new ArrayList<>();
List<Map> currentMatches = new ArrayList<>();
int currentPos = 0;
for (LayoutToken token : tokens) {
if (token.getText().equals(" ")) {
currentPos++;
continue;
}
if (delimiters.indexOf(token.getText()) != -1) {
currentPos++;
continue;
}
String tokenString = token.getText();
if (!caseSensitive)
tokenString = tokenString.toLowerCase();
// we try to complete opened matching
int i = 0;
List<Map> matchesTreeList = new ArrayList<>();
List<Integer> matchesPosition = new ArrayList<>();
List<Integer> new_lastNonSeparatorPos = new ArrayList<>();
// we check whether the current token matches as continuation of a previous match.
for (Map currentMatch : currentMatches) {
Map childMatches = (Map) currentMatch.get(tokenString);
if (childMatches != null) {
matchesTreeList.add(childMatches);
matchesPosition.add(startPosition.get(i));
new_lastNonSeparatorPos.add(currentPos);
}
                // if the open match already forms a complete term, add it to the results
childMatches = (Map) currentMatch.get("#");
if (childMatches != null) {
// end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition(startPosition.get(i), lastNonSeparatorPos.get(i));
results.add(ofp);
}
i++;
}
// we start new matching starting at the current token
Map match = (Map) terms.get(tokenString);
if (match != null) {
matchesTreeList.add(match);
matchesPosition.add(currentPos);
new_lastNonSeparatorPos.add(currentPos);
}
currentMatches = matchesTreeList;
startPosition = matchesPosition;
lastNonSeparatorPos = new_lastNonSeparatorPos;
currentPos++;
}
        // test if the end of the string corresponds to the end of a term
int i = 0;
if (currentMatches != null) {
for (Map tt : currentMatches) {
Map t2 = (Map) tt.get("#");
if (t2 != null) {
// end of the current term, matching successful
OffsetPosition ofp = new OffsetPosition(startPosition.get(i), lastNonSeparatorPos.get(i));
results.add(ofp);
}
i++;
}
}
return results;
}
/**
* Identify terms in a piece of text and gives corresponding token positions.
     * All the matches are returned. This case corresponds to text from a trainer,
     * where the text is already tokenized with some labels that can be ignored.
     *
     * @param tokens: the text to be processed
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
public List<OffsetPosition> matcherPairs(List<Pair<String, String>> tokens) {
return matcherPairs(tokens, false);
}
/**
* Identify terms in a piece of text and gives corresponding token positions.
     * All the matches are returned. This case corresponds to text from a trainer,
     * where the text is already tokenized with some labels that can be ignored.
     *
     * @param tokens: the text to be processed
     * @param caseSensitive: ensure case sensitive matching or not
     * @return the list of offset positions of the matches, an empty list if no match has been found
*/
public List<OffsetPosition> matcherPairs(List<Pair<String, String>> tokens, boolean caseSensitive) {
StringBuilder text = new StringBuilder();
for (Pair<String, String> tokenP : tokens) {
String token = tokenP.getA();
text.append(processToken(token));
}
return matchToken(text.toString(), caseSensitive);
}
/**
* Process token, if different than @newline
*/
protected String processToken(String token) {
if (!token.trim().equals("@newline")) {
int ind = token.indexOf(" ");
if (ind == -1)
ind = token.indexOf("\t");
if (ind == -1)
return " " + token;
else
return " " + token.substring(0, ind);
}
return "";
}
}
| 26,579 | 37.299712 | 132 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/lexicon/Lexicon.java
|
package org.grobid.core.lexicon;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.regex.*;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import org.apache.commons.io.IOUtils;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.layout.PDFAnnotation;
import org.grobid.core.sax.CountryCodeSaxParser;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.Utilities;
import org.grobid.core.utilities.TextUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class for managing all the lexical resources.
*
*/
public class Lexicon {
private static final Logger LOGGER = LoggerFactory.getLogger(Lexicon.class);
// private static volatile Boolean instanceController = false;
private static volatile Lexicon instance;
private Set<String> dictionary_en = null;
private Set<String> dictionary_de = null;
private Set<String> lastNames = null;
private Set<String> firstNames = null;
private Map<String, String> countryCodes = null;
private Set<String> countries = null;
private FastMatcher abbrevJournalPattern = null;
private FastMatcher conferencePattern = null;
private FastMatcher publisherPattern = null;
private FastMatcher journalPattern = null;
private FastMatcher cityPattern = null;
private FastMatcher organisationPattern = null;
private FastMatcher locationPattern = null;
private FastMatcher orgFormPattern = null;
private FastMatcher collaborationPattern = null;
private FastMatcher personTitlePattern = null;
private FastMatcher personSuffixPattern = null;
public static Lexicon getInstance() {
if (instance == null) {
synchronized (Lexicon.class) {
if (instance == null) {
getNewInstance();
}
}
}
return instance;
}
/**
* Creates a new instance.
*/
private static synchronized void getNewInstance() {
LOGGER.debug("Get new instance of Lexicon");
GrobidProperties.getInstance();
instance = new Lexicon();
}
/**
* Hidden constructor
*/
private Lexicon() {
initDictionary();
initNames();
// the loading of the journal and conference names is lazy
addDictionary(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"wordforms"+File.separator+"english.wf", Language.EN);
        addDictionary(GrobidProperties.getGrobidHomePath() + File.separator +
                "lexicon"+File.separator+"wordforms"+File.separator+"german.wf", Language.DE);
addLastNames(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"names"+File.separator+"names.family");
addLastNames(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"names"+File.separator+"lastname.5k");
addFirstNames(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"names"+File.separator+"names.female");
addFirstNames(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"names"+File.separator+"names.male");
addFirstNames(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"names"+File.separator+"firstname.5k");
initCountryCodes();
addCountryCodes(GrobidProperties.getGrobidHomePath() + File.separator +
"lexicon"+File.separator+"countries"+File.separator+"CountryCodes.xml");
}
private void initDictionary() {
LOGGER.info("Initiating dictionary");
dictionary_en = new HashSet<>();
dictionary_de = new HashSet<>();
LOGGER.info("End of Initialization of dictionary");
}
public final void addDictionary(String path, String lang) {
File file = new File(path);
if (!file.exists()) {
throw new GrobidResourceException("Cannot add entries to dictionary (language '" + lang +
"'), because file '" + file.getAbsolutePath() + "' does not exists.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add entries to dictionary (language '" + lang +
"'), because cannot read file '" + file.getAbsolutePath() + "'.");
}
InputStream ist = null;
InputStreamReader isr = null;
BufferedReader dis = null;
try {
ist = new FileInputStream(file);
isr = new InputStreamReader(ist, "UTF8");
dis = new BufferedReader(isr);
String l = null;
while ((l = dis.readLine()) != null) {
if (l.length() == 0) continue;
// the first token, separated by a tabulation, gives the word form
if (lang.equals(Language.EN)) {
// multext format
StringTokenizer st = new StringTokenizer(l, "\t");
if (st.hasMoreTokens()) {
String word = st.nextToken();
if (!dictionary_en.contains(word))
dictionary_en.add(word);
}
} else if (lang.equals(Language.DE)) {
// celex format
StringTokenizer st = new StringTokenizer(l, "\\");
if (st.hasMoreTokens()) {
st.nextToken(); // id
String word = st.nextToken();
word = word.replace("\"a", "ä");
word = word.replace("\"u", "ü");
word = word.replace("\"o", "ö");
word = word.replace("$", "ß");
if (!dictionary_de.contains(word))
dictionary_de.add(word);
}
}
}
} catch (IOException e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
} finally {
IOUtils.closeQuietly(ist, isr, dis);
}
}
public boolean isCountry(String tok) {
return countries.contains(tok.toLowerCase());
}
private void initNames() {
LOGGER.info("Initiating names");
firstNames = new HashSet<String>();
lastNames = new HashSet<String>();
LOGGER.info("End of initialization of names");
}
private void initCountryCodes() {
LOGGER.info("Initiating country codes");
countryCodes = new HashMap<String, String>();
countries = new HashSet<String>();
LOGGER.info("End of initialization of country codes");
}
private void addCountryCodes(String path) {
File file = new File(path);
if (!file.exists()) {
throw new GrobidResourceException("Cannot add country codes to dictionary, because file '" +
file.getAbsolutePath() + "' does not exists.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add country codes to dictionary, because cannot read file '" +
file.getAbsolutePath() + "'.");
}
InputStream ist = null;
//InputStreamReader isr = null;
//BufferedReader dis = null;
try {
ist = new FileInputStream(file);
CountryCodeSaxParser parser = new CountryCodeSaxParser(countryCodes, countries);
SAXParserFactory spf = SAXParserFactory.newInstance();
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(ist, parser);
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
} finally {
try {
if (ist != null)
ist.close();
} catch (Exception e) {
throw new GrobidResourceException("Cannot close all streams.", e);
}
}
}
public String getCountryCode(String country) {
        return countryCodes.get(country.toLowerCase());
}
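    // Illustrative usage sketch (hypothetical helper, not part of the original class):
    // country lookup is case-insensitive and backed by the CountryCodes.xml resource.
    private static String demoCountryCode() {
        Lexicon lexicon = Lexicon.getInstance();
        return lexicon.isCountry("Germany") ? lexicon.getCountryCode("Germany") : null;
    }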
public final void addFirstNames(String path) {
File file = new File(path);
if (!file.exists()) {
throw new GrobidResourceException("Cannot add first names to dictionary, because file '" +
file.getAbsolutePath() + "' does not exists.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add first names to dictionary, because cannot read file '" +
file.getAbsolutePath() + "'.");
}
InputStream ist = null;
BufferedReader dis = null;
try {
ist = new FileInputStream(file);
dis = new BufferedReader(new InputStreamReader(ist, "UTF8"));
String l = null;
while ((l = dis.readLine()) != null) {
// read the line
// the first token, separated by a tabulation, gives the word form
StringTokenizer st = new StringTokenizer(l, "\t\n-");
if (st.hasMoreTokens()) {
String word = st.nextToken().toLowerCase().trim();
if (!firstNames.contains(word)) {
firstNames.add(word);
}
}
}
        } catch (IOException e) {
            // FileNotFoundException is a subclass of IOException, so one catch suffices
            throw new GrobidException("An exception occurred while running Grobid.", e);
} finally {
try {
if (ist != null)
ist.close();
if (dis != null)
dis.close();
} catch (Exception e) {
throw new GrobidResourceException("Cannot close all streams.", e);
}
}
}
public final void addLastNames(String path) {
File file = new File(path);
if (!file.exists()) {
throw new GrobidResourceException("Cannot add last names to dictionary, because file '" +
file.getAbsolutePath() + "' does not exists.");
}
if (!file.canRead()) {
throw new GrobidResourceException("Cannot add last names to dictionary, because cannot read file '" +
file.getAbsolutePath() + "'.");
}
InputStream ist = null;
BufferedReader dis = null;
try {
ist = new FileInputStream(file);
dis = new BufferedReader(new InputStreamReader(ist, "UTF8"));
String l = null;
while ((l = dis.readLine()) != null) {
// read the line
// the first token, separated by a tabulation, gives the word form
StringTokenizer st = new StringTokenizer(l, "\t\n-");
if (st.hasMoreTokens()) {
String word = st.nextToken().toLowerCase().trim();
if (!lastNames.contains(word)) {
lastNames.add(word);
}
}
}
        } catch (IOException e) {
            // FileNotFoundException is a subclass of IOException, so one catch suffices
            throw new GrobidException("An exception occurred while running Grobid.", e);
} finally {
try {
if (ist != null)
ist.close();
if (dis != null)
dis.close();
} catch (Exception e) {
throw new GrobidResourceException("Cannot close all streams.", e);
}
}
}
/**
* Lexical look-up, default is English
* @param s a string to test
* @return true if in the dictionary
*/
public boolean inDictionary(String s) {
return inDictionary(s, Language.EN);
}
public boolean inDictionary(String s, String lang) {
if (s == null)
return false;
if ((s.endsWith(".")) | (s.endsWith(",")) | (s.endsWith(":")) | (s.endsWith(";")) | (s.endsWith(".")))
s = s.substring(0, s.length() - 1);
        int i1 = s.indexOf('-');
        int i2 = s.indexOf(' ');
        if (i1 != -1) {
            String s1 = s.substring(0, i1);
            String s2 = s.substring(i1 + 1);
            if (lang.equals(Language.DE))
                return dictionary_de.contains(s1) && dictionary_de.contains(s2);
            else
                return dictionary_en.contains(s1) && dictionary_en.contains(s2);
        }
        if (i2 != -1) {
            String s1 = s.substring(0, i2);
            String s2 = s.substring(i2 + 1);
            if (lang.equals(Language.DE))
                return dictionary_de.contains(s1) && dictionary_de.contains(s2);
            else
                return dictionary_en.contains(s1) && dictionary_en.contains(s2);
        }
        if (lang.equals(Language.DE))
            return dictionary_de.contains(s);
        return dictionary_en.contains(s);
    }
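    /*
     * Example (illustrative): with the English dictionary loaded, a hyphenated
     * or two-token form is accepted only when both parts are entries, e.g.
     *
     *   inDictionary("well-known", Language.EN)
     *
     * is true only if both "well" and "known" are present in dictionary_en.
     */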
public void initJournals() {
try {
abbrevJournalPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/journals/abbrev_journals.txt"));
journalPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/journals/journals.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException(
"Error when compiling lexicon matcher for abbreviated journal names.", e);
}
}
public void initConferences() {
// ArrayList<String> conferences = new ArrayList<String>();
try {
conferencePattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/journals/proceedings.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for conference names.", e);
}
}
public void initPublishers() {
try {
publisherPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/publishers/publishers.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for conference names.", e);
}
}
public void initCities() {
try {
cityPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/places/cities15000.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for cities.", e);
}
}
public void initCollaborations() {
try {
//collaborationPattern = new FastMatcher(new
// File(GrobidProperties.getGrobidHomePath() + "/lexicon/organisations/collaborations.txt"));
collaborationPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/organisations/inspire_collaborations.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for collaborations.", e);
}
}
public void initOrganisations() {
try {
organisationPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/organisations/WikiOrganizations.lst"));
organisationPattern.loadTerms(new File(GrobidProperties.getGrobidHomePath() +
"/lexicon/organisations/government.government_agency"));
organisationPattern.loadTerms(new File(GrobidProperties.getGrobidHomePath() +
"/lexicon/organisations/known_corporations.lst"));
organisationPattern.loadTerms(new File(GrobidProperties.getGrobidHomePath() +
"/lexicon/organisations/venture_capital.venture_funded_company"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for organisations.", e);
} catch (IOException e) {
throw new GrobidResourceException("Cannot add term to matcher, because the lexicon resource file " +
"does not exist or cannot be read.", e);
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid Lexicon init.", e);
}
}
public void initOrgForms() {
try {
orgFormPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/organisations/orgClosings.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for organisations.", e);
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid Lexicon init.", e);
}
}
public void initLocations() {
try {
locationPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/places/location.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for locations.", e);
}
}
public void initPersonTitles() {
try {
personTitlePattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/names/VincentNgPeopleTitles.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for person titles.", e);
}
}
public void initPersonSuffix() {
try {
personSuffixPattern = new FastMatcher(new
File(GrobidProperties.getGrobidHomePath() + "/lexicon/names/suffix.txt"));
} catch (PatternSyntaxException e) {
throw new GrobidResourceException("Error when compiling lexicon matcher for person name suffix.", e);
}
}
/**
* Look-up in first name gazetteer
*/
public boolean inFirstNames(String s) {
return firstNames.contains(s);
}
/**
* Look-up in last name gazetteer
*/
public boolean inLastNames(String s) {
return lastNames.contains(s);
}
/**
* Indicate if we have a punctuation
*/
    public boolean isPunctuation(String s) {
        if (s.length() != 1)
            return false;
        char c = s.charAt(0);
        return !Character.isLetterOrDigit(c) && c != '-';
    }
/**
* Map the language codes used by the language identifier component to the normal
* language name.
*
* @param code the language to be mapped
*/
public String mapLanguageCode(String code) {
if (code == null)
return "";
else if (code.length() == 0)
return "";
else if (code.equals(Language.EN))
return "English";
else if (code.equals(Language.FR))
return "French";
else if (code.equals(Language.DE))
return "German";
else if (code.equals("cat"))
return "Catalan";
else if (code.equals("dk"))
return "Danish";
else if (code.equals("ee"))
return "Estonian";
else if (code.equals("fi"))
return "Finish";
else if (code.equals("it"))
return "Italian";
else if (code.equals("jp"))
return "Japanese";
else if (code.equals("kr"))
return "Korean";
else if (code.equals("nl"))
return "Deutch";
else if (code.equals("no"))
return "Norvegian";
else if (code.equals("se"))
return "Swedish";
else if (code.equals("sorb"))
return "Sorbian";
else if (code.equals("tr"))
return "Turkish";
else
return "";
}
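    /*
     * A table-driven alternative (sketch, not the original implementation):
     * the same mapping could live in a static map, which keeps each new code
     * to a single line and avoids the long if/else chain:
     *
     *   private static final Map<String, String> LANGUAGE_NAMES = new HashMap<>();
     *   static {
     *       LANGUAGE_NAMES.put(Language.EN, "English");
     *       LANGUAGE_NAMES.put(Language.FR, "French");
     *       // ... remaining codes ...
     *   }
     *   // lookup: LANGUAGE_NAMES.getOrDefault(code, "")
     */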
/**
* Soft look-up in journal name gazetteer with token positions
*/
public List<OffsetPosition> tokenPositionsJournalNames(String s) {
if (journalPattern == null) {
initJournals();
}
List<OffsetPosition> results = journalPattern.matchToken(s);
return results;
}
/**
* Soft look-up in journal name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsJournalNames(List<LayoutToken> s) {
if (journalPattern == null) {
initJournals();
}
List<OffsetPosition> results = journalPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in journal abbreviated name gazetteer with token positions
*/
public List<OffsetPosition> tokenPositionsAbbrevJournalNames(String s) {
if (abbrevJournalPattern == null) {
initJournals();
}
List<OffsetPosition> results = abbrevJournalPattern.matchToken(s);
return results;
}
/**
* Soft look-up in journal abbreviated name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsAbbrevJournalNames(List<LayoutToken> s) {
if (abbrevJournalPattern == null) {
initJournals();
}
List<OffsetPosition> results = abbrevJournalPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in conference/proceedings name gazetteer with token positions
*/
public List<OffsetPosition> tokenPositionsConferenceNames(String s) {
if (conferencePattern == null) {
initConferences();
}
List<OffsetPosition> results = conferencePattern.matchToken(s);
return results;
}
/**
* Soft look-up in conference/proceedings name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsConferenceNames(List<LayoutToken> s) {
if (conferencePattern == null) {
initConferences();
}
List<OffsetPosition> results = conferencePattern.matchLayoutToken(s);
return results;
}
/**
     * Soft look-up in publisher name gazetteer with token positions
*/
public List<OffsetPosition> tokenPositionsPublisherNames(String s) {
if (publisherPattern == null) {
initPublishers();
}
List<OffsetPosition> results = publisherPattern.matchToken(s);
return results;
}
/**
* Soft look-up in publisher name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsPublisherNames(List<LayoutToken> s) {
if (publisherPattern == null) {
initPublishers();
}
List<OffsetPosition> results = publisherPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in collaboration name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsCollaborationNames(List<LayoutToken> s) {
if (collaborationPattern == null) {
initCollaborations();
}
List<OffsetPosition> results = collaborationPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in city name gazetteer for a given string with token positions
*/
public List<OffsetPosition> tokenPositionsCityNames(String s) {
if (cityPattern == null) {
initCities();
}
List<OffsetPosition> results = cityPattern.matchToken(s);
return results;
}
/**
* Soft look-up in city name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsCityNames(List<LayoutToken> s) {
if (cityPattern == null) {
initCities();
}
List<OffsetPosition> results = cityPattern.matchLayoutToken(s);
return results;
}
/** Organisation names **/
/**
* Soft look-up in organisation name gazetteer for a given string with token positions
*/
public List<OffsetPosition> tokenPositionsOrganisationNames(String s) {
if (organisationPattern == null) {
initOrganisations();
}
List<OffsetPosition> results = organisationPattern.matchToken(s);
return results;
}
/**
* Soft look-up in organisation name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsOrganisationNames(List<LayoutToken> s) {
if (organisationPattern == null) {
initOrganisations();
}
List<OffsetPosition> results = organisationPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in organisation names gazetteer for a string.
     * It returns a list of positions referring to the character positions within the string.
*
* @param s the input string
* @return a list of positions referring to the character position in the input string
*/
public List<OffsetPosition> charPositionsOrganisationNames(String s) {
if (organisationPattern == null) {
initOrganisations();
}
List<OffsetPosition> results = organisationPattern.matchCharacter(s);
return results;
}
/**
     * Soft look-up in organisation names gazetteer for a tokenized sequence.
     * It returns a list of positions referring to the character positions within the input
* sequence.
*
* @param s the input list of LayoutToken
* @return a list of positions referring to the character position in the input sequence
*/
public List<OffsetPosition> charPositionsOrganisationNames(List<LayoutToken> s) {
if (organisationPattern == null) {
initOrganisations();
}
List<OffsetPosition> results = organisationPattern.matchCharacterLayoutToken(s);
return results;
}
/**
* Soft look-up in organisation form name gazetteer for a given string with token positions
*/
public List<OffsetPosition> tokenPositionsOrgForm(String s) {
if (orgFormPattern == null) {
initOrgForms();
}
List<OffsetPosition> results = orgFormPattern.matchToken(s);
return results;
}
/**
* Soft look-up in organisation form name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsOrgForm(List<LayoutToken> s) {
if (orgFormPattern == null) {
initOrgForms();
}
List<OffsetPosition> results = orgFormPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in org form names gazetteer for a string.
     * It returns a list of positions referring to the character positions within the string.
*
* @param s the input string
* @return a list of positions referring to the character position in the input string
*/
public List<OffsetPosition> charPositionsOrgForm(String s) {
if (orgFormPattern == null) {
initOrgForms();
}
List<OffsetPosition> results = orgFormPattern.matchCharacter(s);
return results;
}
/**
* Soft look-up in org form names gazetteer for a tokenized string.
     * It returns a list of positions referring to the character positions within the sequence.
*
* @param s the input list of LayoutToken
* @return a list of positions referring to the character position in the input sequence
*/
public List<OffsetPosition> charPositionsOrgForm(List<LayoutToken> s) {
if (orgFormPattern == null) {
initOrgForms();
}
List<OffsetPosition> results = orgFormPattern.matchCharacterLayoutToken(s);
return results;
}
/**
* Soft look-up in location name gazetteer for a given string with token positions
*/
public List<OffsetPosition> tokenPositionsLocationNames(String s) {
if (locationPattern == null) {
initLocations();
}
List<OffsetPosition> results = locationPattern.matchToken(s);
return results;
}
/**
* Soft look-up in location name gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsLocationNames(List<LayoutToken> s) {
if (locationPattern == null) {
initLocations();
}
List<OffsetPosition> results = locationPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in location name gazetteer for a string, return a list of positions referring
* to the character positions within the string.
*
* For example "The car is in Milan" as Milan is a location, would return OffsetPosition(14,19)
*
* @param s the input string
* @return a list of positions referring to the character position in the input string
*/
public List<OffsetPosition> charPositionsLocationNames(String s) {
if (locationPattern == null) {
initLocations();
}
List<OffsetPosition> results = locationPattern.matchCharacter(s);
return results;
}
/**
* Soft look-up in location name gazetteer for a list of LayoutToken, return a list of
* positions referring to the character positions in the input sequence.
*
* For example "The car is in Milan" as Milan is a location, would return OffsetPosition(14,19)
*
* @param s the input list of LayoutToken
* @return a list of positions referring to the character position in the input sequence
*/
public List<OffsetPosition> charPositionsLocationNames(List<LayoutToken> s) {
if (locationPattern == null) {
initLocations();
}
List<OffsetPosition> results = locationPattern.matchCharacterLayoutToken(s);
return results;
}
/**
* Soft look-up in person title gazetteer for a given string with token positions
*/
public List<OffsetPosition> tokenPositionsPersonTitle(String s) {
if (personTitlePattern == null) {
initPersonTitles();
}
List<OffsetPosition> results = personTitlePattern.matchToken(s);
return results;
}
/**
* Soft look-up in person title gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsPersonTitle(List<LayoutToken> s) {
if (personTitlePattern == null) {
initPersonTitles();
}
List<OffsetPosition> results = personTitlePattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in person name suffix gazetteer for a given list of LayoutToken objects
* with token positions
*/
public List<OffsetPosition> tokenPositionsPersonSuffix(List<LayoutToken> s) {
if (personSuffixPattern == null) {
initPersonSuffix();
}
List<OffsetPosition> results = personSuffixPattern.matchLayoutToken(s);
return results;
}
/**
* Soft look-up in person title name gazetteer for a string.
     * It returns a list of positions referring to the character positions within the string.
*
* @param s the input string
* @return a list of positions referring to the character position in the input string
*/
public List<OffsetPosition> charPositionsPersonTitle(String s) {
if (personTitlePattern == null) {
initPersonTitles();
}
List<OffsetPosition> results = personTitlePattern.matchCharacter(s);
return results;
}
/**
* Soft look-up in person title name gazetteer for a list of LayoutToken.
     * It returns a list of positions referring to the character positions in the input
* sequence.
*
* @param s the input list of LayoutToken
* @return a list of positions referring to the character position in the input sequence
*/
public List<OffsetPosition> charPositionsPersonTitle(List<LayoutToken> s) {
if (personTitlePattern == null) {
initPersonTitles();
}
List<OffsetPosition> results = personTitlePattern.matchCharacterLayoutToken(s);
return results;
}
/**
* Identify in tokenized input the positions of identifier patterns with token positions
*/
public List<OffsetPosition> tokenPositionsIdentifierPattern(List<LayoutToken> tokens) {
List<OffsetPosition> result = new ArrayList<OffsetPosition>();
String text = LayoutTokensUtil.toText(tokens);
// DOI positions
result = tokenPositionsDOIPattern(tokens, text);
// arXiv
List<OffsetPosition> positions = tokenPositionsArXivPattern(tokens, text);
result = Utilities.mergePositions(result, positions);
// ISSN and ISBN
/*positions = tokenPositionsISSNPattern(tokens);
result = Utilities.mergePositions(result, positions);
positions = tokenPositionsISBNPattern(tokens);
result = Utilities.mergePositions(result, positions);*/
return result;
}
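    /*
     * Usage sketch (illustrative): collecting DOI and arXiv spans from a
     * tokenized reference. The returned offsets index into the token list
     * (see Utilities.convertStringOffsetToTokenOffset for the convention):
     *
     *   List<OffsetPosition> ids = lexicon.tokenPositionsIdentifierPattern(tokens);
     *   // each OffsetPosition delimits the tokens of one DOI or arXiv identifier
     */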
/**
     * Identify in tokenized input the positions of the DOI patterns with token positions
*/
public List<OffsetPosition> tokenPositionsDOIPattern(List<LayoutToken> tokens, String text) {
List<OffsetPosition> textResult = new ArrayList<OffsetPosition>();
Matcher doiMatcher = TextUtilities.DOIPattern.matcher(text);
while (doiMatcher.find()) {
textResult.add(new OffsetPosition(doiMatcher.start(), doiMatcher.end()));
}
return Utilities.convertStringOffsetToTokenOffset(textResult, tokens);
}
/**
* Identify in tokenized input the positions of the arXiv identifier patterns
* with token positions
*/
public List<OffsetPosition> tokenPositionsArXivPattern(List<LayoutToken> tokens, String text) {
List<OffsetPosition> textResult = new ArrayList<OffsetPosition>();
Matcher arXivMatcher = TextUtilities.arXivPattern.matcher(text);
while (arXivMatcher.find()) {
//System.out.println(arXivMatcher.start() + " / " + arXivMatcher.end() + " / " + text.substring(arXivMatcher.start(), arXivMatcher.end()));
textResult.add(new OffsetPosition(arXivMatcher.start(), arXivMatcher.end()));
}
return Utilities.convertStringOffsetToTokenOffset(textResult, tokens);
}
/**
* Identify in tokenized input the positions of ISSN patterns with token positions
*/
public List<OffsetPosition> tokenPositionsISSNPattern(List<LayoutToken> tokens) {
List<OffsetPosition> result = new ArrayList<OffsetPosition>();
// TBD !
return result;
}
/**
* Identify in tokenized input the positions of ISBN patterns with token positions
*/
public List<OffsetPosition> tokenPositionsISBNPattern(List<LayoutToken> tokens) {
List<OffsetPosition> result = new ArrayList<OffsetPosition>();
// TBD !!
return result;
}
/**
* Identify in tokenized input the positions of an URL pattern with token positions
*/
public List<OffsetPosition> tokenPositionsUrlPattern(List<LayoutToken> tokens) {
//List<OffsetPosition> result = new ArrayList<OffsetPosition>();
String text = LayoutTokensUtil.toText(tokens);
List<OffsetPosition> textResult = new ArrayList<OffsetPosition>();
Matcher urlMatcher = TextUtilities.urlPattern.matcher(text);
while (urlMatcher.find()) {
//System.out.println(urlMatcher.start() + " / " + urlMatcher.end() + " / " + text.substring(urlMatcher.start(), urlMatcher.end()));
textResult.add(new OffsetPosition(urlMatcher.start(), urlMatcher.end()));
}
return Utilities.convertStringOffsetToTokenOffset(textResult, tokens);
}
/**
* Identify in tokenized input the positions of an URL pattern with character positions
*/
public List<OffsetPosition> characterPositionsUrlPattern(List<LayoutToken> tokens) {
//List<OffsetPosition> result = new ArrayList<OffsetPosition>();
String text = LayoutTokensUtil.toText(tokens);
List<OffsetPosition> textResult = new ArrayList<OffsetPosition>();
Matcher urlMatcher = TextUtilities.urlPattern.matcher(text);
while (urlMatcher.find()) {
textResult.add(new OffsetPosition(urlMatcher.start(), urlMatcher.end()));
}
return textResult;
}
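    /*
     * Example (illustrative): unlike tokenPositionsUrlPattern above, the
     * offsets returned here index characters of the concatenated token text,
     * so a matched URL can be cut directly out of that string:
     *
     *   String text = LayoutTokensUtil.toText(tokens);
     *   for (OffsetPosition p : lexicon.characterPositionsUrlPattern(tokens)) {
     *       String url = text.substring(p.start, p.end);
     *   }
     */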
/**
* Identify in tokenized input the positions of an URL pattern with character positions,
* and refine positions based on possible PDF URI annotations.
*
* This will produce better quality recognized URL, avoiding missing suffixes and problems
* with break lines and spaces.
**/
public static List<OffsetPosition> characterPositionsUrlPatternWithPdfAnnotations(
List<LayoutToken> layoutTokens,
List<PDFAnnotation> pdfAnnotations,
String text) {
List<OffsetPosition> urlPositions = Lexicon.getInstance().characterPositionsUrlPattern(layoutTokens);
List<OffsetPosition> resultPositions = new ArrayList<>();
// do we need to extend the url position based on additional position of the corresponding
// PDF annotation?
for(OffsetPosition urlPosition : urlPositions) {
int startPos = urlPosition.start;
int endPos = urlPosition.end;
int startTokenIndex = -1;
int endTokensIndex = -1;
// token sublist
List<LayoutToken> urlTokens = new ArrayList<>();
int tokenPos = 0;
int tokenIndex = 0;
for(LayoutToken localToken : layoutTokens) {
if (startPos <= tokenPos && (tokenPos+localToken.getText().length() <= endPos) ) {
urlTokens.add(localToken);
if (startTokenIndex == -1)
startTokenIndex = tokenIndex;
if (tokenIndex > endTokensIndex)
endTokensIndex = tokenIndex;
}
if (tokenPos > endPos) {
break;
}
tokenPos += localToken.getText().length();
tokenIndex++;
}
//String urlString = LayoutTokensUtil.toText(urlTokens);
String urlString = text.substring(startPos, endPos);
PDFAnnotation targetAnnotation = null;
if (urlTokens.size()>0) {
LayoutToken lastToken = urlTokens.get(urlTokens.size()-1);
if (pdfAnnotations != null) {
for (PDFAnnotation pdfAnnotation : pdfAnnotations) {
if (pdfAnnotation.getType() != null && pdfAnnotation.getType() == PDFAnnotation.Type.URI) {
if (pdfAnnotation.cover(lastToken)) {
//System.out.println("found overlapping PDF annotation for URL: " + pdfAnnotation.getDestination());
targetAnnotation = pdfAnnotation;
break;
}
}
}
}
}
if (targetAnnotation != null) {
String destination = targetAnnotation.getDestination();
int destinationPos = 0;
if (destination.indexOf(urlString) != -1) {
destinationPos = destination.indexOf(urlString)+urlString.length();
}
if (endTokensIndex < layoutTokens.size()-1) {
for(int j=endTokensIndex+1; j<layoutTokens.size(); j++) {
LayoutToken nextToken = layoutTokens.get(j);
if ("\n".equals(nextToken.getText()) ||
" ".equals(nextToken.getText()) ||
nextToken.getText().length() == 0) {
endPos += nextToken.getText().length();
urlTokens.add(nextToken);
continue;
}
int pos = destination.indexOf(nextToken.getText(), destinationPos);
if (pos != -1) {
endPos += nextToken.getText().length();
destinationPos = pos + nextToken.getText().length();
urlTokens.add(nextToken);
} else
break;
}
}
}
// finally avoid ending a URL by a dot, because it can harm the sentence segmentation
if (text.charAt(endPos-1) == '.')
endPos = endPos-1;
OffsetPosition position = new OffsetPosition();
position.start = startPos;
position.end = endPos;
resultPositions.add(position);
}
return resultPositions;
}
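    /*
     * Usage sketch (illustrative): refining regex URL matches with the PDF's
     * own link annotations. layoutTokens and pdfAnnotations are assumed to
     * come from a processed document; text must be the exact string formed by
     * the tokens, since the returned offsets index into it.
     *
     *   String text = LayoutTokensUtil.toText(layoutTokens);
     *   List<OffsetPosition> urls = Lexicon.characterPositionsUrlPatternWithPdfAnnotations(
     *           layoutTokens, pdfAnnotations, text);
     *   for (OffsetPosition p : urls) {
     *       String url = text.substring(p.start, p.end);
     *   }
     */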
/**
* Identify in tokenized input the positions of an email address pattern with token positions
*/
public List<OffsetPosition> tokenPositionsEmailPattern(List<LayoutToken> tokens) {
//List<OffsetPosition> result = new ArrayList<OffsetPosition>();
String text = LayoutTokensUtil.toText(tokens);
if (text.indexOf("@") == -1)
return new ArrayList<OffsetPosition>();
List<OffsetPosition> textResult = new ArrayList<OffsetPosition>();
Matcher emailMatcher = TextUtilities.emailPattern.matcher(text);
while (emailMatcher.find()) {
//System.out.println(urlMatcher.start() + " / " + urlMatcher.end() + " / " + text.substring(urlMatcher.start(), urlMatcher.end()));
textResult.add(new OffsetPosition(emailMatcher.start(), emailMatcher.end()));
}
return Utilities.convertStringOffsetToTokenOffset(textResult, tokens);
}
}
| 43,784 | 37.509235 | 168 |
java
|
grobid
|
grobid-master/grobid-core/src/main/java/org/grobid/core/analyzers/ArabicChars.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.grobid.core.analyzers;
/**
* @author Chihebeddine Ammar
*/
public class ArabicChars {
/**
* Method for mapping some Arabic characters to their equivalent ASCII codes.
*/
public static char arabicCharacters(char c) {
char car;
switch (c) {
case '،':
car = ',';
break;
case '؛':
car = ';';
break;
case '؟':
car = '?';
break;
case '٠':
car = '0';
break;
case '١':
car = '1';
break;
case '٢':
car = '2';
break;
case '٣':
car = '3';
break;
case '٤':
car = '4';
break;
case '٥':
car = '5';
break;
case '٦':
car = '6';
break;
case '٧':
car = '7';
break;
case '٨':
car = '8';
break;
case '٩':
car = '9';
break;
default:
car = c;
break;
}
return car;
}
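    /*
     * Usage sketch (illustrative, not part of the original file): normalizing
     * a string character by character, e.g. Arabic-Indic digits and Arabic
     * punctuation to their ASCII equivalents:
     *
     *   StringBuilder sb = new StringBuilder();
     *   for (char c : "١٢٣،".toCharArray()) {
     *       sb.append(ArabicChars.arabicCharacters(c));
     *   }
     *   // sb.toString() -> "123,"
     */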
}
| 1,793 | 22 | 79 |
java
|