// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/ReferenceSegmenterTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorReferenceSegmenter;
import org.grobid.trainer.sax.TEIReferenceSegmenterSaxParser;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.StringTokenizer;
public class ReferenceSegmenterTrainer extends AbstractTrainer {
public static final Logger LOGGER = LoggerFactory.getLogger(ReferenceSegmenterTrainer.class);
public ReferenceSegmenterTrainer() {
super(GrobidModels.REFERENCE_SEGMENTER);
}
@Override
public int createCRFPPData(File corpusPath, File trainingOutputPath) {
return createCRFPPData(corpusPath, trainingOutputPath, null, 1.0);
}
@Override
public int createCRFPPData(File corpusDir, File trainingOutputPath, File evaluationOutputPath, double splitRatio) {
int totalExamples = 0;
try {
LOGGER.info("Corpus directory: " + corpusDir);
if (trainingOutputPath != null) {
LOGGER.info("output path for training data: " + trainingOutputPath);
}
if (evaluationOutputPath != null) {
LOGGER.info("output path for evaluation data: " + evaluationOutputPath);
}
File teiCorpusDir = new File(corpusDir.getAbsolutePath() + "/tei/");
if (!teiCorpusDir.exists()) {
throw new IllegalStateException("Folder " + teiCorpusDir.getAbsolutePath() +
" does not exist. Please have a look!");
}
// we convert the tei files into the usual CRF label format
// we process all tei files in the corpus tei directory
final File[] refFiles = teiCorpusDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return (name.endsWith(".xml") || name.endsWith(".tei"));
}
});
if (refFiles == null) {
throw new IllegalStateException("Folder " + teiCorpusDir.getAbsolutePath() +
" does not seem to contain training data. Please check");
}
LOGGER.info("Processing " + refFiles.length + " tei files");
// the file for writing the training data
OutputStream trainingOS = null;
Writer trainingWriter = null;
if (trainingOutputPath != null) {
trainingOS = new FileOutputStream(trainingOutputPath);
trainingWriter = new OutputStreamWriter(trainingOS, "UTF8");
}
// the file for writing the evaluation data
OutputStream evaluationOS = null;
Writer evaluationWriter = null;
if (evaluationOutputPath != null) {
evaluationOS = new FileOutputStream(evaluationOutputPath);
evaluationWriter = new OutputStreamWriter(evaluationOS, "UTF8");
}
System.out.println("training data under: " + trainingOutputPath);
System.out.println("evaluation data under: " + evaluationOutputPath);
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
// List<List<OffsetPosition>> placesPositions;
int n = 0;
for (; n < refFiles.length; n++) {
final File teifile = refFiles[n];
final TEIReferenceSegmenterSaxParser saxParser = new TEIReferenceSegmenterSaxParser();
String name = teifile.getName();
// get a new instance of parser
final SAXParser p = spf.newSAXParser();
p.parse(teifile, saxParser);
final List<String> labeled = saxParser.getLabeledResult();
totalExamples += saxParser.getTotalReferences();
// we can now add the features
// we open the featured file
File rawCorpusDir = new File(corpusDir.getAbsolutePath() + "/raw/");
if (!rawCorpusDir.exists()) {
throw new IllegalStateException("Folder " + rawCorpusDir.getAbsolutePath() +
" does not exist. Please have a look!");
}
File theRawFile = new File(rawCorpusDir.getAbsolutePath() + File.separator +
name.replace(".tei.xml", ""));
if (!theRawFile.exists()) {
System.out.println("Raw file " + theRawFile +
" does not exist. Please have a look!");
continue;
}
int q = 0;
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(theRawFile), "UTF8"));
StringBuilder referenceText = new StringBuilder();
String line;
while ((line = bis.readLine()) != null) {
int ii = line.indexOf(' ');
String token = null;
if (ii != -1) {
token = line.substring(0, ii);
// unicode normalisation of the token - it should not be necessary if the training data
// has been generated by a recent version of grobid
token = UnicodeUtil.normaliseTextAndRemoveSpaces(token);
}
// we get the label in the labelled data file for the same token
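// the raw and labeled files can drift out of sync (extra or missing tokens), so we
// scan forward a bounded window in the labeled data to re-align on the current token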
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
StringTokenizer st = new StringTokenizer(localLine, " ");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
// unicode normalisation of the token - it should not be necessary if the training data
// has been generated by a recent version of grobid
localToken = UnicodeUtil.normaliseTextAndRemoveSpaces(localToken);
if (localToken.equals(token)) {
String tag = st.nextToken();
referenceText.append(line).append(" ").append(tag).append("\n");
// lastTag = tag;
// found = true;
q = pp + 1;
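// jump pp past the window so the (pp - q > 5) guard below exits the lookahead loop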
pp = q + 10;
}
}
if (pp - q > 5) {
break;
}
}
}
bis.close();
// format with features for sequence tagging...
//writer2.write(referenceText.toString() + "\n");
// we can now add the features
//String featureVector = FeaturesVectorReferenceSegmenter.addFeaturesReferenceSegmenter(labeled);
// format with features for sequence tagging...
// given the split ratio we write either in the training file or the evaluation file
//affAdd = affAdd.replace("\n \n", "\n \n");
//String[] chunks = featureVector.split("\n\n");
//for (String chunk : chunks)
{
if ((trainingWriter != null) && (evaluationWriter == null)) {
trainingWriter.write(referenceText.toString() + "\n \n");
} else if ((trainingWriter == null) && (evaluationWriter != null)) {
evaluationWriter.write(referenceText.toString() + "\n \n");
} else {
if (Math.random() <= splitRatio && trainingWriter != null) {
trainingWriter.write(referenceText.toString() + "\n \n");
} else if (evaluationWriter != null) {
evaluationWriter.write(referenceText.toString() + "\n \n");
}
}
}
}
if (trainingWriter != null) {
trainingWriter.close();
trainingOS.close();
}
if (evaluationWriter != null) {
evaluationWriter.close();
evaluationOS.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while trainining/evaluating reference segmenter model.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new ReferenceSegmenterTrainer());
System.out.println(AbstractTrainer.runEvaluation(new ReferenceSegmenterTrainer()));
System.exit(0);
}
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/AbstractTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.text.RandomStringGenerator;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.engines.tagging.GenericTagger;
import org.grobid.core.engines.tagging.GrobidCRFEngine;
import org.grobid.core.engines.tagging.TaggerFactory;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.Utilities;
import org.grobid.trainer.evaluation.EvaluationUtilities;
import org.grobid.trainer.evaluation.LabelResult;
import org.grobid.trainer.evaluation.ModelStats;
import org.grobid.trainer.evaluation.Stats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
public abstract class AbstractTrainer implements Trainer {
protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractTrainer.class);
public static final String OLD_MODEL_EXT = ".old";
public static final String NEW_MODEL_EXT = ".new";
// default training parameters (only exploited by Wapiti)
protected double epsilon = 0.0; // size of the interval for stopping criterion
protected int window = 0; // similar to CRF++
protected int nbMaxIterations = 0; // maximum number of iterations in training
protected GrobidModel model;
private File trainDataPath;
private File evalDataPath;
private GenericTagger tagger;
private RandomStringGenerator randomStringGenerator;
public AbstractTrainer(final GrobidModel model) {
GrobidFactory.getInstance().createEngine();
this.model = model;
if (model.equals(GrobidModels.DUMMY)) {
// In case of dummy model we do not initialise (and create) temporary files
return;
}
this.trainDataPath = getTempTrainingDataPath();
this.evalDataPath = getTempEvaluationDataPath();
this.randomStringGenerator = new RandomStringGenerator.Builder()
.withinRange('a', 'z')
.build();
}
public void setParams(double epsilon, int window, int nbMaxIterations) {
this.epsilon = epsilon;
this.window = window;
this.nbMaxIterations = nbMaxIterations;
}
@Override
public int createCRFPPData(final File corpusDir, final File trainingOutputPath) {
return createCRFPPData(corpusDir, trainingOutputPath, null, 1.0);
}
@Override
public void train() {
train(false);
}
@Override
public void train(boolean incremental) {
final File dataPath = trainDataPath;
createCRFPPData(getCorpusPath(), dataPath);
GenericTrainer trainer = TrainerFactory.getTrainer(model);
trainer.setEpsilon(GrobidProperties.getEpsilon(model));
trainer.setWindow(GrobidProperties.getWindow(model));
trainer.setNbMaxIterations(GrobidProperties.getNbMaxIterations(model));
File dirModelPath = new File(GrobidProperties.getModelPath(model).getAbsolutePath()).getParentFile();
if (!dirModelPath.exists()) {
LOGGER.warn("Cannot find the destination directory " + dirModelPath.getAbsolutePath() + " for the model " + model.getModelName() + ". Creating it.");
dirModelPath.mkdir();
//throw new GrobidException("Cannot find the destination directory " + dirModelPath.getAbsolutePath() + " for the model " + model.toString());
}
final File tempModelPath = new File(GrobidProperties.getModelPath(model).getAbsolutePath() + NEW_MODEL_EXT);
final File oldModelPath = GrobidProperties.getModelPath(model);
trainer.train(getTemplatePath(), dataPath, tempModelPath, GrobidProperties.getWapitiNbThreads(), model, incremental);
// if we are here, that means that training succeeded
// rename model for CRF sequence labellers (not with DeLFT deep learning models)
if (GrobidProperties.getGrobidCRFEngine(this.model) != GrobidCRFEngine.DELFT)
renameModels(oldModelPath, tempModelPath);
}
protected void renameModels(final File oldModelPath, final File tempModelPath) {
if (oldModelPath.exists()) {
if (!oldModelPath.renameTo(new File(oldModelPath.getAbsolutePath() + OLD_MODEL_EXT))) {
LOGGER.warn("Unable to rename old model file: " + oldModelPath.getAbsolutePath());
return;
}
}
if (!tempModelPath.renameTo(oldModelPath)) {
LOGGER.warn("Unable to rename new model file: " + tempModelPath);
}
}
@Override
public String evaluate() {
return evaluate(false);
}
@Override
public String evaluate(boolean includeRawResults) {
createCRFPPData(getEvalCorpusPath(), evalDataPath);
return EvaluationUtilities.evaluateStandard(evalDataPath.getAbsolutePath(), getTagger()).toString(includeRawResults);
}
@Override
public String evaluate(GenericTagger tagger, boolean includeRawResults) {
createCRFPPData(getEvalCorpusPath(), evalDataPath);
return EvaluationUtilities.evaluateStandard(evalDataPath.getAbsolutePath(), tagger).toString(includeRawResults);
}
@Override
public String splitTrainEvaluate(Double split) {
return splitTrainEvaluate(split, false);
}
@Override
public String splitTrainEvaluate(Double split, boolean incremental) {
final File dataPath = trainDataPath;
createCRFPPData(getCorpusPath(), dataPath, evalDataPath, split);
GenericTrainer trainer = TrainerFactory.getTrainer(model);
if (epsilon != 0.0)
trainer.setEpsilon(epsilon);
if (window != 0)
trainer.setWindow(window);
if (nbMaxIterations != 0)
trainer.setNbMaxIterations(nbMaxIterations);
File dirModelPath = new File(GrobidProperties.getModelPath(model).getAbsolutePath()).getParentFile();
if (!dirModelPath.exists()) {
LOGGER.warn("Cannot find the destination directory " + dirModelPath.getAbsolutePath() + " for the model " + model.getModelName() + ". Creating it.");
dirModelPath.mkdir();
//throw new GrobidException("Cannot find the destination directory " + dirModelPath.getAbsolutePath() + " for the model " + model.toString());
}
final File tempModelPath = new File(GrobidProperties.getModelPath(model).getAbsolutePath() + NEW_MODEL_EXT);
final File oldModelPath = GrobidProperties.getModelPath(model);
trainer.train(getTemplatePath(), dataPath, tempModelPath, GrobidProperties.getWapitiNbThreads(), model, incremental);
// if we are here, that means that training succeeded
renameModels(oldModelPath, tempModelPath);
return EvaluationUtilities.evaluateStandard(evalDataPath.getAbsolutePath(), getTagger()).toString();
}
@Override
public String nFoldEvaluate(int numFolds) {
return nFoldEvaluate(numFolds, false);
}
@Override
public String nFoldEvaluate(int numFolds, boolean includeRawResults) {
final File dataPath = trainDataPath;
createCRFPPData(getCorpusPath(), dataPath);
GenericTrainer trainer = TrainerFactory.getTrainer(model);
String randomString = randomStringGenerator.generate(10);
// Load in memory and Shuffle
Path dataPath2 = Paths.get(dataPath.getAbsolutePath());
List<String> trainingData = loadAndShuffle(dataPath2);
// Split into folds
List<ImmutablePair<String, String>> foldMap = splitNFold(trainingData, numFolds);
// Train and evaluate
if (epsilon != 0.0)
trainer.setEpsilon(epsilon);
if (window != 0)
trainer.setWindow(window);
if (nbMaxIterations != 0)
trainer.setNbMaxIterations(nbMaxIterations);
//We dump the model in the tmp directory
File tmpDirectory = new File(GrobidProperties.getTempPath().getAbsolutePath());
if (!tmpDirectory.exists()) {
LOGGER.warn("Cannot find the destination directory " + tmpDirectory);
}
List<String> tempFilePaths = new ArrayList<>();
// Output
StringBuilder sb = new StringBuilder();
sb.append("Recap results for each fold:").append("\n\n");
AtomicInteger counter = new AtomicInteger(0);
List<ModelStats> evaluationResults = foldMap.stream().map(fold -> {
sb.append("\n");
sb.append("====================== Fold " + counter.get() + " ====================== ").append("\n");
System.out.println("====================== Fold " + counter.get() + " ====================== ");
final File tempModelPath = new File(tmpDirectory + File.separator + getModel().getModelName()
+ "_nfold_" + counter.getAndIncrement() + "_" + randomString + ".wapiti");
sb.append("Saving model in " + tempModelPath).append("\n");
// Collecting generated paths to be deleted at the end of the process
tempFilePaths.add(tempModelPath.getAbsolutePath());
tempFilePaths.add(fold.getLeft());
tempFilePaths.add(fold.getRight());
sb.append("Training input data: " + fold.getLeft()).append("\n");
trainer.train(getTemplatePath(), new File(fold.getLeft()), tempModelPath, GrobidProperties.getWapitiNbThreads(), model, false);
sb.append("Evaluation input data: " + fold.getRight()).append("\n");
//TODO: find a better solution!!
GrobidModel tmpModel = new GrobidModel() {
@Override
public String getFolderName() {
return tmpDirectory.getAbsolutePath();
}
@Override
public String getModelPath() {
return tempModelPath.getAbsolutePath();
}
@Override
public String getModelName() {
return model.getModelName();
}
@Override
public String getTemplateName() {
return model.getTemplateName();
}
};
ModelStats modelStats = EvaluationUtilities.evaluateStandard(fold.getRight(), TaggerFactory.getTagger(tmpModel));
sb.append(modelStats.toString(includeRawResults));
sb.append("\n");
sb.append("\n");
return modelStats;
}).collect(Collectors.toList());
sb.append("\n").append("Summary results: ").append("\n");
Comparator<ModelStats> f1ScoreComparator = Comparator.comparingDouble(
stats -> stats.getFieldStats().getMicroAverageF1());
Optional<ModelStats> worstModel = evaluationResults.stream().min(f1ScoreComparator);
sb.append("Worst fold").append("\n");
ModelStats worstModelStats = worstModel.orElseThrow(() ->
new GrobidException("Something went wrong when computing evaluations - worst model metrics not found."));
sb.append(worstModelStats.toString()).append("\n");
sb.append("Best fold:").append("\n");
Optional<ModelStats> bestModel = evaluationResults.stream().max(f1ScoreComparator);
ModelStats bestModelStats = bestModel.orElseThrow(() ->
new GrobidException("Something went wrong when computing evaluations - best model metrics not found."));
sb.append(bestModelStats.toString()).append("\n").append("\n");
// Averages
sb.append("Average over " + numFolds + " folds: ").append("\n");
TreeMap<String, LabelResult> averagesLabelStats = new TreeMap<>();
int totalInstances = 0;
int correctInstances = 0;
for (ModelStats ms : evaluationResults) {
totalInstances += ms.getTotalInstances();
correctInstances += ms.getCorrectInstance();
for (Map.Entry<String, LabelResult> entry : ms.getFieldStats().getLabelsResults().entrySet()) {
String key = entry.getKey();
LabelResult current = entry.getValue();
LabelResult cumulated = averagesLabelStats.get(key);
if (cumulated != null) {
// accumulate per-label sums; they are averaged over the folds below
cumulated.setAccuracy(cumulated.getAccuracy() + current.getAccuracy());
cumulated.setF1Score(cumulated.getF1Score() + current.getF1Score());
cumulated.setRecall(cumulated.getRecall() + current.getRecall());
cumulated.setPrecision(cumulated.getPrecision() + current.getPrecision());
cumulated.setSupport(cumulated.getSupport() + current.getSupport());
} else {
cumulated = new LabelResult(key);
cumulated.setAccuracy(current.getAccuracy());
cumulated.setF1Score(current.getF1Score());
cumulated.setRecall(current.getRecall());
cumulated.setPrecision(current.getPrecision());
cumulated.setSupport(current.getSupport());
averagesLabelStats.put(key, cumulated);
}
}
}
sb.append(String.format("\n%-20s %-12s %-12s %-12s %-12s %-7s\n\n",
"label",
"accuracy",
"precision",
"recall",
"f1",
"support"));
for (LabelResult labelResult : averagesLabelStats.values()) {
labelResult.setAccuracy(labelResult.getAccuracy() / evaluationResults.size());
labelResult.setF1Score(labelResult.getF1Score() / evaluationResults.size());
labelResult.setPrecision(labelResult.getPrecision() / evaluationResults.size());
labelResult.setRecall(labelResult.getRecall() / evaluationResults.size());
sb.append(labelResult.toString());
}
OptionalDouble averageF1 = evaluationResults.stream().mapToDouble(e -> e.getFieldStats().getMicroAverageF1()).average();
OptionalDouble averagePrecision = evaluationResults.stream().mapToDouble(e -> e.getFieldStats().getMicroAveragePrecision()).average();
OptionalDouble averageRecall = evaluationResults.stream().mapToDouble(e -> e.getFieldStats().getMicroAverageRecall()).average();
OptionalDouble averageAccuracy = evaluationResults.stream().mapToDouble(e -> e.getFieldStats().getMicroAverageAccuracy()).average();
double avgAccuracy = averageAccuracy.orElseThrow(() ->
new GrobidException("Missing average accuracy. Something went wrong. Please check."));
double avgF1 = averageF1.orElseThrow(() ->
new GrobidException("Missing average F1. Something went wrong. Please check."));
double avgPrecision = averagePrecision.orElseThrow(() ->
new GrobidException("Missing average precision. Something went wrong. Please check."));
double avgRecall = averageRecall.orElseThrow(() ->
new GrobidException("Missing average recall. Something went wrong. Please check."));
sb.append("\n");
sb.append(String.format("%-20s %-12s %-12s %-12s %-7s\n",
"all ",
TextUtilities.formatTwoDecimals(avgAccuracy * 100),
TextUtilities.formatTwoDecimals(avgPrecision * 100),
TextUtilities.formatTwoDecimals(avgRecall * 100),
TextUtilities.formatTwoDecimals(avgF1 * 100))
// String.valueOf(supportSum))
);
sb.append("\n===== Instance-level results =====\n\n");
double averageTotalInstances = (double) totalInstances / numFolds;
double averageCorrectInstances = (double) correctInstances / numFolds;
sb.append(String.format("%-27s %s\n", "Total expected instances:", TextUtilities.formatTwoDecimals(averageTotalInstances)));
sb.append(String.format("%-27s %s\n", "Correct instances:", TextUtilities.formatTwoDecimals(averageCorrectInstances)));
sb.append(String.format("%-27s %s\n",
"Instance-level recall:",
TextUtilities.formatTwoDecimals(averageCorrectInstances / averageTotalInstances * 100)));
// Cleanup
tempFilePaths.stream().forEach(f -> {
try {
Files.delete(Paths.get(f));
} catch (IOException e) {
LOGGER.warn("Error while performing the cleanup after n-fold cross-validation. Cannot delete the file: " + f, e);
}
});
return sb.toString();
}
/**
* Partition the corpus into n folds, dump them into n pairs of temporary files, and return the pairs of (trainingPath, evaluationPath)
*/
protected List<ImmutablePair<String, String>> splitNFold(List<String> trainingData, int numberFolds) {
int trainingSize = CollectionUtils.size(trainingData);
int foldSize = Math.floorDiv(trainingSize, numberFolds);
if (foldSize == 0) {
throw new IllegalArgumentException("There aren't enough training data for n-fold evaluation with fold of size " + numberFolds);
}
return IntStream.range(0, numberFolds).mapToObj(foldIndex -> {
int foldStart = foldSize * foldIndex;
int foldEnd = foldStart + foldSize;
if (foldIndex == numberFolds - 1) {
foldEnd = trainingSize;
}
List<String> foldEvaluation = trainingData.subList(foldStart, foldEnd);
List<String> foldTraining0 = trainingData.subList(0, foldStart);
List<String> foldTraining1 = trainingData.subList(foldEnd, trainingSize);
List<String> foldTraining = new ArrayList<>();
foldTraining.addAll(foldTraining0);
foldTraining.addAll(foldTraining1);
//Dump Evaluation
String tempEvaluationDataPath = getTempEvaluationDataPath().getAbsolutePath();
try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(tempEvaluationDataPath))) {
writer.write(String.join("\n\n", foldEvaluation));
writer.write("\n");
} catch (IOException e) {
throw new GrobidException("Error when dumping n-fold evaluation data into files. ", e);
}
//Dump Training (note: the original data files (.train and .eval) are not deleted; the temporary fold files are cleaned up after the n-fold run)
String tempTrainingDataPath = getTempTrainingDataPath().getAbsolutePath();
try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(tempTrainingDataPath))) {
writer.write(String.join("\n\n", foldTraining));
writer.write("\n");
} catch (IOException e) {
throw new GrobidException("Error when dumping n-fold training data into files. ", e);
}
return new ImmutablePair<>(tempTrainingDataPath, tempEvaluationDataPath);
}).collect(Collectors.toList());
}
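// Worked example (illustrative, not from the original source): with 10 training
// instances and numberFolds = 3, foldSize = 3, so the evaluation slices are
// [0,3), [3,6) and, since the last fold absorbs the remainder, [6,10); each fold's
// training data is the concatenation of all the other slices.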
/**
* Load the dataset in memory and shuffle it.
*/
protected List<String> loadAndShuffle(Path dataPath) {
List<String> trainingData = load(dataPath);
Collections.shuffle(trainingData, new Random(839374947498L));
return trainingData;
}
/**
* Read a Wapiti training file into a list of Strings,
* assuming that each empty line is a delimiter between instances.
* Each list element corresponds to one instance.
* Empty lines are filtered out of the output.
*/
public List<String> load(Path dataPath) {
List<String> trainingData = new ArrayList<>();
try (Stream<String> stream = Files.lines(dataPath)) {
List<String> instance = new ArrayList<>();
ListIterator<String> iterator = stream.collect(Collectors.toList()).listIterator();
while (iterator.hasNext()) {
String current = iterator.next();
if (StringUtils.isBlank(current)) {
if (CollectionUtils.isNotEmpty(instance)) {
trainingData.add(String.join("\n", instance));
}
instance = new ArrayList<>();
} else {
instance.add(current);
}
}
if (CollectionUtils.isNotEmpty(instance)) {
trainingData.add(String.join("\n", instance));
}
} catch (IOException e) {
throw new GrobidException("Error in n-fold, when loading training data. Failing. ", e);
}
return trainingData;
}
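// Illustrative input sketch (the feature columns are hypothetical, not taken from the
// original source): the file holds one "token features... label" line per token, with
// a blank line separating instances, e.g.
//
//   Smith smith S Sm <surname>
//   John john J Jo <forename>
//
//   Doe doe D Do <surname>
//
// For this input, load() returns two list elements, one per instance.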
protected final File getTempTrainingDataPath() {
try {
return File.createTempFile(model.getModelName(), ".train", GrobidProperties.getTempPath());
} catch (IOException e) {
throw new RuntimeException("Unable to create a temporary training file for model: " + model);
}
}
protected final File getTempEvaluationDataPath() {
try {
return File.createTempFile(model.getModelName(), ".test", GrobidProperties.getTempPath());
} catch (IOException e) {
throw new RuntimeException("Unable to create a temporary evaluation file for model: " + model);
}
}
protected GenericTagger getTagger() {
if (tagger == null) {
tagger = TaggerFactory.getTagger(model);
}
return tagger;
}
protected static File getFilePath2Resources() {
File theFile = new File(GrobidProperties.getGrobidHome().getAbsoluteFile() + File.separator + ".." + File.separator
+ "grobid-trainer" + File.separator + "resources");
if (!theFile.exists()) {
theFile = new File("resources");
}
return theFile;
}
protected File getCorpusPath() {
return GrobidProperties.getCorpusPath(getFilePath2Resources(), model);
}
protected File getTemplatePath() {
return getTemplatePath(model);
}
protected File getTemplatePath(final GrobidModel model) {
return GrobidProperties.getTemplatePath(getFilePath2Resources(), model);
}
protected File getEvalCorpusPath() {
return GrobidProperties.getEvalCorpusPath(getFilePath2Resources(), model);
}
public static File getEvalCorpusBasePath() {
final String path2Evaluation = getFilePath2Resources().getAbsolutePath() + File.separator + "dataset" + File.separator + "patent"
+ File.separator + "evaluation";
return new File(path2Evaluation);
}
@Override
public GrobidModel getModel() {
return model;
}
public static void runTraining(final Trainer trainer) {
runTraining(trainer, false);
}
public static void runTraining(final Trainer trainer, boolean incremental) {
long start = System.currentTimeMillis();
trainer.train(incremental);
long end = System.currentTimeMillis();
System.out.println("Model for " + trainer.getModel() + " created in " + (end - start) + " ms");
}
public File getEvalDataPath() {
return evalDataPath;
}
public static String runEvaluation(final Trainer trainer, boolean includeRawResults) {
long start = System.currentTimeMillis();
String report = "";
try {
report = trainer.evaluate(includeRawResults);
} catch (Exception e) {
throw new GrobidException("An exception occurred while evaluating Grobid.", e);
}
long end = System.currentTimeMillis();
report += "\n\nEvaluation for " + trainer.getModel() + " model is realized in " + (end - start) + " ms";
return report;
}
public static String runEvaluation(final Trainer trainer) {
return trainer.evaluate(false);
}
public static String runSplitTrainingEvaluation(final Trainer trainer, Double split) {
return runSplitTrainingEvaluation(trainer, split, false);
}
public static String runSplitTrainingEvaluation(final Trainer trainer, Double split, boolean incremental) {
long start = System.currentTimeMillis();
String report = "";
try {
report = trainer.splitTrainEvaluate(split, incremental);
} catch (Exception e) {
throw new GrobidException("An exception occurred while evaluating Grobid.", e);
}
long end = System.currentTimeMillis();
report += "\n\nSplit, training and evaluation for " + trainer.getModel() + " model is realized in " + (end - start) + " ms";
return report;
}
public static void runNFoldEvaluation(final Trainer trainer, int numFolds, Path outputFile) {
runNFoldEvaluation(trainer, numFolds, outputFile, false);
}
public static void runNFoldEvaluation(final Trainer trainer, int numFolds, Path outputFile, boolean includeRawResults) {
String report = runNFoldEvaluation(trainer, numFolds, includeRawResults);
try (BufferedWriter writer = Files.newBufferedWriter(outputFile)) {
writer.write(report);
writer.write("\n");
} catch (IOException e) {
throw new GrobidException("Error when dumping n-fold training data into files. ", e);
}
}
public static String runNFoldEvaluation(final Trainer trainer, int numFolds) {
return runNFoldEvaluation(trainer, numFolds, false);
}
public static String runNFoldEvaluation(final Trainer trainer, int numFolds, boolean includeRawResults) {
long start = System.currentTimeMillis();
String report = "";
try {
report = trainer.nFoldEvaluate(numFolds, includeRawResults);
} catch (Exception e) {
throw new GrobidException("An exception occurred while evaluating Grobid.", e);
}
long end = System.currentTimeMillis();
report += "\n\nN-Fold evaluation for " + trainer.getModel() + " model is realized in " + (end - start) + " ms";
return report;
}
/**
* Dispatch the example to the training or evaluation data, based on the split ratio and
* the drawing of a random number
*/
public Writer dispatchExample(Writer writerTraining, Writer writerEvaluation, double splitRatio) {
Writer writer = null;
if ((writerTraining == null) && (writerEvaluation != null)) {
writer = writerEvaluation;
} else if ((writerTraining != null) && (writerEvaluation == null)) {
writer = writerTraining;
} else {
if (Math.random() <= splitRatio)
writer = writerTraining;
else
writer = writerEvaluation;
}
return writer;
}
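// Hypothetical usage sketch (not part of the original source): a trainer subclass can
// route each example through dispatchExample instead of duplicating the
// writer-selection logic in createCRFPPData:
//
//   Writer target = dispatchExample(trainingWriter, evaluationWriter, 0.8);
//   if (target != null)
//       target.write(example + "\n\n");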
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/FigureTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.trainer.sax.TEIFigureSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.StringTokenizer;
public class FigureTrainer extends AbstractTrainer {
public FigureTrainer() {
super(GrobidModels.FIGURE);
}
@Override
public int createCRFPPData(File corpusPath, File outputFile) {
return addFeaturesFigure(corpusPath.getAbsolutePath() + "/tei",
corpusPath.getAbsolutePath() + "/raw",
outputFile, null, 1.0);
}
/**
* Add the selected features for the figure model
*
* @param corpusDir path where corpus files are located
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return addFeaturesFigure(corpusDir.getAbsolutePath() + "/tei",
corpusDir.getAbsolutePath() + "/raw",
trainingOutputPath,
evalOutputPath,
splitRatio);
}
/**
* Add the selected features for the figure model
*
* @param sourceTEIPathLabel path to corpus TEI files
* @param sourceRawPathLabel path to corpus raw files
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return number of examples
*/
public int addFeaturesFigure(String sourceTEIPathLabel,
String sourceRawPathLabel,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceRawPathLabel: " + sourceRawPathLabel);
System.out.println("trainingOutputPath: " + trainingOutputPath);
System.out.println("evalOutputPath: " + evalOutputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml") || name.endsWith(".tei");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
for (File tf : refFiles) {
String name = tf.getName();
System.out.println(name);
// the full text SAX parser can be reused for the figures
TEIFigureSaxParser parser2 = new TEIFigureSaxParser();
//parser2.setMode(TEIFulltextSaxParser.FIGURE);
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
//totalExamples += parser2.n;
// we can now add the features
// we open the featured file
File theRawFile = new File(sourceRawPathLabel + File.separator + name.replace(".tei.xml", ""));
if (!theRawFile.exists()) {
System.out.println("Raw file " + theRawFile +
" does not exist. Please have a look!");
continue;
}
int q = 0;
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(theRawFile), "UTF8"));
StringBuilder figure = new StringBuilder();
String line;
while ((line = bis.readLine()) != null) {
int ii = line.indexOf('\t');
if (ii == -1) {
ii = line.indexOf(' ');
}
String token = null;
if (ii != -1) {
token = line.substring(0, ii).trim();
// unicode normalisation of the token - it should not be necessary if the training data
// has been generated by a recent version of grobid
token = UnicodeUtil.normaliseTextAndRemoveSpaces(token);
}
// we get the label in the labelled data file for the same token
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
if (localLine.trim().length() == 0) {
figure.append("\n");
continue;
}
StringTokenizer st = new StringTokenizer(localLine, " \t");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
// unicode normalisation of the token - it should not be necessary if the training data
// has been generated by a recent version of grobid
localToken = UnicodeUtil.normaliseTextAndRemoveSpaces(localToken);
if (localToken.equals(token)) {
String tag = st.nextToken();
line = line.replace("\t", " ").replace("  ", " ");
figure.append(line).append(" ").append(tag).append("\n");
q = pp + 1;
pp = q + 10;
}
}
if (pp - q > 5) {
break;
}
}
}
bis.close();
if ((writer2 != null) && (writer3 == null))
writer2.write(figure.toString() + "\n");
else if ((writer2 == null) && (writer3 != null))
writer3.write(figure.toString() + "\n");
else {
if (Math.random() <= splitRatio)
writer2.write(figure.toString() + "\n");
else
writer3.write(figure.toString() + "\n");
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occured while running training for the figure model.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new FigureTrainer());
System.out.println(AbstractTrainer.runEvaluation(new FigureTrainer()));
System.exit(0);
}
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/CRFPPGenericTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.chasen.crfpp.CRFPPTrainer;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/**
* Note: Usage of CRF++ in GROBID is deprecated.
*
* @deprecated use WapitiTrainer or DelftTrainer (requires http://github.com/kermitt2/delft)
*/
@Deprecated
public class CRFPPGenericTrainer implements GenericTrainer {
public static final Logger LOGGER = LoggerFactory.getLogger(CRFPPGenericTrainer.class);
public static final String CRF = "crf";
private final CRFPPTrainer crfppTrainer;
// default training parameters (not exploited by CRF++ so far; exploiting them would require extending the JNI)
protected double epsilon = 0.00001; // default size of the interval for stopping criterion
protected int window = 20; // default similar to CRF++
protected int nbMaxIterations = 6000;
public CRFPPGenericTrainer() {
crfppTrainer = new CRFPPTrainer();
}
@Override
public void train(File template, File trainingData, File outputModel, int numThreads, GrobidModel model) {
train(template, trainingData, outputModel, numThreads, model, false);
}
@Override
public void train(File template, File trainingData, File outputModel, int numThreads, GrobidModel model, boolean incremental) {
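// note: the deprecated CRF++ trainer ignores the incremental flag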
crfppTrainer.train(template.getAbsolutePath(), trainingData.getAbsolutePath(), outputModel.getAbsolutePath(), numThreads);
if (!crfppTrainer.what().isEmpty()) {
LOGGER.warn("CRF++ Trainer warnings:\n" + crfppTrainer.what());
} else {
LOGGER.info("No CRF++ Trainer warnings!");
}
}
@Override
public String getName() {
return CRF;
}
@Override
public void setEpsilon(double epsilon) {
this.epsilon = epsilon;
}
@Override
public void setWindow(int window) {
this.window = window;
}
@Override
public double getEpsilon() {
return epsilon;
}
@Override
public int getWindow() {
return window;
}
@Override
public void setNbMaxIterations(int iterations) {
this.nbMaxIterations = iterations;
}
@Override
public int getNbMaxIterations() {
return nbMaxIterations;
}
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/GenericTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import java.io.File;
public interface GenericTrainer {
void train(File template, File trainingData, File outputModel, int numThreads, GrobidModel model);
void train(File template, File trainingData, File outputModel, int numThreads, GrobidModel model, boolean incremental);
String getName();
void setEpsilon(double epsilon);
void setWindow(int window);
double getEpsilon();
int getWindow();
int getNbMaxIterations();
void setNbMaxIterations(int iterations);
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/MonographTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.trainer.sax.TEIMonographSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.ArrayList;
import java.util.StringTokenizer;
import org.apache.commons.io.FileUtils;
public class MonographTrainer extends AbstractTrainer {
public MonographTrainer() {
super(GrobidModels.MONOGRAPH);
}
@Override
public int createCRFPPData(File corpusPath, File outputFile) {
return addFeaturesSegmentation(corpusPath.getAbsolutePath() + "/tei",
corpusPath.getAbsolutePath() + "/raw",
outputFile, null, 1.0);
}
/**
* Add the selected features for the monograph model
*
* @param corpusDir path where corpus files are located
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return addFeaturesSegmentation(corpusDir.getAbsolutePath() + "/tei",
corpusDir.getAbsolutePath() + "/raw",
trainingOutputPath,
evalOutputPath,
splitRatio);
}
/**
* Add the selected features for the monograph model
*
* @param sourceTEIPathLabel path to corpus TEI files
* @param sourceRawPathLabel path to corpus raw files
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return number of examples
*/
public int addFeaturesSegmentation(String sourceTEIPathLabel,
String sourceRawPathLabel,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceRawPathLabel: " + sourceRawPathLabel);
System.out.println("trainingOutputPath: " + trainingOutputPath);
System.out.println("evalOutputPath: " + evalOutputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml") || name.endsWith(".tei");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
for (File tf : refFiles) {
String name = tf.getName();
LOGGER.info("Processing: " + name);
TEIMonographSaxParser parser2 = new TEIMonographSaxParser();
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
// we can now add the features
// we open the featured file
try {
File theRawFile = new File(sourceRawPathLabel + File.separator + name.replace(".tei.xml", ""));
if (!theRawFile.exists()) {
LOGGER.error("The raw file does not exist: " + theRawFile.getPath());
continue;
}
} catch (Exception e) {
LOGGER.error("Fail to open or process raw file", e);
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occured while running Grobid.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new MonographTrainer());
System.out.println(AbstractTrainer.runEvaluation(new MonographTrainer()));
System.exit(0);
}
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/NameCitationTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorName;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.trainer.sax.TEIAuthorSaxParser;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.*;
public class NameCitationTrainer extends AbstractTrainer {
public NameCitationTrainer() {
super(GrobidModels.NAMES_CITATION);
}
/**
* Add the selected features to a citation name example set
*
* @param corpusDir
* a path where corpus files are located
* @param modelOutputPath
* path where to store the temporary training data
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir, final File modelOutputPath) {
return createCRFPPData(corpusDir, modelOutputPath, null, 1.0);
}
/**
* Add the selected features to a citation name example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourcePathLabel: " + corpusDir);
if (trainingOutputPath != null)
System.out.println("outputPath for training data: " + trainingOutputPath);
if (evalOutputPath != null)
System.out.println("outputPath for evaluation data: " + evalOutputPath);
// we convert the tei files into the usual CRF label format
// we process all tei files in the output directory
final File[] refFiles = corpusDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml");
}
});
if (refFiles == null) {
throw new IllegalStateException("Folder " + corpusDir.getAbsolutePath()
+ " does not seem to contain training data. Please check");
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
List<OffsetPosition> titlePositions = null;
List<OffsetPosition> suffixPositions = null;
int n = 0;
for (; n < refFiles.length; n++) {
final File teifile = refFiles[n];
String name = teifile.getName();
System.out.println(name);
final TEIAuthorSaxParser parser2 = new TEIAuthorSaxParser();
// get a new instance of parser
final SAXParser p = spf.newSAXParser();
p.parse(teifile, parser2);
final List<List<String>> allLabeled = parser2.getLabeledResult();
final List<List<LayoutToken>> allTokens = parser2.getTokensResult();
totalExamples += parser2.n;
// we can now add the features
for(int i=0; i<allTokens.size(); i++) {
// fix the offsets
int pos = 0;
for(LayoutToken token : allTokens.get(i)) {
token.setOffset(pos);
pos += token.getText().length();
}
titlePositions = Lexicon.getInstance().tokenPositionsPersonTitle(allTokens.get(i));
suffixPositions = Lexicon.getInstance().tokenPositionsPersonSuffix(allTokens.get(i));
final String names = FeaturesVectorName.addFeaturesName(allTokens.get(i),
allLabeled.get(i), titlePositions, suffixPositions);
if ( (writer2 != null) && (writer3 == null) )
writer2.write(names + "\n \n");
else if ( (writer2 == null) && (writer3 != null) )
writer3.write(names + "\n \n");
else {
if (Math.random() <= splitRatio)
writer2.write(names + "\n \n");
else
writer3.write(names + "\n \n");
}
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
Trainer trainer = new NameCitationTrainer();
AbstractTrainer.runTraining(trainer);
System.out.println(AbstractTrainer.runEvaluation(trainer));
System.exit(0);
}
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/FulltextTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.trainer.sax.TEIFulltextSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.ArrayList;
import java.util.StringTokenizer;
import org.apache.commons.io.FileUtils;
public class FulltextTrainer extends AbstractTrainer{
public FulltextTrainer() {
super(GrobidModels.FULLTEXT);
}
@Override
public int createCRFPPData(File corpusPath, File outputFile) {
return addFeaturesFulltext(corpusPath.getAbsolutePath() + "/tei",
corpusPath.getAbsolutePath() + "/raw", outputFile, null, 1.0);
}
/**
* Add the selected features to a full text example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return addFeaturesFulltext(corpusDir.getAbsolutePath() + "/tei",
corpusDir.getAbsolutePath() + "/raw",
trainingOutputPath,
evalOutputPath,
splitRatio);
}
public int addFeaturesFulltext(String sourceTEIPathLabel,
String sourceRawPathLabel,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceRawPathLabel: " + sourceRawPathLabel);
System.out.println("trainingOutputPath: " + trainingOutputPath);
System.out.println("evalOutputPath: " + evalOutputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// the file for writing the training data
/*OutputStream os2 = new FileOutputStream(outputPath);
Writer writer2 = new OutputStreamWriter(os2, "UTF8");*/
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
for (File tf : refFiles) {
String name = tf.getName();
LOGGER.info("Processing: " + name);
TEIFulltextSaxParser parser2 = new TEIFulltextSaxParser();
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
// removing the @newline
/*List<String> newLabeled = new ArrayList<String>();
for(String label : labeled) {
if (!label.startsWith("@newline"))
newLabeled.add(label);
}
labeled = newLabeled;*/
/*StringBuilder temp = new StringBuilder();
for(String label : labeled) {
temp.append(label);
}
FileUtils.writeStringToFile(new File("/tmp/expected-"+name+".txt"), temp.toString());*/
// we can now (try to) add the features
// we open the featured file
try {
File rawFile = new File(sourceRawPathLabel + File.separator +
name.replace(".tei.xml", ""));
if (!rawFile.exists()) {
LOGGER.error("The raw file does not exist: " + rawFile.getPath());
continue;
}
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(
rawFile), "UTF8"));
int q = 0; // current position in the TEI labeled list
StringBuilder fulltext = new StringBuilder();
String line;
int l = 0;
String previousTag = null;
int nbInvalid = 0;
while ((line = bis.readLine()) != null) {
if (line.trim().length() == 0)
continue;
// we could apply here some more check on the wellformedness of the line
//fulltext.append(line);
l++;
int ii = line.indexOf(' ');
String token = null;
if (ii != -1) {
token = line.substring(0, ii);
// unicode normalisation of the token - it should not be necessary if the training data
// has been generated by a recent version of grobid
token = UnicodeUtil.normaliseTextAndRemoveSpaces(token);
}
// boolean found = false;
// we get the label in the labelled data file for the same token
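// as in the other trainers, we scan forward a bounded window in the labeled data to
// re-synchronise with the raw token; on failure we fall back to the previous tag and
// count the line as invalid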
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
StringTokenizer st = new StringTokenizer(localLine, " ");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
// unicode normalisation of the token - it should not be necessary if the training data
// has been generated by a recent version of grobid
localToken = UnicodeUtil.normaliseTextAndRemoveSpaces(localToken);
if (localToken.equals(token)) {
String tag = st.nextToken();
fulltext.append(line).append(" ").append(tag);
previousTag = tag;
q = pp + 1;
nbInvalid = 0;
//pp = q + 10;
break;
}
}
if (pp - q > 5) {
LOGGER.warn(name + " / Fulltext trainer: TEI and raw file unsynchronized at raw line " + l + " : " + localLine);
nbInvalid++;
// let's reuse the latest tag
if (previousTag != null)
fulltext.append(line).append(" ").append(previousTag);
break;
}
}
if (nbInvalid > 20) {
// too many consecutive synchronization issues
break;
}
}
bis.close();
// format with features for sequence tagging...
if (nbInvalid < 10) {
if ((writer2 == null) && (writer3 != null))
writer3.write(fulltext.toString() + "\n");
if ((writer2 != null) && (writer3 == null))
writer2.write(fulltext.toString() + "\n");
else {
if (Math.random() <= splitRatio)
writer2.write(fulltext.toString() + "\n");
else
writer3.write(fulltext.toString() + "\n");
}
totalExamples++;
} else {
LOGGER.error(name + " / too many synchronization issues, file not used in training data and to be fixed!");
}
} catch (Exception e) {
LOGGER.error("Fail to open or process raw file", e);
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
LOGGER.error("An exception occured while running Grobid.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new FulltextTrainer());
System.out.println(AbstractTrainer.runEvaluation(new FulltextTrainer()));
System.exit(0);
}
}
// File: grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/WapitiTrainer.java (repo: grobid)
package org.grobid.trainer;
import org.grobid.core.GrobidModel;
import org.grobid.core.jni.WapitiModel;
import org.grobid.core.GrobidModels;
import java.math.BigDecimal;
import java.io.File;
public class WapitiTrainer implements GenericTrainer {
public static final String WAPITI = "wapiti";
// default training parameters (only exploited by Wapiti)
    protected double epsilon = 0.00001; // default size of the interval for the stopping criterion
    protected int window = 20; // default, similar to CRF++
    protected int nbMaxIterations = 2000; // default maximum number of training iterations
@Override
public void train(File template, File trainingData, File outputModel, int numThreads, GrobidModel model) {
train(template, trainingData, outputModel, numThreads, model, false);
}
@Override
public void train(File template, File trainingData, File outputModel, int numThreads, GrobidModel model, boolean incremental) {
System.out.println("\tepsilon: " + epsilon);
System.out.println("\twindow: " + window);
System.out.println("\tnb max iterations: " + nbMaxIterations);
System.out.println("\tnb threads: " + numThreads);
String incrementalBlock = "";
if (incremental) {
String inputModelPath = outputModel.getAbsolutePath();
if (inputModelPath.endsWith(".new"))
inputModelPath = inputModelPath.substring(0, inputModelPath.length()-4);
System.out.println("\tincremental training from: " + inputModelPath);
incrementalBlock += " -m " + inputModelPath;
}
WapitiModel.train(template, trainingData, outputModel, "--nthread " + numThreads +
// " --algo sgd-l1" +
" -e " + BigDecimal.valueOf(epsilon).toPlainString() +
" -w " + window +
" -i " + nbMaxIterations + incrementalBlock
);
}
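    // For illustration: with the default parameters above and 8 threads, the option string
    // handed to WapitiModel.train is "--nthread 8 -e 0.000010 -w 20 -i 2000" (BigDecimal
    // renders the epsilon in plain decimal notation); with incremental training enabled and
    // an existing model at, say, /opt/grobid/models/fulltext/model.wapiti (hypothetical path),
    // " -m /opt/grobid/models/fulltext/model.wapiti" is appended.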
@Override
public String getName() {
return WAPITI;
}
@Override
public void setEpsilon(double epsilon) {
this.epsilon = epsilon;
}
@Override
public void setWindow(int window) {
this.window = window;
}
@Override
public double getEpsilon() {
return epsilon;
}
@Override
public int getWindow() {
return window;
}
@Override
    public void setNbMaxIterations(int iterations) {
        this.nbMaxIterations = iterations;
}
@Override
public int getNbMaxIterations() {
return nbMaxIterations;
}
}
| 2,534 | 29.178571 | 131 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/SegmentationTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.trainer.sax.TEISegmentationSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.ArrayList;
import java.util.StringTokenizer;
import org.apache.commons.io.FileUtils;
public class SegmentationTrainer extends AbstractTrainer {
public SegmentationTrainer() {
super(GrobidModels.SEGMENTATION);
}
@Override
public int createCRFPPData(File corpusPath, File outputFile) {
return addFeaturesSegmentation(corpusPath.getAbsolutePath() + "/tei",
corpusPath.getAbsolutePath() + "/raw",
outputFile, null, 1.0);
}
/**
* Add the selected features for the segmentation model
*
* @param corpusDir path where corpus files are located
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return addFeaturesSegmentation(corpusDir.getAbsolutePath() + "/tei",
corpusDir.getAbsolutePath() + "/raw",
trainingOutputPath,
evalOutputPath,
splitRatio);
}
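    // For illustration: the corpus directory is expected to contain two parallel sub-folders,
    //   corpusDir/tei  -> the annotated files (*.tei.xml or *.tei)
    //   corpusDir/raw  -> the matching feature files, same base name without the ".tei.xml" suffix
    // e.g. corpusDir/tei/sample.pdf.tei.xml is aligned with corpusDir/raw/sample.pdf
    // (file names here are hypothetical).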
/**
* Add the selected features for the segmentation model
*
* @param sourceTEIPathLabel path to corpus TEI files
* @param sourceRawPathLabel path to corpus raw files
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return number of examples
*/
public int addFeaturesSegmentation(String sourceTEIPathLabel,
String sourceRawPathLabel,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceRawPathLabel: " + sourceRawPathLabel);
System.out.println("trainingOutputPath: " + trainingOutputPath);
System.out.println("evalOutputPath: " + evalOutputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml") || name.endsWith(".tei");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
for (File tf : refFiles) {
String name = tf.getName();
LOGGER.info("Processing: " + name);
TEISegmentationSaxParser parser2 = new TEISegmentationSaxParser();
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
// we can now add the features
// we open the featured file
try {
File theRawFile = new File(sourceRawPathLabel + File.separator + name.replace(".tei.xml", ""));
if (!theRawFile.exists()) {
LOGGER.error("The raw file does not exist: " + theRawFile.getPath());
continue;
}
// removing the @newline
/*List<String> newLabeled = new ArrayList<String>();
for(String label : labeled) {
if (!label.startsWith("@newline"))
newLabeled.add(label);
}
labeled = newLabeled;*/
/*StringBuilder temp = new StringBuilder();
for(String label : labeled) {
temp.append(label);
}
FileUtils.writeStringToFile(new File("/tmp/expected-"+name+".txt"), temp.toString());*/
int q = 0;
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(theRawFile), "UTF8"));
StringBuilder segmentation = new StringBuilder();
String line = null;
int l = 0;
String previousTag = null;
int nbInvalid = 0;
while ((line = bis.readLine()) != null) {
l++;
int ii = line.indexOf(' ');
String token = null;
if (ii != -1) {
token = line.substring(0, ii);
// unicode normalisation of the token - it should not be necessary if the training data
                            // has been generated by a recent version of grobid
token = UnicodeUtil.normaliseTextAndRemoveSpaces(token);
}
// we get the label in the labelled data file for the same token
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
StringTokenizer st = new StringTokenizer(localLine, " \t");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
// unicode normalisation of the token - it should not be necessary if the training data
                                // has been generated by a recent version of grobid
localToken = UnicodeUtil.normaliseTextAndRemoveSpaces(localToken);
if (localToken.equals(token)) {
String tag = st.nextToken();
segmentation.append(line).append(" ").append(tag);
previousTag = tag;
q = pp + 1;
nbInvalid = 0;
//pp = q + 10;
break;
}
}
if (pp - q > 5) {
//LOGGER.warn(name + " / Segmentation trainer: TEI and raw file unsynchronized at raw line " + l + " : " + localLine);
nbInvalid++;
// let's reuse the latest tag
if (previousTag != null)
segmentation.append(line).append(" ").append(previousTag);
break;
}
}
if (nbInvalid > 20) {
// too many consecutive synchronization issues
break;
}
}
bis.close();
                    if (nbInvalid < 10) {
                        if ((writer2 == null) && (writer3 != null))
                            writer3.write(segmentation.toString() + "\n");
                        else if ((writer2 != null) && (writer3 == null))
                            writer2.write(segmentation.toString() + "\n");
                        else {
                            if (Math.random() <= splitRatio)
                                writer2.write(segmentation.toString() + "\n");
                            else
                                writer3.write(segmentation.toString() + "\n");
                        }
                        totalExamples++;
                    } else {
                        LOGGER.warn(name + " / too many synchronization issues, file not used in training data and to be fixed!");
                    }
} catch (Exception e) {
                    LOGGER.error("Failed to open or process the raw file", e);
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}
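    // For illustration: a raw feature line such as
    //   "Introduction introduction I In Int ... LINESTART"   (token first, then its features)
    // is matched against a labeled TEI line such as "Introduction I-<body>", and the merged
    // training line is the raw line with the label appended:
    //   "Introduction introduction I In Int ... LINESTART I-<body>"
    // The feature values and the label shown here are hypothetical; only the token-first
    // convention matters for the merge.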
/**
     * Add the selected features to the segmentation model training data
*
* @param sourceTEIPathLabel path to TEI files
* @param sourceRawPathLabel path to raw files
* @param outputPath output train file
* @return number of examples
*/
/*public int addFeaturesSegmentation2(String sourceTEIPathLabel,
String sourceRawPathLabel,
File outputPath) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceRawPathLabel: " + sourceRawPathLabel);
System.out.println("outputPath: " + outputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = new FileOutputStream(outputPath);
Writer writer2 = new OutputStreamWriter(os2, "UTF8");
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
// int n = 0;
for (File tf : refFiles) {
String name = tf.getName();
System.out.println(name);
TEISegmentationSaxParser parser2 = new TEISegmentationSaxParser();
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
//totalExamples += parser2.n;
// we can now add the features
// we open the featured file
int q = 0;
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(
sourceRawPathLabel + File.separator + name.replace(".tei.xml", "")), "UTF8"));
StringBuilder segmentation = new StringBuilder();
String line;
// String lastTag = null;
while ((line = bis.readLine()) != null) {
int ii = line.indexOf(' ');
String token = null;
if (ii != -1)
token = line.substring(0, ii);
// boolean found = false;
// we get the label in the labelled data file for the same token
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
StringTokenizer st = new StringTokenizer(localLine, " ");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
if (localToken.equals(token)) {
String tag = st.nextToken();
segmentation.append(line).append(" ").append(tag);
// lastTag = tag;
// found = true;
q = pp + 1;
pp = q + 10;
}
}
if (pp - q > 5) {
break;
}
}
}
bis.close();
// format with features for sequence tagging...
writer2.write(segmentation.toString() + "\n");
}
writer2.close();
os2.close();
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}*/
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new SegmentationTrainer());
System.out.println(AbstractTrainer.runEvaluation(new SegmentationTrainer()));
System.exit(0);
}
}
| 14,845 | 41.907514 | 150 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/CitationTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorCitation;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.trainer.sax.TEICitationSaxParser;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.List;
/**
* @author Patrice Lopez
*/
public class CitationTrainer extends AbstractTrainer {
public CitationTrainer() {
super(GrobidModels.CITATION);
}
/**
     * Add the selected features to the citation model example set, using the default split
*
* @param corpusDir
* a path where corpus files are located
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir, final File evalDataPath) {
return createCRFPPData(corpusDir, evalDataPath, null, 1.0);
}
/**
* Add the selected features to the citations model example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
Lexicon lexicon = Lexicon.getInstance();
try {
System.out.println("sourcePathLabel: " + corpusDir);
if (trainingOutputPath != null)
System.out.println("outputPath for training data: " + trainingOutputPath);
if (evalOutputPath != null)
System.out.println("outputPath for evaluation data: " + evalOutputPath);
// we convert the tei files into the usual CRF label format
// we process all tei files in the output directory
final File[] refFiles = corpusDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml");
}
});
if (refFiles == null) {
throw new IllegalStateException("Folder " + corpusDir.getAbsolutePath()
+ " does not seem to contain training data. Please check");
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
List<OffsetPosition> journalsPositions;
List<OffsetPosition> abbrevJournalsPositions;
List<OffsetPosition> conferencesPositions;
List<OffsetPosition> publishersPositions;
List<OffsetPosition> locationsPositions;
List<OffsetPosition> collaborationsPositions;
List<OffsetPosition> identifiersPositions;
List<OffsetPosition> urlPositions;
int n = 0;
for (; n < refFiles.length; n++) {
final File teifile = refFiles[n];
String name = teifile.getName();
System.out.println(name);
final TEICitationSaxParser parser2 = new TEICitationSaxParser();
// get a new instance of parser
final SAXParser p = spf.newSAXParser();
p.parse(teifile, parser2);
final List<List<String>> allLabeled = parser2.getLabeledResult();
final List<List<LayoutToken>> allTokens = parser2.getTokensResult();
totalExamples += parser2.nbCitations;
// we can now add the features
for(int i=0; i<allTokens.size(); i++) {
// fix the offsets
int pos = 0;
for(LayoutToken token : allTokens.get(i)) {
token.setOffset(pos);
pos += token.getText().length();
}
journalsPositions = lexicon.tokenPositionsJournalNames(allTokens.get(i));
abbrevJournalsPositions = lexicon.tokenPositionsAbbrevJournalNames(allTokens.get(i));
conferencesPositions = lexicon.tokenPositionsConferenceNames(allTokens.get(i));
publishersPositions = lexicon.tokenPositionsPublisherNames(allTokens.get(i));
locationsPositions = lexicon.tokenPositionsLocationNames(allTokens.get(i));
collaborationsPositions = lexicon.tokenPositionsCollaborationNames(allTokens.get(i));
identifiersPositions = lexicon.tokenPositionsIdentifierPattern(allTokens.get(i));
urlPositions = lexicon.tokenPositionsUrlPattern(allTokens.get(i));
String citation = FeaturesVectorCitation.addFeaturesCitation(allTokens.get(i),
allLabeled.get(i), journalsPositions, abbrevJournalsPositions,
conferencesPositions, publishersPositions, locationsPositions,
collaborationsPositions, identifiersPositions, urlPositions);
                    if ( (writer2 == null) && (writer3 != null) )
                        writer3.write(citation + "\n \n");
                    else if ( (writer2 != null) && (writer3 == null) )
                        writer2.write(citation + "\n \n");
                    else {
                        if (Math.random() <= splitRatio)
                            writer2.write(citation + "\n \n");
                        else
                            writer3.write(citation + "\n \n");
                    }
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}
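    // For illustration: after the offset-fixing loop above, a token sequence like
    //   ["J", ".", " ", "Mol", ".", " ", "Biol", "."]
    // receives offsets [0, 1, 2, 3, 6, 7, 8, 12], i.e. each LayoutToken's offset is the total
    // length of the tokens preceding it. The Lexicon position look-ups
    // (tokenPositionsJournalNames and friends) rely on these offsets being consistent with
    // the token stream.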
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
Trainer trainer = new CitationTrainer();
AbstractTrainer.runTraining(trainer);
System.out.println(AbstractTrainer.runEvaluation(trainer));
System.exit(0);
}
}
| 6,667 | 33.020408 | 102 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/DateTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorDate;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.trainer.sax.TEIDateSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
public class DateTrainer extends AbstractTrainer {
public DateTrainer() {
super(GrobidModels.DATE);
}
/**
* Add the selected features to a date example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @return the total number of corpus items
*/
@Override
public int createCRFPPData(final File corpusDir, final File trainingOutputPath) {
return createCRFPPData(corpusDir, trainingOutputPath, null, 1.0);
}
/**
* Add the selected features to a date example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourcePathLabel: " + corpusDir);
if (trainingOutputPath != null)
System.out.println("outputPath for training data: " + trainingOutputPath);
if (evalOutputPath != null)
System.out.println("outputPath for evaluation data: " + evalOutputPath);
// we convert the tei files into the usual CRF label format
// we process all tei files in the output directory
final File[] refFiles = corpusDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml");
}
});
if (refFiles == null) {
throw new IllegalStateException("Folder " + corpusDir.getAbsolutePath()
+ " does not seem to contain training data. Please check");
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
int n = 0;
for (; n < refFiles.length; n++) {
final File teifile = refFiles[n];
String name = teifile.getName();
//System.out.println(name);
final TEIDateSaxParser parser2 = new TEIDateSaxParser();
// get a new instance of parser
final SAXParser p = spf.newSAXParser();
p.parse(teifile, parser2);
final List<String> labeled = parser2.getLabeledResult();
totalExamples += parser2.n;
// we can now add the features
String headerDates = FeaturesVectorDate.addFeaturesDate(labeled);
// format with features for sequence tagging...
// given the split ratio we write either in the training file or the evaluation file
String[] chunks = headerDates.split("\n \n");
for(int i=0; i<chunks.length; i++) {
String chunk = chunks[i];
if ( (writer2 == null) && (writer3 != null) )
writer3.write(chunk + "\n \n");
else if ( (writer2 != null) && (writer3 == null) )
writer2.write(chunk + "\n \n");
else {
if (Math.random() <= splitRatio)
writer2.write(chunk + "\n \n");
else
writer3.write(chunk + "\n \n");
}
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}
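    // For illustration: with splitRatio = 0.8 and both output files requested, each chunk
    // (one date example; examples are separated by "\n \n") independently goes to the
    // training file with probability 0.8 and to the evaluation file otherwise; if only one
    // of the two writers is open, every chunk goes to that writer.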
/**
* Command line execution.
*
* @param args
* Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
DateTrainer trainer = new DateTrainer();
AbstractTrainer.runTraining(trainer);
System.out.println(AbstractTrainer.runEvaluation(trainer));
System.exit(0);
}
}
| 4,884 | 28.251497 | 94 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/TrainerRunner.java
|
package org.grobid.trainer;
import org.apache.commons.lang3.StringUtils;
import org.grobid.core.utilities.GrobidProperties;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
/**
 * Command-line application for training a target model.
*
*/
public class TrainerRunner {
    private static final List<String> models = Arrays.asList("affiliation", "chemical", "date", "citation", "monograph", "fulltext", "header", "name-citation", "name-header", "patent-citation", "segmentation", "reference-segmenter", "figure", "table");
private static final List<String> options = Arrays.asList("0 - train", "1 - evaluate", "2 - split, train and evaluate", "3 - n-fold evaluation");
private enum RunType {
TRAIN, EVAL, SPLIT, EVAL_N_FOLD;
public static RunType getRunType(int i) {
for (RunType t : values()) {
if (t.ordinal() == i) {
return t;
}
}
throw new IllegalStateException("Unsupported RunType with ordinal " + i);
}
}
protected static void initProcess(final String path2GbdHome, final String path2GbdProperties) {
GrobidProperties.getInstance();
}
public static void main(String[] args) {
if (args.length < 4) {
throw new IllegalStateException(
"Usage: {" + String.join(", ", options) + "} {" + String.join(", ", models) + "} -gH /path/to/Grobid/home -s { [0.0 - 1.0] - split ratio, optional} -n {[int, num folds for n-fold evaluation, optional]}");
}
RunType mode = RunType.getRunType(Integer.parseInt(args[0]));
if ((mode == RunType.SPLIT || mode == RunType.EVAL_N_FOLD) && (args.length < 6)) {
throw new IllegalStateException(
"Usage: {" + String.join(", ", options) + "} {" + String.join(", ", models) + "} -gH /path/to/Grobid/home -s { [0.0 - 1.0] - split ratio, optional} -n {[int, num folds for n-fold evaluation, optional]}");
}
String path2GbdHome = null;
double split = 0.0;
int numFolds = 0;
String outputFilePath = null;
boolean incremental = false;
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-gH")) {
if (i + 1 == args.length) {
throw new IllegalStateException("Missing path to Grobid home. ");
}
path2GbdHome = args[i + 1];
} else if (args[i].equals("-s")) {
if (i + 1 == args.length) {
throw new IllegalStateException("Missing split ratio value. ");
}
try {
split = Double.parseDouble(args[i + 1]);
} catch (Exception e) {
throw new IllegalStateException("Invalid split value: " + args[i + 1]);
}
} else if (args[i].equals("-n")) {
if (i + 1 == args.length) {
throw new IllegalStateException("Missing number of folds value. ");
}
try {
numFolds = Integer.parseInt(args[i + 1]);
} catch (Exception e) {
throw new IllegalStateException("Invalid number of folds value: " + args[i + 1]);
}
} else if (args[i].equals("-o")) {
if (i + 1 == args.length) {
throw new IllegalStateException("Missing output file. ");
}
outputFilePath = args[i + 1];
} else if (args[i].equals("-i")) {
incremental = true;
}
}
if (path2GbdHome == null) {
throw new IllegalStateException(
"Grobid-home path not found.\n Usage: {" + String.join(", ", options) + "} {" + String.join(", ", models) + "} -gH /path/to/Grobid/home -s { [0.0 - 1.0] - split ratio, optional} -n {[int, num folds for n-fold evaluation, optional]}");
}
final String path2GbdProperties = path2GbdHome + File.separator + "config" + File.separator + "grobid.properties";
System.out.println("path2GbdHome=" + path2GbdHome + " path2GbdProperties=" + path2GbdProperties);
initProcess(path2GbdHome, path2GbdProperties);
String model = args[1];
AbstractTrainer trainer;
if (model.equals("affiliation") || model.equals("affiliation-address")) {
trainer = new AffiliationAddressTrainer();
} else if (model.equals("chemical")) {
trainer = new ChemicalEntityTrainer();
} else if (model.equals("date")) {
trainer = new DateTrainer();
} else if (model.equals("citation")) {
trainer = new CitationTrainer();
} else if (model.equals("monograph")) {
trainer = new MonographTrainer();
} else if (model.equals("fulltext")) {
trainer = new FulltextTrainer();
} else if (model.equals("header")) {
trainer = new HeaderTrainer();
} else if (model.equals("name-citation")) {
trainer = new NameCitationTrainer();
} else if (model.equals("name-header")) {
trainer = new NameHeaderTrainer();
} else if (model.equals("patent-citation")) {
trainer = new PatentParserTrainer();
} else if (model.equals("segmentation")) {
trainer = new SegmentationTrainer();
} else if (model.equals("reference-segmenter")) {
trainer = new ReferenceSegmenterTrainer();
} else if (model.equals("figure")) {
trainer = new FigureTrainer();
} else if (model.equals("table")) {
trainer = new TableTrainer();
} else {
throw new IllegalStateException("The model " + model + " is unknown.");
}
switch (mode) {
case TRAIN:
AbstractTrainer.runTraining(trainer, incremental);
break;
case EVAL:
System.out.println(AbstractTrainer.runEvaluation(trainer));
break;
case SPLIT:
System.out.println(AbstractTrainer.runSplitTrainingEvaluation(trainer, split, incremental));
break;
            case EVAL_N_FOLD:
                if (numFolds == 0) {
                    throw new IllegalArgumentException("N should be > 0");
                }
                String results = AbstractTrainer.runNFoldEvaluation(trainer, numFolds, incremental);
                if (StringUtils.isNotEmpty(outputFilePath)) {
                    Path outputPath = Paths.get(outputFilePath);
                    if (Files.exists(outputPath)) {
                        System.err.println("Output file exists, printing results to the console instead.");
                        System.out.println(results);
                    } else {
                        try {
                            Files.write(outputPath, results.getBytes(java.nio.charset.StandardCharsets.UTF_8));
                        } catch (java.io.IOException e) {
                            throw new IllegalStateException("Cannot write n-fold evaluation results to " + outputFilePath, e);
                        }
                    }
                } else {
                    System.out.println(results);
                }
                break;
default:
throw new IllegalStateException("Invalid RunType: " + mode.name());
}
System.exit(0);
}
}
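// Example invocations (sketch; the class path and grobid-home location are placeholders):
//   java -cp grobid-trainer.jar org.grobid.trainer.TrainerRunner 0 header -gH /path/to/grobid-home
//       -> train the header model
//   java -cp grobid-trainer.jar org.grobid.trainer.TrainerRunner 2 citation -gH /path/to/grobid-home -s 0.8
//       -> split 80/20, train and evaluate the citation model
//   java -cp grobid-trainer.jar org.grobid.trainer.TrainerRunner 3 segmentation -gH /path/to/grobid-home -n 10
//       -> 10-fold evaluation of the segmentation model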
| 7,078 | 40.397661 | 250 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/TrainerFactory.java
|
package org.grobid.trainer;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.GrobidModel;
import org.grobid.core.engines.tagging.GrobidCRFEngine;
public class TrainerFactory {
public static GenericTrainer getTrainer(GrobidModel model) {
switch (GrobidProperties.getGrobidCRFEngine(model)) {
case CRFPP:
return new CRFPPGenericTrainer();
case WAPITI:
return new WapitiTrainer();
case DELFT:
return new DeLFTTrainer();
case DUMMY:
return new DummyTrainer();
default:
throw new IllegalStateException("Unsupported GROBID sequence labelling engine: " + GrobidProperties.getGrobidCRFEngine(model));
}
}
}
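// For illustration, a minimal usage sketch (assumes the GROBID properties have been
// initialized, e.g. via GrobidProperties.getInstance(), so the engine configured for the
// model can be resolved):
//   GenericTrainer trainer = TrainerFactory.getTrainer(GrobidModels.CITATION);
//   System.out.println(trainer.getName());   // e.g. "wapiti" when the Wapiti engine is configured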
| 790 | 33.391304 | 143 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/ShorttextTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.trainer.sax.TEIFulltextSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.ArrayList;
import java.util.StringTokenizer;
/**
* Trainer class for the short text model, e.g. for abstracts
*
*/
public class ShorttextTrainer extends AbstractTrainer{
public ShorttextTrainer() {
super(GrobidModels.SHORTTEXT);
}
@Override
public int createCRFPPData(File corpusPath, File outputFile) {
        return addFeaturesShorttext(corpusPath.getAbsolutePath() + "/tei", corpusPath.getAbsolutePath() + "/shorttexts", outputFile);
}
/**
* Add the selected features to a full text example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return 0;
}
/**
     * Add the selected features to the short text model training data
* @param sourceTEIPathLabel path to TEI files
* @param sourceShorttextsPathLabel path to shorttexts
* @param outputPath output train file
* @return number of examples
*/
public int addFeaturesShorttext(String sourceTEIPathLabel,
String sourceShorttextsPathLabel,
File outputPath) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceShorttextsPathLabel: " + sourceShorttextsPathLabel);
System.out.println("outputPath: " + outputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = new FileOutputStream(outputPath);
Writer writer2 = new OutputStreamWriter(os2, "UTF8");
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
for (File tf : refFiles) {
String name = tf.getName();
System.out.println(name);
                // the full text SAX parser also covers the short texts
TEIFulltextSaxParser parser2 = new TEIFulltextSaxParser();
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
// we can now add the features
// we open the featured file
int q = 0;
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(
sourceShorttextsPathLabel + File.separator +
name.replace(".tei.xml", "")), "UTF8"));
StringBuilder shorttext = new StringBuilder();
String line;
while ((line = bis.readLine()) != null) {
int ii = line.indexOf(' ');
String token = null;
if (ii != -1)
token = line.substring(0, ii);
// we get the label in the labelled data file for the same token
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
StringTokenizer st = new StringTokenizer(localLine, " ");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
if (localToken.equals(token)) {
String tag = st.nextToken();
shorttext.append(line).append(" ").append(tag);
// lastTag = tag;
// found = true;
q = pp + 1;
pp = q + 10;
}
}
if (pp - q > 5) {
break;
}
}
}
bis.close();
// format with features for sequence tagging...
writer2.write(shorttext.toString() + "\n");
}
writer2.close();
os2.close();
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running short text training.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new ShorttextTrainer());
System.out.println(AbstractTrainer.runEvaluation(new ShorttextTrainer()));
System.exit(0);
}
}
| 6,074 | 35.377246 | 115 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/PatentParserTrainer.java
|
package org.grobid.trainer;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.*;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorReference;
import org.grobid.core.sax.MarecSaxParser;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.trainer.evaluation.PatentEvaluation;
public class PatentParserTrainer extends AbstractTrainer {
    // the window value indicates the right and left context of text to consider for an annotation when building
    // the training or the test data - the value is set empirically
    // this window is used to maintain a certain level of over-sampling of the patent and NPL references, and to
    // avoid having the citation annotations too diluted, because they are very rare (less than 1 token per 1000)
private static final int trainWindow = 200;
public PatentParserTrainer() {
super(GrobidModels.PATENT_CITATION);
}
public int createTrainingData(String trainingDataDir) {
int nb = 0;
try {
String path = new File(new File(getFilePath2Resources(),
"dataset/patent/corpus/").getAbsolutePath()).getAbsolutePath();
createDataSet(null, path, trainingDataDir, 0);
} catch (Exception e) {
throw new GrobidException("An exception occurred while training Grobid.", e);
}
return nb;
}
/**
     * Add the selected features to the patent citation model training data
*
* @param corpusDir
* a path where corpus files are located
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir, final File modelOutputPath) {
return createCRFPPData(corpusDir, modelOutputPath, null, 1.0);
}
/**
     * Add the selected features to the patent citation model training data
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(File corpusPath, File trainingOutputPath, File evalOutputPath, double splitRatio) {
int totalExamples = 0;
int nbFiles = 0;
int nbNPLRef = 0;
int nbPatentRef = 0;
int maxRef = 0;
int srCitations = 0;
int previousSrCitations = 0;
int withSR = 0;
try {
System.out.println("sourcePathLabel: " + corpusPath);
if (trainingOutputPath != null)
System.out.println("outputPath for training data: " + trainingOutputPath);
if (evalOutputPath != null)
System.out.println("outputPath for evaluation data: " + evalOutputPath);
// we convert the xml files into the usual CRF label format
// we process all xml files in the output directory
final File[] refFiles = corpusPath.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml");
}
});
if (refFiles == null) {
throw new IllegalStateException("Folder " + corpusPath.getAbsolutePath()
+ " does not seem to contain training data. Please check");
}
System.out.println(refFiles.length + " xml files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setValidating(false);
spf.setFeature("http://xml.org/sax/features/namespaces", false);
spf.setFeature("http://xml.org/sax/features/validation", false);
List<OffsetPosition> journalsPositions = null;
List<OffsetPosition> abbrevJournalsPositions = null;
List<OffsetPosition> conferencesPositions = null;
List<OffsetPosition> publishersPositions = null;
int n = 0;
nbFiles = refFiles.length;
for (; n < refFiles.length; n++) {
final File xmlfile = refFiles[n];
String name = xmlfile.getName();
System.out.println(name);
// Patent + NPL REF. textual data (the "all" model)
MarecSaxParser sax = new MarecSaxParser();
sax.patentReferences = true;
sax.nplReferences = true;
sax.setN(trainWindow);
// get a new instance of parser
final SAXParser p = spf.newSAXParser();
p.parse(xmlfile, sax);
nbNPLRef += sax.getNbNPLRef();
nbPatentRef += sax.getNbPatentRef();
if (sax.nbAllRef > maxRef) {
maxRef = sax.nbAllRef;
}
if (sax.citations != null) {
if (sax.citations.size() > previousSrCitations) {
previousSrCitations = sax.citations.size();
withSR++;
}
}
journalsPositions = sax.journalsPositions;
abbrevJournalsPositions = sax.abbrevJournalsPositions;
conferencesPositions = sax.conferencesPositions;
publishersPositions = sax.publishersPositions;
//totalLength += sax.totalLength;
Writer writer = null;
                if ( (writer2 == null) && (writer3 != null) )
                    writer = writer3;
                else if ( (writer2 != null) && (writer3 == null) )
                    writer = writer2;
                else {
                    if (Math.random() <= splitRatio)
                        writer = writer2;
                    else
                        writer = writer3;
                }
if (sax.accumulatedText != null) {
String text = sax.accumulatedText.toString();
// add features for patent+NPL
addFeatures(text,
writer,
journalsPositions,
abbrevJournalsPositions,
conferencesPositions,
publishersPositions);
writer.write("\n \n");
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
System.out.println("\nNumber of references: " + (nbNPLRef + nbPatentRef));
System.out.println("Number of patent references: " + nbPatentRef);
System.out.println("Number of NPL references: " + nbNPLRef);
//System.out.println("Number of search report citations: " + srCitations);
System.out.println("Average number of references: " +
TextUtilities.formatTwoDecimals((double) (nbNPLRef + nbPatentRef) / nbFiles));
System.out.println("Max number of references in file: " + maxRef +"\n");
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}
/* public void train() {
createTrainingData(GrobidProperties.getTempPath().getAbsolutePath());
// String path = new File(new File("resources/dataset/patent/crfpp-templates/").getAbsolutePath()).getAbsolutePath();
    // train the resulting training files with features (based on an external command line, no JNI
// binding for the training functions of CRF++)
//File trainingDataPath1 = new File(GrobidProperties.getTempPath() + "/npl.train");
//File trainingDataPath2 = new File(GrobidProperties.getTempPath() + "/patent.train");
File trainingDataPath3 = new File(GrobidProperties.getTempPath() + "/all.train");
// File templatePath1 = new File(getFilePath2Resources(), "dataset/patent/crfpp-templates/text.npl.references.template");
//File templatePath2 = new File(getFilePath2Resources(), "dataset/patent/crfpp-templates/text.patent.references.template");
File templatePath3 =
new File(getFilePath2Resources(), "dataset/patent/crfpp-templates/text.references.template");
GenericTrainer trainer = TrainerFactory.getTrainer(model);
trainer.setEpsilon(GrobidProperties.getEpsilon(model));
trainer.setWindow(GrobidProperties.getWindow(model));
trainer.setNbMaxIterations(GrobidProperties.getNbMaxIterations(model));
//File modelPath1 = new File(GrobidProperties.getModelPath(GrobidModels.PATENT_NPL).getAbsolutePath() + NEW_MODEL_EXT);
//File modelPath2 = new File(GrobidProperties.getModelPath(GrobidModels.PATENT_PATENT).getAbsolutePath() + NEW_MODEL_EXT);
File modelPath3 =
new File(GrobidProperties.getModelPath(GrobidModels.PATENT_ALL).getAbsolutePath() + NEW_MODEL_EXT);
//trainer.train(templatePath1, trainingDataPath1, modelPath1, GrobidProperties.getNBThreads());
//trainer.train(templatePath2, trainingDataPath2, modelPath2, GrobidProperties.getNBThreads());
trainer.train(templatePath3, trainingDataPath3, modelPath3, GrobidProperties.getNBThreads(), model);
//renaming
//renameModels(GrobidProperties.getModelPath(GrobidModels.PATENT_NPL), modelPath1);
//renameModels(GrobidProperties.getModelPath(GrobidModels.PATENT_PATENT), modelPath2);
renameModels(GrobidProperties.getModelPath(GrobidModels.PATENT_ALL), modelPath3);
}
*/
/**
     * Create the training and evaluation sets from the annotated examples, with
     * extraction of the citations found in the patent description body.
*
* @param type type of data to be created, 0 is training data, 1 is evaluation data
*/
public void createDataSet(String setName, String corpusPath, String outputPath, int type) {
int nbFiles = 0;
int nbNPLRef = 0;
int nbPatentRef = 0;
int maxRef = 0;
try {
// we use a SAX parser on the patent XML files
MarecSaxParser sax = new MarecSaxParser();
sax.patentReferences = true;
sax.nplReferences = true;
int srCitations = 0;
int previousSrCitations = 0;
int withSR = 0;
List<OffsetPosition> journalsPositions = null;
List<OffsetPosition> abbrevJournalsPositions = null;
List<OffsetPosition> conferencesPositions = null;
List<OffsetPosition> publishersPositions = null;
if (type == 0) {
// training set
sax.setN(trainWindow);
} else {
                // for the test set we enlarge the focus window to include the whole document.
sax.setN(-1);
}
// get a factory
SAXParserFactory spf = SAXParserFactory.newInstance();
spf.setValidating(false);
spf.setFeature("http://xml.org/sax/features/namespaces", false);
spf.setFeature("http://xml.org/sax/features/validation", false);
LinkedList<File> fileList = new LinkedList<File>();
            // both the named and unnamed set start from the same corpus root
            fileList.add(new File(corpusPath));
Writer writer = null;
if ((setName == null) || (setName.length() == 0)) {
writer = new OutputStreamWriter(new FileOutputStream(
new File(outputPath + File.separator + "all.train"), false), "UTF-8");
} else {
writer = new OutputStreamWriter(new FileOutputStream(
new File(outputPath + File.separator + "all." + setName), false), "UTF-8");
}
//int totalLength = 0;
while (fileList.size() > 0) {
File file = fileList.removeFirst();
if (file.isDirectory()) {
for (File subFile : file.listFiles()) {
fileList.addLast(subFile);
}
} else {
if (file.getName().endsWith(".xml")) {
nbFiles++;
try {
//get a new instance of parser
SAXParser p = spf.newSAXParser();
FileInputStream in = new FileInputStream(file);
sax.setFileName(file.toString());
p.parse(in, sax);
//writer3.write("\n");
nbNPLRef += sax.getNbNPLRef();
nbPatentRef += sax.getNbPatentRef();
if (sax.nbAllRef > maxRef) {
maxRef = sax.nbAllRef;
}
if (sax.citations != null) {
if (sax.citations.size() > previousSrCitations) {
previousSrCitations = sax.citations.size();
withSR++;
}
}
journalsPositions = sax.journalsPositions;
abbrevJournalsPositions = sax.abbrevJournalsPositions;
conferencesPositions = sax.conferencesPositions;
publishersPositions = sax.publishersPositions;
//totalLength += sax.totalLength;
if (sax.accumulatedText != null) {
String text = sax.accumulatedText.toString();
// add features for patent+NPL
addFeatures(text,
writer,
journalsPositions,
abbrevJournalsPositions,
conferencesPositions,
publishersPositions);
writer.write("\n");
}
} catch (Exception e) {
                            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
}
}
if (sax.citations != null) {
srCitations += sax.citations.size();
}
if (setName != null) {
System.out.println(setName + "ing on " + nbFiles + " files");
} else {
System.out.println("training on " + nbFiles + " files");
}
//System.out.println("Number of file with search report: " + withSR);
System.out.println("Number of references: " + (nbNPLRef + nbPatentRef));
System.out.println("Number of patent references: " + nbPatentRef);
System.out.println("Number of NPL references: " + nbNPLRef);
//System.out.println("Number of search report citations: " + srCitations);
System.out.println("Average number of references: " +
TextUtilities.formatTwoDecimals((double) (nbNPLRef + nbPatentRef) / nbFiles));
System.out.println("Max number of references in file: " + maxRef);
if ((setName == null) || (setName.length() == 0)) {
System.out.println("common data set under: " + outputPath + "/all.train");
} else {
System.out.println("common data set under: " + outputPath + "/all." + setName);
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
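    // Illustration (hypothetical helper, relying on this file's java.io/java.util imports):
    // the file collection above is an iterative traversal with a LinkedList used as a work
    // queue; a minimal equivalent in isolation:
    static List<File> collectXmlFiles(File root) {
        List<File> result = new ArrayList<File>();
        LinkedList<File> queue = new LinkedList<File>();
        queue.add(root);
        while (!queue.isEmpty()) {
            File f = queue.removeFirst();
            if (f.isDirectory()) {
                File[] children = f.listFiles();
                if (children != null)
                    for (File c : children)
                        queue.addLast(c);   // descend into sub-directories breadth-first
            } else if (f.getName().endsWith(".xml")) {
                result.add(f);              // keep only the patent XML files
            }
        }
        return result;
    }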
public void addFeatures(String text,
Writer writer,
List<OffsetPosition> journalPositions,
List<OffsetPosition> abbrevJournalPositions,
List<OffsetPosition> conferencePositions,
List<OffsetPosition> publisherPositions) {
try {
String line;
StringTokenizer st = new StringTokenizer(text, "\n");
int totalLine = st.countTokens();
int posit = 0;
int currentJournalPositions = 0;
int currentAbbrevJournalPositions = 0;
int currentConferencePositions = 0;
int currentPublisherPositions = 0;
boolean isJournalToken;
boolean isAbbrevJournalToken;
boolean isConferenceToken;
boolean isPublisherToken;
boolean skipTest;
while (st.hasMoreTokens()) {
isJournalToken = false;
isAbbrevJournalToken = false;
isConferenceToken = false;
isPublisherToken = false;
skipTest = false;
line = st.nextToken();
if (line.trim().length() == 0) {
writer.write("\n");
posit = 0;
continue;
} else if (line.endsWith("\t<ignore>")) {
posit++;
continue;
}
// check the position of matches for journals
if (journalPositions != null) {
if (currentJournalPositions == journalPositions.size() - 1) {
if (journalPositions.get(currentJournalPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentJournalPositions; i < journalPositions.size(); i++) {
if ((journalPositions.get(i).start <= posit) &&
(journalPositions.get(i).end >= posit)) {
isJournalToken = true;
currentJournalPositions = i;
break;
} else if (journalPositions.get(i).start > posit) {
isJournalToken = false;
currentJournalPositions = i;
break;
}
}
}
}
// check the position of matches for abbreviated journals
skipTest = false;
if (abbrevJournalPositions != null) {
if (currentAbbrevJournalPositions == abbrevJournalPositions.size() - 1) {
if (abbrevJournalPositions.get(currentAbbrevJournalPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentAbbrevJournalPositions; i < abbrevJournalPositions.size(); i++) {
if ((abbrevJournalPositions.get(i).start <= posit) &&
(abbrevJournalPositions.get(i).end >= posit)) {
isAbbrevJournalToken = true;
currentAbbrevJournalPositions = i;
break;
} else if (abbrevJournalPositions.get(i).start > posit) {
isAbbrevJournalToken = false;
currentAbbrevJournalPositions = i;
break;
}
}
}
}
// check the position of matches for conferences
skipTest = false;
if (conferencePositions != null) {
if (currentConferencePositions == conferencePositions.size() - 1) {
if (conferencePositions.get(currentConferencePositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentConferencePositions; i < conferencePositions.size(); i++) {
if ((conferencePositions.get(i).start <= posit) &&
(conferencePositions.get(i).end >= posit)) {
isConferenceToken = true;
currentConferencePositions = i;
break;
} else if (conferencePositions.get(i).start > posit) {
isConferenceToken = false;
currentConferencePositions = i;
break;
}
}
}
}
// check the position of matches for publishers
skipTest = false;
if (publisherPositions != null) {
if (currentPublisherPositions == publisherPositions.size() - 1) {
if (publisherPositions.get(currentPublisherPositions).end < posit) {
skipTest = true;
}
}
if (!skipTest) {
for (int i = currentPublisherPositions; i < publisherPositions.size(); i++) {
if ((publisherPositions.get(i).start <= posit) &&
(publisherPositions.get(i).end >= posit)) {
isPublisherToken = true;
currentPublisherPositions = i;
break;
} else if (publisherPositions.get(i).start > posit) {
isPublisherToken = false;
currentPublisherPositions = i;
break;
}
}
}
}
FeaturesVectorReference featuresVector =
FeaturesVectorReference.addFeaturesPatentReferences(line,
totalLine,
posit,
isJournalToken,
isAbbrevJournalToken,
isConferenceToken,
isPublisherToken);
if (featuresVector.label == null)
continue;
writer.write(featuresVector.printVector());
writer.flush();
posit++;
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
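    // Illustration (hypothetical helper): the four look-ups above are all instances of one
    // pattern, a linear scan over OffsetPosition intervals sorted by start offset, with a
    // cursor kept across calls so the whole document is scanned only once; shown here in
    // isolation.
    static boolean inPositions(List<OffsetPosition> positions, int posit, int[] cursor) {
        // cursor[0] holds the index of the last interval considered for the previous position
        for (int i = cursor[0]; i < positions.size(); i++) {
            if (positions.get(i).start <= posit && positions.get(i).end >= posit) {
                cursor[0] = i;
                return true;    // posit falls inside the i-th matched span
            } else if (positions.get(i).start > posit) {
                cursor[0] = i;
                return false;   // all remaining spans start after posit
            }
        }
        return false;
    }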
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
Trainer trainer = new PatentParserTrainer();
AbstractTrainer.runTraining(trainer);
System.out.println(AbstractTrainer.runEvaluation(trainer));
System.exit(0);
}
}
| 24,744 | 44.739372 | 131 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/NameHeaderTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.features.FeaturesVectorName;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.trainer.sax.TEIAuthorSaxParser;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.*;
public class NameHeaderTrainer extends AbstractTrainer {
public NameHeaderTrainer() {
super(GrobidModels.NAMES_HEADER);
}
/**
     * Add the selected features to the name model training data for headers
*
* @param corpusDir
* a path where corpus files are located
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir, final File modelOutputPath) {
return createCRFPPData(corpusDir, modelOutputPath, null, 1.0);
}
/**
     * Add the selected features to the name model training data for headers
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourcePathLabel: " + corpusDir);
if (trainingOutputPath != null)
System.out.println("outputPath for training data: " + trainingOutputPath);
if (evalOutputPath != null)
System.out.println("outputPath for evaluation data: " + evalOutputPath);
// we convert the tei files into the usual CRF label format
// we process all tei files in the output directory
final File[] refFiles = corpusDir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml");
}
});
if (refFiles == null) {
throw new IllegalStateException("Folder " + corpusDir.getAbsolutePath()
+ " does not seem to contain training data. Please check");
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
List<OffsetPosition> titlePositions = null;
List<OffsetPosition> suffixPositions = null;
int n = 0;
for (; n < refFiles.length; n++) {
final File teifile = refFiles[n];
String name = teifile.getName();
System.out.println(name);
final TEIAuthorSaxParser parser2 = new TEIAuthorSaxParser();
// get a new instance of parser
final SAXParser p = spf.newSAXParser();
p.parse(teifile, parser2);
final List<List<String>> allLabeled = parser2.getLabeledResult();
final List<List<LayoutToken>> allTokens = parser2.getTokensResult();
totalExamples += parser2.n;
// we can now add the features
for(int i=0; i<allTokens.size(); i++) {
// fix the offsets
int pos = 0;
for(LayoutToken token : allTokens.get(i)) {
token.setOffset(pos);
pos += token.getText().length();
}
titlePositions = Lexicon.getInstance().tokenPositionsPersonTitle(allTokens.get(i));
suffixPositions = Lexicon.getInstance().tokenPositionsPersonSuffix(allTokens.get(i));
final String names = FeaturesVectorName.addFeaturesName(allTokens.get(i),
allLabeled.get(i), titlePositions, suffixPositions);
                    if ( (writer2 == null) && (writer3 != null) )
                        writer3.write(names + "\n \n");
                    else if ( (writer2 != null) && (writer3 == null) )
                        writer2.write(names + "\n \n");
                    else {
                        if (Math.random() <= splitRatio)
                            writer2.write(names + "\n \n");
                        else
                            writer3.write(names + "\n \n");
                    }
}
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occurred while running Grobid.", e);
}
return totalExamples;
}
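    // For illustration: for a hypothetical name sequence like "Prof. John A. Smith Jr.", the
    // two look-ups above would typically flag "Prof." via tokenPositionsPersonTitle and "Jr."
    // via tokenPositionsPersonSuffix; the resulting OffsetPosition lists are passed to
    // FeaturesVectorName.addFeaturesName so the corresponding tokens carry title/suffix features.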
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
Trainer trainer = new NameHeaderTrainer();
AbstractTrainer.runTraining(trainer);
System.out.println(AbstractTrainer.runEvaluation(trainer));
System.exit(0);
}
}
| 5,368 | 30.034682 | 99 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/TableTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.trainer.sax.TEIFigureSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.List;
import java.util.StringTokenizer;
public class TableTrainer extends AbstractTrainer {
public TableTrainer() {
super(GrobidModels.TABLE);
}
@Override
public int createCRFPPData(File corpusPath, File outputFile) {
return addFeaturesTable(corpusPath.getAbsolutePath() + "/tei",
corpusPath.getAbsolutePath() + "/raw",
outputFile, null, 1.0);
}
/**
* Add the selected features for the table model
*
* @param corpusDir path where corpus files are located
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return addFeaturesTable(corpusDir.getAbsolutePath() + "/tei",
corpusDir.getAbsolutePath() + "/raw",
trainingOutputPath,
evalOutputPath,
splitRatio);
}
/**
* Add the selected features for the table model
*
* @param sourceTEIPathLabel path to corpus TEI files
* @param sourceRawPathLabel path to corpus raw files
* @param trainingOutputPath path where to store the temporary training data
* @param evalOutputPath path where to store the temporary evaluation data
* @param splitRatio ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return number of examples
*/
public int addFeaturesTable(String sourceTEIPathLabel,
String sourceRawPathLabel,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
int totalExamples = 0;
try {
System.out.println("sourceTEIPathLabel: " + sourceTEIPathLabel);
System.out.println("sourceRawPathLabel: " + sourceRawPathLabel);
System.out.println("trainingOutputPath: " + trainingOutputPath);
System.out.println("evalOutputPath: " + evalOutputPath);
// we need first to generate the labeled files from the TEI annotated files
File input = new File(sourceTEIPathLabel);
// we process all tei files in the output directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei.xml") || name.endsWith(".tei");
}
});
if (refFiles == null) {
return 0;
}
System.out.println(refFiles.length + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
// get a factory for SAX parser
SAXParserFactory spf = SAXParserFactory.newInstance();
for (File tf : refFiles) {
String name = tf.getName();
System.out.println(name);
// the full text SAX parser can be reused for the tables
TEIFigureSaxParser parser2 = new TEIFigureSaxParser();
//parser2.setMode(TEIFulltextSaxParser.TABLE);
//get a new instance of parser
SAXParser p = spf.newSAXParser();
p.parse(tf, parser2);
List<String> labeled = parser2.getLabeledResult();
//totalExamples += parser2.n;
// we can now add the features
// we open the featured file
File theRawFile = new File(sourceRawPathLabel + File.separator + name.replace(".tei.xml", ""));
if (!theRawFile.exists()) {
System.out.println("Raw file " + theRawFile + " does not exist. Please have a look!");
continue;
}
int q = 0;
                BufferedReader bis = new BufferedReader(
                        new InputStreamReader(new FileInputStream(theRawFile), "UTF8"));
StringBuilder table = new StringBuilder();
String line;
while ((line = bis.readLine()) != null) {
if (line.trim().length() < 2) {
table.append("\n");
}
int ii = line.indexOf('\t');
if (ii == -1) {
ii = line.indexOf(' ');
}
String token = null;
if (ii != -1) {
token = line.substring(0, ii);
// unicode normalisation of the token - it should not be necessary if the training data
                        // has been generated by a recent version of grobid
token = UnicodeUtil.normaliseTextAndRemoveSpaces(token);
}
// boolean found = false;
// we get the label in the labelled data file for the same token
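                    // the feature file and the labeled file are assumed to be nearly
                    // synchronised token streams: the search starts at the current
                    // position q and gives up after looking a few lines ahead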
for (int pp = q; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
if (localLine.length() == 0) {
q = pp + 1;
continue;
}
StringTokenizer st = new StringTokenizer(localLine, " \t");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
// unicode normalisation of the token - it should not be necessary if the training data
                            // has been generated by a recent version of grobid
localToken = UnicodeUtil.normaliseTextAndRemoveSpaces(localToken);
if (localToken.equals(token)) {
String tag = st.nextToken();
                                line = line.replace("\t", " ").replace("  ", " ");
table.append(line).append(" ").append(tag);
q = pp + 1;
pp = q + 10;
}
}
if (pp - q > 5) {
break;
}
}
}
bis.close();
                if ((writer2 == null) && (writer3 != null))
                    writer3.write(table.toString() + "\n");
                else if ((writer2 != null) && (writer3 == null))
                    writer2.write(table.toString() + "\n");
                else {
                    if (Math.random() <= splitRatio)
                        writer2.write(table.toString() + "\n");
                    else
                        writer3.write(table.toString() + "\n");
                }
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
throw new GrobidException("An exception occured while running training for the table model.", e);
}
return totalExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new TableTrainer());
System.out.println(AbstractTrainer.runEvaluation(new TableTrainer()));
System.exit(0);
}
}
| 9,108 | 40.784404 | 115 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/HeaderTrainer.java
|
package org.grobid.trainer;
import org.grobid.core.GrobidModels;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.trainer.sax.TEIHeaderSaxParser;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.*;
import java.util.ArrayList;
import java.util.StringTokenizer;
public class HeaderTrainer extends AbstractTrainer{
public HeaderTrainer() {
super(GrobidModels.HEADER);
}
@Override
public int createCRFPPData(File corpusPath, File trainingOutputPath) {
return addFeaturesHeaders(corpusPath.getAbsolutePath() + "/tei",
corpusPath.getAbsolutePath() + "/raw",
trainingOutputPath, null, 1.0);
}
/**
* Add the selected features to a header example set
*
* @param corpusDir
* a path where corpus files are located
* @param trainingOutputPath
* path where to store the temporary training data
* @param evalOutputPath
* path where to store the temporary evaluation data
* @param splitRatio
* ratio to consider for separating training and evaluation data, e.g. 0.8 for 80%
* @return the total number of used corpus items
*/
@Override
public int createCRFPPData(final File corpusDir,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
return addFeaturesHeaders(corpusDir.getAbsolutePath() + "/tei",
corpusDir.getAbsolutePath() + "/raw",
trainingOutputPath,
evalOutputPath,
splitRatio);
}
/**
* Add the selected features to the header model training
     * @param sourceFile path to the corpus TEI files
     * @param headerPath path to the header raw files
     * @param trainingOutputPath output training file
     * @param evalOutputPath output evaluation file
     * @param splitRatio ratio to consider for separating training and evaluation data
     * @return number of corpus files
*/
public int addFeaturesHeaders(String sourceFile,
String headerPath,
final File trainingOutputPath,
final File evalOutputPath,
double splitRatio) {
        System.out.println("TEI files: " + sourceFile);
        System.out.println("header info files: " + headerPath);
        if (trainingOutputPath != null)
            System.out.println("outputPath for training data: " + trainingOutputPath);
        if (evalOutputPath != null)
            System.out.println("outputPath for evaluation data: " + evalOutputPath);
int nbExamples = 0;
try {
File pathh = new File(sourceFile);
// we process all tei files in the output directory
File[] refFiles = pathh.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
                    return name.endsWith(".tei") || name.endsWith(".tei.xml");
}
});
if (refFiles == null)
return 0;
nbExamples = refFiles.length;
System.out.println(nbExamples + " tei files");
// the file for writing the training data
OutputStream os2 = null;
Writer writer2 = null;
if (trainingOutputPath != null) {
os2 = new FileOutputStream(trainingOutputPath);
writer2 = new OutputStreamWriter(os2, "UTF8");
}
// the file for writing the evaluation data
OutputStream os3 = null;
Writer writer3 = null;
if (evalOutputPath != null) {
os3 = new FileOutputStream(evalOutputPath);
writer3 = new OutputStreamWriter(os3, "UTF8");
}
for (File teifile : refFiles) {
String name = teifile.getName();
System.out.println(name);
TEIHeaderSaxParser parser2 = new TEIHeaderSaxParser();
parser2.setFileName(name);
// get a factory
SAXParserFactory spf = SAXParserFactory.newInstance();
//get a new instance of parser
SAXParser par = spf.newSAXParser();
par.parse(teifile, parser2);
ArrayList<String> labeled = parser2.getLabeledResult();
//System.out.println(labeled);
//System.out.println(parser2.getPDFName()+"._");
File refDir2 = new File(headerPath);
String headerFile = null;
File[] refFiles2 = refDir2.listFiles();
for (File aRefFiles2 : refFiles2) {
String localFileName = aRefFiles2.getName();
if (localFileName.equals(parser2.getPDFName() + ".header") ||
localFileName.equals(parser2.getPDFName() + ".training.header")) {
headerFile = localFileName;
break;
}
if ((localFileName.startsWith(parser2.getPDFName() + "._")) &&
(localFileName.endsWith(".header") || localFileName.endsWith(".training.header") )) {
headerFile = localFileName;
break;
}
}
if (headerFile == null)
continue;
String pathHeader = headerPath + File.separator + headerFile;
int p = 0;
BufferedReader bis = new BufferedReader(
new InputStreamReader(new FileInputStream(pathHeader), "UTF8"));
StringBuilder header = new StringBuilder();
String line;
while ((line = bis.readLine()) != null) {
header.append(line);
int ii = line.indexOf(' ');
String token = null;
if (ii != -1) {
token = line.substring(0, ii);
// unicode normalisation of the token - it should not be necessary if the training data
                        // has been generated by a recent version of grobid
token = UnicodeUtil.normaliseTextAndRemoveSpaces(token);
}
// we get the label in the labelled data file for the same token
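                    // same forward-scan alignment as in the other trainers: start at the
                    // current position p and give up after a few non-matching lines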
for (int pp = p; pp < labeled.size(); pp++) {
String localLine = labeled.get(pp);
StringTokenizer st = new StringTokenizer(localLine, " ");
if (st.hasMoreTokens()) {
String localToken = st.nextToken();
// unicode normalisation of the token - it should not be necessary if the training data
                                // has been generated by a recent version of grobid
localToken = UnicodeUtil.normaliseTextAndRemoveSpaces(localToken);
if (localToken.equals(token)) {
String tag = st.nextToken();
header.append(" ").append(tag);
p = pp + 1;
pp = p + 10;
} /*else {
System.out.println("feature:"+token + " / tei:" + localToken);
}*/
}
if (pp - p > 5) {
break;
}
}
header.append("\n");
}
bis.close();
                // post process for ensuring continuous labelling
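                // a token line is kept only when it carries a label; an unlabeled line
                // squeezed between two lines sharing the same label inherits that label,
                // any other unlabeled line is dropped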
StringBuilder header2 = new StringBuilder();
String headerStr = header.toString();
StringTokenizer sto = new StringTokenizer(headerStr, "\n");
String lastLabel = null;
String lastLastLabel = null;
String previousLine = null;
while (sto.hasMoreTokens()) {
String linee = sto.nextToken();
StringTokenizer sto2 = new StringTokenizer(linee, " ");
String label = null;
while (sto2.hasMoreTokens()) {
label = sto2.nextToken();
}
if (label != null) {
if (label.length() > 0) {
                            if (!((label.charAt(0) == '<') || (label.startsWith("I-<")))) {
label = null;
}
}
}
if (previousLine != null) {
                        if ((label != null) && (lastLabel == null) && (lastLastLabel != null)) {
if (label.equals(lastLastLabel)) {
lastLabel = label;
previousLine += " " + label;
header2.append(previousLine);
header2.append("\n");
} else {
//if (lastLabel == null)
// previousLine += " <note>";
if (lastLabel != null) {
header2.append(previousLine);
header2.append("\n");
}
}
} else {
//if (lastLabel == null)
// previousLine += " <note>";
if (lastLabel != null) {
header2.append(previousLine);
header2.append("\n");
}
}
}
// previousPreviousLine = previousLine;
previousLine = linee;
lastLastLabel = lastLabel;
lastLabel = label;
}
if (lastLabel != null) {
header2.append(previousLine);
header2.append("\n");
}
                if ( (writer2 == null) && (writer3 != null) )
                    writer3.write(header2.toString() + "\n");
                else if ( (writer2 != null) && (writer3 == null) )
                    writer2.write(header2.toString() + "\n");
                else {
                    if (Math.random() <= splitRatio)
                        writer2.write(header2.toString() + "\n");
                    else
                        writer3.write(header2.toString() + "\n");
                }
}
if (writer2 != null) {
writer2.close();
os2.close();
}
if (writer3 != null) {
writer3.close();
os3.close();
}
} catch (Exception e) {
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
return nbExamples;
}
/**
* Command line execution.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
GrobidProperties.getInstance();
AbstractTrainer.runTraining(new HeaderTrainer());
System.out.println(AbstractTrainer.runEvaluation(new HeaderTrainer()));
System.exit(0);
}
}
| 11,304 | 37.452381 | 115 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/LabelStat.java
|
package org.grobid.trainer.evaluation;
/**
* Model the results for each label
*/
public final class LabelStat {
private int falsePositive = 0;
private int falseNegative = 0;
private int observed = 0; // this is true positives
private int expected = 0; // total expected number of items with this label
private double accuracy = 0.0;
private int trueNegative;
private boolean hasChanged = false;
public void incrementFalseNegative() {
this.incrementFalseNegative(1);
hasChanged = true;
}
public void incrementFalsePositive() {
this.incrementFalsePositive(1);
hasChanged = true;
}
public void incrementObserved() {
this.incrementObserved(1);
hasChanged = true;
}
public void incrementExpected() {
this.incrementExpected(1);
hasChanged = true;
}
public void incrementFalseNegative(int count) {
this.falseNegative += count;
hasChanged = true;
}
public void incrementFalsePositive(int count) {
this.falsePositive += count;
hasChanged = true;
}
public void incrementObserved(int count) {
this.observed += count;
hasChanged = true;
}
public void incrementExpected(int count) {
this.expected += count;
hasChanged = true;
}
public int getExpected() {
return this.expected;
}
public int getFalseNegative() {
return this.falseNegative;
}
public int getFalsePositive() {
return this.falsePositive;
}
public int getObserved() {
return this.observed;
}
public int getAll() {
return observed + falseNegative + falsePositive;
}
public void setFalsePositive(int falsePositive) {
this.falsePositive = falsePositive;
hasChanged = true;
}
public void setFalseNegative(int falseNegative) {
this.falseNegative = falseNegative;
hasChanged = true;
}
public void setObserved(int observed) {
this.observed = observed;
hasChanged = true;
}
public void setExpected(int expected) {
this.expected = expected;
hasChanged = true;
}
public static LabelStat create() {
return new LabelStat();
}
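    // accuracy = (TP + TN) / (TP + FP + TN + FN); note that trueNegative is not
    // incremented locally but injected from outside (see Stats.computeMetrics),
    // so this value is only meaningful once it has been set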
public double getAccuracy() {
double accuracy = (double) (observed + trueNegative) / (observed + falsePositive + trueNegative + falseNegative);
if (accuracy < 0.0)
accuracy = 0.0;
return accuracy;
}
public long getSupport() {
return expected;
}
public double getPrecision() {
if (observed == 0.0) {
return 0.0;
}
return ((double) observed) / (falsePositive + observed);
}
public double getRecall() {
if (expected == 0.0)
return 0.0;
return ((double) observed) / (expected);
}
public double getF1Score() {
double precision = getPrecision();
double recall = getRecall();
if ((precision == 0.0) && (recall == 0.0))
return 0.0;
return (2.0 * precision * recall) / (precision + recall);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder
.append("falsePositive: ").append(falsePositive)
.append("; falseNegative: ").append(falseNegative)
.append("; observed: ").append(observed)
.append("; expected: ").append(expected);
return builder.toString();
}
public void setTrueNegative(int trueNegative) {
this.trueNegative = trueNegative;
}
public boolean hasChanged() {
boolean oldValue = hasChanged;
hasChanged = false;
return oldValue;
}
}
| 3,805 | 23.397436 | 121 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/LabelResult.java
|
package org.grobid.trainer.evaluation;
import org.grobid.core.utilities.TextUtilities;
public class LabelResult {
private final String label;
private double accuracy;
private double precision;
private double recall;
private double f1Score;
private long support;
public LabelResult(String label) {
this.label = label;
}
public void setAccuracy(double accuracy) {
this.accuracy = accuracy;
}
public double getAccuracy() {
return accuracy;
}
public String getLabel() {
return label;
}
public void setPrecision(double precision) {
this.precision = precision;
}
public double getPrecision() {
return precision;
}
public void setRecall(double recall) {
this.recall = recall;
}
public double getRecall() {
return recall;
}
public void setF1Score(double f1Score) {
this.f1Score = f1Score;
}
public double getF1Score() {
return f1Score;
}
public void setSupport(long support) {
this.support = support;
}
public String toString() {
return String.format("%-20s %-12s %-12s %-12s %-12s %-7s\n",
label,
TextUtilities.formatTwoDecimals(getAccuracy() * 100),
TextUtilities.formatTwoDecimals(getPrecision() * 100),
TextUtilities.formatTwoDecimals(getRecall() * 100),
TextUtilities.formatTwoDecimals(getF1Score() * 100),
String.valueOf(getSupport())
);
}
public long getSupport() {
return support;
}
}
| 1,614 | 20.824324 | 68 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/ModelStats.java
|
package org.grobid.trainer.evaluation;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import static org.grobid.core.engines.tagging.GenericTaggerUtils.getPlainLabel;
/**
 * Represents the different evaluations for a specific model
*/
public class ModelStats {
private int totalInstances;
private int correctInstance;
private Stats fieldStats;
private String rawResults;
protected ModelStats() {
}
public ModelStats(String results) {
this.fieldStats = fieldLevelStats(results);
this.rawResults = results;
Pair<Integer, Integer> doubleDoublePair = computeInstanceStatistics(results);
this.setTotalInstances(doubleDoublePair.getLeft());
this.setCorrectInstance(doubleDoublePair.getRight());
}
public Pair<Integer, Integer> computeInstanceStatistics(String results) {
        // instance-level: instances are separated by a blank line in the result file;
        // turn empty separator lines into single-space lines so that the tokenizer
        // below does not silently skip them
        String resultsPost = results.replace("\n\n", "\n \n");
StringTokenizer stt = new StringTokenizer(resultsPost, "\n");
boolean allGood = true;
int correctInstance = 0;
int totalInstance = 0;
String line = null;
while (stt.hasMoreTokens()) {
line = stt.nextToken();
if ((line.trim().length() == 0) || (!stt.hasMoreTokens())) {
// instance done
totalInstance++;
if (allGood) {
correctInstance++;
}
// we reinit for a new instance
allGood = true;
} else {
StringTokenizer st = new StringTokenizer(line, "\t ");
String obtainedLabel = null;
String expectedLabel = null;
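                // in a result line the last column holds the predicted label and the
                // column before it the expected one: the loop keeps the last token as
                // obtainedLabel and the token preceding it as expectedLabel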
while (st.hasMoreTokens()) {
obtainedLabel = getPlainLabel(st.nextToken());
if (st.hasMoreTokens()) {
expectedLabel = obtainedLabel;
}
}
if (!obtainedLabel.equals(expectedLabel)) {
// one error is enough to have the whole instance false, damn!
allGood = false;
}
}
}
return new ImmutablePair<>(totalInstance, correctInstance);
}
public void setTotalInstances(int totalInstances) {
this.totalInstances = totalInstances;
}
public int getTotalInstances() {
return totalInstances;
}
public void setCorrectInstance(int correctInstance) {
this.correctInstance = correctInstance;
}
public int getCorrectInstance() {
return correctInstance;
}
public void setFieldStats(Stats fieldStats) {
this.fieldStats = fieldStats;
}
public Stats getFieldStats() {
return fieldStats;
}
public double getInstanceRecall() {
if (getTotalInstances() <= 0) {
return 0.0d;
}
return (double) getCorrectInstance() / (getTotalInstances());
}
public String toString() {
return toString(false);
}
public String toString(boolean includeRawResults) {
StringBuilder report = new StringBuilder();
if (includeRawResults) {
report.append("=== START RAW RESULTS ===").append("\n");
report.append(getRawResults()).append("\n");
report.append("=== END RAw RESULTS ===").append("\n").append("\n");
}
Stats fieldStats = getFieldStats();
report.append("\n===== Field-level results =====\n");
report.append(String.format("\n%-20s %-12s %-12s %-12s %-12s %-7s\n\n",
"label",
"accuracy",
"precision",
"recall",
"f1",
"support"));
for (Map.Entry<String, LabelResult> labelResult : fieldStats.getLabelsResults().entrySet()) {
report.append(labelResult.getValue());
}
report.append("\n");
report.append(String.format("%-20s %-12s %-12s %-12s %-12s %-7s\n",
"all (micro avg.)",
TextUtilities.formatTwoDecimals(fieldStats.getMicroAverageAccuracy() * 100),
TextUtilities.formatTwoDecimals(fieldStats.getMicroAveragePrecision() * 100),
TextUtilities.formatTwoDecimals(fieldStats.getMicroAverageRecall() * 100),
TextUtilities.formatTwoDecimals(fieldStats.getMicroAverageF1() * 100),
String.valueOf(getSupportSum())));
report.append(String.format("%-20s %-12s %-12s %-12s %-12s %-7s\n",
"all (macro avg.)",
TextUtilities.formatTwoDecimals(fieldStats.getMacroAverageAccuracy() * 100),
TextUtilities.formatTwoDecimals(fieldStats.getMacroAveragePrecision() * 100),
TextUtilities.formatTwoDecimals(fieldStats.getMacroAverageRecall() * 100),
TextUtilities.formatTwoDecimals(fieldStats.getMacroAverageF1() * 100),
String.valueOf(getSupportSum())));
// instance-level: instances are separated by a new line in the result file
report.append("\n===== Instance-level results =====\n\n");
report.append(String.format("%-27s %d\n", "Total expected instances:", getTotalInstances()));
report.append(String.format("%-27s %d\n", "Correct instances:", getCorrectInstance()));
report.append(String.format("%-27s %s\n",
"Instance-level recall:",
TextUtilities.formatTwoDecimals(getInstanceRecall() * 100)));
return report.toString();
}
public long getSupportSum() {
long supportSum = 0;
for (LabelResult labelResult : fieldStats.getLabelsResults().values()) {
supportSum += labelResult.getSupport();
}
return supportSum;
}
public String getRawResults() {
return rawResults;
}
public void setRawResults(String rawResults) {
this.rawResults = rawResults;
}
public Stats fieldLevelStats(String theResult) {
Stats fieldStats = new Stats();
// field: a field is simply a sequence of token with the same label
// we build first the list of fields in expected and obtained result
// with offset positions
List<Pair<String, OffsetPosition>> expectedFields = new ArrayList<>();
List<Pair<String, OffsetPosition>> obtainedFields = new ArrayList<>();
StringTokenizer stt = new StringTokenizer(theResult, System.lineSeparator());
String line = null;
String previousExpectedLabel = null;
String previousObtainedLabel = null;
int pos = 0; // current token index
OffsetPosition currentObtainedPosition = new OffsetPosition();
currentObtainedPosition.start = 0;
OffsetPosition currentExpectedPosition = new OffsetPosition();
currentExpectedPosition.start = 0;
String obtainedLabel = null;
String expectedLabel = null;
while (stt.hasMoreTokens()) {
line = stt.nextToken();
obtainedLabel = null;
expectedLabel = null;
StringTokenizer st = new StringTokenizer(line, "\t ");
while (st.hasMoreTokens()) {
obtainedLabel = st.nextToken();
if (st.hasMoreTokens()) {
expectedLabel = obtainedLabel;
}
}
if ((obtainedLabel == null) || (expectedLabel == null))
continue;
if ((previousObtainedLabel != null) &&
(!obtainedLabel.equals(getPlainLabel(previousObtainedLabel)))) {
// new obtained field
currentObtainedPosition.end = pos - 1;
Pair<String, OffsetPosition> theField = new ImmutablePair<>(getPlainLabel(previousObtainedLabel),
currentObtainedPosition);
currentObtainedPosition = new OffsetPosition();
currentObtainedPosition.start = pos;
obtainedFields.add(theField);
}
if ((previousExpectedLabel != null) &&
(!expectedLabel.equals(getPlainLabel(previousExpectedLabel)))) {
// new expected field
currentExpectedPosition.end = pos - 1;
Pair<String, OffsetPosition> theField = new ImmutablePair<>(getPlainLabel(previousExpectedLabel),
currentExpectedPosition);
currentExpectedPosition = new OffsetPosition();
currentExpectedPosition.start = pos;
expectedFields.add(theField);
}
previousExpectedLabel = expectedLabel;
previousObtainedLabel = obtainedLabel;
pos++;
}
// last fields of the sequence
if ((previousObtainedLabel != null)) {
currentObtainedPosition.end = pos - 1;
Pair<String, OffsetPosition> theField = new ImmutablePair<>(getPlainLabel(previousObtainedLabel),
currentObtainedPosition);
obtainedFields.add(theField);
}
if ((previousExpectedLabel != null)) {
currentExpectedPosition.end = pos - 1;
Pair<String, OffsetPosition> theField = new ImmutablePair<>(getPlainLabel(previousExpectedLabel),
currentExpectedPosition);
expectedFields.add(theField);
}
        // we then simply compare the positions and labels of the two field lists
        // and update the statistics
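        // a predicted field counts as a true positive only when its label and both
        // boundaries match an expected field exactly; unmatched expected fields are
        // false negatives and unmatched predicted fields false positives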
int obtainedFieldIndex = 0;
List<Pair<String, OffsetPosition>> matchedObtainedFields = new ArrayList<Pair<String, OffsetPosition>>();
for (Pair<String, OffsetPosition> expectedField : expectedFields) {
expectedLabel = expectedField.getLeft();
int expectedStart = expectedField.getRight().start;
int expectedEnd = expectedField.getRight().end;
LabelStat labelStat = fieldStats.getLabelStat(getPlainLabel(expectedLabel));
labelStat.incrementExpected();
// try to find a match in the obtained fields
boolean found = false;
for (int i = obtainedFieldIndex; i < obtainedFields.size(); i++) {
obtainedLabel = obtainedFields.get(i).getLeft();
if (!expectedLabel.equals(obtainedLabel))
continue;
if ((expectedStart == obtainedFields.get(i).getRight().start) &&
(expectedEnd == obtainedFields.get(i).getRight().end)) {
// we have a match
labelStat.incrementObserved(); // TP
found = true;
obtainedFieldIndex = i;
matchedObtainedFields.add(obtainedFields.get(i));
break;
}
// if we went too far, we can stop the pain
if (expectedEnd < obtainedFields.get(i).getRight().start) {
break;
}
}
if (!found) {
labelStat.incrementFalseNegative();
}
}
// all the obtained fields without match in the expected fields are false positive
for (Pair<String, OffsetPosition> obtainedField : obtainedFields) {
if (!matchedObtainedFields.contains(obtainedField)) {
obtainedLabel = obtainedField.getLeft();
LabelStat labelStat = fieldStats.getLabelStat(getPlainLabel(obtainedLabel));
labelStat.incrementFalsePositive();
}
}
return fieldStats;
}
}
| 11,850 | 37.477273 | 113 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/EvaluationDOIMatching.java
|
package org.grobid.trainer.evaluation;
import com.fasterxml.jackson.core.io.JsonStringEncoder;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.io.FileUtils;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.engines.Engine;
import org.grobid.core.exceptions.GrobidResourceException;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.utilities.Consolidation.GrobidConsolidationService;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.trainer.evaluation.utilities.NamespaceContextMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.EntityResolver;
import org.xml.sax.InputSource;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.SAXParserFactory;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FilenameFilter;
import java.text.Normalizer;
import java.util.*;
import java.util.regex.Pattern;
/**
* Evaluation of the DOI matching for the extracted bibliographical references,
* using PDF+native publisher XML where the DOI or PMID are provided. A typical
 * example of such evaluation data is PubMed Central fulltext resources. A second
 * type of evaluation relies on PDF+TEI files produced via Pub2TEI for mainstream
 * publishers.
*
*/
public class EvaluationDOIMatching {
private static final Logger LOGGER = LoggerFactory.getLogger(EvaluationDOIMatching.class);
private static String evaluationFilePath = null;
private Engine engine = null;
public static final int BIBLIO_GLUTTON = 0;
public static final int CROSSREF_API = 1;
public static final double minRatcliffObershelpSimilarity = 0.5;
// xpath expressions for nlm
private static final String path_nlm_ref = "/article/back/ref-list/ref/mixed-citation";
private static final String path_nlm_doi = "pub-id[@pub-id-type=\"doi\"]/text()";
private static final String path_nlm_pmid = "pub-id[@pub-id-type=\"pmid\"]/text()";
private static final String path_nlm_title = "article-title/text()";
private static final String path_nlm_author = "person-group[@person-group-type=\"author\"]/name/surname/text()";
private static final String path_nlm_host = "source/text()";
private static final String path_nlm_first_page = "fpage/text()";
private static final String path_nlm_volume = "volume/text()";
// xpath expressions for tei
private static final String path_tei_ref = "//back/div/listBibl/biblStruct";
private static final String path_tei_doi = "idno[@type=\"doi\"]/text()";
public EvaluationDOIMatching(String path) {
this.evaluationFilePath = path;
File evaluationFile = new File(path);
if (!evaluationFile.exists()) {
System.out.println("Path to evaluation (gold) XML data is not valid !");
this.evaluationFilePath = null;
}
try {
GrobidProperties.getInstance();
LOGGER.info(">>>>>>>> GROBID_HOME="+GrobidProperties.getGrobidHome());
engine = GrobidFactory.getInstance().createEngine();
}
catch (Exception e) {
e.printStackTrace();
}
}
public String evaluation() throws Exception {
StringBuilder report = new StringBuilder();
// we run Grobid reference extraction on the PubMedCentral data
File input = new File(this.evaluationFilePath);
// we process all json files in the input evaluation directory
File[] refFiles = input.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".json");
}
});
if (refFiles == null) {
report.append("No file in dataset");
return report.toString();
}
int nbRef = 0;
int nbDOIFound = 0;
int nbDOICorrect = 0;
long start = System.currentTimeMillis();
ObjectMapper mapper = new ObjectMapper();
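        // each JSON file is expected to contain an array of entries carrying at least
        // the raw "reference" string and the gold "doi" (plus optional "pmid" and
        // bibliographical metadata), as produced by buildEvaluationDataset()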
        for (final File jsonFile : refFiles) {
JsonNode rootNode = mapper.readTree(jsonFile);
Iterator<JsonNode> ite = rootNode.elements();
List<String> rawRefs = new ArrayList<String>();
List<String> dois = new ArrayList<String>();
List<String> pmids = new ArrayList<String>();
List<String> atitles = new ArrayList<String>();
List<String> jtitles = new ArrayList<String>();
List<String> firstAuthors = new ArrayList<String>();
List<String> volumes = new ArrayList<String>();
List<String> firstPages = new ArrayList<String>();
while (ite.hasNext()) {
//if (nbRef > 1000)
// break;
JsonNode entryNode = ite.next();
String rawRef = null;
JsonNode refNode = entryNode.findPath("reference");
if ((refNode != null) && (!refNode.isMissingNode())) {
rawRef = refNode.textValue();
}
rawRefs.add(rawRef);
String doi = null;
JsonNode doiNode = entryNode.findPath("doi");
if ((doiNode != null) && (!doiNode.isMissingNode())) {
doi = doiNode.textValue();
}
dois.add(doi);
String pmid = null;
JsonNode pmidNode = entryNode.findPath("pmid");
if ((pmidNode != null) && (!pmidNode.isMissingNode())) {
pmid = pmidNode.textValue();
}
pmids.add(pmid);
String atitle = null;
JsonNode atitleNode = entryNode.findPath("atitle");
if ((atitleNode != null) && (!atitleNode.isMissingNode())) {
atitle = atitleNode.textValue();
}
atitles.add(atitle);
String jtitle = null;
JsonNode jtitleNode = entryNode.findPath("jtitle");
if ((jtitleNode != null) && (!jtitleNode.isMissingNode())) {
jtitle = jtitleNode.textValue();
}
jtitles.add(jtitle);
String volume = null;
JsonNode volumeNode = entryNode.findPath("volume");
if ((volumeNode != null) && (!volumeNode.isMissingNode())) {
volume = volumeNode.textValue();
}
volumes.add(volume);
String firstPage = null;
JsonNode firstPageNode = entryNode.findPath("firstPage");
if ((firstPageNode != null) && (!firstPageNode.isMissingNode())) {
firstPage = firstPageNode.textValue();
}
firstPages.add(firstPage);
String author = null;
JsonNode authorNode = entryNode.findPath("author");
if ((authorNode != null) && (!authorNode.isMissingNode())) {
author = authorNode.textValue();
}
firstAuthors.add(author);
nbRef++;
}
// run Grobid reference parser on this raw strings
try {
List<BiblioItem> biblios = engine.processRawReferences(rawRefs, 2);
for(int i=0; i<rawRefs.size(); i++) {
BiblioItem biblio = biblios.get(i);
String doi = dois.get(i);
String pmid = pmids.get(i);
//LOGGER.info("\n\tDOI: " + doi);
//LOGGER.info("\trawRef: " + rawRefs.get(i));
if (biblio.getDOI() != null) {
nbDOIFound++;
//LOGGER.info("\tfound: "+ biblio.getDOI());
// is the DOI correct?
                        if (doi != null && biblio.getDOI().equalsIgnoreCase(doi))
                            nbDOICorrect++;
else {
//LOGGER.info("!!!!!!!!!!!!! Mismatch DOI: " + doi + " / " + biblio.getDOI());
}
}
}
}
catch (Exception e) {
LOGGER.error("Error when processing: " + jsonFile.getPath(), e);
}
}
double processTime = ((double)System.currentTimeMillis() - start) / 1000.0;
double rate = ((double)processTime)/nbRef;
System.out.println("\n\n" + nbRef + " bibliographical references processed in " +
processTime + " seconds, " +
TextUtilities.formatFourDecimals(rate) +
" seconds per bibliographical reference.");
System.out.println("Found " + nbDOIFound + " DOI");
// evaluation of the run
start = System.currentTimeMillis();
report.append("\n======= ");
if (GrobidProperties.getInstance().getConsolidationService() == GrobidConsolidationService.GLUTTON)
report.append("GLUTTON");
else
report.append("CROSSREF");
report.append(" API ======= \n");
        double precision = nbDOIFound == 0 ? 0.0 : ((double) nbDOICorrect / nbDOIFound);
report.append("\nprecision:\t");
report.append(TextUtilities.formatTwoDecimals(precision * 100));
        double recall = nbRef == 0 ? 0.0 : ((double) nbDOICorrect / nbRef);
report.append("\nrecall:\t\t").append(TextUtilities.formatTwoDecimals(recall * 100));
double f1 = 0.0;
if (precision + recall != 0.0)
f1 = (2 * precision * recall) / (precision + recall);
report.append("\nF1-score:\t").append(TextUtilities.formatTwoDecimals(f1 * 100)).append("\n");
//report.append("\n======= BIBLIO GLUTTON ======= \n");
//System.out.println("Evaluation metrics produced in " +
// (System.currentTimeMillis() - start) / (1000.00) + " seconds");
return report.toString();
}
/**
* From PDF and publisher XML (nlm or TEI), we create a dataset of raw bibliographical
* references extracted from the PDF by GROBID associated with their DOI obtained from
* the XML. This set will be used for evaluating the DOI matching.
*/
public void buildEvaluationDataset() throws Exception {
if (this.evaluationFilePath == null) {
throw new GrobidResourceException("Path to evaluation (gold) XML data is not correctly set");
}
StringBuffer report = new StringBuffer();
// get a factory for SAX parsers
SAXParserFactory spf = SAXParserFactory.newInstance();
XPathFactory xpf = XPathFactory.newInstance();
XPath xp = xpf.newXPath();
        // explicit indication of the default namespace
        Map<String, String> mappings = new HashMap<String, String>();
        mappings.put("tei", "http://www.tei-c.org/ns/1.0");
xp.setNamespaceContext(new NamespaceContextMap(mappings));
File input = new File(this.evaluationFilePath);
        // we process all subdirectories of the evaluation corpus; each one is
        // expected to contain a PDF and its gold XML counterpart
        File[] refFiles = input.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String name) {
                return new File(dir, name).isDirectory();
            }
});
if (refFiles == null) {
report.append("No file in dataset");
return;
}
List<BibRefAggregated> allGoldReferences = new ArrayList<BibRefAggregated>();
int n = 0;
long start = System.currentTimeMillis();
int fails = 0;
for (File dir : refFiles) {
// get the PDF file in the directory
File[] refFiles2 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".pdf") || name.endsWith(".PDF");
}
});
if (refFiles2 == null || refFiles2.length == 0) {
LOGGER.info("warning: no PDF found under " + dir.getPath());
continue;
}
if (refFiles2.length != 1) {
LOGGER.warn("warning: more than one PDF found under " + dir.getPath());
LOGGER.warn("processing only the first one...");
}
final File pdfFile = refFiles2[0];
File nlmFile = null;
File teiFile = null;
List<BibRefAggregated> goldReferences = new ArrayList<BibRefAggregated>();
// get the (gold) reference file corresponding to this pdf
File[] refFiles3 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".nxml") || name.endsWith(".xml");
}
});
if ( (refFiles3 != null) && (refFiles3.length != 0) )
nlmFile = refFiles3[0];
refFiles3 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".tei");
}
});
if ( (refFiles3 != null) && (refFiles3.length != 0) )
teiFile = refFiles3[0];
if ( (nlmFile == null) && (teiFile == null) ) {
LOGGER.warn("warning: no reference NLM or TEI file found under " + dir.getPath());
continue;
}
DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
docFactory.setValidating(false);
//System.out.println("\n\nFile: " + pdfFile.getPath());
try {
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
docBuilder.setEntityResolver(new EntityResolver() {
public InputSource resolveEntity(String publicId, String systemId) {
return new InputSource(
new ByteArrayInputStream("<?xml version=\"1.0\" encoding=\"UTF-8\"?>".getBytes()));
}
}); // swap in a dummy resolver to neutralise the online DTD
File goldFile = null;
if (teiFile != null)
goldFile = teiFile;
else
goldFile = nlmFile;
Document gold = docBuilder.parse(goldFile);
//System.out.println("Reference DOIs in : " + goldFile.getPath());
                // get the DOI or PMID of the bibliographical references via XPath
String path_doi = null;
String path_pmid = null;
String path_ref = null;
if (teiFile == null) {
// gold DOI are in the nlm file
path_ref = path_nlm_ref;
path_doi = path_nlm_doi;
path_pmid = path_nlm_pmid;
} else {
path_ref = path_tei_ref;
path_doi = path_tei_doi;
}
NodeList nodeList = (NodeList) xp.compile(path_ref).
evaluate(gold.getDocumentElement(), XPathConstants.NODESET);
for (int i = 0; i < nodeList.getLength(); i++) {
// for each node we have a ref bib
BibRefAggregated refBib = new BibRefAggregated();
Node ref = nodeList.item(i);
refBib.setXML(ref);
// get DOI and PMID - if any
NodeList nodeListDOI = (NodeList) xp.compile(path_doi).
evaluate(ref, XPathConstants.NODESET);
if (nodeListDOI.getLength() > 0) {
Node nodeDOI = nodeListDOI.item(0);
String doi = nodeDOI.getNodeValue();
refBib.setDOI(doi);
}
NodeList nodeListPMID = (NodeList) xp.compile(path_pmid).
evaluate(ref, XPathConstants.NODESET);
if (nodeListPMID.getLength() > 0) {
Node nodePMID = nodeListPMID.item(0);
String pmid = nodePMID.getNodeValue();
refBib.setPMID(pmid);
}
goldReferences.add(refBib);
}
} catch(Exception e) {
LOGGER.error("Error when collecting reference citations", e);
}
int p = 0; // count the number of citation raw reference string aligned with gold reference
// run Grobid reference extraction
try {
LOGGER.info(n + " - " + pdfFile.getPath());
List<BibDataSet> bibrefs = engine.processReferences(pdfFile, 0);
for(BibDataSet bib : bibrefs) {
String rawRef = bib.getRawBib();
                    // we remove a DOI possibly present in the raw ref, as the whole
                    // exercise is about DOI matching
if (rawRef != null) {
rawRef = TextUtilities.DOIPattern.matcher(rawRef).replaceAll(" ");
// we need to align this raw ref bib string with a gold ref bib
for(BibRefAggregated goldReference : goldReferences) {
if ( (goldReference.getRawRef() == null) &&
//(goldReference.getDOI() != null || goldReference.getPMID() != null) ) {
(goldReference.getDOI() != null) ) {
// check key fields like for alignment
Node refNode = goldReference.getXML();
if (refNode == null)
continue;
// title
NodeList nodeList = (NodeList) xp.compile(path_nlm_title).
evaluate(refNode, XPathConstants.NODESET);
String title = null;
if ((nodeList != null) && nodeList.getLength()>0)
title = nodeList.item(0).getNodeValue();
// author
String author = null;
String firstAuthor = null;
nodeList = (NodeList) xp.compile(path_nlm_author).
evaluate(refNode, XPathConstants.NODESET);
if ((nodeList != null) && nodeList.getLength()>0) {
author = nodeList.item(0).getNodeValue();
firstAuthor = author;
for (int i=1; i<nodeList.getLength(); i++)
author += nodeList.item(i).getNodeValue();
}
// journal, book or conference (aka source in NLM)
String host = null;
nodeList = (NodeList) xp.compile(path_nlm_host).
evaluate(refNode, XPathConstants.NODESET);
if ((nodeList != null) && nodeList.getLength()>0)
host = nodeList.item(0).getNodeValue();
// first page
String firstPage = null;
nodeList = (NodeList) xp.compile(path_nlm_first_page).
evaluate(refNode, XPathConstants.NODESET);
if ((nodeList != null) && nodeList.getLength()>0)
firstPage = nodeList.item(0).getNodeValue();
// volume
String volume = null;
nodeList = (NodeList) xp.compile(path_nlm_volume).
evaluate(refNode, XPathConstants.NODESET);
if ((nodeList != null) && nodeList.getLength()>0)
volume = nodeList.item(0).getNodeValue();
//System.out.println(title + " " + author + " " + host);
if ( (title == null) && (author == null) && (host == null) ) {
// nlm might contain the raw string but usually not DOI or PMID
} else {
String rawRefSignature = this.getSignature(rawRef);
String titleSignature = this.getSignature(title);
String authorSignature = this.getSignature(author);
String hostSignature = this.getSignature(host);
String firstPageSignature = this.getSignature(firstPage);
String volumeSignature = this.getSignature(volume);
int ind1 = -1, ind2 = -1, ind3 = -1, ind4 =-1, ind5 =-1;
if (title != null) {
ind1 = rawRefSignature.indexOf(titleSignature);
}
if (author != null) {
ind2 = rawRefSignature.indexOf(authorSignature);
}
if (host != null) {
ind3 = rawRefSignature.indexOf(hostSignature);
}
if (firstPage != null) {
ind4 = rawRefSignature.indexOf(firstPageSignature);
}
if (volume != null) {
ind5 = rawRefSignature.indexOf(volumeSignature);
}
// soft match for the title using Ratcliff Obershelp string distance
//double similarity = 0.0;
//Option<Object> similarityObject =
// RatcliffObershelpMetric.compare(title, localRawRef);
//if ( (similarityObject != null) && (similarityObject.get() != null) )
// similarity = (Double)similarityObject.get();
// intra-document matching
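                            // a raw reference is aligned with a gold entry when the title
                            // signature occurs in it, or when the author and host signatures
                            // occur together with a first page or volume match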
if ( (ind1 != -1) ||
(ind2 != -1 && ind3 != -1 && (ind4 != -1 || ind5 != -1)) ) {
goldReference.setRawRef(rawRef);
goldReference.setFirstPage(firstPage);
goldReference.setVolume(volume);
goldReference.setAtitle(title);
goldReference.setJtitle(host);
goldReference.setFirstAuthor(firstAuthor);
// if we have a pmid but no doi, we can still try to get the DOI from it
p++;
continue;
}
}
}
}
}
}
allGoldReferences.addAll(goldReferences);
// we get from this the reference strings and matched DOI
System.out.println("total of " + bibrefs.size() + " ref. bib. found by GROBID");
System.out.println(goldReferences.size() + " DOI identified in gold");
System.out.println("and " + p + " original reference strings identified");
}
catch (Exception e) {
System.out.println("Error when processing: " + pdfFile.getPath());
e.printStackTrace();
fails++;
}
n++;
}
// writing the dataset file
File jsonFile = new File(this.evaluationFilePath + File.separator + "references-doi-matching.json");
JsonStringEncoder encoder = JsonStringEncoder.getInstance();
StringBuilder sb = new StringBuilder();
sb.append("[\n");
boolean first = true;
for(BibRefAggregated goldReference : allGoldReferences) {
if ((goldReference.getRawRef() != null) &&
(goldReference.getDOI() != null || goldReference.getPMID() != null) ) {
if (first)
first = false;
else
sb.append(",\n");
sb.append("{");
byte[] encodedValueRef = encoder.quoteAsUTF8(goldReference.getRawRef());
String outputValueRef = new String(encodedValueRef);
sb.append("\"reference\": \"" + outputValueRef + "\"");
if (goldReference.getDOI() != null) {
byte[] encodedValueDOI = encoder.quoteAsUTF8(goldReference.getDOI());
String outputValueDOI = new String(encodedValueDOI);
sb.append(", \"doi\": \"" + outputValueDOI + "\"");
}
if (goldReference.getPMID() != null) {
byte[] encodedValuePMID = encoder.quoteAsUTF8(goldReference.getPMID());
String outputValuePMID = new String(encodedValuePMID);
sb.append(", \"pmid\": \"" + outputValuePMID + "\"");
}
// other metadata
if (goldReference.getAtitle() != null) {
byte[] encodedValueAtitle = encoder.quoteAsUTF8(goldReference.getAtitle());
String outputValueAtitle = new String(encodedValueAtitle);
sb.append(", \"atitle\": \"" + outputValueAtitle + "\"");
}
if (goldReference.getFirstAuthor() != null) {
byte[] encodedValueFirstAuthor = encoder.quoteAsUTF8(goldReference.getFirstAuthor());
String outputValueFirstAuthor = new String(encodedValueFirstAuthor);
sb.append(", \"firstAuthor\": \"" + outputValueFirstAuthor + "\"");
}
if (goldReference.getJtitle() != null) {
byte[] encodedValueJtitle = encoder.quoteAsUTF8(goldReference.getJtitle());
String outputValueJtitle = new String(encodedValueJtitle);
sb.append(", \"jtitle\": \"" + outputValueJtitle + "\"");
}
if (goldReference.getVolume() != null) {
byte[] encodedValueVolume = encoder.quoteAsUTF8(goldReference.getVolume());
String outputValueVolume = new String(encodedValueVolume);
sb.append(", \"volume\": \"" + outputValueVolume + "\"");
}
if (goldReference.getFirstPage() != null) {
byte[] encodedValueFirstPage = encoder.quoteAsUTF8(goldReference.getFirstPage());
String outputValueFirstPage = new String(encodedValueFirstPage);
sb.append(", \"firstPage\": \"" + outputValueFirstPage + "\"");
}
sb.append("}");
}
}
sb.append("]");
try {
// saving the file
FileUtils.writeStringToFile(jsonFile, sb.toString(), "UTF-8");
} catch(Exception e) {
e.printStackTrace();
}
System.out.println("GROBID failed on " + fails + " PDF");
double processTime = ((double)System.currentTimeMillis() - start) / 1000.0;
System.out.println(n + " PDF files processed in " +
processTime + " seconds, " + ((double)processTime)/n + " seconds per PDF file.");
}
private Pattern pattern = Pattern.compile("[^a-zA-Z0-9]+");
/**
* Simplify a string for soft matching: lowercasing, ascii folding, remove punctuation
* and special characters
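     * For instance "Nature Communications, 7(1)" gives "naturecommunications71".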
*/
private String getSignature(String field) {
if (field == null)
return null;
String string = field.toLowerCase();
string = Normalizer.normalize(string, Normalizer.Form.NFD);
string = string.replaceAll("[^\\p{ASCII}]", "");
//string = string.replaceAll("\\p{M}", "");
string = pattern.matcher(string).replaceAll("");
return string;
}
/**
* This class represents a bibliographical reference by aggregating information
* from the publisher XML and the PDF extracted information from GROBID.
*/
public class BibRefAggregated {
// raw string of the bib ref
private String rawRef = null;
// doi if any
private String doi = null;
// pmid if present
private String pmid = null;
// xml segment corresponding to the bibliographical reference
private Node xml = null;
// other metadata
private String atitle = null;
private String jtitle = null;
private String firstAuthor = null;
private String volume = null;
private String firstPage = null;
public String getRawRef() {
return this.rawRef;
}
public void setRawRef(String raw) {
this.rawRef = raw;
}
public String getDOI() {
return this.doi;
}
public void setDOI(String doi) {
this.doi = doi;
}
public String getPMID() {
return this.pmid;
}
public void setPMID(String pmid) {
this.pmid = pmid;
}
public Node getXML() {
return this.xml;
}
public void setXML(Node xml) {
this.xml = xml;
}
public String getAtitle() {
return this.atitle;
}
public void setAtitle(String atitle) {
this.atitle = atitle;
}
public String getJtitle() {
return this.jtitle;
}
public void setJtitle(String jtitle) {
this.jtitle = jtitle;
}
public String getFirstAuthor() {
return this.firstAuthor;
}
public void setFirstAuthor(String firstAuthor) {
this.firstAuthor = firstAuthor;
}
public String getVolume() {
return this.volume;
}
public void setVolume(String volume) {
this.volume = volume;
}
public String getFirstPage() {
return this.firstPage;
}
public void setFirstPage(String firstPage) {
this.firstPage = firstPage;
}
}
/**
* Command line execution.
*
* @param args Command line arguments.
*/
public static void main(String[] args) {
        if (args.length != 2) {
            System.err.println("command parameters: action [data|eval] [path to the (gold) evaluation dataset]");
return;
}
String action = args[0];
if ( (action == null) || (action.length() == 0) || (!action.equals("data")) && (!action.equals("eval")) ) {
System.err.println("Action to be performed not correctly set, should be [data|eval]");
return;
}
String inputPath = args[1];
if ( (inputPath == null) || (inputPath.length() == 0) ) {
System.err.println("Path to evaluation (gold) XML data is not correctly set");
return;
}
try {
File thePath = new File(inputPath);
if (!thePath.exists()) {
System.err.println("Path to evaluation (gold) XML data does not exist");
return;
}
if (!thePath.isDirectory()) {
System.err.println("Path to evaluation (gold) XML data is not a directory");
return;
}
}
catch (Exception e) {
e.printStackTrace();
}
try {
if (action.equals("data")) {
EvaluationDOIMatching data = new EvaluationDOIMatching(inputPath);
data.buildEvaluationDataset();
} else if (action.equals("eval")) {
EvaluationDOIMatching eval = new EvaluationDOIMatching(inputPath);
String report = eval.evaluation();
System.out.println(report);
}
System.out.println(Engine.getCntManager());
} catch (Exception e) {
e.printStackTrace();
}
// to be sure jvm stops
System.exit(0);
}
}
| 33,928 | 42.057107 | 116 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/Stats.java
|
package org.grobid.trainer.evaluation;
import java.util.Set;
import java.util.TreeMap;
import org.grobid.core.exceptions.*;
import org.grobid.core.utilities.TextUtilities;
/**
 * Contains the statistics computation for evaluation.
 *
 * This class is stateful. The statistics need to be recomputed every time
 * something changes (tracked by the flag requiredToRecomputeMetrics).
*/
public final class Stats {
private final TreeMap<String, LabelStat> labelStats;
// State variable to know whether is required to recompute the statistics
private boolean requiredToRecomputeMetrics = true;
private double cumulated_tp = 0;
private double cumulated_fp = 0;
private double cumulated_tn = 0;
private double cumulated_fn = 0;
private double cumulated_f1 = 0.0;
private double cumulated_accuracy = 0.0;
private double cumulated_precision = 0.0;
private double cumulated_recall = 0.0;
private double cumulated_expected = 0;
private int totalValidFields = 0;
public Stats() {
this.labelStats = new TreeMap<>();
}
public Set<String> getLabels() {
return this.labelStats.keySet();
}
public void removeLabel(String label) {
this.labelStats.remove(label);
}
public void incrementFalsePositive(String label) {
this.incrementFalsePositive(label, 1);
}
public void incrementFalsePositive(String label, int count) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
labelStat.incrementFalsePositive(count);
requiredToRecomputeMetrics = true;
}
public void incrementFalseNegative(String label) {
this.incrementFalseNegative(label, 1);
}
public void incrementFalseNegative(String label, int count) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
labelStat.incrementFalseNegative(count);
requiredToRecomputeMetrics = true;
}
public void incrementObserved(String label) {
this.incrementObserved(label, 1);
}
public void incrementObserved(String label, int count) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
labelStat.incrementObserved(count);
requiredToRecomputeMetrics = true;
}
public void incrementExpected(String label) {
this.incrementExpected(label, 1);
}
public void incrementExpected(String label, int count) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
labelStat.incrementExpected(count);
requiredToRecomputeMetrics = true;
}
public LabelStat getLabelStat(String label) {
if (this.labelStats.containsKey(label)) {
return this.labelStats.get(label);
}
LabelStat labelStat = LabelStat.create();
this.labelStats.put(label, labelStat);
requiredToRecomputeMetrics = true;
return labelStat;
}
public int size() {
return this.labelStats.size();
}
public double getPrecision(String label) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
return labelStat.getPrecision();
}
public double getRecall(String label) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
return labelStat.getRecall();
}
public double getF1Score(String label) {
LabelStat labelStat = this.getLabelStat(label);
if (labelStat == null)
throw new GrobidException("Unknown label: " + label);
return labelStat.getF1Score();
}
    /**
     * In order to compute the metrics efficiently, they are all computed at the
     * same time. Since the object is stateful, a flag indicates when the metrics
     * must be recomputed after a change.
     */
public void computeMetrics() {
for (String label : getLabels()) {
if (getLabelStat(label).hasChanged()) {
requiredToRecomputeMetrics = true;
break;
}
}
        if (!requiredToRecomputeMetrics)
            return;
        // reset the cumulated counters, otherwise a recomputation after further
        // increments would double-count the previous values
        cumulated_tp = 0;
        cumulated_fp = 0;
        cumulated_tn = 0;
        cumulated_fn = 0;
        cumulated_f1 = 0.0;
        cumulated_accuracy = 0.0;
        cumulated_precision = 0.0;
        cumulated_recall = 0.0;
        cumulated_expected = 0;
        totalValidFields = 0;
        int totalFields = 0;
for (String label : getLabels()) {
LabelStat labelStat = getLabelStat(label);
totalFields += labelStat.getObserved();
totalFields += labelStat.getFalseNegative();
totalFields += labelStat.getFalsePositive();
}
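        // totalFields counts all decisions (TP + FN + FP over all labels) and is
        // used below to derive a per-label true negative count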
for (String label : getLabels()) {
if (label.equals("<other>") || label.equals("base") || label.equals("O")) {
continue;
}
LabelStat labelStat = getLabelStat(label);
int tp = labelStat.getObserved(); // true positives
int fp = labelStat.getFalsePositive(); // false positives
int fn = labelStat.getFalseNegative(); // false negative
int tn = totalFields - tp - (fp + fn); // true negatives
labelStat.setTrueNegative(tn);
int expected = labelStat.getExpected(); // all expected
            if (expected != 0) {
                totalValidFields++;
cumulated_tp += tp;
cumulated_fp += fp;
cumulated_tn += tn;
cumulated_fn += fn;
cumulated_expected += expected;
cumulated_f1 += labelStat.getF1Score();
cumulated_accuracy += labelStat.getAccuracy();
cumulated_precision += labelStat.getPrecision();
cumulated_recall += labelStat.getRecall();
}
}
requiredToRecomputeMetrics = false;
}
public TreeMap<String, LabelResult> getLabelsResults() {
computeMetrics();
TreeMap<String, LabelResult> result = new TreeMap<>();
for (String label : getLabels()) {
if (label.equals("<other>") || label.equals("base") || label.equals("O")) {
continue;
}
LabelStat labelStat = getLabelStat(label);
LabelResult labelResult = new LabelResult(label);
labelResult.setAccuracy(labelStat.getAccuracy());
labelResult.setPrecision(labelStat.getPrecision());
labelResult.setRecall(labelStat.getRecall());
labelResult.setF1Score(labelStat.getF1Score());
labelResult.setSupport(labelStat.getSupport());
result.put(label, labelResult);
}
return result;
}
public double getMicroAverageAccuracy() {
computeMetrics();
        // average of the per-label accuracies over the labels with a non-zero support
if (totalValidFields == 0)
return 0.0;
else
return Math.min(1.0, cumulated_accuracy / totalValidFields);
}
public double getMacroAverageAccuracy() {
computeMetrics();
double accuracy = 0.0;
if (cumulated_tp + cumulated_fp + cumulated_tn + cumulated_fn != 0.0)
accuracy = ((double) cumulated_tp + cumulated_tn) / (cumulated_tp + cumulated_fp + cumulated_tn + cumulated_fn);
return Math.min(1.0, accuracy);
}
public double getMicroAveragePrecision() {
computeMetrics();
double precision = 0.0;
if (cumulated_tp + cumulated_fp != 0)
precision = cumulated_tp / (cumulated_tp + cumulated_fp);
return Math.min(1.0, precision);
}
public double getMacroAveragePrecision() {
computeMetrics();
if (totalValidFields == 0)
return 0.0;
return Math.min(1.0, cumulated_precision / totalValidFields);
}
public double getMicroAverageRecall() {
computeMetrics();
double recall = 0.0;
if (cumulated_expected != 0.0)
recall = cumulated_tp / cumulated_expected;
return Math.min(1.0, recall);
}
public double getMacroAverageRecall() {
computeMetrics();
if (totalValidFields == 0)
return 0.0;
return Math.min(1.0, cumulated_recall / totalValidFields);
}
public int getTotalValidFields() {
computeMetrics();
return totalValidFields;
}
public double getMicroAverageF1() {
double precision = getMicroAveragePrecision();
double recall = getMicroAverageRecall();
double f1 = 0.0;
if (precision + recall != 0.0)
f1 = (2 * precision * recall) / (precision + recall);
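        // e.g. (illustrative numbers) precision = 0.50 and recall = 1.00 give
        // f1 = (2 * 0.50 * 1.00) / 1.50 = 0.666...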
return f1;
}
public double getMacroAverageF1() {
computeMetrics();
if (totalValidFields == 0)
return 0.0;
return Math.min(1.0, cumulated_f1 / totalValidFields);
}
public String getTextReport() {
computeMetrics();
StringBuilder report = new StringBuilder();
report.append(String.format("\n%-20s %-12s %-12s %-12s %-12s %-7s\n\n",
"label",
"accuracy",
"precision",
"recall",
"f1",
"support"));
long supportSum = 0;
for (String label : getLabels()) {
if (label.equals("<other>") || label.equals("base") || label.equals("O")) {
continue;
}
LabelStat labelStat = getLabelStat(label);
long support = labelStat.getSupport();
report.append(String.format("%-20s %-12s %-12s %-12s %-12s %-7s\n",
label,
TextUtilities.formatTwoDecimals(labelStat.getAccuracy() * 100),
TextUtilities.formatTwoDecimals(labelStat.getPrecision() * 100),
TextUtilities.formatTwoDecimals(labelStat.getRecall() * 100),
TextUtilities.formatTwoDecimals(labelStat.getF1Score() * 100),
String.valueOf(support))
);
supportSum += support;
}
report.append("\n");
report.append(String.format("%-20s %-12s %-12s %-12s %-12s %-7s\n",
"all (micro avg.)",
TextUtilities.formatTwoDecimals(getMicroAverageAccuracy() * 100),
TextUtilities.formatTwoDecimals(getMicroAveragePrecision() * 100),
TextUtilities.formatTwoDecimals(getMicroAverageRecall() * 100),
TextUtilities.formatTwoDecimals(getMicroAverageF1() * 100),
String.valueOf(supportSum)));
report.append(String.format("%-20s %-12s %-12s %-12s %-12s %-7s\n",
"all (macro avg.)",
TextUtilities.formatTwoDecimals(getMacroAverageAccuracy() * 100),
TextUtilities.formatTwoDecimals(getMacroAveragePrecision() * 100),
TextUtilities.formatTwoDecimals(getMacroAverageRecall() * 100),
TextUtilities.formatTwoDecimals(getMacroAverageF1() * 100),
String.valueOf(supportSum)));
return report.toString();
}
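    /*
     * The method below emits a markdown table of the following shape (numbers are
     * illustrative, not real results):
     *
     * | label | precision | recall | f1 | support |
     * |--- |--- |--- |--- |--- |
     * | title | 94.12 | 91.43 | 92.75 | 35 |
     * | **all fields (micro avg.)** | **94.12** | **91.43** | **92.75** | 35 |
     */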
public String getMarkDownReport() {
computeMetrics();
StringBuilder report = new StringBuilder();
report.append("\n| label | precision | recall | f1 | support |\n");
report.append("|--- |--- |--- |--- |--- |\n");
long supportSum = 0;
for (String label : getLabels()) {
if (label.equals("<other>") || label.equals("base") || label.equals("O")) {
continue;
}
LabelStat labelStat = getLabelStat(label);
long support = labelStat.getSupport();
report.append("| "+label+" | "+
TextUtilities.formatTwoDecimals(labelStat.getPrecision() * 100)+" | "+
TextUtilities.formatTwoDecimals(labelStat.getRecall() * 100) +" | "+
TextUtilities.formatTwoDecimals(labelStat.getF1Score() * 100) +" | "+
String.valueOf(support)+" |\n");
supportSum += support;
}
report.append("| | | | | |\n");
report.append("| **all fields (micro avg.)** | **"+
TextUtilities.formatTwoDecimals(getMicroAveragePrecision() * 100)+"** | **"+
TextUtilities.formatTwoDecimals(getMicroAverageRecall() * 100)+"** | **"+
TextUtilities.formatTwoDecimals(getMicroAverageF1() * 100)+"** | "+
String.valueOf(supportSum)+" |\n");
report.append("| all fields (macro avg.) | "+
TextUtilities.formatTwoDecimals(getMacroAveragePrecision() * 100)+" | "+
TextUtilities.formatTwoDecimals(getMacroAverageRecall() * 100)+" | "+
TextUtilities.formatTwoDecimals(getMacroAverageF1() * 100)+" | "+
String.valueOf(supportSum)+" |\n\n");
return report.toString();
}
}
// ----------------------------------------------------------------------------
// grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/EndToEndEvaluation.java
// ----------------------------------------------------------------------------
package org.grobid.trainer.evaluation;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.exceptions.*;
import org.grobid.core.engines.Engine;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.factory.GrobidPoolingFactory;
import org.grobid.trainer.evaluation.utilities.NamespaceContextMap;
import org.grobid.trainer.evaluation.utilities.FieldSpecification;
import java.io.*;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.w3c.dom.*;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathFactory;
import javax.xml.parsers.*;
import org.xml.sax.*;
import javax.xml.xpath.XPathConstants;
import com.rockymadden.stringmetric.similarity.RatcliffObershelpMetric;
import scala.Option;
import me.tongfei.progressbar.*;
//import org.apache.log4j.xml.DOMConfigurator;
/**
* Evaluation against native XML documents. This is an end-to-end evaluation involving
* complete document processing, and therefore a complete set of sequence labelling models.
*
*/
public class EndToEndEvaluation {
private static String xmlInputPath = null;
private Engine engine = null;
public static final int GROBID = 0;
public static final int PDFX = 1;
public static final int CERMINE = 2;
public static final int HEADER = 0;
public static final int CITATION = 1;
public static final int FULLTEXT = 2;
public double fileRatio = 1.0;
public static final double minLevenshteinDistance = 0.8;
public static final double minRatcliffObershelpSimilarity = 0.95;
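    // A field counts as a match under the Levenshtein criterion when the relative
    // similarity (1 - distance / maxLength) reaches minLevenshteinDistance, and under
    // the Ratcliff/Obershelp criterion when the similarity reaches
    // minRatcliffObershelpSimilarity (see evaluationRun below).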
// the list of labels considered for the evaluation
private List<String> headerLabels = null;
private List<String> fulltextLabels = null;
private List<String> citationsLabels = null;
// the list of fields considered for the evaluation
private List<FieldSpecification> headerFields = null;
private List<FieldSpecification> fulltextFields = null;
private List<FieldSpecification> citationsFields = null;
// the type of evaluation XML data - NLM or TEI (obtained via Pub2TEI)
private String inputType = null;
private class GrobidEndToEndTask implements Callable<Boolean> {
private File pdfFile;
public GrobidEndToEndTask(File pdfFile) {
this.pdfFile = pdfFile;
}
@Override
public Boolean call() {
boolean success = true;
Engine engine = null;
try {
engine = Engine.getEngine(true);
GrobidAnalysisConfig config =
GrobidAnalysisConfig.builder()
.consolidateHeader(1)
.consolidateCitations(0)
.withPreprocessImages(true)
// .withSentenceSegmentation(true)
.build();
String tei = engine.fullTextToTEI(this.pdfFile, config);
// write the result in the same directory
File resultTEI = new File(pdfFile.getParent() + File.separator
+ pdfFile.getName().replace(".pdf", ".fulltext.tei.xml"));
FileUtils.writeStringToFile(resultTEI, tei, "UTF-8");
} catch (NoSuchElementException nseExp) {
System.out.println("Could not get an engine from the pool within configured time.");
System.out.println("Could not process: " + this.pdfFile.getPath());
} catch(IOException e) {
System.out.println("DeLFT model labelling failed for file " + this.pdfFile.getPath());
e.printStackTrace();
} catch (Exception e) {
System.out.println("Error when processing: " + this.pdfFile.getPath());
e.printStackTrace();
success = false;
} finally {
if (engine != null) {
GrobidPoolingFactory.returnEngine(engine);
}
}
return Boolean.valueOf(success);
}
}
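    // Each GrobidEndToEndTask borrows an Engine from the pool, writes the TEI result
    // next to the source PDF, and returns the engine in its finally block, so several
    // PDFs can be processed concurrently by the executor created in evaluationGrobid().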
public EndToEndEvaluation(String path, String inType) {
        xmlInputPath = path; // static field, no instance qualifier
this.inputType = inType;
File xmlInputFile = new File(path);
if (!xmlInputFile.exists()) {
System.out.println("Path to evaluation (gold) XML data is not valid !");
xmlInputPath = null;
}
try {
GrobidProperties.getInstance();
System.out.println(">>>>>>>> GROBID_HOME="+GrobidProperties.getGrobidHome());
engine = GrobidFactory.getInstance().createEngine();
}
catch (Exception e) {
e.printStackTrace();
}
// initialize the field specifications and label list
headerFields = new ArrayList<>();
fulltextFields = new ArrayList<>();
citationsFields = new ArrayList<>();
headerLabels = new ArrayList<>();
fulltextLabels = new ArrayList<>();
citationsLabels = new ArrayList<>();
FieldSpecification.setUpFields(headerFields, fulltextFields, citationsFields,
headerLabels, fulltextLabels, citationsLabels);
}
public String evaluationGrobid(boolean forceRun, StringBuilder reportMD) throws Exception {
if (xmlInputPath == null) {
throw new GrobidResourceException("Path to evaluation (gold) XML data is not correctly set");
}
// text report for console
StringBuilder report = new StringBuilder();
if (forceRun) {
// we run Grobid full text extraction on the PubMedCentral data
File input = new File(xmlInputPath);
// we process all tei files in the output directory
            File[] refFiles = input.listFiles(new FilenameFilter() {
                public boolean accept(File dir, String name) {
                    File localDir = new File(dir.getAbsolutePath() + File.separator + name);
                    return localDir.isDirectory();
                }
            });
if (refFiles == null) {
report.append("No file in dataset");
return report.toString();
}
int n = 0;
long start = System.currentTimeMillis();
int fails = 0;
            // reserve one thread for the main process, but never request a pool of size 0
            ExecutorService executor = Executors.newFixedThreadPool(
                Math.max(1, GrobidProperties.getInstance().getMaxConcurrency() - 1));
List<Future<Boolean>> results = new ArrayList<>();
if (refFiles.length > 0) {
// this will preload the models, so that the model loading messages don't mess with the progress bar
engine = Engine.getEngine(true);
}
for (File dir : refFiles) {
// get the PDF file in the directory
File[] refFiles2 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".pdf") || name.endsWith(".PDF");
}
});
if (refFiles2 == null || refFiles2.length == 0) {
System.out.println("warning: no PDF found under " + dir.getPath());
continue;
}
if (refFiles2.length != 1) {
System.out.println("warning: more than one PDF found under " + dir.getPath());
System.out.println("processing only the first one...");
}
final File pdfFile = refFiles2[0];
Future<Boolean> future = executor.submit(new GrobidEndToEndTask(pdfFile));
results.add(future);
n++;
}
//executor.awaitTermination(5, TimeUnit.SECONDS);
System.out.println("\n");
try (ProgressBar pb = new ProgressBar("PDF processing", refFiles.length)) {
for(Future<Boolean> result : results) {
try {
Boolean success = result.get();
if (!success)
fails++;
pb.step();
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
}
executor.shutdown();
System.out.println("\n-------------> GROBID failed on " + fails + " PDF\n");
            double processTime = ((double) System.currentTimeMillis() - start) / 1000.0;
            System.out.println(n + " PDF files processed in " +
                processTime + " seconds, " + processTime / n + " seconds per PDF file\n");
}
// evaluation of the run
long start = System.currentTimeMillis();
report.append("\n======= Header metadata ======= \n");
reportMD.append("\n## Header metadata \n");
report.append(evaluationRun(this.GROBID, this.HEADER, reportMD));
report.append("\n======= Citation metadata ======= \n");
reportMD.append("\n## Citation metadata \n");
report.append(evaluationRun(this.GROBID, this.CITATION, reportMD));
report.append("\n======= Fulltext structures ======= \n");
reportMD.append("\n## Fulltext structures \n\n");
reportMD.append("Fulltext structure contents are complicated to capture from JATS NLM files. They are often normalized and different from the actual PDF content and are can be inconsistent from one document to another. The scores of the following metrics are thus not very meaningful in absolute term, in particular for the strict matching (textual content of the srtructure can be very long). As relative values for comparing different models, they seem however useful.\n\n");
report.append(evaluationRun(this.GROBID, this.FULLTEXT, reportMD));
System.out.println("Evaluation metrics produced in " +
(System.currentTimeMillis() - start) / (1000.00) + " seconds");
reportMD.append("Evaluation metrics produced in " +
(System.currentTimeMillis() - start) / (1000.00) + " seconds\n");
return report.toString();
}
public String evaluationPDFX(boolean forceRun, StringBuilder reportMD) throws Exception {
if (xmlInputPath == null) {
throw new GrobidResourceException("Path to PubMedCentral is not correctly set");
}
// text report for console
StringBuilder report = new StringBuilder();
if (forceRun) {
// we run here PDFX online call on the PDF files...
// TBD
// ...
}
// evaluation of the run
report.append("\n======= Header metadata ======= \n");
reportMD.append("\n## Header metadata \n\n");
report.append(evaluationRun(this.PDFX, this.HEADER, reportMD));
report.append("\n======= Citation metadata ======= \n");
reportMD.append("\n## Citation metadata \n\n");
report.append(evaluationRun(this.PDFX, this.CITATION, reportMD));
report.append("\n======= Fulltext structures ======= \n");
reportMD.append("\n## Fulltext structures \n\n");
report.append(evaluationRun(this.PDFX, this.FULLTEXT, reportMD));
return report.toString();
}
public String evaluationCermine(boolean forceRun, StringBuilder reportMD) throws Exception {
if (xmlInputPath == null) {
throw new GrobidResourceException("Path to PubMedCentral is not correctly set");
}
// text report for console
StringBuilder report = new StringBuilder();
if (forceRun) {
// we run here CERMINE on the PDF files...
// TBD
// ...
}
// evaluation of the run
report.append("\n======= Header metadata ======= \n");
reportMD.append("\n## Header metadata \n\n");
report.append(evaluationRun(this.CERMINE, this.HEADER, reportMD));
report.append("\n======= Citation metadata ======= \n");
reportMD.append("\n## Citation metadata \n\n");
report.append(evaluationRun(this.CERMINE, this.CITATION, reportMD));
report.append("\n======= Fulltext structures ======= \n");
reportMD.append("\n## Fulltext structures \n\n");
report.append(evaluationRun(this.CERMINE, this.FULLTEXT, reportMD));
return report.toString();
}
/**
* This method removes the fields from the evaluation specifications and labels
* NOTE: This modifies the fieldSpecification and labelSpecification lists
*
* @param listFieldNamesToRemove list of fields names to be removed
* @param fieldSpecification field specification list where the fields needs to be removed
* @param labelsSpecification field specification labels list where the fields needs to be removed
*/
protected static void removeFieldsFromEvaluation(List<String> listFieldNamesToRemove, List<FieldSpecification> fieldSpecification, List<String> labelsSpecification) {
for (String fieldNameToRemove : listFieldNamesToRemove) {
List<FieldSpecification> toRemove = new ArrayList<>();
if (CollectionUtils.isNotEmpty(fieldSpecification)) {
for (FieldSpecification field : fieldSpecification) {
if (listFieldNamesToRemove.contains(field.fieldName)) {
toRemove.add(field);
}
}
}
if (toRemove.size() > 0) {
labelsSpecification.remove(fieldNameToRemove);
for (FieldSpecification fulltextField : toRemove) {
fieldSpecification.remove(fulltextField);
}
}
}
}
private String evaluationRun(int runType, int sectionType, StringBuilder reportMD) {
if ( (runType != this.GROBID) && (runType != this.PDFX) && (runType != this.CERMINE) ) {
throw new GrobidException("The run type is not valid for evaluation: " + runType);
}
if ( (sectionType != this.HEADER) && (sectionType != this.CITATION) && (sectionType != this.FULLTEXT) ) {
throw new GrobidException("The section type is not valid for evaluation: " + sectionType);
}
// text report for console
StringBuilder report = new StringBuilder();
        // we introduce four string-matching comparison variants corresponding to different
        // levels of fidelity between observed and expected strings, in line with other
        // evaluations in the literature:
        // - strict, i.e. exact match
        // - soft, matching ignoring punctuation, character case and extra space characters
        // - Levenshtein distance (relative to the max length of fields)
        // - Ratcliff/Obershelp similarity
        // These variants only apply to textual fields, not to numerical and date fields
        // (such as volume, issue, dates).
Stats strictStats = new Stats();
Stats softStats = new Stats();
Stats levenshteinStats = new Stats();
Stats ratcliffObershelpStats = new Stats();
Stats availabilityRatioStat = new Stats();
List<String> labels = null;
List<FieldSpecification> fields = null;
int totalExpectedInstances = 0;
int totalObservedInstances = 0;
int totalCorrectInstancesStrict = 0;
int totalCorrectInstancesSoft = 0;
int totalCorrectInstancesLevenshtein = 0;
int totalCorrectInstancesRatcliffObershelp = 0;
int totalExpectedReferences = 0;
int totalObservedReferences = 0;
int totalExpectedCitations = 0;
int totalObservedCitations = 0;
int totalCorrectObservedCitations = 0;
int totalWrongObservedCitations = 0;
if (sectionType == this.HEADER) {
fields = headerFields;
labels = headerLabels;
}
else if (sectionType == this.CITATION) {
fields = citationsFields;
labels = citationsLabels;
}
else if (sectionType == this.FULLTEXT) {
fields = fulltextFields;
labels = fulltextLabels;
}
        // statistics about citation matching
int match1 = 0;
int match2 = 0;
int match3 = 0;
int match4 = 0;
if (xmlInputPath.toLowerCase().indexOf("pmc") != -1 ||
xmlInputPath.toLowerCase().indexOf("plos") != -1 ||
xmlInputPath.toLowerCase().indexOf("elife") != -1) {
            // for PMC, PLOS and eLife files, some fields might be encoded in the gold XML
            // but absent from the actual document (like PMID, DOI), so we exclude them
removeFieldsFromEvaluation(Arrays.asList("doi", "pmid", "pmcid"), citationsFields, citationsLabels);
}
if (xmlInputPath.toLowerCase().indexOf("elife") != -1) {
// keywords are present in the eLife XML, but not in the PDF !
removeFieldsFromEvaluation(Arrays.asList("keywords"), headerFields, headerLabels);
}
if (xmlInputPath.toLowerCase().indexOf("pmc") != -1) {
// remove availability and funding statements from PMC (not covered, and it would make metrics not comparable over time)
removeFieldsFromEvaluation(Arrays.asList("availability_stmt", "funding_stmt"), fulltextFields, fulltextLabels);
}
File input = new File(xmlInputPath);
// we process all tei files in the output directory
        File[] refFiles = input.listFiles(new FilenameFilter() {
            public boolean accept(File dir, String name) {
                File localDir = new File(dir.getAbsolutePath() + File.separator + name);
                return localDir.isDirectory();
            }
        });
if (refFiles == null) {
report.append("No file in dataset");
return report.toString();
}
// get a factory for SAX parsers
SAXParserFactory spf = SAXParserFactory.newInstance();
Random rand = new Random();
int nbFile = 0;
String typeEval = "";
if (sectionType == this.HEADER)
typeEval = "header";
if (sectionType == this.FULLTEXT)
typeEval = "full text";
if (sectionType == this.CITATION)
typeEval = "citation";
System.out.println("\n");
try (ProgressBar pb = new ProgressBar("Evaluation "+typeEval, refFiles.length)) {
for (File dir : refFiles) {
pb.step();
if (!dir.isDirectory())
continue;
// file ratio filtering
double random = rand.nextDouble();
if (random > fileRatio) {
continue;
}
// get the gold file in the directory
File[] refFiles2 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".nxml") || name.endsWith(".pub2tei.tei.xml");
}
});
if (refFiles2 == null || refFiles2.length == 0) {
// in the case of a bioRxiv NLM/JATS file, we have an .xml extension
refFiles2 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".xml") && !name.endsWith(".tei.xml");
}
});
if (refFiles2 == null || refFiles2.length == 0) {
System.out.println("warning: no evaluation (gold) XML data file found under " + dir.getPath());
continue;
}
}
if (refFiles2.length != 1) {
System.out.println("warning: more than one evaluation (gold) XML data files found under " + dir.getPath());
for(int m=0; m<refFiles2.length;m++) {
System.out.println(refFiles2[m].getPath());
}
System.out.println("processing only the first one...");
}
File goldFile = refFiles2[0];
DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
docFactory.setValidating(false);
try {
DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
docBuilder.setEntityResolver(new EntityResolver() {
public InputSource resolveEntity(String publicId, String systemId) {
return new InputSource(
new ByteArrayInputStream("<?xml version=\"1.0\" encoding=\"UTF-8\"?>".getBytes()));
}
}); // swap in a dummy resolver to neutralise the online DTD
Document gold = docBuilder.parse(goldFile);
// get the results of the evaluated tool for this file
if (runType == this.GROBID) {
// results are produced in a TEI file
File[] refFiles3 = dir.listFiles(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.endsWith(".fulltext.tei.xml");
}
});
if ( (refFiles3 == null) || (refFiles3.length == 0) ) {
System.out.println("warning: no Grobid TEI file found under " + dir.getPath());
continue;
}
if (refFiles3.length != 1) {
System.out.println("warning: more than one Grobid TEI files found under " + dir.getPath());
System.out.println("processing only the first one...");
}
File teiFile = refFiles3[0];
Document tei = docBuilder.parse(teiFile);
XPathFactory xpf = XPathFactory.newInstance();
XPath xp = xpf.newXPath();
                        // explicit indication of the default namespace
                        Map<String, String> mappings = new HashMap<>();
                        mappings.put("tei", "http://www.tei-c.org/ns/1.0");
xp.setNamespaceContext(new NamespaceContextMap(mappings));
if (sectionType == this.CITATION) {
// we start by identifying each expected citation
// the first FieldSpecification object for the citation is the base path for
// each citation structure in the corresponding XML
FieldSpecification base = fields.get(0);
String path = null;
if (inputType.equals("nlm"))
path = base.nlmPath.get(0);
else
path = base.grobidPath.get(0);
NodeList nodeList = (NodeList) xp.compile(path).
evaluate(gold.getDocumentElement(), XPathConstants.NODESET);
int nbCitationsGold = nodeList.getLength();
totalExpectedInstances += nbCitationsGold;
List<Map<String,List<String>>> goldCitations =
new ArrayList<Map<String,List<String>>>();
// "signature" of the citations for this file
// level 1 signature: titre + date
List<String> goldCitationSignaturesLevel1 = new ArrayList<>();
// level 2 signature: all authors names + date
List<String> goldCitationSignaturesLevel2 = new ArrayList<>();
// level 3 signature: journal + volume + page
List<String> goldCitationSignaturesLevel3 = new ArrayList<>();
// level 4 signature: "fuzzy titre" + date + at least one of auteurs or first page
List<String> goldCitationSignaturesLevel4 = new ArrayList<>();
// map between citation id from gold and from grobid (if matching between the two citations)
Map<String, String> idMap = new HashMap<>();
Map<String, String> reverseIdMap = new HashMap<>();
List<String> goldIds = new ArrayList<>();
for (int i = 0; i < nodeList.getLength(); i++) {
                                // sometimes we just have the raw citation below this, so we will have to further
                                // test if we have something structured
Map<String,List<String>> fieldsValues = new HashMap<>();
Node node = nodeList.item(i);
int p = 0;
for(FieldSpecification field : fields) {
String fieldName = field.fieldName;
if (fieldName.equals("base")) {
//p++;
continue;
}
List<String> subpaths = null;
if (inputType.equals("nlm")) {
subpaths = field.nlmPath;
} else if (inputType.equals("tei")) {
subpaths = field.grobidPath;
}
if (subpaths == null)
continue;
for(String subpath : subpaths) {
NodeList nodeList2 = (NodeList) xp.compile(subpath).
evaluate(node, XPathConstants.NODESET);
List<String> goldResults = new ArrayList<>();
for (int j = 0; j < nodeList2.getLength(); j++) {
String content = nodeList2.item(j).getNodeValue();
if ((content != null) && (content.trim().length() > 0)) {
if (fieldName.equals("doi") || fieldName.equals("pmid") || fieldName.equals("pmcid")) {
content = identifierNormalization(content);
}
goldResults.add(content);
}
}
if (goldResults.size() > 0) {
fieldsValues.put(fieldName, goldResults);
if (!fieldName.equals("id")) {
strictStats.incrementExpected(fieldName);
softStats.incrementExpected(fieldName);
levenshteinStats.incrementExpected(fieldName);
ratcliffObershelpStats.incrementExpected(fieldName);
}
}
}
p++;
}
// signature for this citation
String goldTitle = "";
List<String> goldResults = fieldsValues.get("title");
if (goldResults != null) {
for(String res : goldResults) {
goldTitle += " " + res;
}
}
goldTitle = basicNormalization(goldTitle);
String goldTitleSoft = removeFullPunct(goldTitle);
// source title / inTitle information
String goldInTitle = "";
List<String> inTitleResults = fieldsValues.get("inTitle");
if (inTitleResults != null) {
for(String res : inTitleResults) {
goldInTitle += " " + res;
}
}
goldInTitle = basicNormalization(goldInTitle);
String goldInTitleSoft = removeFullPunct(goldInTitle);
// first author last name only
List<String> authorResults = fieldsValues.get("first_author");
String goldAuthor = "";
if ((authorResults != null) && (authorResults.size() > 0))
goldAuthor = authorResults.get(0);
goldAuthor = basicNormalization(goldAuthor);
String goldAuthorSoft = removeFullPunct(goldAuthor);
// all authors last names
String goldAuthors = "";
List<String> authorsResults = fieldsValues.get("authors");
if ((authorsResults != null) && (authorsResults.size() > 0)) {
for(String aut : authorsResults)
goldAuthors += aut;
}
goldAuthors = basicNormalization(goldAuthors);
String goldAuthorsSoft = removeFullPunct(goldAuthors);
// date of publication
List<String> dateResults = fieldsValues.get("date");
String goldDate = "";
if ((dateResults != null) && (dateResults.size() > 0))
goldDate = dateResults.get(0);
goldDate = basicNormalization(goldDate);
// volume
List<String> volumeResults = fieldsValues.get("volume");
String goldVolume = "";
if ((volumeResults != null) && (volumeResults.size() > 0))
goldVolume = volumeResults.get(0);
goldVolume = basicNormalization(goldVolume);
// first page
List<String> pageResults = fieldsValues.get("page");
String goldPage = "";
if ((pageResults != null) && (pageResults.size() > 0))
goldPage = pageResults.get(0);
goldPage = basicNormalization(goldPage);
// identifier
List<String> idResults = fieldsValues.get("id");
String goldId = "";
if ((idResults != null) && (idResults.size() > 0))
goldId = idResults.get(0);
goldId = basicNormalization(goldId);
goldIds.add(goldId);
                                /*
                                 * We introduce 4 sequential alignment rules to match an extracted citation with an expected citation.
                                 * If the first rule does not match, we test the second one, and so on until the last one.
                                 * If all rules fail, the extracted citation is considered a false positive for its non-empty fields.
                                 * - first rule: matching of the "soft" title (title ignoring case, punctuation and space mismatches) and year
                                 * - second rule: matching of all "soft" authors and year
                                 * - third rule: matching of "soft" inTitle (title of journal or conference), volume and first page
                                 * - fourth rule: matching of first author last name and title, or inTitle if title is empty
                                 */
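                                /*
                                 * Illustrative sketch (hypothetical values; the exact strings depend on
                                 * basicNormalization/removeFullPunct): a gold entry with title "Deep learning.",
                                 * date "2015" and authors ["LeCun", "Bengio", "Hinton"] yields
                                 * signature1 = soft(title) + "2015" and signature2 = soft(concatenated author
                                 * names) + "2015", where soft() stands for the punctuation-stripped form.
                                 */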
String signature1 = null;
if ( (goldTitleSoft.length()>0) && (goldDate.length()>0) ) {
signature1 = goldTitleSoft + goldDate;
//signature1 = signature1.replaceAll("[^\\x00-\\x7F]", "");
}
String signature2 = null;
if ( (goldAuthorsSoft.length()>0) && (goldDate.length()>0) ) {
signature2 = goldAuthorsSoft + goldDate;
//signature2 = signature2.replaceAll("[^\\x00-\\x7F]", "");
}
String signature3 = null;
if ( (goldInTitleSoft.length()>0) && (goldVolume.length()>0) && (goldPage.length()>0)) {
signature3 = goldInTitleSoft + goldVolume + goldPage;
//signature3 = signature3.replaceAll("[^\\x00-\\x7F]", "");
}
String signature4 = null;
if ( ((goldInTitleSoft.length()>0) || (goldTitleSoft.length()>0))
&& (goldAuthorSoft.length()>0) ) {
if (goldTitleSoft.length()>0)
signature4 = goldAuthorSoft + goldTitleSoft;
else
signature4 = goldAuthorSoft + goldInTitleSoft;
}
goldCitationSignaturesLevel1.add(signature1);
goldCitationSignaturesLevel2.add(signature2);
goldCitationSignaturesLevel3.add(signature3);
goldCitationSignaturesLevel4.add(signature4);
goldCitations.add(fieldsValues);
}
// get the Grobid citations
path = base.grobidPath.get(0);
nodeList = (NodeList) xp.compile(path).
evaluate(tei.getDocumentElement(), XPathConstants.NODESET);
int nbCitationsGrobid = nodeList.getLength();
//if (nbCitationsGold != nbCitationsGrobid)
//System.out.println(dir.getPath() + " references: " + nbCitationsGold + " (expected) / " + nbCitationsGrobid + " (grobid)");
totalObservedInstances += nbCitationsGrobid;
List<Map<String,List<String>>> grobidCitations =
new ArrayList<Map<String,List<String>>>();
for (int i = 0; i < nodeList.getLength(); i++) {
Map<String,List<String>> fieldsValues = new HashMap<String,List<String>>();
Node node = nodeList.item(i);
int p = 0;
for(FieldSpecification field : fields) {
String fieldName = field.fieldName;
if (fieldName.equals("base")) {
//p++;
continue;
}
for(String subpath : field.grobidPath) {
NodeList nodeList2 = (NodeList) xp.compile(subpath).
evaluate(node, XPathConstants.NODESET);
List<String> grobidResults = new ArrayList<>();
for (int j = 0; j < nodeList2.getLength(); j++) {
String content = nodeList2.item(j).getNodeValue();
if ((content != null) && (content.trim().length() > 0)) {
if (fieldName.equals("doi") || fieldName.equals("pmid") || fieldName.equals("pmcid")) {
content = identifierNormalization(content);
}
grobidResults.add(content);
}
}
if (grobidResults.size() > 0) {
fieldsValues.put(fieldName, grobidResults);
}
}
p++;
}
grobidCitations.add(fieldsValues);
}
for(Map<String,List<String>> grobidCitation: grobidCitations) {
String grobidTitle = "";
List<String> titleResults = grobidCitation.get("title");
if (titleResults != null) {
for(String res : titleResults) {
grobidTitle += " " + res;
}
}
grobidTitle = basicNormalization(grobidTitle);
String grobidTitleSoft = removeFullPunct(grobidTitle);
List<String> inTitleResults = grobidCitation.get("inTitle");
String grobidInTitle = "";
if (inTitleResults != null) {
for(String res : inTitleResults) {
grobidInTitle += " " + res;
}
}
grobidInTitle = basicNormalization(grobidInTitle);
String grobidInTitleSoft = removeFullPunct(grobidInTitle);
// first author last name only
List<String> authorResults = grobidCitation.get("first_author");
String grobidAuthor = "";
if ((authorResults != null) && (authorResults.size() > 0))
grobidAuthor = authorResults.get(0);
grobidAuthor = basicNormalization(grobidAuthor);
String grobidAuthorSoft = removeFullPunct(grobidAuthor);
// all authors last names
String grobidAuthors = "";
List<String> authorsResults = grobidCitation.get("authors");
if ((authorsResults != null) && (authorsResults.size() > 0)) {
for(String aut : authorsResults)
grobidAuthors += aut;
}
grobidAuthors = basicNormalization(grobidAuthors);
String grobidAuthorsSoft = removeFullPunct(grobidAuthors);
// date of publication
List<String> dateResults = grobidCitation.get("date");
String grobidDate = "";
if ((dateResults != null) && (dateResults.size() > 0))
grobidDate = dateResults.get(0);
grobidDate = basicNormalization(grobidDate);
// volume
List<String> volumeResults = grobidCitation.get("volume");
String grobidVolume = "";
if ((volumeResults != null) && (volumeResults.size() > 0))
grobidVolume = volumeResults.get(0);
grobidVolume = basicNormalization(grobidVolume);
// first page
List<String> pageResults = grobidCitation.get("page");
String grobidPage = "";
if ((pageResults != null) && (pageResults.size() > 0))
grobidPage = pageResults.get(0);
grobidPage = basicNormalization(grobidPage);
// identifier
List<String> idResults = grobidCitation.get("id");
String grobidId = "";
if ((idResults != null) && (idResults.size() > 0))
grobidId = idResults.get(0);
grobidId = basicNormalization(grobidId);
// DOI
List<String> doiResults = grobidCitation.get("doi");
String grobidDOI = "";
if ((doiResults != null) && (doiResults.size() > 0))
grobidDOI = doiResults.get(0);
grobidDOI = identifierNormalization(grobidDOI);
// PMID
List<String> pmidResults = grobidCitation.get("pmid");
String grobidPMID = "";
if ((pmidResults != null) && (pmidResults.size() > 0))
grobidPMID = pmidResults.get(0);
grobidPMID = identifierNormalization(grobidPMID);
// PMCID
List<String> pmcidResults = grobidCitation.get("pmcid");
String grobidPMCID = "";
if ((pmcidResults != null) && (pmcidResults.size() > 0))
grobidPMCID = pmcidResults.get(0);
grobidPMCID = identifierNormalization(grobidPMCID);
String grobidSignature1 = null;
if ( (grobidTitleSoft.length()>0) && (grobidDate.length()>0) ) {
grobidSignature1 = grobidTitleSoft + grobidDate;
//grobidSignature1 = grobidSignature1.replaceAll("[^\\x00-\\x7F]", "");
}
String grobidSignature2 = null;
if ( (grobidAuthorsSoft.length()>0) && (grobidDate.length()>0) ) {
grobidSignature2 = grobidAuthorsSoft + grobidDate;
//grobidSignature2 = grobidSignature2.replaceAll("[^\\x00-\\x7F]", "");
}
String grobidSignature3 = null;
if ( (grobidInTitleSoft.length()>0) && (grobidVolume.length()>0)
&& (grobidPage.length()>0)) {
grobidSignature3 = grobidInTitleSoft + grobidVolume + grobidPage;
//grobidSignature3 = grobidSignature3.replaceAll("[^\\x00-\\x7F]", "");
}
String grobidSignature4 = null;
if ( ((grobidInTitleSoft.length()>0) || (grobidTitleSoft.length()>0))
&& (grobidAuthorSoft.length()>0) ) {
if (grobidTitleSoft.length()>0)
grobidSignature4 = grobidAuthorSoft + grobidTitleSoft;
else
grobidSignature4 = grobidAuthorSoft + grobidInTitleSoft;
//grobidSignature4 = grobidSignature4.replaceAll("[^\\x00-\\x7F]", "");
}
int indexGold = -1;
// try to match an expected citation with the signature
if ( ((grobidSignature1 != null) && (grobidSignature1.length() > 0)) ||
((grobidSignature2 != null) && (grobidSignature2.length() > 0)) ||
((grobidSignature3 != null) && (grobidSignature3.length() > 0)) ||
((grobidSignature4 != null) && (grobidSignature4.length() > 0)))
{
if ((grobidSignature1 != null) &&
goldCitationSignaturesLevel1.contains(grobidSignature1)) {
// we have a citation-level match and we can evaluate the fields
indexGold = goldCitationSignaturesLevel1.indexOf(grobidSignature1);
match1++;
}
else if ((grobidSignature2 != null) &&
goldCitationSignaturesLevel2.contains(grobidSignature2)) {
// we have a citation-level match and we can evaluate the fields
indexGold = goldCitationSignaturesLevel2.indexOf(grobidSignature2);
match2++;
}
else if ((grobidSignature3 != null) &&
goldCitationSignaturesLevel3.contains(grobidSignature3)) {
// we have a citation-level match and we can evaluate the fields
indexGold = goldCitationSignaturesLevel3.indexOf(grobidSignature3);
match3++;
}
else if ((grobidSignature4 != null) &&
goldCitationSignaturesLevel4.contains(grobidSignature4)) {
// we have a citation-level match and we can evaluate the fields
indexGold = goldCitationSignaturesLevel4.indexOf(grobidSignature4);
match4++;
}
if (indexGold != -1) {
                                        // we have aligned an extracted citation with an expected one
boolean allGoodStrict = true;
boolean allGoodSoft = true;
boolean allGoodLevenshtein = true;
boolean allGoodRatcliffObershelp = true;
Map<String,List<String>> goldCitation = goldCitations.get(indexGold);
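                                        // remove the matched gold citation and its signatures from all lists,
                                        // so the same expected citation cannot be aligned a second time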
goldCitationSignaturesLevel1.remove(indexGold);
goldCitationSignaturesLevel2.remove(indexGold);
goldCitationSignaturesLevel3.remove(indexGold);
goldCitationSignaturesLevel4.remove(indexGold);
goldCitations.remove(indexGold);
if (goldCitation.get("id") != null && goldCitation.get("id").size() > 0) {
idMap.put(goldCitation.get("id").get(0), grobidId);
reverseIdMap.put(grobidId, goldCitation.get("id").get(0));
int p = 0;
for(FieldSpecification field : fields) {
String label = field.fieldName;
if (label.equals("base") || label.equals("id")) {
//p++;
continue;
}
List<String> grobidResults = grobidCitation.get(label);
String grobidResult = "";
if (grobidResults != null) {
for(String res : grobidResults) {
grobidResult += " " + res;
}
}
grobidResult = basicNormalization(grobidResult);
List<String> goldResults = goldCitation.get(label);
String goldResult = "";
if (goldResults != null) {
for(String res : goldResults) {
goldResult += " " + res;
}
}
goldResult = basicNormalization(goldResult);
// strict
if ((goldResult.length()>0) && (goldResult.equals(grobidResult))) {
strictStats.incrementObserved(label);
}
else {
if ( (grobidResult.length() > 0) ) {
strictStats.incrementFalsePositive(label);
allGoodStrict = false;
}
else if (goldResult.length()>0) {
strictStats.incrementFalseNegative(label);
allGoodStrict = false;
}
}
// soft
String goldResultSoft = goldResult;
String grobidResultSoft = grobidResult;
if (field.isTextual) {
goldResultSoft = removeFullPunct(goldResult);
grobidResultSoft = removeFullPunct(grobidResult);
}
if ((goldResultSoft.length() > 0) &&
(goldResultSoft.equals(grobidResultSoft)) ) {
softStats.incrementObserved(label);
}
else {
if (grobidResultSoft.length() > 0) {
softStats.incrementFalsePositive(label);
allGoodSoft = false;
}
else if (goldResultSoft.length() > 0) {
softStats.incrementFalseNegative(label);
allGoodSoft = false;
}
}
// Levenshtein
double pct = 0.0;
if ((goldResultSoft.length() > 0) && goldResult.equals(grobidResult))
pct = 1.0;
if (field.isTextual) {
int distance =
TextUtilities.getLevenshteinDistance(goldResult, grobidResult);
                                                // Levenshtein distance is an integer value, not a percentage... however
                                                // articles usually report it as a percentage, so we follow
                                                // the straightforward formula:
int bigger = Math.max(goldResult.length(), grobidResult.length());
pct = (double)(bigger - distance) / bigger;
}
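                                            // e.g. (illustrative) "hello world" vs "hello wurld": distance = 1,
                                            // bigger = 11, pct = (11 - 1) / 11 = 0.909... >= the 0.8 threshold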
if ((goldResultSoft.length() > 0) && (pct >= minLevenshteinDistance)) {
levenshteinStats.incrementObserved(label);
}
else {
if (grobidResultSoft.length() > 0) {
levenshteinStats.incrementFalsePositive(label);
allGoodLevenshtein = false;
}
else if (goldResultSoft.length() > 0) {
levenshteinStats.incrementFalseNegative(label);
allGoodLevenshtein = false;
}
}
// RatcliffObershelp
Double similarity = 0.0;
if ((goldResultSoft.length() > 0) && goldResult.equals(grobidResult))
similarity = 1.0;
if (field.isTextual) {
if ( (goldResult.length() > 0) && (grobidResult.length() > 0) ) {
Option<Object> similarityObject =
RatcliffObershelpMetric.compare(goldResult, grobidResult);
if ( (similarityObject != null) && (similarityObject.get() != null) )
similarity = (Double)similarityObject.get();
}
}
if ((goldResultSoft.length() > 0) &&
(similarity >= minRatcliffObershelpSimilarity)) {
ratcliffObershelpStats.incrementObserved(label);
}
else {
if (grobidResultSoft.length() > 0) {
ratcliffObershelpStats.incrementFalsePositive(label);
allGoodRatcliffObershelp = false;
}
else if (goldResultSoft.length() > 0) {
ratcliffObershelpStats.incrementFalseNegative(label);
allGoodRatcliffObershelp = false;
}
}
p++;
}
if (allGoodStrict) {
totalCorrectInstancesStrict++;
}
if (allGoodSoft) {
totalCorrectInstancesSoft++;
}
if (allGoodLevenshtein) {
totalCorrectInstancesLevenshtein++;
}
if (allGoodRatcliffObershelp) {
totalCorrectInstancesRatcliffObershelp++;
}
}
}
else {
                                    // we have a Grobid extracted citation, but no match with the
                                    // expected ones -> false positives for all its non-empty fields
int p = 0;
for(FieldSpecification field : fields) {
String label = field.fieldName;
if (label.equals("base")) {
//p++;
continue;
}
List<String> grobidResults = grobidCitation.get(label);
if ( (grobidResults == null) || (grobidResults.size() == 0) ) {
p++;
continue;
}
strictStats.incrementFalsePositive(label);
softStats.incrementFalsePositive(label);
levenshteinStats.incrementFalsePositive(label);
ratcliffObershelpStats.incrementFalsePositive(label);
p++;
}
}
}
}
// reference context matching
if ( (sectionType == this.CITATION) && (runType == this.GROBID) ) {
// list of identifiers present in the bibliographical references
List<String> refBibRefIds = new ArrayList<>();
List<String> grobidBibRefIds = new ArrayList<>();
String subpath = null;
if (inputType.equals("nlm")) {
subpath = FieldSpecification.nlmBibReferenceId;
} else if (inputType.equals("tei")) {
subpath = FieldSpecification.grobidBibReferenceId;
}
// gold
nodeList = (NodeList) xp.compile(subpath).
evaluate(gold.getDocumentElement(), XPathConstants.NODESET);
//System.out.println(path + ": " + nodeList.getLength() + " nodes");
int nbgoldResults = nodeList.getLength();
for (int i = 0; i < nodeList.getLength(); i++) {
refBibRefIds.add(nodeList.item(i).getNodeValue());
}
totalExpectedReferences += refBibRefIds.size();
// grobid
nodeList = (NodeList) xp.compile(FieldSpecification.grobidBibReferenceId).
evaluate(tei.getDocumentElement(), XPathConstants.NODESET);
//System.out.println(FieldSpecification.grobidBibReferenceId + ": " + nodeList.getLength() + " nodes");
for (int i = 0; i < nodeList.getLength(); i++) {
grobidBibRefIds.add(nodeList.item(i).getNodeValue());
}
totalObservedReferences += grobidBibRefIds.size();
                            // Map associating the identifiers present in the reference callouts with their number of occurrences
Map<String, Integer> refCalloutRefIds = new HashMap<>();
Map<String, Integer> grobidCalloutRefIds = new HashMap<>();
if (inputType.equals("nlm")) {
subpath = FieldSpecification.nlmCitationContextId;
} else if (inputType.equals("tei")) {
subpath = FieldSpecification.grobidCitationContextId;
}
// gold
nodeList = (NodeList) xp.compile(subpath).
evaluate(gold.getDocumentElement(), XPathConstants.NODESET);
nbgoldResults = nodeList.getLength();
for (int i = 0; i < nodeList.getLength(); i++) {
String localIds = nodeList.item(i).getNodeValue();
if ( (localIds != null) && (localIds.length()>0) ) {
// we might have several identifiers, separated by space: e.g.:
// <xref rid="bb0010 bb0090 bb0125 bb0135 bb0150" ref-type="bibr">Beauregard et al., 2008; Jordan and Miller, 2009;
// Symer and Boeke, 2010; Tenaillon et al., 2010; Wolf and Goff, 2008</xref>
String[] theIds = localIds.split(" ");
for(int j = 0 ; j < theIds.length; j++) {
String localId = theIds[j];
localId = localId.replace("#", "");
if (refCalloutRefIds.get(localId) == null)
refCalloutRefIds.put(localId,Integer.valueOf(1));
else {
int val = refCalloutRefIds.get(localId).intValue();
refCalloutRefIds.put(localId, Integer.valueOf(val+1));
}
totalExpectedCitations++;
}
}
}
// grobid
nodeList = (NodeList) xp.compile(FieldSpecification.grobidCitationContextId).
evaluate(tei.getDocumentElement(), XPathConstants.NODESET);
//System.out.println(FieldSpecification.grobidCitationContextId + ": " + nodeList.getLength() + " nodes");
for (int i = 0; i < nodeList.getLength(); i++) {
String localId = nodeList.item(i).getNodeValue();
localId = localId.replace("#", "");
if ( (localId != null) && (localId.length()>0) ) {
if (grobidCalloutRefIds.get(localId) == null)
grobidCalloutRefIds.put(localId, Integer.valueOf(1));
else {
int val = grobidCalloutRefIds.get(localId).intValue();
grobidCalloutRefIds.put(localId, Integer.valueOf(val+1));
}
totalObservedCitations++;
}
}
                            // simple estimation of correct citation identifications by checking overlapping ids and the id map
int nbCorrect = 0;
int nbWrong = 0;
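                            // e.g. (illustrative) if GROBID links a reference 3 times in the text while the
                            // gold file links the matched reference only twice, 2 callouts count as correct
                            // and 1 as wrong; callouts to references with no gold match are all wrong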
for (Map.Entry<String, Integer> entry : grobidCalloutRefIds.entrySet()) {
int nbGrobidId = entry.getValue();
int nbRefId = 0;
if ((refCalloutRefIds != null) && (reverseIdMap.get(entry.getKey()) != null)) {
if (refCalloutRefIds.get(reverseIdMap.get(entry.getKey())) != null) {
nbRefId = refCalloutRefIds.get(reverseIdMap.get(entry.getKey()));
}
if (nbGrobidId > nbRefId) {
nbWrong += nbGrobidId - nbRefId;
nbCorrect += nbRefId;
} else
nbCorrect += nbGrobidId;
} else {
// all wrong matches
nbWrong += nbGrobidId;
}
}
totalCorrectObservedCitations += nbCorrect;
totalWrongObservedCitations += nbWrong;
}
// cleaning
strictStats.removeLabel("id");
softStats.removeLabel("id");
                            levenshteinStats.removeLabel("id");
ratcliffObershelpStats.removeLabel("id");
} else if (sectionType == this.HEADER) {
// HEADER structures
int p = 0;
boolean allGoodStrict = true;
boolean allGoodSoft = true;
boolean allGoodLevenshtein = true;
boolean allGoodRatcliffObershelp = true;
for(FieldSpecification field : fields) {
String fieldName = field.fieldName;
List<String> grobidResults = new ArrayList<>();
for(String path : field.grobidPath) {
NodeList nodeList = (NodeList) xp.compile(path).
evaluate(tei.getDocumentElement(), XPathConstants.NODESET);
for (int i = 0; i < nodeList.getLength(); i++) {
grobidResults.add((nodeList.item(i).getNodeValue().replaceAll(" +", " ")));
}
}
//if (!field.hasMultipleValue)
{
String grobidResult = "";
for(String res : grobidResults)
grobidResult += " " + res;
// basic normalisation
grobidResult = basicNormalization(grobidResult);
//System.out.println("Grobid: " + fieldName + ":\t" + grobidResult);
grobidResults = new ArrayList<>();
grobidResults.add(grobidResult);
}
/*if (fieldName.equals("title") && (grobidResults.size() == 0 || grobidResults.get(0).length() == 0))
System.out.println(dir.getPath() + " no GROBID title");
if (fieldName.equals("authors") && (grobidResults.size() == 0 || grobidResults.get(0).length() == 0))
System.out.println(dir.getPath() + " no authors");
if (fieldName.equals("abstract") && (grobidResults.size() == 0 || grobidResults.get(0).length() == 0))
System.out.println(dir.getPath() + " no abstract");
*/
List<String> goldResults = new ArrayList<>();
int nbGoldResults = 0;
List<String> subpaths = null;
if (inputType.equals("nlm")) {
subpaths = field.nlmPath;
} else if (inputType.equals("tei")) {
subpaths = field.grobidPath;
}
if (subpaths == null)
continue;
for(String path : subpaths) {
NodeList nodeList = (NodeList) xp.compile(path).
evaluate(gold.getDocumentElement(), XPathConstants.NODESET);
//System.out.println(path + ": " + nodeList.getLength() + " nodes");
nbGoldResults = nodeList.getLength();
for (int i = 0; i < nodeList.getLength(); i++) {
goldResults.add(nodeList.item(i).getNodeValue().replaceAll(" +", " "));
}
}
//if (!field.hasMultipleValue)
{
String goldResult = "";
for(String res : goldResults)
goldResult += " " + res;
// basic normalisation
goldResult = basicNormalization(goldResult);
if (fieldName.equals("abstract")) {
// some additional cleaning for abstract is required, because PMC and bioRxiv
// tends to put the useless abstract title "Abstract" together with the abstract
if (goldResult.toLowerCase().startsWith("abstract") || goldResult.toLowerCase().startsWith("summary")) {
goldResult = goldResult.replaceAll("(?i)^(abstract)|(summary)(\\n)?( )?", "");
}
}
//System.out.println("gold: " + fieldName + ":\t" + goldResult);
goldResults = new ArrayList<>();
goldResults.add(goldResult);
nbGoldResults = 1;
}
int g = 0;
for (String goldResult : goldResults) {
String grobidResult = "";
if (g < grobidResults.size())
grobidResult = grobidResults.get(g);
if (goldResult.trim().length() == 0 && grobidResult.trim().length() == 0) {
g++;
continue;
}
// nb expected results
if (goldResult.trim().length() > 0) {
strictStats.incrementExpected(fieldName);
softStats.incrementExpected(fieldName);
levenshteinStats.incrementExpected(fieldName);
ratcliffObershelpStats.incrementExpected(fieldName);
}
// strict
if ((goldResult.trim().length() > 0) && goldResult.equals(grobidResult)) {
strictStats.incrementObserved(fieldName);
}
else {
/*System.out.println("gold: " + fieldName);
System.out.println("gold: " + goldResult);
System.out.println("grobid: " + grobidResult);*/
if (grobidResult.length() > 0) {
strictStats.incrementFalsePositive(fieldName);
allGoodStrict = false;
}
else if (goldResult.length() > 0) {
strictStats.incrementFalseNegative(fieldName);
allGoodStrict = false;
}
}
// soft
String goldResultSoft = goldResult;
String grobidResultSoft = grobidResult;
if (field.isTextual) {
goldResultSoft = removeFullPunct(goldResult);
grobidResultSoft = removeFullPunct(grobidResult);
}
if ((goldResult.trim().length() > 0) && goldResultSoft.equals(grobidResultSoft)) {
softStats.incrementObserved(fieldName);
}
else {
//System.out.println("\n" + teiFile.getPath());
//System.out.println("gold:" + fieldName);
//System.out.println("gold: " + goldResultSoft);
//System.out.println("grobid: " + grobidResultSoft);
//System.out.println("gold:" + goldResult);
//System.out.println("grobid:" + grobidResult);
if (grobidResultSoft.length() > 0) {
softStats.incrementFalsePositive(fieldName);
allGoodSoft = false;
}
else if (goldResultSoft.length() > 0){
softStats.incrementFalseNegative(fieldName);
allGoodSoft = false;
}
}
// Levenshtein
double pct = 0.0;
if (goldResult.equals(grobidResult))
pct = 1.0;
if (field.isTextual) {
int distance = TextUtilities.getLevenshteinDistance(goldResult, grobidResult);
                                            // Levenshtein distance is an integer value, not a percentage... however
                                            // articles usually report it as a percentage, so we follow
                                            // the straightforward formula:
int bigger = Math.max(goldResult.length(), grobidResult.length());
pct = (double)(bigger - distance) / bigger;
}
if ((goldResult.length() > 0) && (pct >= minLevenshteinDistance)) {
levenshteinStats.incrementObserved(fieldName);
}
else {
if (grobidResultSoft.length() > 0) {
levenshteinStats.incrementFalsePositive(fieldName);
allGoodLevenshtein = false;
}
else if (goldResultSoft.length() > 0){
levenshteinStats.incrementFalseNegative(fieldName);
allGoodLevenshtein = false;
}
}
// RatcliffObershelp
Double similarity = 0.0;
if (goldResult.trim().equals(grobidResult.trim()))
similarity = 1.0;
if (field.isTextual) {
if ( (goldResult.length() > 0) && (grobidResult.length() > 0) ) {
Option<Object> similarityObject =
RatcliffObershelpMetric.compare(goldResult, grobidResult);
if ( (similarityObject != null) && (similarityObject.get() != null) )
similarity = (Double)similarityObject.get();
}
}
if ((goldResult.length() > 0) && (similarity >= minRatcliffObershelpSimilarity)) {
ratcliffObershelpStats.incrementObserved(fieldName);
}
else {
if (grobidResultSoft.length() > 0) {
ratcliffObershelpStats.incrementFalsePositive(fieldName);
allGoodRatcliffObershelp = false;
}
else if (goldResultSoft.length() > 0){
ratcliffObershelpStats.incrementFalseNegative(fieldName);
allGoodRatcliffObershelp = false;
}
}
g++;
}
p++;
}
totalExpectedInstances++;
if (allGoodStrict) {
totalCorrectInstancesStrict++;
}
if (allGoodSoft) {
totalCorrectInstancesSoft++;
}
if (allGoodLevenshtein) {
totalCorrectInstancesLevenshtein++;
}
if (allGoodRatcliffObershelp) {
totalCorrectInstancesRatcliffObershelp++;
}
}
else if (sectionType == this.FULLTEXT) {
// full text structures
int p = 0;
boolean allGoodStrict = true;
boolean allGoodSoft = true;
boolean allGoodLevenshtein = true;
boolean allGoodRatcliffObershelp = true;
boolean grobidAvailabilityStatement = false;
boolean goldAvailabilityStatement = false;
for(FieldSpecification field : fields) {
String fieldName = field.fieldName;
List<String> grobidResults = new ArrayList<>();
for(String path : field.grobidPath) {
NodeList nodeList = (NodeList) xp.compile(path).
evaluate(tei.getDocumentElement(), XPathConstants.NODESET);
for (int i = 0; i < nodeList.getLength(); i++) {
String normalizedString = basicNormalizationFullText(nodeList.item(i).getNodeValue(), fieldName);
if (normalizedString != null && normalizedString.length()>0)
grobidResults.add(normalizedString);
}
}
/*boolean first = true;
System.out.print("\n"+fieldName+" - ");
System.out.print("\ngrobidResults:\t");
for(String res : grobidResults) {
if (!first)
System.out.print(" | ");
else
first = false;
System.out.print(res);
}
System.out.println("");*/
List<String> goldResults = new ArrayList<>();
int nbgoldResults = 0;
List<String> subpaths = null;
if (inputType.equals("nlm")) {
subpaths = field.nlmPath;
} else if (inputType.equals("tei")) {
subpaths = field.grobidPath;
}
for(String path : subpaths) {
NodeList nodeList = (NodeList) xp.compile(path).
evaluate(gold.getDocumentElement(), XPathConstants.NODESET);
//System.out.println(path + ": " + nodeList.getLength() + " nodes");
nbgoldResults = nodeList.getLength();
for (int i = 0; i < nodeList.getLength(); i++) {
String normalizedString = basicNormalizationFullText(nodeList.item(i).getNodeValue(), fieldName);
if (normalizedString != null && normalizedString.length()>0)
goldResults.add(normalizedString);
}
}
/*first = true;
System.out.print("goldResults:\t");
for(String res : goldResults) {
if (!first)
System.out.print(" | ");
else
first = false;
System.out.print(res);
}
System.out.println("");*/
                                // Workaround to avoid having two different lists with the same content.
                                // Probably to be extended to other fields if it does not cause regressions.
if (fieldName.equals("availability_stmt")) {
if (CollectionUtils.isNotEmpty(grobidResults)) {
List<String> grobidResults2 = new ArrayList<>();
                                        grobidResults2.add(grobidResults.stream().collect(Collectors.joining(" ")).replace("  ", " ")); // collapse double spaces introduced by the join
grobidResults = grobidResults2;
grobidAvailabilityStatement = true;
}
if (CollectionUtils.isNotEmpty(goldResults)) {
List<String> goldResults2 = new ArrayList<>();
                                        goldResults2.add(goldResults.stream().collect(Collectors.joining(" ")).replace("  ", " ")); // collapse double spaces introduced by the join
goldResults = goldResults2;
goldAvailabilityStatement = true;
}
}
// we compare the two result sets
/*if (fieldName.equals("availability_stmt")) {
if (goldResults.size() > 0) {
System.out.print("\n\n---- GOLD ----");
for (String goldResult : goldResults) {
System.out.print("\n" + goldResult);
}
}
if (grobidResults.size() > 0) {
System.out.print("\n---- GROBID ----");
for (String grobidResult : grobidResults) {
System.out.print("\n" + grobidResult);
}
}
}*/
// prepare first the grobidResult set for soft match
List<String> grobidSoftResults = new ArrayList<>();
for(String res : grobidResults)
grobidSoftResults.add(removeFullPunct(res));
int g = 0;
int grobidResultsSize = grobidResults.size();
int nbMatchStrict = 0; // number of matched grobid results, strict set
int nbMatchSoft = 0;
int nbMatchLevenshtein = 0;
int nbMatchRatcliffObershelp = 0;
for (String goldResult : goldResults) {
// nb expected results
if (goldResult.length() > 0) {
strictStats.incrementExpected(fieldName);
softStats.incrementExpected(fieldName);
levenshteinStats.incrementExpected(fieldName);
ratcliffObershelpStats.incrementExpected(fieldName);
}
double pct = 0.0;
// strict
if ((goldResult.length() > 0) && grobidResults.contains(goldResult)) {
strictStats.incrementObserved(fieldName);
nbMatchStrict++;
pct = 1.0;
grobidResults.remove(goldResult);
}
else {
if (goldResult.length() > 0) {
strictStats.incrementFalseNegative(fieldName);
allGoodStrict = false;
}
}
// soft
String goldResultSoft = goldResult;
if (field.isTextual) {
goldResultSoft = removeFullPunct(goldResult);
}
if ((goldResult.length() > 0) && grobidSoftResults.contains(goldResultSoft)) {
softStats.incrementObserved(fieldName);
nbMatchSoft++;
grobidSoftResults.remove(goldResultSoft);
}
else {
if (goldResultSoft.length() > 0){
softStats.incrementFalseNegative(fieldName);
allGoodSoft = false;
}
}
/*StringBuilder goldResultBuilder = new StringBuilder();
for (String goldResult : goldResults) {
goldResultBuilder.append(goldResult).append(" ");
}
String goldResultString = goldResultBuilder.toString();
StringBuilder grobidResultBuilder = new StringBuilder();
for (String grobidResult : grobidResults) {
grobidResultBuilder.append(grobidResult).append(" ");
}
String grobidResultString = grobidResultBuilder.toString();
// Levenshtein
if (field.isTextual) {
int distance = TextUtilities.getLevenshteinDistance(goldResultString, grobidResultString);
// Levenshtein distance is an integer value, not a percentage... however
// articles usually introduced it as a percentage... so we report it
// following the straightforward formula:
int bigger = Math.max(goldResult.length(), grobidResult.length());
pct = (double)(bigger - distance) / bigger;
}
if ((goldResult.length() > 0) && (pct >= minLevenshteinDistance)) {
Integer count = counterObservedLevenshtein.get(p);
counterObservedLevenshtein.set(p, count+1);
nbMatchLevenshtein++;
}
else {
if (goldResult.length() > 0){
Integer count = counterFalseNegativeLevenshtein.get(p);
counterFalseNegativeLevenshtein.set(p, count+1);
allGoodLevenshtein = false;
}
}
// RatcliffObershelp
Double similarity = 0.0;
if (goldResult.trim().equals(grobidResult.trim()))
similarity = 1.0;
if (field.isTextual) {
if ( (goldResult.length() > 0) && (grobidResult.length() > 0) ) {
Option<Object> similarityObject =
RatcliffObershelpMetric.compare(goldResultString, grobidResultString);
if ( (similarityObject != null) && (similarityObject.get() != null) )
similarity = (Double)similarityObject.get();
}
}
if ((goldResult.length() > 0) && (similarity >= minRatcliffObershelpSimilarity)) {
Integer count = counterObservedRatcliffObershelp.get(p);
counterObservedRatcliffObershelp.set(p, count+1);
nbMatchRatcliffObershelp++;
}
else {
if (grobidResultSoft.length() > 0) {
Integer count = counterFalsePositiveRatcliffObershelp.get(p);
counterFalsePositiveRatcliffObershelp.set(p, count+1);
allGoodRatcliffObershelp = false;
}
else if (goldResultSoft.length() > 0){
Integer count = counterFalseNegativeRatcliffObershelp.get(p);
counterFalseNegativeRatcliffObershelp.set(p, count+1);
allGoodRatcliffObershelp = false;
}
}*/
g++;
}
if (nbMatchStrict < grobidResultsSize) {
strictStats.incrementFalsePositive(fieldName, grobidResultsSize-nbMatchStrict);
allGoodStrict = false;
}
if (nbMatchSoft < grobidResultsSize) {
softStats.incrementFalsePositive(fieldName, grobidResultsSize-nbMatchSoft);
allGoodSoft = false;
}
/*if (nbMatchLevenshtein < grobidResultsSize) {
levenshteinStats.incrementFalsePositive(fieldName, grobidResultsSize-nbMatchLevenshtein);
allGoodLevenshtein= false;
}
if (nbMatchRatcliffObershelp < grobidResultsSize) {
ratcliffObershelpStats.incrementFalsePositive(fieldName, grobidResultsSize-nbMatchRatcliffObershelp);
allGoodRatcliffObershelp = false;
}*/
p++;
}
// document level ratio for availability statements
if (grobidAvailabilityStatement)
availabilityRatioStat.incrementObserved("availability_stmt");
if (goldAvailabilityStatement)
availabilityRatioStat.incrementExpected("availability_stmt");
if (grobidAvailabilityStatement && !goldAvailabilityStatement)
availabilityRatioStat.incrementFalsePositive("availability_stmt");
if (!grobidAvailabilityStatement && goldAvailabilityStatement)
availabilityRatioStat.incrementFalseNegative("availability_stmt");
}
}
else if (runType == this.PDFX) {
// TBD
}
else if (runType == this.CERMINE) {
// TBD
}
}
catch(Exception e) {
e.printStackTrace();
}
nbFile++;
}
}
report.append("\nEvaluation on " + nbFile + " random PDF files out of " +
(refFiles.length-2) + " PDF (ratio " + fileRatio + ").\n");
reportMD.append("\nEvaluation on " + nbFile + " random PDF files out of " +
(refFiles.length-2) + " PDF (ratio " + fileRatio + ").\n");
report.append("\n======= Strict Matching ======= (exact matches)\n");
reportMD.append("\n#### Strict Matching (exact matches)\n");
report.append("\n===== Field-level results =====\n");
reportMD.append("\n**Field-level results**\n");
report.append(EvaluationUtilities.computeMetrics(strictStats));
reportMD.append(EvaluationUtilities.computeMetricsMD(strictStats));
report.append("\n\n======== Soft Matching ======== (ignoring punctuation, " +
"case and space characters mismatches)\n");
reportMD.append("\n\n#### Soft Matching (ignoring punctuation, case and space characters mismatches)\n");
report.append("\n===== Field-level results =====\n");
reportMD.append("\n**Field-level results**\n");
report.append(EvaluationUtilities.computeMetrics(softStats));
reportMD.append(EvaluationUtilities.computeMetricsMD(softStats));
if (sectionType != this.FULLTEXT) {
report.append("\n\n==== Levenshtein Matching ===== (Minimum Levenshtein distance at " +
this.minLevenshteinDistance + ")\n");
reportMD.append("\n\n#### Levenshtein Matching (Minimum Levenshtein distance at " +
this.minLevenshteinDistance+")\n");
report.append("\n===== Field-level results =====\n");
reportMD.append("\n**Field-level results**\n");
report.append(EvaluationUtilities.computeMetrics(levenshteinStats));
reportMD.append(EvaluationUtilities.computeMetricsMD(levenshteinStats));
report.append("\n\n= Ratcliff/Obershelp Matching = (Minimum Ratcliff/Obershelp similarity at " +
minRatcliffObershelpSimilarity + ")\n");
reportMD.append("\n\n#### Ratcliff/Obershelp Matching (Minimum Ratcliff/Obershelp similarity at " +
minRatcliffObershelpSimilarity + ")\n");
report.append("\n===== Field-level results =====\n");
reportMD.append("\n**Field-level results**\n");
report.append(EvaluationUtilities.computeMetrics(ratcliffObershelpStats));
reportMD.append(EvaluationUtilities.computeMetricsMD(ratcliffObershelpStats));
}
if (sectionType == this.CITATION) {
report.append("\n===== Instance-level results =====\n\n");
reportMD.append("\n#### Instance-level results\n\n");
StringBuilder localReport = new StringBuilder();
localReport.append("Total expected instances: \t\t").append(totalExpectedInstances).append("\n");
localReport.append("Total extracted instances: \t\t").append(totalObservedInstances).append("\n");
localReport.append("Total correct instances: \t\t").append(totalCorrectInstancesStrict)
.append(" (strict) \n");
localReport.append("Total correct instances: \t\t").append(totalCorrectInstancesSoft)
.append(" (soft) \n");
localReport.append("Total correct instances: \t\t").append(totalCorrectInstancesLevenshtein)
.append(" (Levenshtein) \n");
localReport.append("Total correct instances: \t\t").append(totalCorrectInstancesRatcliffObershelp)
.append(" (RatcliffObershelp) \n");
double precisionStrict = (double) totalCorrectInstancesStrict / (totalObservedInstances);
double precisionSoft = (double) totalCorrectInstancesSoft / (totalObservedInstances);
double precisionLevenshtein = (double) totalCorrectInstancesLevenshtein / (totalObservedInstances);
double precisionRatcliffObershelp = (double) totalCorrectInstancesRatcliffObershelp /
(totalObservedInstances);
localReport.append("\nInstance-level precision:\t")
.append(TextUtilities.formatTwoDecimals(precisionStrict * 100)).append(" (strict) \n");
localReport.append("Instance-level precision:\t")
.append(TextUtilities.formatTwoDecimals(precisionSoft * 100)).append(" (soft) \n");
localReport.append("Instance-level precision:\t")
.append(TextUtilities.formatTwoDecimals(precisionLevenshtein * 100))
.append(" (Levenshtein) \n");
localReport.append("Instance-level precision:\t")
.append(TextUtilities.formatTwoDecimals(precisionRatcliffObershelp * 100))
.append(" (RatcliffObershelp) \n");
double recallStrict = (double) totalCorrectInstancesStrict / (totalExpectedInstances);
double recallSoft = (double) totalCorrectInstancesSoft / (totalExpectedInstances);
double recallLevenshtein = (double) totalCorrectInstancesLevenshtein / (totalExpectedInstances);
double recallRatcliffObershelp = (double) totalCorrectInstancesRatcliffObershelp /
(totalExpectedInstances);
localReport.append("\nInstance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(recallStrict * 100)).append("\t(strict) \n");
localReport.append("Instance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(recallSoft * 100)).append("\t(soft) \n");
localReport.append("Instance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(recallLevenshtein * 100))
.append("\t(Levenshtein) \n");
localReport.append("Instance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(recallRatcliffObershelp* 100))
.append("\t(RatcliffObershelp) \n");
double f0Strict = (2 * precisionStrict * recallStrict) / (precisionStrict + recallStrict);
double f0Soft = (2 * precisionSoft * recallSoft) / (precisionSoft + recallSoft);
double f0Levenshtein = (2 * precisionLevenshtein * recallLevenshtein) /
(precisionLevenshtein + recallLevenshtein);
double f0RatcliffObershelp = (2 * precisionRatcliffObershelp * recallRatcliffObershelp) /
(precisionRatcliffObershelp + recallRatcliffObershelp);
localReport.append("\nInstance-level f-score:\t")
.append(TextUtilities.formatTwoDecimals(f0Strict * 100)).append(" (strict) \n");
localReport.append("Instance-level f-score:\t")
.append(TextUtilities.formatTwoDecimals(f0Soft * 100)).append(" (soft) \n");
localReport.append("Instance-level f-score:\t")
.append(TextUtilities.formatTwoDecimals(f0Levenshtein * 100)).append(" (Levenshtein) \n");
localReport.append("Instance-level f-score:\t")
.append(TextUtilities.formatTwoDecimals(f0RatcliffObershelp * 100)).append(" (RatcliffObershelp) \n");
localReport.append("\nMatching 1 :\t").append(match1 + "\n");
localReport.append("\nMatching 2 :\t").append(match2 + "\n");
localReport.append("\nMatching 3 :\t").append(match3 + "\n");
localReport.append("\nMatching 4 :\t").append(match4 + "\n");
localReport.append("\nTotal matches :\t").append((match1 + match2 + match3 + match4) + "\n");
report.append(localReport.toString());
reportMD.append("```\n"+localReport.toString()+"```\n\n");
report.append("\n======= Citation context resolution ======= \n");
reportMD.append("\n#### Citation context resolution\n");
localReport = new StringBuilder();
localReport.append("\nTotal expected references: \t ").append(totalExpectedReferences)
.append(" - ").append(TextUtilities.formatTwoDecimals((double) totalExpectedReferences / nbFile)).append(" references per article");
localReport.append("\nTotal predicted references: \t ").append(totalObservedReferences)
.append(" - ").append(TextUtilities.formatTwoDecimals((double) totalObservedReferences / nbFile)).append(" references per article");
//report.append("\nTotal observed references (instance): \t ").append(totalObservedInstances);
//report.append("\nTotal correct observed references: \t ").append(totalCorrectInstancesRatcliffObershelp);
localReport.append("\n\nTotal expected citation contexts: \t ").append(totalExpectedCitations)
.append(" - ").append(TextUtilities.formatTwoDecimals((double) totalExpectedCitations / nbFile)).append(" citation contexts per article");
localReport.append("\nTotal predicted citation contexts: \t ").append(totalObservedCitations)
.append(" - ").append(TextUtilities.formatTwoDecimals((double) totalObservedCitations / nbFile)).append(" citation contexts per article");
localReport.append("\n\nTotal correct predicted citation contexts: \t ").append(totalCorrectObservedCitations)
.append(" - ").append(TextUtilities.formatTwoDecimals((double) totalCorrectObservedCitations / nbFile)).append(" citation contexts per article");
localReport.append("\nTotal wrong predicted citation contexts: \t ").append(totalWrongObservedCitations).append(" (wrong callout matching, callout missing in NLM, or matching with a bib. ref. not aligned with a bib.ref. in NLM)");
double precisionCitationContext = (double) totalCorrectObservedCitations / totalObservedCitations;
double recallCitationContext = (double) totalCorrectObservedCitations / totalExpectedCitations;
double fscoreCitationContext = (2 * precisionCitationContext * recallCitationContext) / (precisionCitationContext + recallCitationContext);
localReport.append("\n\nPrecision citation contexts: \t ").append(TextUtilities.formatTwoDecimals(precisionCitationContext * 100));
localReport.append("\nRecall citation contexts: \t ").append(TextUtilities.formatTwoDecimals(recallCitationContext * 100));
localReport.append("\nfscore citation contexts: \t ").append(TextUtilities.formatTwoDecimals(fscoreCitationContext * 100));
localReport.append("\n");
report.append(localReport.toString());
reportMD.append("```\n"+localReport.toString()+"```\n\n");
}
else if (sectionType == this.HEADER) {
report.append("\n===== Instance-level results =====\n\n");
reportMD.append("\n#### Instance-level results\n\n");
StringBuilder localReport = new StringBuilder();
localReport.append("Total expected instances: \t").append(totalExpectedInstances).append("\n");
localReport.append("Total correct instances: \t").append(totalCorrectInstancesStrict)
.append(" (strict) \n");
localReport.append("Total correct instances: \t").append(totalCorrectInstancesSoft)
.append(" (soft) \n");
localReport.append("Total correct instances: \t").append(totalCorrectInstancesLevenshtein)
.append(" (Levenshtein) \n");
localReport.append("Total correct instances: \t").append(totalCorrectInstancesRatcliffObershelp)
.append(" (ObservedRatcliffObershelp) \n");
double accuracyStrict = (double) totalCorrectInstancesStrict / (totalExpectedInstances);
double accuracySoft = (double) totalCorrectInstancesSoft / (totalExpectedInstances);
double accuracyLevenshtein = (double) totalCorrectInstancesLevenshtein / (totalExpectedInstances);
double accuracyRatcliffObershelp = (double) totalCorrectInstancesRatcliffObershelp /
(totalExpectedInstances);
localReport.append("\nInstance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(accuracyStrict * 100)).append("\t(strict) \n");
localReport.append("Instance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(accuracySoft * 100)).append("\t(soft) \n");
localReport.append("Instance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(accuracyLevenshtein * 100))
.append("\t(Levenshtein) \n");
localReport.append("Instance-level recall:\t")
.append(TextUtilities.formatTwoDecimals(accuracyRatcliffObershelp * 100))
.append("\t(RatcliffObershelp) \n");
report.append(localReport.toString());
reportMD.append("```\n"+localReport.toString()+"```\n\n");
}
if (sectionType == this.FULLTEXT) {
report.append("\n===== Document-level ratio results =====\n");
reportMD.append("\n**Document-level ratio results**\n");
report.append(EvaluationUtilities.computeMetrics(availabilityRatioStat));
reportMD.append(EvaluationUtilities.computeMetricsMD(availabilityRatioStat));
}
return report.toString();
}
private static String basicNormalization(String string) {
string = string.trim();
string = string.replace("\n", " ");
string = string.replace("\t", " ");
string = string.replaceAll(" ( )*", " ");
string = string.replace("'", "'");
return string.trim().toLowerCase();
}
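// illustrative example: basicNormalization("  The\tTitle ") returns "the title"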
private static String identifierNormalization(String string) {
string = basicNormalization(string);
if (string.startsWith("pmcpmc")) {
string = string.replace("pmcpmc", "");
}
string = string.replace("pmc", "");
if (string.startsWith("doi")) {
string = string.replace("doi", "").trim();
if (string.startsWith(":")) {
string = string.substring(1,string.length());
string = string.trim();
}
}
if (string.startsWith("pmid")) {
string = string.replace("pmid", "").trim();
if (string.startsWith(":")) {
string = string.substring(1,string.length());
string = string.trim();
}
}
return string.trim().toLowerCase();
}
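// illustrative examples:
// identifierNormalization("DOI: 10.1000/xyz123") returns "10.1000/xyz123"
// identifierNormalization("PMC1234567") returns "1234567"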
private static String basicNormalizationFullText(String string, String fieldName) {
string = string.trim();
string = UnicodeUtil.normaliseText(string);
string = string.replace("\n", " ");
string = string.replace("\t", " ");
string = string.replace("_", " ");
string = string.replace("\u00A0", " ");
if (fieldName.equals("reference_figure")) {
string = string.replace("figure", "").replace("Figure", "").replace("fig.", "").replace("Fig.", "").replace("fig", "").replace("Fig", "");
}
if (fieldName.equals("reference_table")) {
string = string.replace("table", "").replace("Table", "");
}
string = string.replaceAll(" ( )*", " ");
if (string.startsWith("[") || string.startsWith("("))
string = string.substring(1,string.length());
while (string.endsWith("]") || string.endsWith(")") || string.endsWith(","))
string = string.substring(0,string.length()-1);
return string.trim();
}
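// illustrative example (assuming UnicodeUtil.normaliseText leaves plain ASCII unchanged):
// basicNormalizationFullText("[Fig. 2],", "reference_figure") returns "2"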
private static String removeFullPunct(String string) {
StringBuilder result = new StringBuilder();
string = string.toLowerCase();
String allMismatchToIgnore = TextUtilities.fullPunctuations+"‐ \t\n\r\u00A0" + "\u00B7\u25FC\u25B2\u25BA\u25C6\u25CB\u25C7\u25CF\u25CE\u25FD\u25F8\u25F9\u25FA"; // the last characters are placeholders for glyphs still to be OCRed
for(int i=0; i<string.length(); i++) {
if (allMismatchToIgnore.indexOf(string.charAt(i)) == -1) {
result.append(string.charAt(i));
}
}
return result.toString();
}
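// illustrative example (assuming ',', '.' and '!' belong to TextUtilities.fullPunctuations):
// removeFullPunct("Smith, J. 2004!") returns "smithj2004"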
/**
* Command line execution.
*
* @param args Command line arguments.
*/
public static void main(String[] args) {
//DOMConfigurator is called to force logger to use the xml configuration file
//DOMConfigurator.configure("src/main/resources/log4j.xml");
if ( (args.length > 4) || (args.length < 3) ) {
System.err.println("usage: command [tei|nlm] [path to the (gold) evaluation XML dataset] [0|1 to run GROBID] fileRatio[0.0-1.0]");
return;
}
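// example invocation (hypothetical dataset path): nlm /data/PMC_sample 1 0.1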
String inputType = args[0];
if ( (inputType == null) || (inputType.length() == 0) || (!inputType.equals("nlm") && !inputType.equals("tei")) ) {
System.err.println("Input type is not correctly set, should be [tei|nlm]");
return;
}
boolean runGrobidVal = true;
String xmlInputPath = args[1];
if ( (xmlInputPath == null) || (xmlInputPath.length() == 0) ) {
System.err.println("Path to evaluation (gold) XML data is not correctly set");
return;
}
String runGrobid = args[2];
if (runGrobid.equals("0")) {
runGrobidVal = false;
}
else if (runGrobid.equals("1")) {
runGrobidVal = true;
}
else {
System.err.println("Invalid value for last argument (run): [0|1]");
return;
}
// optional file ratio for applying the evaluation
double fileRatio = 1.0;
if (args.length > 3) {
String fileRatioString = args[3];
if ((fileRatioString != null) && (fileRatioString.length() > 0)) {
try {
fileRatio = Double.parseDouble(fileRatioString);
}
catch(Exception e) {
System.err.println("Invalid argument fileRatio, must be a double, e.g. 0.1");
return;
}
}
}
try {
File xmlPath = new File(xmlInputPath);
if (!xmlPath.exists()) {
System.err.println("Path to evaluation (gold) XML data does not exist");
return;
}
if (!xmlPath.isDirectory()) {
System.err.println("Path to evaluation (gold) XML data is not a directory");
return;
}
}
catch (Exception e) {
e.printStackTrace();
}
try {
EndToEndEvaluation eval = new EndToEndEvaluation(xmlInputPath, inputType);
eval.fileRatio = fileRatio;
// markdown report
StringBuilder reportMD = new StringBuilder();
String report = eval.evaluationGrobid(runGrobidVal, reportMD);
System.out.println(report);
System.out.println(Engine.getCntManager());
// write markdown report
File fileMarkDown = new File(GrobidProperties.getInstance().getTempPath().getPath() + File.separator + "report.md");
FileUtils.writeStringToFile(fileMarkDown, reportMD.toString(), "UTF-8");
System.out.println("\nEvaluation report in markdown format saved under " + fileMarkDown.getAbsolutePath());
} catch (Exception e) {
e.printStackTrace();
}
// to be sure jvm stops
System.exit(0);
}
}
| 85,451 | 39.886124 | 479 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/PatentEvaluation.java
|
package org.grobid.trainer.evaluation;
import org.chasen.crfpp.Tagger;
import org.grobid.core.GrobidModels;
import org.grobid.core.engines.tagging.GenericTagger;
import org.grobid.core.engines.tagging.TaggerFactory;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.trainer.AbstractTrainer;
import org.grobid.trainer.PatentParserTrainer;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.TreeMap;
/**
* Evaluation of the extraction and parsing of the patent and NPL citations present in the patent
* description.
*
*/
public class PatentEvaluation {
private String evaluationPath = null;
private GenericTagger taggerPatent = null;
private GenericTagger taggerNPL = null;
private GenericTagger taggerAll = null;
// where the test file will be written
private String outputPath;
public PatentEvaluation() {
evaluationPath = AbstractTrainer.getEvalCorpusBasePath().getAbsolutePath();
outputPath = GrobidProperties.getInstance().getTempPath().getAbsolutePath();
//taggerNPL = TaggerFactory.getTagger(GrobidModels.PATENT_NPL);
//taggerPatent = TaggerFactory.getTagger(GrobidModels.PATENT_PATENT);
taggerAll = TaggerFactory.getTagger(GrobidModels.PATENT_CITATION);
}
/**
* Evaluation of the patent and NPL parsers against an evaluation set in the normal training format
* at token and instance level.
*
* @param type selects the model to be evaluated: 0 for the patent-citation-only model, 1 for the
* NPL-citation-only model and 2 for the combined patent+NPL citation model.
* @return report
*/
public String evaluate(int type) {
// we need first to produce the evaluation files with features from the files in corpus format present
// in the evaluation folder
PatentParserTrainer ppt = new PatentParserTrainer();
//noinspection NullableProblems
//ppt.createDataSet("test", null, evaluationPath, outputPath);
String setName;
GenericTagger tagger;
if (type == 0) {
tagger = taggerPatent;
setName = "patent";
} else if (type == 1) {
tagger = taggerNPL;
setName = "npl";
} else if (type == 2) {
tagger = taggerAll;
setName = "all";
} else {
throw new GrobidException("An exception occured while evaluating Grobid. The parameter " +
"type is undefined.");
}
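// NOTE: the tagger/setName selected above are currently not passed along;
// evaluate() below always runs the combined patent+NPL model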
return evaluate();
}
/**
* Evaluation of the patent and NPL parsers against an evaluation set in the normal training format
* at token and instance level.
* @return report
*/
public String evaluate() {
// we need first to produce the evaluation files with features from the files in corpus format present
// in the evaluation folder
StringBuilder report = new StringBuilder();
PatentParserTrainer ppt = new PatentParserTrainer();
//noinspection NullableProblems
ppt.createDataSet("test", evaluationPath, outputPath, 1);
List<GenericTagger> taggers = new ArrayList<GenericTagger>();
taggers.add(taggerAll);
// note: there is no field for these models
for (GenericTagger tagger : taggers) {
// total tag
int totalExpected = 0;
int totalCorrect = 0;
int totalSuggested = 0;
// total instance
int totalInstanceExpected = 0;
int totalInstanceCorrect = 0;
int totalInstanceSuggested = 0;
// npl tag
int totalNPLExpected = 0;
int totalNPLCorrect = 0;
int totalNPLSuggested = 0;
// npl instance
int totalInstanceNPLExpected = 0;
int totalInstanceNPLCorrect = 0;
int totalInstanceNPLSuggested = 0;
// patent tag
int totalPatentExpected = 0;
int totalPatentCorrect = 0;
int totalPatentSuggested = 0;
// patent instance
int totalInstancePatentExpected = 0;
int totalInstancePatentCorrect = 0;
int totalInstancePatentSuggested = 0;
try {
// read the evaluation file enriched with feature
BufferedReader bufReader = new BufferedReader(
new InputStreamReader(new FileInputStream(outputPath + "/all.test"), "UTF-8"));
String line = null;
ArrayList<String> patentBlocks = new ArrayList<String>();
while ((line = bufReader.readLine()) != null) {
patentBlocks.add(line);
}
bufReader.close();
//TODO: VZ_FIX
// String theResult = EvaluationUtilities.taggerRun(patentBlocks, tagger);
String theResult = tagger.label(patentBlocks);
//System.out.println(theResult);
StringTokenizer stt = new StringTokenizer(theResult, "\n");
// line = null;
String previousExpectedLabel = null;
String previousSuggestedLabel = null;
boolean instanceCorrect = true;
while (stt.hasMoreTokens()) {
line = stt.nextToken();
StringTokenizer st = new StringTokenizer(line, "\t");
String expected = null; // expected tag
String actual = null; // tag suggested by the model
String word = null; // the token
boolean start = true;
boolean failure = false;
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (start) {
word = token.trim();
start = false;
}
expected = actual;
actual = token.trim();
}
// we can simply compare the two last columns (expected, actual) for evaluation;
// in this context, an instance is a connected sequence of "non-other" tags in the expected tag sequence.
// As in (Peng & McCallum, 2006), we simply measure the accuracy at the instance level.
// tags
if ((expected != null) && (actual != null)) {
if (!expected.equals("<other>")) {
totalExpected++;
if (expected.endsWith("refPatent>"))
totalPatentExpected++;
else if (expected.endsWith("refNPL>"))
totalNPLExpected++;
else
report.append("WARNING bizarre suggested tag: " + expected + "\n");
}
if (!actual.equals("<other>")) {
totalSuggested++;
if (actual.endsWith("refPatent>"))
totalPatentSuggested++;
else if (actual.endsWith("refNPL>"))
totalNPLSuggested++;
else
report.append("WARNING bizarre suggested tag: " + actual + "\n");
}
if (actual.endsWith("refPatent>"))
actual = "refPatent";
else if (actual.endsWith("refNPL>")) {
actual = "refNPL";
}
if (expected.endsWith("refPatent>"))
expected = "refPatent";
else if (expected.endsWith("refNPL>"))
expected = "refNPL";
if (actual.equals("<other>"))
actual = "other";
if (expected.equals("<other>"))
expected = "other";
if (expected.equals(actual)) {
if (!actual.equals("other") && !expected.equals("other")) {
totalCorrect++;
if (expected.startsWith("refPatent"))
totalPatentCorrect++;
else if (expected.startsWith("refNPL"))
totalNPLCorrect++;
}
} else {
failure = true;
}
// expected instance
if (!expected.equals("other")) {
if ((previousExpectedLabel == null) || (!expected.equals(previousExpectedLabel))) {
// we are starting a new instance
// are we ending an instance?
if (previousExpectedLabel != null) {
if (!previousExpectedLabel.equals("other")) {
// we are ending an instance
if (instanceCorrect) {
totalInstanceCorrect++;
if (previousExpectedLabel.startsWith("refPatent"))
totalInstancePatentCorrect++;
else if (previousExpectedLabel.startsWith("refNPL"))
totalInstanceNPLCorrect++;
}
}
}
// new instance
totalInstanceExpected++;
if (expected.startsWith("refPatent"))
totalInstancePatentExpected++;
else if (expected.startsWith("refNPL"))
totalInstanceNPLExpected++;
instanceCorrect = true;
}
} else {
// are we ending an instance?
if (previousExpectedLabel != null) {
if (!previousExpectedLabel.equals("other")) {
// we are ending an instance
if (instanceCorrect) {
totalInstanceCorrect++;
if (previousExpectedLabel.startsWith("refPatent"))
totalInstancePatentCorrect++;
else if (previousExpectedLabel.startsWith("refNPL"))
totalInstanceNPLCorrect++;
}
instanceCorrect = true;
}
}
}
if (failure) {
instanceCorrect = false;
}
previousExpectedLabel = expected;
previousSuggestedLabel = actual;
}
}
} catch (Exception e) {
throw new GrobidException("An exception occured while evaluating Grobid.", e);
}
double precision;
double recall;
double f;
if (tagger == taggerNPL) {
report.append("\n\n*********************************************\n");
report.append("****** NPL reference extraction model *******\n");
report.append("*********************************************\n");
} else if (tagger == taggerPatent) {
report.append("\n\n************************************************\n");
report.append("****** patent reference extraction model *******\n");
report.append("************************************************\n");
} else if (tagger == taggerAll) {
report.append("\n\n*************************************************************\n");
report.append("****** combined NPL+patent reference extraction model *******\n");
report.append("*************************************************************\n");
}
if (tagger == taggerAll) {
report.append("\n======== GENERAL TAG EVALUATION ========\n");
report.append("Total expected tags: ").append(totalExpected).append("\n");
report.append("Total suggested tags: ").append(totalSuggested).append("\n");
report.append("Total correct tags (Correct Positive): ").append(totalCorrect).append("\n");
report.append("Total incorrect tags (False Positive + False Negative): ").append(Math.abs(totalSuggested - totalCorrect)).append("\n");
precision = (double) totalCorrect / totalSuggested;
recall = (double) totalCorrect / totalExpected;
f = 2 * precision * recall / (precision + recall);
report.append("Precision\t= ").append(TextUtilities.formatTwoDecimals(precision * 100)).append("\n");
report.append("Recall\t= ").append(TextUtilities.formatTwoDecimals(recall * 100)).append("\n");
report.append("F-score\t= ").append(TextUtilities.formatTwoDecimals(f * 100)).append("\n");
}
if (tagger != taggerPatent) {
report.append("\n======== TAG NPL EVALUATION ========\n");
report.append("Total expected tags: ").append(totalNPLExpected).append("\n");
report.append("Total suggested tags: ").append(totalNPLSuggested).append("\n");
report.append("Total correct tags (Correct Positive): ").append(totalNPLCorrect).append("\n");
report.append("Total incorrect tags (False Positive + False Negative): ").append(Math.abs(totalNPLSuggested - totalNPLCorrect)).append("\n");
precision = (double) totalNPLCorrect / totalNPLSuggested;
recall = (double) totalNPLCorrect / totalNPLExpected;
f = 2 * precision * recall / (precision + recall);
report.append("Precision\t= ").append(TextUtilities.formatTwoDecimals(precision * 100)).append("\n");
report.append("Recall\t= ").append(TextUtilities.formatTwoDecimals(recall * 100)).append("\n");
report.append("F-score\t= ").append(TextUtilities.formatTwoDecimals(f * 100)).append("\n");
}
if (tagger != taggerNPL) {
report.append("\n======== TAG PATENT EVALUATION ========\n");
report.append("Total expected tags: ").append(totalPatentExpected).append("\n");
report.append("Total suggested tags: ").append(totalPatentSuggested).append("\n");
report.append("Total correct tags (Correct Positive): ").append(totalPatentCorrect).append("\n");
report.append("Total incorrect tags (False Positive + False Negative): ").append(Math.abs(totalPatentSuggested - totalPatentCorrect)).append("\n");
precision = (double) totalPatentCorrect / totalPatentSuggested;
recall = (double) totalPatentCorrect / totalPatentExpected;
f = 2 * precision * recall / (precision + recall);
report.append("Precision\t= ").append(TextUtilities.formatTwoDecimals(precision * 100)).append("\n");
report.append("Recall\t= ").append(TextUtilities.formatTwoDecimals(recall * 100)).append("\n");
report.append("F-score\t= ").append(TextUtilities.formatTwoDecimals(f * 100)).append("\n");
}
if (tagger == taggerAll) {
report.append("\n======== GENERAL INSTANCE EVALUATION ========\n");
report.append("Total expected instances: ").append(totalInstanceExpected).append("\n");
report.append("Total correct instances: ").append(totalInstanceCorrect).append("\n");
recall = (double) totalInstanceCorrect / totalInstanceExpected;
report.append("Instance Accuracy = ").append(TextUtilities.formatTwoDecimals(recall * 100)).append("\n");
}
if (tagger != taggerPatent) {
report.append("\n======== INSTANCE NPL EVALUATION ========\n");
report.append("Total expected instances: ").append(totalInstanceNPLExpected).append("\n");
report.append("Total correct instances: ").append(totalInstanceNPLCorrect).append("\n");
recall = (double) totalInstanceNPLCorrect / totalInstanceNPLExpected;
report.append("Instance accuracy = ").append(TextUtilities.formatTwoDecimals(recall * 100)).append("\n");
}
if (tagger != taggerNPL) {
report.append("\n======== INSTANCE PATENT EVALUATION ========\n");
report.append("Total expected instances: ").append(totalInstancePatentExpected).append("\n");
report.append("Total correct instances: ").append(totalInstancePatentCorrect).append("\n");
recall = (double) totalInstancePatentCorrect / totalInstancePatentExpected;
report.append("Instance accuracy = ").append(TextUtilities.formatTwoDecimals(recall * 100)).append("\n\n");
}
}
return report.toString();
}
/**
* Evaluation of the extraction against the gold corpus for patent reference resolution (non-XML format, produced in 2010).
* Used in particular for comparison with Ddoc and ACE.
* @param path file path whose parent directory contains GROBID's report.txt results
*/
public void evaluateGold(File path) {
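// expected layout of the parsed result/log files, one block per dossier
// (illustrative identifiers):
// EP1234567.txt
// RFAP: EP20000123456 EP20010234567
// RF: EP1000001 US6000000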
try {
TreeMap<String, ArrayList<String>> rfap_reference = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rf_reference = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rfap_ace = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rf_ace = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rfap_Ddoc = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rf_Ddoc = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rfap = new TreeMap<String, ArrayList<String>>();
TreeMap<String, ArrayList<String>> rf = new TreeMap<String, ArrayList<String>>();
// we parse the log file to get the reference data
String dossierName = null;
BufferedReader br = new BufferedReader(
new InputStreamReader(
new FileInputStream(evaluationPath + "/gold/REF_20100426.txt"), "UTF8"));
String s;
// boolean rf_part = false;
ArrayList<String> resap_reference = null;
ArrayList<String> res_reference = null;
while ((s = br.readLine()) != null) {
if (s.length() == 0) continue;
if (s.startsWith("RFAP:")) {
resap_reference = new ArrayList<String>();
s = s.substring(5, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!resap_reference.contains(pat))
resap_reference.add(pat);
}
}
}
} else if (s.startsWith("RF:")) {
res_reference = new ArrayList<String>();
s = s.substring(3, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!res_reference.contains(pat))
res_reference.add(pat);
}
}
}
} else {
if (dossierName != null) {
rfap_reference.put(dossierName, resap_reference);
rf_reference.put(dossierName, res_reference);
}
dossierName = s.trim();
dossierName = dossierName.replace(".txt", "");
}
}
rfap_reference.put(dossierName, resap_reference);
rf_reference.put(dossierName, res_reference);
br.close();
// we parse the log file for getting ACE results
br = new BufferedReader(
new InputStreamReader(
new FileInputStream(evaluationPath + "/ACE_20100426.txt"), "UTF8"));
// rf_part = false;
ArrayList<String> resap_ace = null;
ArrayList<String> res_ace = null;
dossierName = null;
while ((s = br.readLine()) != null) {
if (s.length() == 0) continue;
if (s.startsWith("RFAP:")) {
resap_ace = new ArrayList<String>();
s = s.substring(5, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!resap_ace.contains(pat))
resap_ace.add(pat);
}
}
}
} else if (s.startsWith("RF:")) {
res_ace = new ArrayList<String>();
s = s.substring(3, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!res_ace.contains(pat))
res_ace.add(pat);
}
}
}
} else {
if (dossierName != null) {
rfap_ace.put(dossierName, resap_ace);
rf_ace.put(dossierName, res_ace);
}
dossierName = s.trim();
dossierName = dossierName.replace(".txt", "");
}
}
rfap_ace.put(dossierName, resap_ace);
rf_ace.put(dossierName, res_ace);
br.close();
// we parse the log file for Ddoc results
br = new BufferedReader(
new InputStreamReader(
new FileInputStream(evaluationPath + "/Ddoc_20100426.txt"), "UTF8"));
ArrayList<String> resap_Ddoc = null;
ArrayList<String> res_Ddoc = null;
dossierName = null;
while ((s = br.readLine()) != null) {
if (s.length() == 0) continue;
if (s.startsWith("RFAP:")) {
resap_Ddoc = new ArrayList<String>();
s = s.substring(5, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!resap_Ddoc.contains(pat))
resap_Ddoc.add(pat);
}
}
}
} else if (s.startsWith("RF:")) {
res_Ddoc = new ArrayList<String>();
s = s.substring(3, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!res_Ddoc.contains(pat))
res_Ddoc.add(pat);
}
}
}
} else {
if (dossierName != null) {
rfap_Ddoc.put(dossierName, resap_Ddoc);
rf_Ddoc.put(dossierName, res_Ddoc);
}
dossierName = s.trim();
dossierName = dossierName.replace(".txt", "");
}
}
rfap_Ddoc.put(dossierName, resap_Ddoc);
rf_Ddoc.put(dossierName, res_Ddoc);
br.close();
/*while((s = br.readLine()) != null) {
s = s.substring(1, s.length());
if (s.trim().length() == 0) continue;
if (s.startsWith("EP") & (dossierName == null)) {
StringTokenizer st = new StringTokenizer(s, " ");
dossierName = st.nextToken().trim();
//dossierName = "EP20"+dossierName.substring(2,4)+"0"+dossierName.substring(4,dossierName.length());
//System.out.println(dossierName);
}
else if (s.startsWith("RFAP")) {
rf_part = false;
resap_reference = new ArrayList<String>();
resap_ace = new ArrayList<String>();
resap_Ddoc = new ArrayList<String>();
}
else if (s.startsWith("RF")) {
rf_part = true;
res_reference = new ArrayList<String>();
res_ace = new ArrayList<String>();
res_Ddoc = new ArrayList<String>();
}
else if (s.startsWith("_______")) {
rfap_reference.put(dossierName, resap_reference);
rf_reference.put(dossierName, res_reference);
rfap_ace.put(dossierName, resap_ace);
rf_ace.put(dossierName, res_ace);
rfap_Ddoc.put(dossierName, resap_Ddoc);
rf_Ddoc.put(dossierName, res_Ddoc);
dossierName = null;
}
else {
StringTokenizer st = new StringTokenizer(s, "|");
if (rf_part) {
String tok1 = st.nextToken().trim();
String tok2 = st.nextToken().trim();
String tok3 = st.nextToken().trim();
if (tok1.length() > 0) {
if (!res_reference.contains(tok1))
res_reference.add(tok1);
}
if (tok2.length() > 0) {
if (!res_ace.contains(tok2))
res_ace.add(tok2);
}
if (tok3.length() > 0) {
if (!res_Ddoc.contains(tok3))
res_Ddoc.add(tok3);
}
}
else {
String tok1 = st.nextToken().trim();
if (!st.hasMoreTokens())
System.out.println("WARNING: " + s);
String tok2 = st.nextToken().trim();
if (!st.hasMoreTokens())
System.out.println("WARNING: " + s);
String tok3 = st.nextToken().trim();
if (tok1.length() > 0) {
if (!resap_reference.contains(tok1))
resap_reference.add(tok1);
}
if (tok2.length() > 0) {
if (!resap_ace.contains(tok2))
resap_ace.add(tok2);
}
if (tok3.length() > 0) {
if (!resap_Ddoc.contains(tok3))
resap_Ddoc.add(tok3);
}
}
}
}
br.close();
*/
// we parse our own results
BufferedReader br2 = new BufferedReader(
new InputStreamReader(new FileInputStream(path.getParent() + "/report.txt"), "UTF8"));
dossierName = null;
ArrayList<String> resap = null;
ArrayList<String> res = null;
while ((s = br2.readLine()) != null) {
if (s.length() == 0) continue;
if (s.startsWith("RFAP:")) {
resap = new ArrayList<String>();
s = s.substring(5, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!resap.contains(pat))
resap.add(pat);
}
}
}
} else if (s.startsWith("RF:")) {
res = new ArrayList<String>();
s = s.substring(3, s.length());
String[] pats = s.split(" ");
for (String pat : pats) {
if (pat != null) {
if (pat.length() > 0) {
if (!res.contains(pat))
res.add(pat);
}
}
}
} else {
if (dossierName != null) {
rfap.put(dossierName, resap);
rf.put(dossierName, res);
}
dossierName = s.trim();
dossierName = dossierName.replace(".txt", "");
}
}
rfap.put(dossierName, resap);
rf.put(dossierName, res);
br2.close();
// all the sets are initialized, we compute the metrics
// reference
int count_rfap_reference = 0;
for (Map.Entry<String, ArrayList<String>> entry : rfap_reference.entrySet()) {
// dossierName = entry.getKey();
ArrayList<String> liste = entry.getValue();
count_rfap_reference += liste.size();
}
int count_rf_reference = 0;
int nbDossier = 0;
for (Map.Entry<String, ArrayList<String>> entry : rf_reference.entrySet()) {
// dossierName = entry.getKey();
ArrayList<String> liste = entry.getValue();
count_rf_reference += liste.size();
nbDossier++;
}
System.out.println("Ref. data: " + count_rfap_reference + " serials and "
+ count_rf_reference + " publications, total: "
+ (count_rfap_reference + count_rf_reference) + " in " + nbDossier + " dossiers");
// ace
int count_rfap_ace = 0;
int count_rfap_ace_correct = 0;
for (Map.Entry<String, ArrayList<String>> entry : rfap_ace.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rfap_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
count_rfap_ace += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rfap_ace_correct++;
}
}
}
int count_rf_ace = 0;
int count_rf_ace_correct = 0;
nbDossier = 0;
for (Map.Entry<String, ArrayList<String>> entry : rf_ace.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rf_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
count_rf_ace += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rf_ace_correct++;
}
}
nbDossier++;
}
System.out.println("ACE data: " + count_rfap_ace + " (" + count_rfap_ace_correct + " correct) serials and "
+ count_rf_ace + " (" + count_rf_ace_correct + " correct) publications, total: " + (count_rfap_ace + count_rf_ace)
+ " in " + nbDossier + " dossiers");
// Ddoc
int count_rfap_Ddoc = 0;
int count_rfap_Ddoc_correct = 0;
for (Map.Entry<String, ArrayList<String>> entry : rfap_Ddoc.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rfap_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
count_rfap_Ddoc += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rfap_Ddoc_correct++;
}
}
}
int count_rf_Ddoc = 0;
int count_rf_Ddoc_correct = 0;
nbDossier = 0;
for (Map.Entry<String, ArrayList<String>> entry : rf_Ddoc.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rf_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
count_rf_Ddoc += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rf_Ddoc_correct++;
}
}
nbDossier++;
}
System.out.println("Ddoc data: " + count_rfap_Ddoc + " (" + count_rfap_Ddoc_correct + " correct) serials and "
+ count_rf_Ddoc + " (" + count_rf_Ddoc_correct + " correct) publications, total: " + (count_rfap_Ddoc + count_rf_Ddoc)
+ " in " + nbDossier + " dossiers");
// GROBID
int count_rfap = 0;
int count_rfap_correct = 0;
for (Map.Entry<String, ArrayList<String>> entry : rfap.entrySet()) {
//System.out.println("key is " + entry.getKey() + " and value is " + entry.getValue());
dossierName = entry.getKey();
ArrayList<String> referenceListe = rfap_reference.get(dossierName);
if (referenceListe != null) {
ArrayList<String> liste = entry.getValue();
count_rfap += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rfap_correct++;
}
}
}
}
int count_rf = 0;
int count_rf_correct = 0;
nbDossier = 0;
for (Map.Entry<String, ArrayList<String>> entry : rf.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rf_reference.get(dossierName);
if (referenceListe != null) {
ArrayList<String> liste = entry.getValue();
count_rf += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rf_correct++;
}
}
nbDossier++;
} else
System.out.println("WARNING! file " + dossierName
+ " in GROBID's results but not in reference results");
}
System.out.println("GROBID data: " + count_rfap + " (" + count_rfap_correct + " correct) serials and "
+ count_rf + " (" + count_rf_correct + " correct) publications, total: " + (count_rfap + count_rf)
+ " in " + nbDossier + " dossiers");
// combining Ddoc and GROBID results by intersection
int count_rfap_DdocIGROBID = 0;
int count_rfap_DdocIGROBID_correct = 0;
for (Map.Entry<String, ArrayList<String>> entry : rfap_Ddoc.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rfap_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
ArrayList<String> listeGrobid = rfap.get(dossierName);
if (listeGrobid == null) {
System.out.println("WARNING! file " + dossierName
+ " in Ddoc results but not in GROBID's one");
} else {
ArrayList<String> liste2 = new ArrayList<String>();
for (String toto : liste) {
if (listeGrobid.contains(toto))
liste2.add(toto);
}
count_rfap_DdocIGROBID += liste2.size();
for (String pat : liste2) {
if (referenceListe.contains(pat)) {
count_rfap_DdocIGROBID_correct++;
}
}
}
}
int count_rf_DdocIGROBID = 0;
int count_rf_DdocIGROBID_correct = 0;
nbDossier = 0;
for (Map.Entry<String, ArrayList<String>> entry : rf_Ddoc.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rf_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
ArrayList<String> listeGrobid = rf.get(dossierName);
if (listeGrobid == null) {
System.out.println("WARNING! file " + dossierName
+ " in Ddoc results but not in GROBID's one");
} else {
ArrayList<String> liste2 = new ArrayList<String>();
for (String toto : liste) {
if (listeGrobid.contains(toto))
liste2.add(toto);
}
count_rf_DdocIGROBID += liste2.size();
for (String pat : liste2) {
if (referenceListe.contains(pat)) {
count_rf_DdocIGROBID_correct++;
}
}
nbDossier++;
}
}
System.out.println("Ddoc+GROBID data: " + count_rfap_DdocIGROBID + " (" + count_rfap_DdocIGROBID_correct
+ " correct) serials and "
+ count_rf_DdocIGROBID + " (" + count_rf_DdocIGROBID_correct + " correct) publications, total: "
+ (count_rfap_DdocIGROBID + count_rf_DdocIGROBID)
+ " in " + nbDossier + " dossiers");
// combining Ddoc and GROBID results by union
int count_rfap_DdocUGROBID = 0;
int count_rfap_DdocUGROBID_correct = 0;
for (Map.Entry<String, ArrayList<String>> entry : rfap_Ddoc.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rfap_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
ArrayList<String> listeGrobid = rfap.get(dossierName);
if (listeGrobid == null) {
System.out.println("WARNING! file " + dossierName
+ " in Ddoc results but not in GROBID's one");
} else {
for (String toto : listeGrobid) {
if (!liste.contains(toto))
liste.add(toto);
}
count_rfap_DdocUGROBID += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rfap_DdocUGROBID_correct++;
}
}
}
}
int count_rf_DdocUGROBID = 0;
int count_rf_DdocUGROBID_correct = 0;
nbDossier = 0;
for (Map.Entry<String, ArrayList<String>> entry : rf_Ddoc.entrySet()) {
dossierName = entry.getKey();
ArrayList<String> referenceListe = rf_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
ArrayList<String> listeGrobid = rf.get(dossierName);
if (listeGrobid == null) {
System.out.println("WARNING! file " + dossierName
+ " in Ddoc results but not in GROBID's one");
} else {
for (String toto : listeGrobid) {
if (!liste.contains(toto))
liste.add(toto);
}
count_rf_DdocUGROBID += liste.size();
for (String pat : liste) {
if (referenceListe.contains(pat)) {
count_rf_DdocUGROBID_correct++;
}
}
nbDossier++;
}
}
System.out.println("Ddoc|GROBID data: " + count_rfap_DdocUGROBID + " (" + count_rfap_DdocUGROBID_correct
+ " correct) serials and "
+ count_rf_DdocUGROBID + " (" + count_rf_DdocUGROBID_correct + " correct) publications, total: "
+ (count_rfap_DdocUGROBID + count_rf_DdocUGROBID)
+ " in " + nbDossier + " dossiers");
// ACE
double ace_rfap_precision = (double) count_rfap_ace_correct / count_rfap_ace;
double ace_rfap_recall = (double) count_rfap_ace_correct / count_rfap_reference;
double ace_rfap_f = (2 * ace_rfap_precision * ace_rfap_recall)
/ (ace_rfap_precision + ace_rfap_recall);
double ace_rf_precision = (double) count_rf_ace_correct / count_rf_ace;
double ace_rf_recall = (double) count_rf_ace_correct / count_rf_reference;
double ace_rf_f = (2 * ace_rf_precision * ace_rf_recall)
/ (ace_rf_precision + ace_rf_recall);
double ace_rfall_precision = (double) (count_rfap_ace_correct + count_rf_ace_correct)
/ (count_rfap_ace + count_rf_ace);
double ace_rfall_recall = (double) (count_rfap_ace_correct + count_rf_ace_correct)
/ (count_rfap_reference + count_rf_reference);
double ace_rfall_f = (2 * ace_rfall_precision * ace_rfall_recall)
/ (ace_rfall_precision + ace_rfall_recall);
// Ddoc
double Ddoc_rfap_precision = (double) count_rfap_Ddoc_correct / count_rfap_Ddoc;
double Ddoc_rfap_recall = (double) count_rfap_Ddoc_correct / count_rfap_reference;
double Ddoc_rfap_f = (2 * Ddoc_rfap_precision * Ddoc_rfap_recall)
/ (Ddoc_rfap_precision + Ddoc_rfap_recall);
double Ddoc_rf_precision = (double) count_rf_Ddoc_correct / count_rf_Ddoc;
double Ddoc_rf_recall = (double) count_rf_Ddoc_correct / count_rf_reference;
double Ddoc_rf_f = (2 * Ddoc_rf_precision * Ddoc_rf_recall)
/ (Ddoc_rf_precision + Ddoc_rf_recall);
double Ddoc_rfall_precision = (double) (count_rfap_Ddoc_correct + count_rf_Ddoc_correct)
/ (count_rfap_Ddoc + count_rf_Ddoc);
double Ddoc_rfall_recall = (double) (count_rfap_Ddoc_correct + count_rf_Ddoc_correct)
/ (count_rfap_reference + count_rf_reference);
double Ddoc_rfall_f = (2 * Ddoc_rfall_precision * Ddoc_rfall_recall)
/ (Ddoc_rfall_precision + Ddoc_rfall_recall);
// GROBID
double grobid_rfap_precision = (double) count_rfap_correct / count_rfap;
double grobid_rfap_recall = (double) count_rfap_correct / count_rfap_reference;
double grobid_rfap_f = (2 * grobid_rfap_precision * grobid_rfap_recall)
/ (grobid_rfap_precision + grobid_rfap_recall);
double grobid_rf_precision = (double) count_rf_correct / count_rf;
double grobid_rf_recall = (double) count_rf_correct / count_rf_reference;
double grobid_rf_f = (2 * grobid_rf_precision * grobid_rf_recall)
/ (grobid_rf_precision + grobid_rf_recall);
double grobid_rfall_precision = (double) (count_rfap_correct + count_rf_correct)
/ (count_rf + count_rfap);
double grobid_rfall_recall = (double) (count_rfap_correct + count_rf_correct)
/ (count_rfap_reference + count_rf_reference);
double grobid_rfall_f = (2 * grobid_rfall_precision * grobid_rfall_recall)
/ (grobid_rfall_precision + grobid_rfall_recall);
// Ddoc ∩ GROBID (intersection, reported as "Ddoc+GROBID")
double DdocIGROBID_rfap_precision = (double) count_rfap_DdocIGROBID_correct / count_rfap_DdocIGROBID;
double DdocIGROBID_rfap_recall = (double) count_rfap_DdocIGROBID_correct / count_rfap_reference;
double DdocIGROBID_rfap_f = (2 * DdocIGROBID_rfap_precision * DdocIGROBID_rfap_recall)
/ (DdocIGROBID_rfap_precision + DdocIGROBID_rfap_recall);
double DdocIGROBID_rf_precision = (double) count_rf_DdocIGROBID_correct / count_rf_DdocIGROBID;
double DdocIGROBID_rf_recall = (double) count_rf_DdocIGROBID_correct / count_rf_reference;
double DdocIGROBID_rf_f = (2 * DdocIGROBID_rf_precision * DdocIGROBID_rf_recall)
/ (DdocIGROBID_rf_precision + DdocIGROBID_rf_recall);
double DdocIGROBID_rfall_precision = (double) (count_rfap_DdocIGROBID_correct + count_rf_DdocIGROBID_correct)
/ (count_rfap_DdocIGROBID + count_rf_DdocIGROBID);
double DdocIGROBID_rfall_recall = (double) (count_rfap_DdocIGROBID_correct + count_rf_DdocIGROBID_correct)
/ (count_rfap_reference + count_rf_reference);
double DdocIGROBID_rfall_f = (2 * DdocIGROBID_rfall_precision * DdocIGROBID_rfall_recall)
/ (DdocIGROBID_rfall_precision + DdocIGROBID_rfall_recall);
// Ddoc ∪ GROBID (union, reported as "Ddoc|GROBID")
double DdocUGROBID_rfap_precision = (double) count_rfap_DdocUGROBID_correct / count_rfap_DdocUGROBID;
double DdocUGROBID_rfap_recall = (double) count_rfap_DdocUGROBID_correct / count_rfap_reference;
double DdocUGROBID_rfap_f = (2 * DdocUGROBID_rfap_precision * DdocUGROBID_rfap_recall)
/ (DdocUGROBID_rfap_precision + DdocUGROBID_rfap_recall);
double DdocUGROBID_rf_precision = (double) count_rf_DdocUGROBID_correct / count_rf_DdocUGROBID;
double DdocUGROBID_rf_recall = (double) count_rf_DdocUGROBID_correct / count_rf_reference;
double DdocUGROBID_rf_f = (2 * DdocUGROBID_rf_precision * DdocUGROBID_rf_recall)
/ (DdocUGROBID_rf_precision + DdocUGROBID_rf_recall);
double DdocUGROBID_rfall_precision = (double) (count_rfap_DdocUGROBID_correct + count_rf_DdocUGROBID_correct)
/ (count_rfap_DdocUGROBID + count_rf_DdocUGROBID);
double DdocUGROBID_rfall_recall = (double) (count_rfap_DdocUGROBID_correct + count_rf_DdocUGROBID_correct)
/ (count_rfap_reference + count_rf_reference);
double DdocUGROBID_rfall_f = (2 * DdocUGROBID_rfall_precision * DdocUGROBID_rfall_recall)
/ (DdocUGROBID_rfall_precision + DdocUGROBID_rfall_recall);
// print the report
System.out.println("___________________________________________________________");
System.out.println("RFAP: ");
System.out.println("\t\tPrecision\tRecall\t\tF-score");
System.out.println("ACE\t\t" + TextUtilities.formatTwoDecimals(ace_rfap_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(ace_rfap_recall * 100)
+ "\t\t" + TextUtilities.formatTwoDecimals(ace_rfap_f * 100));
System.out.println("Ddoc\t" + TextUtilities.formatTwoDecimals(Ddoc_rfap_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(Ddoc_rfap_recall * 100)
+ "\t\t" + TextUtilities.formatTwoDecimals(Ddoc_rfap_f * 100));
System.out.println("GROBID\t" + TextUtilities.formatTwoDecimals(grobid_rfap_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(grobid_rfap_recall * 100)
+ "\t\t" + TextUtilities.formatTwoDecimals(grobid_rfap_f * 100));
System.out.println("Ddoc+GROBID\t" + TextUtilities.formatTwoDecimals(DdocIGROBID_rfap_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocIGROBID_rfap_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocIGROBID_rfap_f * 100));
System.out.println("Ddoc|GROBID\t" + TextUtilities.formatTwoDecimals(DdocUGROBID_rfap_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocUGROBID_rfap_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocUGROBID_rfap_f * 100));
System.out.println("\n___________________________________________________________");
System.out.println("RF: ");
System.out.println("\t\tPrecision\tRecall\t\tF-score");
System.out.println("ACE\t\t" + TextUtilities.formatTwoDecimals(ace_rf_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(ace_rf_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(ace_rf_f * 100));
System.out.println("Ddoc\t" + TextUtilities.formatTwoDecimals(Ddoc_rf_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(Ddoc_rf_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(Ddoc_rf_f * 100));
System.out.println("GROBID\t" + TextUtilities.formatTwoDecimals(grobid_rf_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(grobid_rf_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(grobid_rf_f * 100));
System.out.println("Ddoc+GROBID\t" + TextUtilities.formatTwoDecimals(DdocIGROBID_rf_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocIGROBID_rf_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocIGROBID_rf_f * 100));
System.out.println("Ddoc|GROBID\t" + TextUtilities.formatTwoDecimals(DdocUGROBID_rf_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocUGROBID_rf_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocUGROBID_rf_f * 100));
System.out.println("\n___________________________________________________________");
System.out.println("All: ");
System.out.println("\t\tPrecision\tRecall\t\tF-score");
System.out.println("ACE\t\t" + TextUtilities.formatTwoDecimals(ace_rfall_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(ace_rfall_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(ace_rfall_f * 100));
System.out.println("Ddoc\t" + TextUtilities.formatTwoDecimals(Ddoc_rfall_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(Ddoc_rfall_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(Ddoc_rfall_f * 100));
System.out.println("GROBID\t" + TextUtilities.formatTwoDecimals(grobid_rfall_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(grobid_rfall_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(grobid_rfall_f * 100));
System.out.println("Ddoc+GROBID\t" + TextUtilities.formatTwoDecimals(DdocIGROBID_rfall_precision * 100)
+ "\t\t"
+ TextUtilities.formatTwoDecimals(DdocIGROBID_rfall_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocIGROBID_rfall_f * 100));
System.out.println("Ddod|GROBID\t" + TextUtilities.formatTwoDecimals(DdocUGROBID_rfall_precision * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocUGROBID_rfall_recall * 100) + "\t\t"
+ TextUtilities.formatTwoDecimals(DdocUGROBID_rfall_f * 100));
// write Ddoc and reference results
File fileOut = new File(path.getParent() + "/reference.txt");
OutputStream os = new FileOutputStream(fileOut, false);
Writer referenceWriter = new OutputStreamWriter(os, "UTF-8");
//Collection.reverse(rf_reference);
//rf_reference = new TreeMap<String, ArrayList<String>>(Collections.reverseOrder());
System.out.println("Reference data in " + path.getParent() + "/reference.txt");
for (Map.Entry<String, ArrayList<String>> entry : rf_reference.entrySet()) {
dossierName = entry.getKey();
referenceWriter.write(dossierName + ".txt\n");
ArrayList<String> referenceListe1 = rfap_reference.get(dossierName);
ArrayList<String> liste = entry.getValue();
referenceWriter.write("RFAP: ");
for (String toto : referenceListe1) {
referenceWriter.write(toto + " ");
}
referenceWriter.write("\nRF: ");
for (String toto : liste) {
referenceWriter.write(toto + " ");
}
referenceWriter.write("\n");
}
referenceWriter.close();
} catch (Exception e) {
e.printStackTrace();
throw new GrobidException("An exception occurred while evaluating Grobid.", e);
}
}
}
| 55,458 | 50.782446 | 163 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/EvaluationUtilities.java
|
package org.grobid.trainer.evaluation;
import org.chasen.crfpp.Tagger;
import org.grobid.core.engines.tagging.GenericTagger;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.Pair;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import java.util.function.Function;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.grobid.core.engines.tagging.GenericTaggerUtils.getPlainLabel;
/**
* Generic evaluation of a single CRF model against an expected labeled result.
*
* @author Patrice Lopez
*/
public class EvaluationUtilities {
protected static final Logger logger = LoggerFactory.getLogger(EvaluationUtilities.class);
/**
* Method for running a CRF tagger for evaluation purposes (i.e. with
* expected and actual labels).
*
* @param ress list of feature lines (last column holds the expected label), with blank lines separating sequences
* @param tagger a tagger
* @return a report
*/
public static String taggerRun(List<String> ress, Tagger tagger) {
// clear internal context
tagger.clear();
StringBuilder res = new StringBuilder();
// we have to re-inject the pre-tags because they are removed by the JNI
// parse method
ArrayList<String> pretags = new ArrayList<>();
// add context
for (String piece : ress) {
if (piece.trim().length() == 0) {
// parse and change the internal state to 'parsed'
if (!tagger.parse()) {
// throw an exception
throw new RuntimeException("CRF++ parsing failed.");
}
for (int i = 0; i < tagger.size(); i++) {
for (int j = 0; j < tagger.xsize(); j++) {
res.append(tagger.x(i, j)).append("\t");
}
res.append(pretags.get(i)).append("\t");
res.append(tagger.y2(i));
res.append("\n");
}
res.append(" \n");
// clear internal context
tagger.clear();
pretags = new ArrayList<>();
} else {
tagger.add(piece);
tagger.add("\n");
// get last tag
StringTokenizer tokenizer = new StringTokenizer(piece, " \t");
while (tokenizer.hasMoreTokens()) {
String toke = tokenizer.nextToken();
if (!tokenizer.hasMoreTokens()) {
pretags.add(toke);
}
}
}
}
        // parse and change internal state to 'parsed'
if (!tagger.parse()) {
// throw an exception
throw new RuntimeException("CRF++ parsing failed.");
}
for (int i = 0; i < tagger.size(); i++) {
for (int j = 0; j < tagger.xsize(); j++) {
res.append(tagger.x(i, j)).append("\t");
}
res.append(pretags.get(i)).append("\t");
res.append(tagger.y2(i));
res.append(System.lineSeparator());
}
res.append(System.lineSeparator());
return res.toString();
}
public static ModelStats evaluateStandard(String path, final GenericTagger tagger) {
return evaluateStandard(path, tagger::label);
}
public static ModelStats evaluateStandard(String path, Function<List<String>, String> taggerFunction) {
String theResult = null;
try {
final BufferedReader bufReader = new BufferedReader(new InputStreamReader(new FileInputStream(path), StandardCharsets.UTF_8));
String line = null;
List<String> instance = new ArrayList<>();
while ((line = bufReader.readLine()) != null) {
instance.add(line);
}
long time = System.currentTimeMillis();
theResult = taggerFunction.apply(instance);
bufReader.close();
System.out.println("Labeling took: " + (System.currentTimeMillis() - time) + " ms");
} catch (Exception e) {
throw new GrobidException("An exception occurred while evaluating Grobid.", e);
}
return computeStats(theResult);
}
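    /*
     * Usage sketch (hypothetical model and file name; assumes a test file in the
     * usual one-token-per-line format with blank lines separating instances):
     *
     *   GenericTagger tagger = ...; // obtain a tagger for the model under evaluation
     *   ModelStats stats = EvaluationUtilities.evaluateStandard("date.test", tagger);
     *   System.out.println(stats);
     */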
/** Computes the stats for a single model, returning a ModelStats object, which
* ships:
* - field level statistics
* - instances statistics
*/
public static ModelStats computeStats(String theResult) {
return new ModelStats(theResult);
}
/**
* computes the token level results
*/
@Deprecated
public static Stats tokenLevelStats(String theResult) {
Stats wordStats = new Stats();
String line = null;
StringTokenizer stt = new StringTokenizer(theResult, System.lineSeparator());
while (stt.hasMoreTokens()) {
line = stt.nextToken();
if (line.trim().length() == 0) {
continue;
}
            // the last two tokens, separated by a tabulation, give the
            // expected label and, last, the obtained label -> for Wapiti
StringTokenizer st = new StringTokenizer(line, "\t ");
String obtainedLabel = null;
String expectedLabel = null;
while (st.hasMoreTokens()) {
obtainedLabel = getPlainLabel(st.nextToken());
if (st.hasMoreTokens()) {
expectedLabel = obtainedLabel;
}
}
if ((expectedLabel == null) || (obtainedLabel == null)) {
continue;
}
processCounters(wordStats, obtainedLabel, expectedLabel);
/*if (!obtainedLabel.equals(expectedLabel)) {
logger.warn("Disagreement / expected: " + expectedLabel + " / obtained: " + obtainedLabel);
}*/
}
return wordStats;
}
private static void processCounters(Stats stats, String obtained, String expected) {
LabelStat expectedStat = stats.getLabelStat(expected);
LabelStat obtainedStat = stats.getLabelStat(obtained);
expectedStat.incrementExpected();
if (expected.equals(obtained)) {
expectedStat.incrementObserved();
} else {
expectedStat.incrementFalseNegative();
obtainedStat.incrementFalsePositive();
}
}
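    /*
     * Worked example: if a token is expected as <title> but tagged as <author>,
     * the <title> counter gains one expected and one false negative, and the
     * <author> counter gains one false positive. A correctly tagged token adds
     * one expected and one observed (true positive) to its label counter.
     */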
public static String computeMetrics(Stats stats) {
return stats.getTextReport();
}
public static String computeMetricsMD(Stats stats) {
return stats.getMarkDownReport();
}
}
| 6,803 | 33.190955 | 138 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/utilities/FieldSpecification.java
|
package org.grobid.trainer.evaluation.utilities;
import java.util.*;
/**
* Specification of field XML paths in different result documents for evaluation.
*
*/
public class FieldSpecification {
public String fieldName = null;
public List<String> nlmPath = new ArrayList<String>();
public List<String> grobidPath = new ArrayList<String>();
public List<String> pdfxPath = new ArrayList<String>();
public List<String> cerminePath = new ArrayList<String>();
public boolean isTextual = false;
/**
	 * This static method instantiates the fields with the appropriate paths
* in the different gold and extraction formats.
*/
public static void setUpFields(List<FieldSpecification> headerFields,
List<FieldSpecification> fulltextFields,
List<FieldSpecification> citationsFields,
List<String> headerLabels,
List<String> fulltextLabels,
List<String> citationsLabels) {
// header
// title
FieldSpecification titleField = new FieldSpecification();
titleField.fieldName = "title";
titleField.isTextual = true;
titleField.grobidPath.add("//titleStmt/title/text()");
titleField.nlmPath.add("/article/front/article-meta/title-group/article-title//text()");
titleField.pdfxPath.add("/pdfx/article/front/title-group/article-title/text()");
headerFields.add(titleField);
headerLabels.add("title");
// authors
FieldSpecification authorField = new FieldSpecification();
authorField.fieldName = "authors";
authorField.isTextual = true;
//authorField.hasMultipleValue = true;
/*authorField.grobidPath.
add("//sourceDesc/biblStruct/analytic/author/persName/forename[@type=\"first\"]");
authorField.grobidPath.
add("//sourceDesc/biblStruct/analytic/author/persName/forename[@type=\"middle\"]");*/
authorField.grobidPath.
add("//sourceDesc/biblStruct/analytic/author/persName/surname/text()");
//authorField.nlmPath.
// add("/article/front/article-meta/contrib-group/contrib[@contrib-type=\"author\"]/name/given-names");
authorField.nlmPath.
add("/article/front/article-meta/contrib-group/contrib[@contrib-type=\"author\"]/name/surname/text()");
authorField.pdfxPath.add("/pdfx/article/front/contrib-group/contrib[@contrib-type=\"author\"]/name/text()");
headerFields.add(authorField);
headerLabels.add("authors");
		// first author
FieldSpecification firstAuthorField = new FieldSpecification();
firstAuthorField.fieldName = "first_author";
firstAuthorField.isTextual = true;
/*firstAuthorField.grobidPath
.add("//sourceDesc/biblStruct/analytic/author/persName/forename[@type=\"first\"]");
firstAuthorField.grobidPath
.add("//sourceDesc/biblStruct/analytic/author/persName/forename[@type=\"middle\"]");*/
firstAuthorField.grobidPath
.add("//sourceDesc/biblStruct/analytic/author[1]/persName/surname/text()");
//firstAuthorField.nlmPath
// .add("/article/front/article-meta/contrib-group/contrib[@contrib-type=\"author\"]/name/given-names");
firstAuthorField.nlmPath
.add("/article/front/article-meta/contrib-group/contrib[@contrib-type=\"author\"][1]/name/surname/text()");
firstAuthorField.pdfxPath
.add("/pdfx/article/front/contrib-group/contrib[@contrib-type=\"author\"][1]/name/text()");
headerFields.add(firstAuthorField);
headerLabels.add("first_author");
// affiliation
FieldSpecification affiliationField = new FieldSpecification();
affiliationField.fieldName = "affiliations";
affiliationField.isTextual = true;
//affiliationField.hasMultipleValue = true;
affiliationField.grobidPath.
add("//sourceDesc/biblStruct/analytic/author/affiliation/orgName/text()");
affiliationField.nlmPath.
add("/article/front/article-meta/contrib-group/aff/text()");
affiliationField.pdfxPath.add("/pdfx/article/front/contrib-group");
//headerFields.add(affiliationField);
//headerLabels.add("affiliations");
// date
FieldSpecification dateField = new FieldSpecification();
dateField.fieldName = "date";
dateField.grobidPath.
add("//publicationStmt/date[1]/@when");
dateField.nlmPath.
add("/article/front/article-meta/pub-date[@pub-type=\"pmc-release\"][1]//text()");
//in bioRxiv: <pub-date pub-type="epub"><year>2014</year></pub-date>
//headerFields.add(dateField);
//headerLabels.add("date");
// abstract
FieldSpecification abstractField = new FieldSpecification();
abstractField.fieldName = "abstract";
abstractField.isTextual = true;
abstractField.grobidPath.
add("//profileDesc/abstract//text()");
abstractField.nlmPath.
add("/article/front/article-meta/abstract//text()");
headerFields.add(abstractField);
headerLabels.add("abstract");
// keywords
FieldSpecification keywordsField = new FieldSpecification();
keywordsField.fieldName = "keywords";
keywordsField.isTextual = true;
keywordsField.grobidPath.
add("//profileDesc/textClass/keywords//text()");
keywordsField.nlmPath.
add("/article/front/article-meta/kwd-group/kwd/text()");
headerFields.add(keywordsField);
headerLabels.add("keywords");
// DOI (header)
FieldSpecification doiField = new FieldSpecification();
doiField.fieldName = "doi";
doiField.grobidPath.
add("//sourceDesc/biblStruct/idno[@type=\"DOI\"]/text()");
doiField.nlmPath.
add("/article/front/article-meta/article-id[@pub-id-type=\"doi\"]/text()");
//headerFields.add(doiField);
//headerLabels.add("doi");
// citations
// the first field gives the base path for each citation structure
FieldSpecification baseCitation = new FieldSpecification();
baseCitation.fieldName = "base";
baseCitation.grobidPath.
add("//back/div/listBibl/biblStruct");
baseCitation.nlmPath.
add("//ref-list/ref"); // note: sometimes we just have the raw citation bellow this!
baseCitation.pdfxPath.
add("//ref-list/ref"); // note: there is nothing beyond that in pdfx xml results!
citationsFields.add(baseCitation);
// the rest of the citation fields are relative to the base path
// title
FieldSpecification titleField2 = new FieldSpecification();
titleField2.fieldName = "title";
titleField2.isTextual = true;
titleField2.grobidPath.
add("analytic/title/text()");
titleField2.nlmPath.
add("*/article-title//text()");
citationsFields.add(titleField2);
citationsLabels.add("title");
// authors
FieldSpecification authorField2 = new FieldSpecification();
authorField2.fieldName = "authors";
authorField2.isTextual = true;
authorField2.grobidPath.add("analytic/author/persName/surname/text()");
authorField2.nlmPath.add("*//surname[parent::name|parent::string-name]/text()");
//authorField2.nlmPath.add("*//name/surname/text()");
//authorField2.nlmPath.add("*//string-name/surname/text()");
citationsFields.add(authorField2);
citationsLabels.add("authors");
		// first author
FieldSpecification firstAuthorField2 = new FieldSpecification();
firstAuthorField2.fieldName = "first_author";
firstAuthorField2.isTextual = true;
firstAuthorField2.grobidPath.add("analytic/author[1]/persName/surname/text()");
//firstAuthorField2.nlmPath.add("*//surname[parent::name|parent::string-name][1]/text()");
firstAuthorField2.nlmPath.add("*//name[1]/surname/text()");
firstAuthorField2.nlmPath.add("*//string-name[1]/surname/text()");
citationsFields.add(firstAuthorField2);
citationsLabels.add("first_author");
// date
FieldSpecification dateField2 = new FieldSpecification();
dateField2.fieldName = "date";
dateField2.grobidPath.
add("monogr/imprint/date/@when");
dateField2.nlmPath.
add("*/year/text()");
citationsFields.add(dateField2);
citationsLabels.add("date");
// monograph title
FieldSpecification inTitleField2 = new FieldSpecification();
inTitleField2.fieldName = "inTitle";
inTitleField2.isTextual = true;
inTitleField2.grobidPath.
add("monogr/title/text()");
inTitleField2.nlmPath.
add("*/source/text()");
citationsFields.add(inTitleField2);
citationsLabels.add("inTitle");
// volume
FieldSpecification volumeField = new FieldSpecification();
volumeField.fieldName = "volume";
volumeField.grobidPath.
add("monogr/imprint/biblScope[@unit=\"volume\" or @unit=\"vol\"]/text()");
volumeField.nlmPath.
add("*/volume/text()");
citationsFields.add(volumeField);
citationsLabels.add("volume");
// issue
FieldSpecification issueField = new FieldSpecification();
issueField.fieldName = "issue";
issueField.grobidPath.
add("monogr/imprint/biblScope[@unit=\"issue\"]/text()");
issueField.nlmPath.
add("*/issue/text()");
citationsFields.add(issueField);
citationsLabels.add("issue");
// first page
FieldSpecification pageField = new FieldSpecification();
pageField.fieldName = "page";
pageField.grobidPath.
add("monogr/imprint/biblScope[@unit=\"page\"]/@from");
pageField.nlmPath.
add("*/fpage/text()");
citationsFields.add(pageField);
citationsLabels.add("page");
// publisher
FieldSpecification publisherField = new FieldSpecification();
publisherField.fieldName = "publisher";
publisherField.isTextual = true;
publisherField.grobidPath.
add("monogr/imprint/publisher/text()");
publisherField.nlmPath.
add("*/publisher-name/text()");
//citationsFields.add(publisherField);
//citationsLabels.add("publisher");
// citation identifier (will be used for citation mapping, not for matching)
FieldSpecification citationIdField = new FieldSpecification();
citationIdField.fieldName = "id";
citationIdField.isTextual = true;
citationIdField.grobidPath.
add("@id");
citationIdField.nlmPath.
add("@id");
citationsFields.add(citationIdField);
citationsLabels.add("id");
// DOI
FieldSpecification citationDOIField = new FieldSpecification();
citationDOIField.fieldName = "doi";
citationDOIField.isTextual = true;
citationDOIField.grobidPath.
add("analytic/idno[@type=\"DOI\"]/text()");
citationDOIField.nlmPath.
add("*/pub-id[@pub-id-type=\"doi\"]/text()");
citationsFields.add(citationDOIField);
citationsLabels.add("doi");
// PMID
FieldSpecification citationPMIDField = new FieldSpecification();
citationPMIDField.fieldName = "pmid";
citationPMIDField.isTextual = true;
citationPMIDField.grobidPath.
add("analytic/idno[@type=\"PMID\"]/text()");
citationPMIDField.nlmPath.
add("*/pub-id[@pub-id-type=\"pmid\"]/text()");
citationsFields.add(citationPMIDField);
citationsLabels.add("pmid");
// PMC
FieldSpecification citationPMCIDField = new FieldSpecification();
citationPMCIDField.fieldName = "pmcid";
citationPMCIDField.isTextual = true;
citationPMCIDField.grobidPath.
add("analytic/idno[@type=\"PMCID\"]/text()");
citationPMCIDField.nlmPath.
add("*/pub-id[@pub-id-type=\"pmcid\"]/text()");
citationsFields.add(citationPMCIDField);
citationsLabels.add("pmcid");
// full text structures
/*FieldSpecification sectionReferenceField = new FieldSpecification();
sectionReferenceField.fieldName = "references";
sectionReferenceField.isTextual = true;
sectionReferenceField.grobidPath.
add("//back/div/listBibl/biblStruct//text()");
sectionReferenceField.nlmPath.
add("//ref-list/ref//text()");
fulltextFields.add(sectionReferenceField);
fulltextLabels.add("references");*/
FieldSpecification sectionTitleField = new FieldSpecification();
sectionTitleField.fieldName = "section_title";
sectionTitleField.isTextual = true;
sectionTitleField.grobidPath.
add("//text/body/div/head/text()");
sectionTitleField.nlmPath.
add("//body//sec/title/text()");
fulltextFields.add(sectionTitleField);
fulltextLabels.add("section_title");
FieldSpecification referenceMarkerField = new FieldSpecification();
referenceMarkerField.fieldName = "reference_citation";
referenceMarkerField.isTextual = true;
referenceMarkerField.grobidPath.
add("//ref[@type=\"bibr\"]/text()");
referenceMarkerField.nlmPath.
add("//xref[@ref-type=\"bibr\"]/text()");
fulltextFields.add(referenceMarkerField);
fulltextLabels.add("reference_citation");
FieldSpecification referenceFigureField = new FieldSpecification();
referenceFigureField.fieldName = "reference_figure";
referenceFigureField.isTextual = true;
referenceFigureField.grobidPath.
add("//ref[@type=\"figure\"]/text()");
referenceFigureField.nlmPath.
add("//xref[@ref-type=\"fig\"]/text()");
fulltextFields.add(referenceFigureField);
fulltextLabels.add("reference_figure");
FieldSpecification referenceTableField = new FieldSpecification();
referenceTableField.fieldName = "reference_table";
referenceTableField.isTextual = true;
referenceTableField.grobidPath.
add("//ref[@type=\"table\"]/text()");
referenceTableField.nlmPath.
add("//xref[@ref-type=\"table\"]/text()");
fulltextFields.add(referenceTableField);
fulltextLabels.add("reference_table");
FieldSpecification figureTitleField = new FieldSpecification();
figureTitleField.fieldName = "figure_title";
figureTitleField.isTextual = true;
figureTitleField.grobidPath.
add("//figure[not(@type)]/head/text()");
figureTitleField.nlmPath.
add("//fig/label/text()");
// eLife JATS support
figureTitleField.nlmPath.
add("//fig/caption/title/text()");
fulltextFields.add(figureTitleField);
fulltextLabels.add("figure_title");
/*FieldSpecification figureCaptionField = new FieldSpecification();
figureCaptionField.fieldName = "figure_caption";
figureCaptionField.isTextual = true;
figureCaptionField.grobidPath.
add("//figure[not(@type)]/figDesc/text()");
figureCaptionField.nlmPath.
add("//fig/caption/p/text()");
fulltextFields.add(figureCaptionField);
fulltextLabels.add("figure_caption");*/
/*FieldSpecification figureLabelField = new FieldSpecification();
figureLabelField.fieldName = "figure_label";
figureLabelField.isTextual = true;
figureLabelField.grobidPath.
add("//figure[not(@type)]/label/text()");
figureLabelField.nlmPath.
add("//fig/label/text()");
fulltextFields.add(figureLabelField);
fulltextLabels.add("figure_label");*/
FieldSpecification tableTitleField = new FieldSpecification();
tableTitleField.fieldName = "table_title";
tableTitleField.isTextual = true;
tableTitleField.grobidPath.
add("//figure[@type=\"table\"]/head/text()");
tableTitleField.nlmPath.
add("//table-wrap/label/text()");
// eLife JATS support
tableTitleField.nlmPath.
add("//table-wrap/caption/title/text()");
fulltextFields.add(tableTitleField);
fulltextLabels.add("table_title");
/*FieldSpecification tableLabelField = new FieldSpecification();
tableLabelField.fieldName = "figure_label";
tableLabelField.isTextual = true;
tableLabelField.grobidPath.
add("//figure[@type=\"table\"]/label/text()");
tableLabelField.nlmPath.
add("//fig/label/text()");
fulltextFields.add(tableLabelField);
fulltextLabels.add("figure_label");*/
/*FieldSpecification tableCaptionField = new FieldSpecification();
tableCaptionField.fieldName = "table_caption";
tableCaptionField.isTextual = true;
tableCaptionField.grobidPath.
add("//figure[@type=\"table\"]/figDesc/text()");
tableCaptionField.nlmPath.
add("//table-wrap/caption/p/text()");
fulltextFields.add(tableCaptionField);
fulltextLabels.add("figure_caption");*/
//labels.add("section_title");
//labels.add("paragraph");
//labels.add("citation_marker");
//labels.add("figure_marker");
//labels.add("table_marker");
FieldSpecification dataAvailabilityFulltextField = new FieldSpecification();
dataAvailabilityFulltextField.fieldName = "availability_stmt";
dataAvailabilityFulltextField.isTextual = true;
dataAvailabilityFulltextField.grobidPath
.add("//div[@type=\"availability\"]//text()");
dataAvailabilityFulltextField.nlmPath
.add("//sec[@sec-type=\"availability\"]//text()");
dataAvailabilityFulltextField.nlmPath
.add("//p[@content-type=\"availability\"]//text()");
dataAvailabilityFulltextField.nlmPath
.add("//sec[@specific-use=\"availability\"]//text()");
// for eLife JATS support
dataAvailabilityFulltextField.nlmPath
.add("//sec[@sec-type=\"data-availability\"]//text()");
// the following for PLOS JATS support
dataAvailabilityFulltextField.nlmPath
.add("//custom-meta[@id=\"data-availability\"]/meta-value//text()");
fulltextFields.add(dataAvailabilityFulltextField);
fulltextLabels.add("availability_stmt");
FieldSpecification fundingFulltextField = new FieldSpecification();
fundingFulltextField.fieldName = "funding_stmt";
fundingFulltextField.isTextual = true;
fundingFulltextField.grobidPath
.add("//div[@type=\"funding\"]//text()");
fundingFulltextField.nlmPath
.add("//sec[@sec-type=\"funding\"]//text()");
fundingFulltextField.nlmPath
.add("//p[@content-type=\"funding\"]//text()");
fundingFulltextField.nlmPath
.add("//sec[@specific-use=\"funding\"]//text()");
// for eLife JATS support
// the following for PLOS support
fundingFulltextField.nlmPath
.add("//funding-statement//text()");
fulltextFields.add(fundingFulltextField);
fulltextLabels.add("funding_stmt");
}
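	/*
	 * Usage sketch (hypothetical): build the field lists once, then resolve the
	 * stored XPath expressions against a GROBID TEI result and an NLM/JATS gold
	 * file to compare field values.
	 *
	 *   List<FieldSpecification> headerFields = new ArrayList<>();
	 *   List<FieldSpecification> fulltextFields = new ArrayList<>();
	 *   List<FieldSpecification> citationsFields = new ArrayList<>();
	 *   List<String> headerLabels = new ArrayList<>();
	 *   List<String> fulltextLabels = new ArrayList<>();
	 *   List<String> citationsLabels = new ArrayList<>();
	 *   FieldSpecification.setUpFields(headerFields, fulltextFields, citationsFields,
	 *           headerLabels, fulltextLabels, citationsLabels);
	 */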
public static String grobidCitationContextId = "//ref[@type=\"bibr\"]/@target";
public static String grobidBibReferenceId = "//listBibl/biblStruct/@id";
public static String nlmCitationContextId = "//xref[@ref-type=\"bibr\"]/@rid";
public static String nlmBibReferenceId = "//ref-list/ref/@id";
}
| 17,613 | 37.458515 | 111 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/evaluation/utilities/NamespaceContextMap.java
|
package org.grobid.trainer.evaluation.utilities;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import javax.xml.XMLConstants;
import javax.xml.namespace.NamespaceContext;
/**
* An implementation of <a
* href="http://java.sun.com/javase/6/docs/api/javax/xml/namespace/NamespaceContext.html">
* NamespaceContext </a>. Instances are immutable.
*
* Code from http://illegalargumentexception.blogspot.com/2009/05/java-using-xpath-with-namespaces-and.html
* Free to reuse.
*
* @author McDowell
*/
public final class NamespaceContextMap implements
NamespaceContext {
private final Map<String, String> prefixMap;
private final Map<String, Set<String>> nsMap;
/**
* Constructor that takes a map of XML prefix-namespaceURI values. A defensive
* copy is made of the map. An IllegalArgumentException will be thrown if the
* map attempts to remap the standard prefixes defined in the NamespaceContext
* contract.
*
* @param prefixMappings
* a map of prefix:namespaceURI values
*/
public NamespaceContextMap(Map<String, String> prefixMappings) {
prefixMap = createPrefixMap(prefixMappings);
nsMap = createNamespaceMap(prefixMap);
}
/**
* Convenience constructor.
*
* @param mappingPairs
* pairs of prefix-namespaceURI values
*/
public NamespaceContextMap(String... mappingPairs) {
this(toMap(mappingPairs));
}
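	/*
	 * Usage sketch (hypothetical "doc" variable): bind a "tei" prefix for XPath
	 * evaluation over a TEI document.
	 *
	 *   XPath xpath = XPathFactory.newInstance().newXPath();
	 *   xpath.setNamespaceContext(
	 *           new NamespaceContextMap("tei", "http://www.tei-c.org/ns/1.0"));
	 *   String title = xpath.evaluate("//tei:titleStmt/tei:title", doc);
	 */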
private static Map<String, String> toMap(
String... mappingPairs) {
Map<String, String> prefixMappings = new HashMap<String, String>(
mappingPairs.length / 2);
for (int i = 0; i < mappingPairs.length; i++) {
prefixMappings
.put(mappingPairs[i], mappingPairs[++i]);
}
return prefixMappings;
}
private Map<String, String> createPrefixMap(
Map<String, String> prefixMappings) {
Map<String, String> prefixMap = new HashMap<String, String>(
prefixMappings);
addConstant(prefixMap, XMLConstants.XML_NS_PREFIX,
XMLConstants.XML_NS_URI);
addConstant(prefixMap, XMLConstants.XMLNS_ATTRIBUTE,
XMLConstants.XMLNS_ATTRIBUTE_NS_URI);
return Collections.unmodifiableMap(prefixMap);
}
private void addConstant(Map<String, String> prefixMap,
String prefix, String nsURI) {
String previous = prefixMap.put(prefix, nsURI);
if (previous != null && !previous.equals(nsURI)) {
throw new IllegalArgumentException(prefix + " -> "
+ previous + "; see NamespaceContext contract");
}
}
private Map<String, Set<String>> createNamespaceMap(
Map<String, String> prefixMap) {
Map<String, Set<String>> nsMap = new HashMap<String, Set<String>>();
for (Map.Entry<String, String> entry : prefixMap
.entrySet()) {
String nsURI = entry.getValue();
Set<String> prefixes = nsMap.get(nsURI);
if (prefixes == null) {
prefixes = new HashSet<String>();
nsMap.put(nsURI, prefixes);
}
prefixes.add(entry.getKey());
}
for (Map.Entry<String, Set<String>> entry : nsMap
.entrySet()) {
Set<String> readOnly = Collections
.unmodifiableSet(entry.getValue());
entry.setValue(readOnly);
}
return nsMap;
}
@Override
public String getNamespaceURI(String prefix) {
checkNotNull(prefix);
String nsURI = prefixMap.get(prefix);
return nsURI == null ? XMLConstants.NULL_NS_URI : nsURI;
}
@Override
public String getPrefix(String namespaceURI) {
checkNotNull(namespaceURI);
Set<String> set = nsMap.get(namespaceURI);
return set == null ? null : set.iterator().next();
}
@Override
	public Iterator<String> getPrefixes(String namespaceURI) {
		checkNotNull(namespaceURI);
		Set<String> set = nsMap.get(namespaceURI);
		// per the NamespaceContext contract, an unbound URI yields an empty iterator
		return set == null ? Collections.<String>emptySet().iterator() : set.iterator();
	}
private void checkNotNull(String value) {
if (value == null) {
throw new IllegalArgumentException("null");
}
}
/**
* @return an unmodifiable map of the mappings in the form prefix-namespaceURI
*/
public Map<String, String> getMap() {
return prefixMap;
}
}
| 4,196 | 29.194245 | 107 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIMonographSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.Stack;
import java.util.StringTokenizer;
/**
* SAX parser for the TEI format for monograph data. Normally all training data should be in this unique format.
 * The segmentation of tokens must be identical to the one from pdf2xml files so that
* training and online input tokens are aligned.
*
* @author Patrice Lopez
*/
public class TEIMonographSaxParser extends DefaultHandler {
//private Stack<StringBuffer> accumulators = null; // accumulated parsed piece of texts
private StringBuffer accumulator = null; // current accumulated text
private String output = null;
private Stack<String> currentTags = null;
//private String fileName = null;
//private String pdfName = null;
private ArrayList<String> labeled = null; // store line by line the labeled data
public TEIMonographSaxParser() {
labeled = new ArrayList<String>();
currentTags = new Stack<String>();
//accumulators = new Stack<StringBuffer>();
accumulator = new StringBuffer();
}
public void characters(char[] buffer, int start, int length) {
        accumulator.append(buffer, start, length);
}
public String getText() {
if (accumulator != null) {
return accumulator.toString().trim();
} else {
return null;
}
}
public ArrayList<String> getLabeledResult() {
return labeled;
}
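    /*
     * Usage sketch (hypothetical file name): the handler is driven by a standard
     * SAX parser and accumulates one "token label" line per token.
     *
     *   SAXParser p = SAXParserFactory.newInstance().newSAXParser();
     *   TEIMonographSaxParser handler = new TEIMonographSaxParser();
     *   p.parse(new File("monograph.training.tei.xml"), handler);
     *   List<String> labeled = handler.getLabeledResult();
     */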
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ((!qName.equals("lb")) & (!qName.equals("pb"))) {
writeData(qName, true);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("lb")) {
accumulator.append(" +L+ ");
} else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
} else {
// we have to write first what has been accumulated yet with the upper-level tag
String text = getText();
if (text != null) {
if (text.length() > 0) {
writeData(qName, false);
}
}
accumulator.setLength(0);
if (qName.equals("header")) {
currentTags.push("<header>");
} else if (qName.equals("other")) {
currentTags.push("<other>");
} else if (qName.equals("page_header")) {
currentTags.push("<page_header>");
} else if (qName.equals("page_footnote")) {
currentTags.push("<page_footnote>");
} else if (qName.equals("page") | qName.equals("pages")) {
currentTags.push("<page>");
} else if (qName.equals("reference")) {
currentTags.push("<reference>");
} else if (qName.equals("toc")) {
currentTags.push("<toc>");
} else if (qName.equals("index")) {
currentTags.push("<index>");
} else if (qName.equals("section")) {
currentTags.push("<section>");
}
}
}
private void writeData(String qName, boolean pop) {
if ((qName.equals("header")) | (qName.equals("other")) | (qName.equals("page_header")) |
(qName.equals("page_footnote")) | (qName.equals("page")) | (qName.equals("pages")) |
(qName.equals("reference")) |
(qName.equals("toc")) | (qName.equals("index")) | (qName.equals("section"))
) {
String currentTag = null;
if (pop) {
currentTag = currentTags.pop();
} else {
currentTag = currentTags.peek();
}
String text = getText();
// we segment the text
StringTokenizer st = new StringTokenizer(text, " \n\t" + TextUtilities.fullPunctuations, true);
boolean begin = true;
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0) continue;
if (tok.equals("+L+")) {
labeled.add("@newline\n");
} else if (tok.equals("+PAGE+")) {
// page break should be a distinct feature
labeled.add("@newpage\n");
} else {
String content = tok;
int i = 0;
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(content + " " + currentTag + "\n");
}
}
}
begin = false;
}
accumulator.setLength(0);
}
}
}
| 5,440 | 35.516779 | 112 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIDateSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.List;
/**
 * SAX parser for date sequences encoded in the TEI format data.
 * Segmentation of tokens must be identical to the one from pdf2xml files so that
* training and online input tokens are identical.
*
* @author Patrice Lopez
*/
public class TEIDateSaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String currentTag = null;
private ArrayList<String> labeled = null; // store line by line the labeled data
public int n = 0;
public TEIDateSaxParser() {
labeled = new ArrayList<String>();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public ArrayList<String> getLabeledResult() {
return labeled;
}
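    /*
     * Illustrative output (hypothetical input): parsing
     * <date><day>12</day> <month>June</month> <year>1999</year></date>
     * produces labeled lines such as
     *   12 I-<day>
     *   June I-<month>
     *   1999 I-<year>
     * with a blank separator line emitted after each </date>.
     */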
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if (( (qName.equals("year")) | (qName.equals("month")) | (qName.equals("day"))) & (currentTag != null)) {
String text = getText();
writeField(text);
}
/*else if (qName.equals("lb")) {
// we note a line break
accumulator.append(" +L+ ");
} else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
} */
else if (qName.equals("date")) {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
}
labeled.add("\n \n");
}
accumulator.setLength(0);
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
}
accumulator.setLength(0);
if (qName.equals("year")) {
currentTag = "<year>";
} else if (qName.equals("month")) {
currentTag = "<month>";
} else if (qName.equals("day")) {
currentTag = "<day>";
} else if (qName.equals("date")) {
n++;
}
}
private void writeField(String text) {
// we segment the text
//StringTokenizer st = new StringTokenizer(text, " \n\t");
List<String> tokens = TextUtilities.segment(text, TextUtilities.punctuations);
boolean begin = true;
for (String tok : tokens) {
tok = tok.trim();
if (tok.length() == 0) continue;
boolean punct1 = false;
if (tok.equals("+L+")) {
labeled.add("@newline\n");
} else if (tok.equals("+PAGE+")) {
                // page break not relevant for dates
labeled.add("@newline\n");
} else {
String content = tok;
                for (int i = 0; i < TextUtilities.punctuations.length(); i++) {
if (tok.length() > 0) {
if (tok.charAt(tok.length() - 1) == TextUtilities.punctuations.charAt(i)) {
punct1 = true;
content = tok.substring(0, tok.length() - 1);
break;
}
}
}
if (tok.length() > 0) {
if ((tok.startsWith("(")) & (tok.length() > 1)) {
if (punct1)
content = tok.substring(1, tok.length() - 1);
else
content = tok.substring(1, tok.length());
if (begin) {
labeled.add("(" + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add("(" + " " + currentTag + "\n");
}
} else if ((tok.startsWith("[")) & (tok.length() > 1)) {
if (punct1)
content = tok.substring(1, tok.length() - 1);
else
content = tok.substring(1, tok.length());
if (begin) {
labeled.add("[" + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add("[" + " " + currentTag + "\n");
}
} else if ((tok.startsWith("\"")) & (tok.length() > 1)) {
if (punct1)
content = tok.substring(1, tok.length() - 1);
else
content = tok.substring(1, tok.length());
if (begin) {
labeled.add("\"" + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add("\"" + " " + currentTag + "\n");
}
}
}
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(content + " " + currentTag + "\n");
}
}
if (punct1) {
if (begin) {
labeled.add(tok.charAt(tok.length() - 1) + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(tok.charAt(tok.length() - 1) + " " + currentTag + "\n");
}
}
}
begin = false;
}
}
}
| 6,396 | 34.148352 | 113 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIReferenceSegmenterSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
/**
* SAX parser for reference strings encoded in the TEI format data for training purposes.
 * Segmentation of tokens must be identical to the one from pdf2xml files so that
* training and online input tokens are identical.
*
* @author Vyacheslav Zholudev
*/
public class TEIReferenceSegmenterSaxParser extends DefaultHandler {
private StringBuilder accumulator = new StringBuilder(); // Accumulate parsed text
private StringBuilder allContent = new StringBuilder();
// private String output = null;
private String currentTag = null;
private List<String> labeled = null; // store line by line the labeled data
// public List<List<OffsetPosition>> placesPositions = null; // list of offset positions of place names
//private Writer writerAddress = null; // writer for the address model
// private Writer writerCORA = null; // writer for conversion into TEI header model
// public int n = 0;
public Lexicon lexicon = Lexicon.getInstance();
private int totalReferences = 0;
// public void setTEIHeaderOutput(Writer writer) {
// writerCORA = writer;
// }
public TEIReferenceSegmenterSaxParser() {
labeled = new ArrayList<String>();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
//if (allContent != null) {
// allContent.append(buffer, start, length);
//}
}
public String getText() {
return accumulator.toString().trim();
}
public List<String> getLabeledResult() {
return labeled;
}
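    /*
     * Illustrative output (hypothetical input): for
     * <bibl><label>1.</label> Smith J. ...</bibl>
     * the labeled sequence starts with
     *   1 I-<label>
     *   . <label>
     *   Smith I-<reference>
     *   J <reference>
     * since each <bibl> content is labeled <reference> and its numbering <label>.
     */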
public void endElement(String uri, String localName, String qName) throws SAXException {
if (qName.equals("label")) {
String text = getText();
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
accumulator.setLength(0);
} else if (qName.equals("bibl")) {
String text = getText();
currentTag = "<reference>";
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
accumulator.setLength(0);
} else if (qName.equals("lb") || qName.equals("pb")) {
// we note a line break
accumulator.append(" @newline ");
}
// if (((qName.equals("addrLine")) ||
// (qName.equals("settlement")) ||
// (qName.equals("region")) ||
// (qName.equals("postCode")) ||
// (qName.equals("postBox")) ||
// (qName.equals("marker")) ||
// (qName.equals("country") ||
// (qName.equals("orgName"))))) {
// String text = getText();
// writeField(text);
// if (allContent != null) {
// if (allContent.length() != 0) {
// allContent.append(" ");
// }
// allContent.append(text);
// }
// accumulator.setLength(0);
// } else if (qName.equals("lb") | qName.equals("pb")) {
// // we note a line break
// accumulator.append(" @newline ");
// } else if (qName.equals("affiliation")) {
// String text = getText();
// if (text.length() > 0) {
// currentTag = "<other>";
// writeField(text);
// if (allContent != null) {
// if (allContent.length() != 0) {
// allContent.append(" ");
// }
// allContent.append(text);
// }
// }
// accumulator.setLength(0);
// } else if (qName.equals("author")) {
// String text = getText();
// if (text.length() > 0) {
// currentTag = "<other>";
// writeField(text);
// if (allContent != null) {
// if (allContent.length() != 0) {
// allContent.append(" ");
// }
// allContent.append(text);
// }
// }
// labeled.add("\n \n");
//
// String allString = allContent.toString().trim();
// allString = allString.replace("@newline", "");
// List<OffsetPosition> toto = lexicon.inCityNames(allString);
// placesPositions.add(toto);
// allContent = null;
// allString = null;
//
// accumulator.setLength(0);
// } else {
// accumulator.setLength(0);
// }
}
public void startElement(String namespaceURI, String localName, String qName, Attributes atts) throws SAXException {
if (!qName.equals("lb") && !qName.equals("pb")) {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
}
accumulator.setLength(0);
}
if (qName.equals("bibl")) {
currentTag = null;
accumulator.setLength(0);
totalReferences++;
} else if (qName.equals("label")) {
currentTag = "<label>";
}
}
private void writeField(String text) {
// we segment the text
StringTokenizer st = new StringTokenizer(text, " \n\t" + TextUtilities.fullPunctuations, true);
boolean begin = true;
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0) {
continue;
}
if (tok.equals("@newline")) {
labeled.add("@newline");
} else if (tok.equals("+PAGE+")) {
// page break - no influence here
labeled.add("@newline");
} else {
if (tok.length() > 0) {
if (begin) {
labeled.add(tok + " I-" + currentTag);
begin = false;
} else {
labeled.add(tok + " " + currentTag);
}
}
}
}
}
public int getTotalReferences() {
return totalReferences;
}
}
| 7,065 | 33.637255 | 120 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIEbookSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.Stack;
import java.util.StringTokenizer;
/**
 * SAX parser for the TEI format for ebook data. Normally all training data should be in this unique format.
 * The segmentation of tokens must be identical to the one from pdf2xml files so that
* training and online input tokens are aligned.
*
* @author Patrice Lopez
*/
public class TEIEbookSaxParser extends DefaultHandler {
//private Stack<StringBuffer> accumulators = null; // accumulated parsed piece of texts
private StringBuffer accumulator = null; // current accumulated text
private String output = null;
private Stack<String> currentTags = null;
//private String fileName = null;
//private String pdfName = null;
private ArrayList<String> labeled = null; // store line by line the labeled data
public TEIEbookSaxParser() {
labeled = new ArrayList<String>();
currentTags = new Stack<String>();
//accumulators = new Stack<StringBuffer>();
accumulator = new StringBuffer();
}
public void characters(char[] buffer, int start, int length) {
        accumulator.append(buffer, start, length);
}
public String getText() {
if (accumulator != null) {
return accumulator.toString().trim();
} else {
return null;
}
}
public ArrayList<String> getLabeledResult() {
return labeled;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ((!qName.equals("lb")) & (!qName.equals("pb"))) {
writeData(qName, true);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("lb")) {
accumulator.append(" +L+ ");
} else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
} else {
// we have to write first what has been accumulated yet with the upper-level tag
String text = getText();
if (text != null) {
if (text.length() > 0) {
writeData(qName, false);
}
}
accumulator.setLength(0);
if (qName.equals("header")) {
currentTags.push("<header>");
} else if (qName.equals("other")) {
currentTags.push("<other>");
} else if (qName.equals("page_header")) {
currentTags.push("<page_header>");
} else if (qName.equals("page_footnote")) {
currentTags.push("<page_footnote>");
} else if (qName.equals("page") | qName.equals("pages")) {
currentTags.push("<page>");
} else if (qName.equals("reference")) {
currentTags.push("<reference>");
} else if (qName.equals("toc")) {
currentTags.push("<toc>");
} else if (qName.equals("index")) {
currentTags.push("<index>");
} else if (qName.equals("section")) {
currentTags.push("<section>");
}
}
}
private void writeData(String qName, boolean pop) {
if ((qName.equals("header")) | (qName.equals("other")) | (qName.equals("page_header")) |
(qName.equals("page_footnote")) | (qName.equals("page")) | (qName.equals("pages")) |
(qName.equals("reference")) |
(qName.equals("toc")) | (qName.equals("index")) | (qName.equals("section"))
) {
String currentTag = null;
if (pop) {
currentTag = currentTags.pop();
} else {
currentTag = currentTags.peek();
}
String text = getText();
// we segment the text
StringTokenizer st = new StringTokenizer(text, " \n\t" + TextUtilities.fullPunctuations, true);
boolean begin = true;
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0) continue;
if (tok.equals("+L+")) {
labeled.add("@newline\n");
} else if (tok.equals("+PAGE+")) {
// page break should be a distinct feature
labeled.add("@newpage\n");
} else {
String content = tok;
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(content + " " + currentTag + "\n");
}
}
}
begin = false;
}
accumulator.setLength(0);
}
}
}
| 5,431 | 35.456376 | 111 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEISegmentationSaxParser.java
|
package org.grobid.trainer.sax;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.StringTokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.grobid.core.engines.label.TaggingLabels.AVAILABILITY_LABEL;
/**
* SAX parser for the TEI format for the training data for the segmentation model.
* Normally all training data should be in this unique format.
* The segmentation of tokens must be identical as the one from pdf2xml files so that
* training and online input tokens are aligned.
*
* @author Patrice Lopez
*/
public class TEISegmentationSaxParser extends DefaultHandler {
/* TEI -> label mapping (10 labels for this model)
cover page (<cover>): titlePage (optionally under front),
document header (<header>): front,
page footer (<footnote>): note type footnote,
page header (<headnote>): note type headnote,
margin note (<marginnote>): note type margin,
document body (<body>): body,
bibliographical section (<references>): listbibl,
page number (<page>): page,
		? each bibliographical reference in the biblio section (<ref>): bibl
annexes (<annex>): div type="annex" (optionally under back)
data availability (<availability>): div type="availability"
acknowledgement (<acknowledgement>): div type="acknowledgement" (optionally under back)
*/
private static final Logger logger = LoggerFactory.getLogger(TEISegmentationSaxParser.class);
private StringBuffer accumulator = null; // current accumulated text
private String output = null;
//private Stack<String> currentTags = null;
private String currentTag = null;
private String upperQname = null;
private String upperTag = null;
private List<String> labeled = null; // store line by line the labeled data
public TEISegmentationSaxParser() {
labeled = new ArrayList<String>();
//currentTags = new Stack<String>();
accumulator = new StringBuffer();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
if (accumulator != null) {
//System.out.println(accumulator.toString().trim());
return accumulator.toString().trim();
} else {
return null;
}
}
public List<String> getLabeledResult() {
return labeled;
}
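    /*
     * Illustrative output: the accumulated text is split on +L+ line markers and
     * only the first token of each line is kept, one "token label" line each, e.g.
     *   Introduction I-<body>
     *   The <body>
     * The I- prefix marks the first line of a labeled zone; +PAGE+ markers are
     * removed from the line before tokenization.
     */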
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ((!qName.equals("lb")) && (!qName.equals("pb") )) {
writeData(qName, currentTag);
}
if (qName.equals("body") ||
qName.equals("cover") ||
qName.equals("front") ||
qName.equals("div") ||
qName.equals("toc") ||
qName.equals("other") ||
qName.equals("listBibl")) {
currentTag = null;
upperTag = null;
}
else if (qName.equals("note") ||
qName.equals("page") ||
qName.equals("pages") ||
qName.equals("titlePage") ) {
currentTag = upperTag;
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("lb")) {
accumulator.append(" +L+ ");
} else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
} else if (qName.equals("space")) {
accumulator.append(" ");
} else {
// we have to write first what has been accumulated yet with the upper-level tag
String text = getText();
if (text != null) {
if (text.length() > 0) {
writeData(upperQname, upperTag);
}
}
//accumulator.setLength(0);
if (qName.equals("front")) {
//currentTags.push("<header>");
currentTag = "<header>";
upperTag = currentTag;
upperQname = "front";
} else if (qName.equals("body")) {
//currentTags.push("<other>");
currentTag = "<body>";
upperTag = currentTag;
upperQname = "body";
} else if (qName.equals("titlePage")) {
//currentTags.push("<other>");
currentTag = "<cover>";
//upperTag = currentTag;
//upperQname = "titlePage";
} else if (qName.equals("other")) {
//currentTags.push("<other>");
currentTag = "<other>";
} else if (qName.equals("toc")) {
// normally valid table of content mark-up should be <div type="toc>", not tag <toc>
//currentTags.push("<other>");
currentTag = "<toc>";
upperTag = currentTag;
upperQname = "div";
} else if (qName.equals("note")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("place")) {
if (value.equals("footnote") || value.equals("foot") ) {
currentTag = "<footnote>";
} else if (value.equals("headnote") || value.equals("head") ) {
currentTag = "<headnote>";
} else if (value.equals("margin")) {
currentTag = "<marginnote>";
} else {
logger.error("Invalid attribute value for element note: " + name + "=" + value);
}
} else {
logger.error("Invalid attribute name for element note: " + name);
}
}
}
}
else if (qName.equals("div")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("annex")) {
currentTag = "<annex>";
upperTag = currentTag;
upperQname = "div";
} else if (value.equals("funding")) {
currentTag = "<funding>";
upperTag = currentTag;
upperQname = "div";
} else if (Arrays.asList("availability", "data_availability", "data-availability").contains(value)) {
currentTag = AVAILABILITY_LABEL;
upperTag = currentTag;
upperQname = "div";
} else if (value.equals("acknowledgement") || value.equals("acknowledgements") || value.equals("acknowledgment")
|| value.equals("acknowledgments")) {
currentTag = "<acknowledgement>";
upperTag = currentTag;
upperQname = "div";
} else if (value.equals("toc")) {
currentTag = "<toc>";
upperTag = currentTag;
upperQname = "div";
} else {
logger.error("Invalid attribute value for element div: " + name + "=" + value);
}
} else {
logger.error("Invalid attribute name for element div: " + name);
}
}
}
}
else if (qName.equals("page") || qName.equals("pages")) {
currentTag = "<page>";
}
else if (qName.equals("listBibl")) {
currentTag = "<references>";
upperTag = currentTag;
upperQname = "listBibl";
} else if (qName.equals("text")) {
currentTag = "<other>";
upperTag = null;
upperQname = null;
} /*else {
logger.error("Invalid element name: " + qName + " - it will be mapped to the label <other>");
currentTag = "<other>";
}*/
}
}
private void writeData(String qName, String surfaceTag) {
if (qName == null) {
qName = "other";
surfaceTag = "<other>";
}
if ((qName.equals("front")) || (qName.equals("titlePage")) || (qName.equals("note")) ||
(qName.equals("page")) || (qName.equals("pages")) || (qName.equals("body")) ||
(qName.equals("listBibl")) || (qName.equals("div")) ||
(qName.equals("other")) || (qName.equals("toc"))
) {
String text = getText();
text = text.replace("\n", " ");
text = text.replace("\r", " ");
text = text.replace(" ", " ");
boolean begin = true;
//System.out.println(text);
// we segment the text line by line first
//StringTokenizer st = new StringTokenizer(text, "\n", true);
String[] tokens = text.split("\\+L\\+");
//while (st.hasMoreTokens()) {
boolean page = false;
for(int p=0; p<tokens.length; p++) {
//String line = st.nextToken().trim();
String line = tokens[p].trim();
if (line.length() == 0)
continue;
if (line.equals("\n") || line.equals("\r"))
continue;
if (line.indexOf("+PAGE+") != -1) {
// page break should be a distinct feature
//labeled.add("@newpage\n");
line = line.replace("+PAGE+", "");
page = true;
}
//StringTokenizer st = new StringTokenizer(line, " \t");
StringTokenizer st = new StringTokenizer(line, " \t\f\u00A0");
if (!st.hasMoreTokens())
continue;
String tok = st.nextToken();
/*StringTokenizer st = new StringTokenizer(line, TextUtilities.delimiters, true);
if (!st.hasMoreTokens())
continue;
String tok = st.nextToken().trim();*/
if (tok.length() == 0)
continue;
if (surfaceTag == null) {
// this token belongs to a chunk to ignored
//System.out.println("\twarning: surfaceTag is null for token '"+tok+"' - it will be tagged with label <other>");
surfaceTag = "<other>";
}
if (begin && (!surfaceTag.equals("<other>"))) {
labeled.add(tok + " I-" + surfaceTag + "\n");
begin = false;
} else {
labeled.add(tok + " " + surfaceTag + "\n");
}
if (page) {
//labeled.add("@newpage\n");
page = false;
}
//}
}
accumulator.setLength(0);
}
}
}
| 11,526 | 37.295681 | 140 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIHeaderSaxParserOCRTraining.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.io.IOException;
import java.io.Writer;
import java.util.StringTokenizer;
/**
 * SAX parser for the TEI format header data. Normally all training data should be in this unique format, which
 * replaces the ugly CORA format. Segmentation of tokens must be identical to the one from pdf2xml files so that
 * training and online input tokens are identical.
*
* @author Patrice Lopez
*/
public class TEIHeaderSaxParserOCRTraining extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String output = null;
private String currentTag = null;
private Writer writer = null;
// output the training data sets for OCR post corrections
private Writer writer_affiliations = null;
private Writer writer_addresses = null;
private Writer writer_keywords = null;
private Writer writer_authors = null;
private Writer writer_notes = null;
public static String punctuations = ",:;?.!)-\"']";
public TEIHeaderSaxParserOCRTraining() {
}
public TEIHeaderSaxParserOCRTraining(Writer writ1,
Writer writ2,
Writer writ3,
Writer writ4,
Writer writ5) {
writer_affiliations = writ1;
writer_addresses = writ2;
writer_keywords = writ3;
writer_authors = writ4;
writer_notes = writ5;
}
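    /*
     * Usage sketch (hypothetical writers): each writer receives the raw token
     * stream of one field type, producing per-field corpora for OCR
     * post-correction training.
     *
     *   TEIHeaderSaxParserOCRTraining handler = new TEIHeaderSaxParserOCRTraining(
     *           affiliationWriter, addressWriter, keywordWriter, authorWriter, noteWriter);
     */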
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException {
if ((qName.equals("titlePart")) | (qName.equals("note")) | (qName.equals("byline")) |
(qName.equals("affiliation")) | (qName.equals("address")) | (qName.equals("email")) |
(qName.equals("idno")) | (qName.equals("date")) | (qName.equals("biblScope")) |
(qName.equals("keywords")) | (qName.equals("ptr")) | (qName.equals("div")) | (qName.equals("title"))
) {
// we register in the DB the new entry
String text = getText();
Writer writer = null;
if (qName.equals("affiliation"))
writer = writer_affiliations;
else if (qName.equals("address"))
writer = writer_addresses;
else if (qName.equals("keywords"))
writer = writer_keywords;
else if (currentTag.equals("<author>"))
writer = writer_authors;
else if (qName.equals("note"))
writer = writer_notes;
if (writer != null) {
try {
// we segment the text
StringTokenizer st = new StringTokenizer(text, " \n\t");
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0) continue;
boolean punct1 = false;
if (tok.equals("+L+")) {
writer.write("\n");
} else if (tok.equals("+PAGE+")) {
//writer.write("@newpage\n");
writer.write("\n"); // page break should be a distinct feature
} else {
String content = tok;
                        for (int i = 0; i < punctuations.length(); i++) {
if (tok.length() > 0) {
if (tok.charAt(tok.length() - 1) == punctuations.charAt(i)) {
punct1 = true;
content = tok.substring(0, tok.length() - 1);
break;
}
}
}
if (tok.length() > 0) {
if ((tok.startsWith("(")) & (tok.length() > 1)) {
if (punct1)
content = tok.substring(1, tok.length() - 1);
else
content = tok.substring(1, tok.length());
writer.write("(" + " ");
} else if ((tok.startsWith("[")) & (tok.length() > 1)) {
if (punct1)
content = tok.substring(1, tok.length() - 1);
else
content = tok.substring(1, tok.length());
writer.write("[" + " ");
} else if ((tok.startsWith("\"")) & (tok.length() > 1)) {
if (punct1)
content = tok.substring(1, tok.length() - 1);
else
content = tok.substring(1, tok.length());
writer.write("\"" + " ");
}
}
if (content.length() > 0)
writer.write(content + " ");
if (punct1) {
//writer.write(""+ punctuations.charAt(i) + " " + currentTag + "\n");
writer.write(tok.charAt(tok.length() - 1) + " ");
}
}
}
writer.write("\n");
} catch (IOException e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
accumulator.setLength(0);
} else if (qName.equals("lb")) {
// we note a line break
//try {
//writer.write("@newline\n");
accumulator.append(" +L+ ");
//}
//catch(IOException e) {
// e.printStackTrace();
//}
//accumulator.setLength(0);
} else if (qName.equals("pb")) {
// we note a page break
//writer.write("@newpage\n");
//try {
//writer.write("@newline\n");
accumulator.append(" +PAGE+ ");
//}
//catch(IOException e) {
// e.printStackTrace();
//}
//accumulator.setLength(0);
}
//accumulator.setLength(0);
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("div")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("abstract")) {
currentTag = "<abstract>";
} else if (value.equals("intro")) {
currentTag = "<intro>";
} else if (value.equals("paragraph")) {
currentTag = "<other>";
}
}
}
}
//accumulator.setLength(0);
} else if (qName.equals("note")) {
int length = atts.getLength();
currentTag = "<note>";
//accumulator.setLength(0);
} else if (qName.equals("ptr")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("web")) {
currentTag = "<web>";
}
}
}
}
//accumulator.setLength(0);
} else if (qName.equals("biblScope")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("vol")) {
currentTag = "<volume>";
} else if (value.equals("pp")) {
currentTag = "<pages>";
}
}
}
}
//accumulator.setLength(0);
} else if (qName.equals("titlePart")) {
currentTag = "<title>";
accumulator.setLength(0);
} else if (qName.equals("idno")) {
currentTag = "<pubnum>";
//accumulator.setLength(0);
} else if (qName.equals("docAuthor")) {
currentTag = "<author>";
//accumulator.setLength(0);
} else if (qName.equals("affiliation")) {
currentTag = "<affiliation>";
//accumulator.setLength(0);
} else if (qName.equals("address")) {
currentTag = "<address>";
//accumulator.setLength(0);
} else if (qName.equals("email")) {
currentTag = "<email>";
//accumulator.setLength(0);
} else if (qName.equals("date")) {
currentTag = "<date>";
//accumulator.setLength(0);
} else if (qName.equals("keywords")) {
currentTag = "<keyword>";
//accumulator.setLength(0);
} else if (qName.equals("title")) {
currentTag = "<journal>";
//accumulator.setLength(0);
}
}
}
| 10,885 | 38.299639 | 122 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/NLMHeaderSaxHandler.java
|
package org.grobid.trainer.sax;
import org.grobid.core.data.Affiliation;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Person;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
* SAX parser for the NLM XML format - the PubMed XML full text format.
* This class covers only the header of the NLM file.
*
* @author Patrice Lopez
*/
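/*
 * Typical usage (a minimal sketch, assuming the standard JAXP SAX setup; the
 * file name "article.nxml" is illustrative):
 *
 *   NLMHeaderSaxHandler handler = new NLMHeaderSaxHandler(new BiblioItem());
 *   SAXParserFactory.newInstance().newSAXParser()
 *       .parse(new File("article.nxml"), handler);
 *   BiblioItem biblio = handler.getBiblio();
 */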
public class NLMHeaderSaxHandler extends DefaultHandler {
private BiblioItem biblio = null;
private ArrayList<Person> authors = null;
private ArrayList<String> editors = null;
private Person author = null;
private Affiliation affiliation = null;
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String media = null; // print or electronic, for ISSN
private String current_id = null;
public boolean journalMetadataBlock = false;
public boolean journalIssueBlock = false;
public boolean journalArticleBlock = false;
public boolean conferencePaperBlock = false;
public boolean proceedingsMetadataBlock = false;
public boolean contentItemBlock = false;
public boolean eventMetadataBlock = false;
public boolean bookMetadataBlock = false;
public boolean serieMetadataBlock = false;
public boolean pubDateMetadataBlock = false;
public boolean affiliationMetadataBlock = false;
public boolean online = false;
public boolean date_accepted = false;
public boolean date_submitted = false;
public boolean authorBlock = false;
public boolean editorBlock = false;
public boolean firstAuthor = false;
public NLMHeaderSaxHandler() {
}
public NLMHeaderSaxHandler(BiblioItem b) {
biblio = b;
}
public BiblioItem getBiblio() {
return biblio;
}
public void characters(char[] ch, int start, int length) {
accumulator.append(ch, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException {
if (qName.equals("journal-title")) {
biblio.setJournal(getText());
biblio.setItem(BiblioItem.Periodical);
} else if (qName.equals("abbrev-journal-title")) {
biblio.setJournalAbbrev(getText());
biblio.setItem(BiblioItem.Periodical);
} else if (qName.equals("issn")) {
String issn = getText();
biblio.setItem(BiblioItem.Periodical);
if (media != null) {
if (media.equals("print"))
biblio.setISSN(issn);
else
biblio.setISSNe(issn);
} else
biblio.setISSN(issn);
} else if (qName.equals("publisher-name")) {
String publisher = getText();
biblio.setPublisher(publisher);
} else if (qName.equals("article-id ")) {
if (current_id != null) {
if (current_id.equals("doi")) {
String doi = getText();
biblio.setDOI(doi);
biblio.setError(false);
}
}
} else if (qName.equals("article-title")) {
biblio.setArticleTitle(getText());
} else if (qName.equals("contrib")) {
            // add the completed author before resetting the flags,
            // otherwise it is never collected
            if (authorBlock && (author != null) && (authors != null))
                authors.add(author);
            authorBlock = false;
            editorBlock = false;
            author = null;
} else if (qName.equals("given_names")) {
String sauce = getText();
if (authorBlock) {
if (author == null)
author = new Person();
author.setFirstName(sauce);
}
} else if (qName.equals("surname")) {
String sauce = getText();
if (authorBlock) {
if (!sauce.equals("Unknown")) {
if (author == null)
author = new Person();
author.setLastName(sauce);
}
}
} else if (qName.equals("volume")) {
String volume = getText();
if (volume != null)
if (volume.length() > 0)
biblio.setVolume(volume);
} else if (qName.equals("issue")) {
String issue = getText();
// issue can be of the form 4-5
if (issue != null) {
if (issue.length() > 0) {
biblio.setNumber(issue);
biblio.setIssue(issue);
}
}
} else if (qName.equals("fpage")) {
String page = getText();
if (page != null)
if (page.length() > 0) {
if (page.startsWith("L") | page.startsWith("l"))
page = page.substring(1, page.length());
biblio.setBeginPage(Integer.parseInt(page));
}
} else if (qName.equals("pub-date ")) {
//biblio.setPublicationDate(getText());
pubDateMetadataBlock = false;
} else if (qName.equals("year")) {
String year = getText();
//if (pubDateMetadataBlock)
// biblio.setPublicationDate(year);
if (online) {
biblio.setE_Year(year);
} else if (date_accepted) {
biblio.setA_Year(year);
} else {
biblio.setYear(year);
}
} else if (qName.equals("month")) {
String month = getText();
if (online) {
biblio.setE_Month(month);
} else if (date_accepted) {
biblio.setA_Month(month);
} else {
biblio.setMonth(month);
}
} else if (qName.equals("day")) {
String day = getText();
if (online) {
biblio.setE_Day(day);
} else if (date_accepted) {
biblio.setA_Day(day);
} else {
biblio.setDay(day);
}
} else if (qName.equals("p")) {
String paragraph = getText();
if (paragraph != null) {
if (paragraph.length() > 0) {
if (biblio.getAbstract() == null)
biblio.setAbstract(paragraph);
else
biblio.setAbstract(biblio.getAbstract() + "\n" + paragraph);
}
}
} else if (qName.equals("aff")) {
affiliationMetadataBlock = false;
if (author != null) {
if (affiliation != null)
author.addAffiliation(affiliation);
}
affiliation = null;
} else if (qName.equals("country")) {
if (affiliationMetadataBlock) {
if (affiliation == null) {
affiliation = new Affiliation();
}
affiliation.setCountry(getText());
}
} else if (qName.equals("email")) {
if (affiliationMetadataBlock) {
if (author != null)
author.setEmail(getText());
}
} else if (qName.equals("contrib-group")) {
biblio.setFullAuthors(authors);
} else if (qName.equals("date")) {
pubDateMetadataBlock = false;
date_accepted = false;
date_submitted = false;
}
accumulator.setLength(0);
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("article")) {
int length = atts.getLength();
if (biblio == null) {
biblio = new BiblioItem();
}
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) & (value != null)) {
if (name.equals("xml:lang")) {
biblio.setLanguage(value);
}
}
}
} else if (qName.equals("issn")) {
int length = atts.getLength();
biblio.setItem(BiblioItem.Periodical);
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) & (value != null)) {
if (name.equals("pub-type")) {
if (value.equals("ppub")) {
media = "print";
} else if (value.equals("epub")) {
media = "digital";
}
}
}
}
} else if (qName.equals("article-id ")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) & (value != null)) {
if (name.equals("pub-id-type")) {
if (value.equals("doi")) {
current_id = "doi";
}
}
}
}
} else if (qName.equals("contrib-group")) {
authors = new ArrayList<Person>(0);
editors = new ArrayList<String>(0);
} else if (qName.equals("contrib")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) & (value != null)) {
if (name.equals("contrib-type")) {
if (value.equals("author")) {
authorBlock = true;
} else if (value.equals("editor")) {
editorBlock = true;
}
}
}
}
} else if (qName.equals("pub-date")) {
pubDateMetadataBlock = true;
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) & (value != null)) {
if (name.equals("pub-type")) {
if (value.equals("ppub")) {
online = false;
} else if (value.equals("epub")) {
online = true;
}
}
}
}
} else if (qName.equals("aff")) {
affiliationMetadataBlock = true;
} else if (qName.equals("date")) {
pubDateMetadataBlock = true;
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) & (value != null)) {
if (name.equals("date-type")) {
if (value.equals("accepted")) {
date_accepted = true;
}
}
}
}
}
accumulator.setLength(0);
}
}
| 12,292 | 34.735465 | 122 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/ChemicalWordsSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.List;
/**
* This SAX parser process the list of words of a document and produce the labeled data
* based on stand off annotations.
*
* @author Patrice Lopez
*/
public class ChemicalWordsSaxParser extends DefaultHandler {
private List<String> chemicalAnnotations = null;
private List<String> chemicalAnnotationsStarts = null;
private List<String> chemicalFormulas = null;
private List<String> chemicalFormulasStarts = null;
private List<String> chemicalSubstances = null;
private List<String> chemicalSubstancesStarts = null;
private List<String> chemicalClassNames = null;
private List<String> chemicalClassNamesStarts = null;
private List<String> chemicalLigand = null;
private List<String> chemicalLigandStarts = null;
private List<String> labeledResult = null;
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String localID = null;
public void setChemicalAnnotations(List<String> annotations, List<String> annotationsStarts) {
chemicalAnnotations = annotations;
chemicalAnnotationsStarts = annotationsStarts;
}
public void setChemicalFormulas(List<String> formulas, List<String> formulasStarts) {
chemicalFormulas = formulas;
chemicalFormulasStarts = formulasStarts;
}
public void setChemicalSubstances(List<String> substances, List<String> substancesStarts) {
chemicalSubstances = substances;
chemicalSubstancesStarts = substancesStarts;
}
public void setChemicalClassNames(List<String> classNames, List<String> classNamesStarts) {
chemicalClassNames = classNames;
chemicalClassNamesStarts = classNamesStarts;
}
public void setChemicalLigand(List<String> ligand, List<String> ligandStarts) {
chemicalLigand = ligand;
chemicalLigandStarts = ligandStarts;
}
public List<String> getLabeledResult() {
return labeledResult;
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
try {
if (qName.equals("word")) {
String word = getText();
// we determine the label of the word based on localID
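                // IOB-style labeling: "I-<chemName>" marks the first word of an entity,
                // "<chemName>" a continuation word, "<other>" any word outside an entity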
String label = null;
if (chemicalAnnotationsStarts.contains(localID) ||
chemicalFormulasStarts.contains(localID) ||
chemicalSubstancesStarts.contains(localID) ||
chemicalClassNamesStarts.contains(localID) ||
chemicalLigandStarts.contains(localID)) {
label = "I-<chemName>";
} else if (chemicalAnnotations.contains(localID) ||
chemicalFormulas.contains(localID) ||
chemicalSubstances.contains(localID) ||
chemicalClassNames.contains(localID) ||
chemicalLigand.contains(localID)) {
label = "<chemName>";
} else {
label = "<other>";
}
labeledResult.add(word + "\t" + label);
}
accumulator.setLength(0);
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
try {
if (qName.equals("nite:root")) {
labeledResult = new ArrayList<String>();
} else if (qName.equals("word")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("nite:id")) {
localID = value;
}
}
}
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
}
| 4,987 | 35.676471 | 98 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEICitationSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.lang.Language;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.analyzers.*;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.List;
import static org.apache.commons.collections4.CollectionUtils.isEmpty;
/**
* SAX parser for the XML format for citation data. Normally all training data should be in this unique format which
 * replaces the ugly CORA format. Segmentation of tokens must be identical to the one from pdf2xml files so that
 * training and online input tokens are identical.
* <p/>
* This is unfortunately not yet TEI...
*
* @author Patrice Lopez
*/
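/*
 * Typical usage (a minimal sketch, assuming the standard JAXP SAX setup; the
 * file name is illustrative):
 *
 *   TEICitationSaxParser handler = new TEICitationSaxParser();
 *   SAXParserFactory.newInstance().newSAXParser()
 *       .parse(new File("citations.tei.xml"), handler);
 *   List<List<String>> labels = handler.getLabeledResult();
 *   List<List<LayoutToken>> tokens = handler.getTokensResult();
 */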
public class TEICitationSaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private StringBuffer allContent = new StringBuffer();
private String output = null;
private String currentTag = null;
private List<String> labeled = null; // store token by token the labels
private List<List<String>> allLabeled = null; // list of labels
private List<LayoutToken> tokens = null;
private List<List<LayoutToken>> allTokens = null; // list of LayoutToken segmentation
public int nbCitations = 0;
public TEICitationSaxParser() {
allTokens = new ArrayList<List<LayoutToken>>();
allLabeled = new ArrayList<List<String>>();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
if (allContent != null) {
allContent.append(buffer, start, length);
}
}
public String getText() {
return accumulator.toString().trim();
}
public List<List<String>> getLabeledResult() {
return allLabeled;
}
public List<List<LayoutToken>> getTokensResult() {
return allTokens;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
qName = qName.toLowerCase();
if ((qName.equals("author")) || (qName.equals("authors")) || (qName.equals("orgname")) ||
(qName.equals("title")) || (qName.equals("editor")) || (qName.equals("editors")) ||
(qName.equals("booktitle")) || (qName.equals("date")) || (qName.equals("journal")) ||
(qName.equals("institution")) || (qName.equals("tech")) || (qName.equals("volume")) ||
(qName.equals("pages")) || (qName.equals("page")) || (qName.equals("pubplace")) ||
(qName.equals("note")) || (qName.equals("web")) || (qName.equals("pages")) ||
(qName.equals("publisher")) || (qName.equals("idno") || qName.equals("issue")) ||
(qName.equals("pubnum")) || (qName.equals("biblscope")) || (qName.equals("ptr")) ||
(qName.equals("keyword")) || (qName.equals("keywords"))
) {
String text = getText();
writeField(text);
} else if (qName.equals("lb")) {
// we note a line break
accumulator.append(" +L+ ");
} else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
} else if (qName.equals("bibl")) {
String text = getText();
currentTag = "<other>";
if (text.length() > 0) {
writeField(text);
}
nbCitations++;
allLabeled.add(labeled);
allTokens.add(tokens);
allContent = null;
}
accumulator.setLength(0);
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
}
accumulator.setLength(0);
qName = qName.toLowerCase();
if (qName.equals("title")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("level")) {
if (value.equals("a")) {
currentTag = "<title>";
} else if (value.equals("j")) {
currentTag = "<journal>";
} else if (value.equals("m")) {
currentTag = "<booktitle>";
} else if (value.equals("s")) {
currentTag = "<series>";
}
}
}
}
} else if ((qName.equals("author")) || (qName.equals("authors"))) {
currentTag = "<author>";
} else if (qName.equals("editor")) {
currentTag = "<editor>";
} else if (qName.equals("date")) {
currentTag = "<date>";
} else if ((qName.equals("keywords")) || (qName.equals("keyword"))) {
currentTag = "<keyword>";
} else if (qName.equals("orgname")) {
// check if we have a collaboration
boolean found = false;
int length = atts.getLength();
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("type")) {
if (value.equals("collaboration")) {
currentTag = "<collaboration>";
found = true;
}
}
}
}
if (!found)
currentTag = "<institution>";
} else if (qName.equals("note")) {
int length = atts.getLength();
if (length == 0) {
currentTag = "<note>";
} else {
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("type")) {
if (value.equals("report")) {
currentTag = "<tech>";
}
}
}
}
}
} else if (qName.equals("biblscope")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("type") || name.equals("unit")) {
if ((value.equals("vol")) || (value.equals("volume"))) {
currentTag = "<volume>";
} else if ((value.equals("issue")) || (value.equals("number"))) {
currentTag = "<issue>";
}
if (value.equals("pp") || value.equals("page")) {
currentTag = "<pages>";
}
}
}
}
} else if (qName.equals("pubplace")) {
currentTag = "<location>";
} else if (qName.equals("publisher")) {
currentTag = "<publisher>";
} else if (qName.equals("ptr")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("type")) {
if (value.equals("web")) {
currentTag = "<web>";
}
}
}
}
} else if (qName.equals("idno") || qName.equals("pubnum")) {
currentTag = "<pubnum>";
String idnoType = null;
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("type")) {
idnoType = value.toLowerCase();
}
}
}
// TBD: keep the idno type for further exploitation
} else if (qName.equals("bibl")) {
accumulator = new StringBuffer();
allContent = new StringBuffer();
labeled = new ArrayList<String>();
tokens = new ArrayList<LayoutToken>();
}
accumulator.setLength(0);
}
private void writeField(String text) {
if (tokens == null) {
// nothing to do, text must be ignored
return;
}
// we segment the text
List<LayoutToken> localTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
if (isEmpty(localTokens)) {
localTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text, new Language("en", 1.0));
}
if (isEmpty(localTokens)) {
return;
}
boolean begin = true;
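        // e.g. with currentTag = "<author>", the tokens of "J. Smith" receive the labels
        // "I-<author>", "<author>", "<author>"; whitespace tokens receive a null label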
for (LayoutToken token : localTokens) {
tokens.add(token);
String content = token.getText();
if (content.equals(" ") || content.equals("\n")) {
labeled.add(null);
continue;
}
content = UnicodeUtil.normaliseTextAndRemoveSpaces(content);
if (content.trim().length() == 0) {
labeled.add(null);
continue;
}
if (content.length() > 0) {
if (begin) {
labeled.add("I-" + currentTag);
begin = false;
} else {
labeled.add(currentTag);
}
}
}
}
}
| 11,333 | 36.160656 | 116 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIHeaderSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.List;
import java.util.Arrays;
import java.util.StringTokenizer;
import static org.grobid.core.engines.label.TaggingLabels.AVAILABILITY_LABEL;
/**
* SAX parser for the TEI format header data encoded for training. Normally all training data for the header model
* should be in this unique format (which replaces for instance the CORA format). Segmentation of tokens must be
 * identical to the one from pdf2xml files so that training and online input tokens are aligned.
*
* @author Patrice Lopez
*/
public class TEIHeaderSaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String output = null;
private String currentTag = null;
private String fileName = null;
//public TreeMap<String, String> pdfs = null;
private String pdfName = null;
private ArrayList<String> labeled = null; // store line by line the labeled data
private List<String> endTags = Arrays.asList("titlePart", "note", "docAuthor", "affiliation", "address", "email", "idno",
"date", "keywords", "keyword", "reference", "ptr", "div", "editor", "meeting");
private List<String> intermediaryTags = Arrays.asList("byline", "front", "lb", "tei", "teiHeader", "fileDesc", "text", "byline", "docTitle", "p");
private List<String> ignoredTags = Arrays.asList("location", "version", "web", "degree", "page", "title", "phone", "publisher");
public TEIHeaderSaxParser() {
labeled = new ArrayList<String>();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public void setFileName(String name) {
fileName = name;
}
public String getPDFName() {
return pdfName;
}
public ArrayList<String> getLabeledResult() {
return labeled;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if (endTags.contains(qName)) {
writeData();
accumulator.setLength(0);
} else if (qName.equals("front")) {
            // write remaining text as <other>
String text = getText();
if (text != null) {
if (text.length() > 0) {
currentTag = "<other>";
writeData();
}
}
accumulator.setLength(0);
} else if (intermediaryTags.contains(qName)) {
// do nothing
} else if (ignoredTags.contains(qName)) {
// do nothing
} else {
System.out.println(" **** Warning **** Unexpected closing tag " + qName);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("lb")) {
accumulator.append(" ");
} /*else if (qName.equals("space")) {
accumulator.append(" ");
}*/ else {
            // add accumulated text as <other>
String text = getText();
if (text != null) {
if (text.length() > 0) {
currentTag = "<other>";
writeData();
}
}
accumulator.setLength(0);
}
if (qName.equals("div")) {
int length = atts.getLength();
currentTag = "<other>";
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("abstract")) {
currentTag = "<abstract>";
} /*else if (value.equals("intro") || value.equals("introduction")) {
currentTag = "<intro>";
} else if (value.equals("paragraph")) {
currentTag = "<other>";
}*/
else
currentTag = "<other>";
}
}
}
} else if (qName.equals("note")) {
int length = atts.getLength();
currentTag = "<other>";
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
/*if (value.equals("degree")) {
currentTag = "<degree>";
} else if (value.equals("dedication")) {
currentTag = "<dedication>";
} else*/
if (value.equals("submission")) {
currentTag = "<submission>";
} /*else if (value.equals("english-title")) {
currentTag = "<entitle>";
} else if (value.equals("other")) {
currentTag = "<note>";
}*/ else if (value.equals("reference")) {
currentTag = "<reference>";
} else if (value.equals("copyright")) {
currentTag = "<copyright>";
} else if (value.equals("funding")) {
currentTag = "<funding>";
} /*else if (value.equals("acknowledgment")) {
currentTag = "<note>";
}*/ else if (value.equals("document_type") || value.equals("doctype") || value.equals("docType") ||
value.equals("documentType") || value.equals("articleType")) {
currentTag = "<doctype>";
} /*else if (value.equals("version")) {
currentTag = "<version>";
} else if (value.equals("release")) {
currentTag = "<other>";
}*/ else if (value.equals("group")) {
currentTag = "<group>";
} else if (Arrays.asList("availability", "data_availability", "data-availability").contains(value)) {
currentTag = AVAILABILITY_LABEL;
} else
currentTag = "<other>";
}
} else
currentTag = "<other>";
}
} else if (qName.equals("ptr")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("web")) {
currentTag = "<web>";
}
}
} else
currentTag = "<other>";
}
} else if (qName.equals("titlePart")) {
currentTag = "<title>";
} else if (qName.equals("idno")) {
currentTag = "<pubnum>";
} else if (qName.equals("reference")) {
currentTag = "<reference>";
} /*else if (qName.equals("degree")) {
currentTag = "<degree>";
}*/ else if (qName.equals("docAuthor")) {
currentTag = "<author>";
} /*else if (qName.equals("web")) {
currentTag = "<web>";
}*/ else if (qName.equals("affiliation")) {
currentTag = "<affiliation>";
accumulator.setLength(0);
} else if (qName.equals("address")) {
currentTag = "<address>";
accumulator.setLength(0);
} else if (qName.equals("email")) {
currentTag = "<email>";
} else if (qName.equals("meeting")) {
currentTag = "<meeting>";
} /*else if (qName.equals("location")) {
currentTag = "<location>";
}*/ else if (qName.equals("editor")) {
currentTag = "<editor>";
} else if (qName.equals("date")) {
currentTag = "<date>";
/*int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("submission")) {
currentTag = "<date-submission>";
} else if (value.equals("download")) {
currentTag = "<date-download>";
}
}
}
}*/
} /*else if (qName.equals("p")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("intro") || value.equals("introduction")) {
currentTag = "<intro>";
}
}
}
}
}*/ else if ((qName.equals("keywords")) || (qName.equals("keyword"))) {
currentTag = "<keyword>";
} /*else if (qName.equals("title")) {
// only <title level="j"> for the moment, so don't need to check the attribute value
currentTag = "<journal>";
} else if (qName.equals("page")) {
currentTag = "<page>";
} else if (qName.equals("phone")) {
currentTag = "<phone>";
} else if (qName.equals("publisher")) {
currentTag = "<publisher>";
}*/
else if (qName.equals("fileDesc")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("xml:id")) {
pdfName = value;
}
}
}
} else if (intermediaryTags.contains(qName)) {
// do nothing
} else if (ignoredTags.contains(qName)) {
// do nothing
currentTag = "<other>";
} else {
System.out.println("Warning: Unexpected starting tag " + qName);
currentTag = "<other>";
}
}
private void writeData() {
if (currentTag == null) {
return;
}
String text = getText();
// we segment the text
StringTokenizer st = new StringTokenizer(text, TextUtilities.delimiters, true);
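        // each token is written as "token label": e.g. the text "Deep parsing" under
        // currentTag = "<title>" yields the lines "Deep I-<title>" and "parsing <title>"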
boolean begin = true;
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0)
continue;
String content = tok;
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(content + " " + currentTag + "\n");
}
}
}
accumulator.setLength(0);
}
}
| 12,720 | 37.432024 | 150 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/ChemicalFormulasSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
 * This SAX parser simply records the chemical formula stand-off annotations and their corresponding word
* identifiers.
*
* @author Patrice Lopez
*/
public class ChemicalFormulasSaxParser extends DefaultHandler {
private ArrayList<ArrayList<String>> chemicalWords = null;
private ArrayList<String> localChemicalWords = null;
private int numberEntities = 0;
public ArrayList<ArrayList<String>> getChemicalFormulas() {
return chemicalWords;
}
public int getNumberEntities() {
return numberEntities;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
try {
if (qName.equals("formula-name")) {
if ((localChemicalWords != null) && (localChemicalWords.size() > 0)) {
// we need to keep only the first and last word id for a given sequence
                    // note that the order of the word ids in this file does not respect the original word order
String idd1 = null;
String idd2 = null;
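                    // ids are compared by length first, then lexicographically, which orders
                    // numeric suffixes correctly: e.g. the ids {word_12, word_9, word_10}
                    // (illustrative values) yield idd1 = word_9 and idd2 = word_12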
                    for (String idd : localChemicalWords) {
                        if (idd1 == null)
                            idd1 = idd;
                        if (idd2 == null)
                            idd2 = idd;
                        if (idd.length() < idd1.length()
                            || (idd.length() == idd1.length() && idd.compareToIgnoreCase(idd1) < 0)) {
                            idd1 = idd;
                        }
                        if (idd.length() > idd2.length()
                            || (idd.length() == idd2.length() && idd.compareToIgnoreCase(idd2) > 0)) {
                            idd2 = idd;
                        }
                    }
localChemicalWords = new ArrayList<String>();
localChemicalWords.add(idd1);
localChemicalWords.add(idd2);
chemicalWords.add(localChemicalWords);
//System.out.println(localChemicalWords);
localChemicalWords = null;
}
numberEntities++;
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
try {
if (qName.equals("nite:root")) {
chemicalWords = new ArrayList<ArrayList<String>>();
} else if (qName.equals("formula-name")) {
localChemicalWords = new ArrayList<String>();
} else if (qName.equals("nite:child")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("href")) {
// there are two notations to handle, one compact with .., one with one child per word
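                            // e.g. compact form:   href="words.xml#id(word_12)..id(word_15)"
                            //      per-word form:  href="words.xml#id(word_12)"
                            // (illustrative values; actual ids come from the corpus word files)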
int ind = value.indexOf("..");
if (ind != -1) {
// we have a sequence with a first and last word id
int ind1 = value.indexOf("(");
int ind2 = value.indexOf(")");
String idd1 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd1);
ind1 = value.indexOf("(", ind1 + 1);
ind2 = value.indexOf(")", ind2 + 1);
String idd2 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd2);
} else {
ind = value.indexOf("(");
String idd = value.substring(ind + 1, value.length() - 1);
localChemicalWords.add(idd);
}
}
}
}
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
}
| 5,014 | 40.106557 | 114 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/ChemicalLigandSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
 * This SAX parser simply records the chemical ligand stand-off annotations and their corresponding word
* identifiers.
*
* @author Patrice Lopez
*/
public class ChemicalLigandSaxParser extends DefaultHandler {
private ArrayList<ArrayList<String>> chemicalWords = null;
private ArrayList<String> localChemicalWords = null;
private int numberEntities = 0;
public ArrayList<ArrayList<String>> getChemicalLigand() {
return chemicalWords;
}
public int getNumberEntities() {
return numberEntities;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
try {
if (qName.equals("ligand-name")) {
if ((localChemicalWords != null) && (localChemicalWords.size() > 0)) {
// we need to keep only the first and last word id for a given sequence
                    // note that the order of the word ids in this file does not respect the original word order
String idd1 = null;
String idd2 = null;
                    for (String idd : localChemicalWords) {
                        if (idd1 == null)
                            idd1 = idd;
                        if (idd2 == null)
                            idd2 = idd;
                        // compare ids by length first, then lexicographically, so that
                        // numeric suffixes order as numbers (word_9 < word_10)
                        if (idd.length() < idd1.length()
                            || (idd.length() == idd1.length() && idd.compareToIgnoreCase(idd1) < 0)) {
                            idd1 = idd;
                        }
                        if (idd.length() > idd2.length()
                            || (idd.length() == idd2.length() && idd.compareToIgnoreCase(idd2) > 0)) {
                            idd2 = idd;
                        }
                    }
localChemicalWords = new ArrayList<String>();
localChemicalWords.add(idd1);
localChemicalWords.add(idd2);
chemicalWords.add(localChemicalWords);
//System.out.println(localChemicalWords);
localChemicalWords = null;
}
numberEntities++;
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
try {
if (qName.equals("nite:root")) {
chemicalWords = new ArrayList<ArrayList<String>>();
} else if (qName.equals("ligand-name")) {
localChemicalWords = new ArrayList<String>();
} else if (qName.equals("nite:child")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("href")) {
// there are two notations to handle, one compact with .., one with one child per word
int ind = value.indexOf("..");
if (ind != -1) {
// we have a sequence with a first and last word id
int ind1 = value.indexOf("(");
int ind2 = value.indexOf(")");
String idd1 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd1);
ind1 = value.indexOf("(", ind1 + 1);
ind2 = value.indexOf(")", ind2 + 1);
String idd2 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd2);
} else {
ind = value.indexOf("(");
String idd = value.substring(ind + 1, value.length() - 1);
localChemicalWords.add(idd);
}
}
}
}
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
}
| 5,011 | 40.081967 | 114 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIAuthorSaxParser.java
|
package org.grobid.trainer.sax;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.utilities.UnicodeUtil;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.analyzers.*;
import org.grobid.core.lang.Language;
import java.util.ArrayList;
import java.util.List;
/**
* SAX parser for author sequences encoded in the TEI format data.
 * Segmentation of tokens must be identical to the one from pdf2xml files so that
 * training and online input tokens are identical.
*
* @author Patrice Lopez
*/
public class TEIAuthorSaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String output = null;
private String currentTag = null;
private List<String> labeled = null; // store token by token the labels
private List<List<String>> allLabeled = null; // list of labels
private List<LayoutToken> tokens = null;
private List<List<LayoutToken>> allTokens = null; // list of LayoutToken segmentation
private String title = null;
private String affiliation = null;
private String address = null;
private String note = null;
private String keywords = null;
public int n = 0;
public TEIAuthorSaxParser() {
allTokens = new ArrayList<List<LayoutToken>>();
allLabeled = new ArrayList<List<String>>();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public List<List<String>> getLabeledResult() {
return allLabeled;
}
public List<List<LayoutToken>> getTokensResult() {
return allTokens;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ((qName.equals("firstname") || qName.equals("forename") || qName.equals("middlename") || qName.equals("title") ||
qName.equals("suffix") || qName.equals("surname") || qName.equals("lastname") || qName.equals("marker") ||
qName.equals("roleName")) & (currentTag != null)) {
String text = getText();
writeField(text);
} else if (qName.equals("lb")) {
// we note a line break
accumulator.append(" +L+ ");
} else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
} else if (qName.equals("author")) {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
}
allLabeled.add(labeled);
allTokens.add(tokens);
n++;
}
accumulator.setLength(0);
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
}
accumulator.setLength(0);
if (qName.equals("title") | qName.equals("roleName")) {
currentTag = "<title>";
} else if (qName.equals("marker")) {
currentTag = "<marker>";
} else if (qName.equals("surname") || qName.equals("lastname")) {
currentTag = "<surname>";
} else if (qName.equals("middlename")) {
currentTag = "<middlename>";
} else if (qName.equals("forename") || qName.equals("firstname")) {
currentTag = "<forename>";
} else if (qName.equals("suffix")) {
currentTag = "<suffix>";
} else if (qName.equals("author")) {
accumulator = new StringBuffer();
labeled = new ArrayList<String>();
tokens = new ArrayList<LayoutToken>();
} else if (!qName.equals("analytic") && !qName.equals("biblStruct") &&
!qName.equals("sourceDesc") && !qName.equals("fileDesc") &&
!qName.equals("teiHeader") && !qName.equals("TEI") &&
!qName.equals("persName") && !qName.equals("tei") && !qName.equals("lb")) {
System.out.println("Warning, invalid tag: <" + qName + ">");
}
}
private void writeField(String text) {
// we segment the text
//List<String> tokens = TextUtilities.segment(text, TextUtilities.punctuations);
List<LayoutToken> localTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
if ( (localTokens == null) || (localTokens.size() == 0) )
localTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text, new Language("en", 1.0));
if ( (localTokens == null) || (localTokens.size() == 0) )
return;
boolean begin = true;
for (LayoutToken token : localTokens) {
if (tokens == null) {
// should not be the case, it can indicate a structure problem in the training XML file
tokens = new ArrayList<LayoutToken>();
System.out.println("Warning: list of LayoutToken not initialized properly, parsing continue... ");
}
if (labeled == null) {
// should not be the case, it can indicate a structure problem in the training XML file
labeled = new ArrayList<String>();
System.out.println("Warning: list of labels not initialized properly, parsing continue... ");
}
tokens.add(token);
String content = token.getText();
if (content.equals(" ") || content.equals("\n")) {
labeled.add(null);
continue;
}
content = UnicodeUtil.normaliseTextAndRemoveSpaces(content);
if (content.trim().length() == 0) {
labeled.add(null);
continue;
}
if (content.length() > 0) {
if (begin) {
labeled.add("I-" + currentTag);
begin = false;
} else {
labeled.add(currentTag);
}
}
}
}
}
| 6,476 | 36.439306 | 124 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/ChemicalClassNamesSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
 * This SAX parser simply records the chemical class name stand-off annotations and their corresponding word
* identifiers.
*
* @author Patrice Lopez
*/
public class ChemicalClassNamesSaxParser extends DefaultHandler {
private ArrayList<ArrayList<String>> chemicalWords = null;
private ArrayList<String> localChemicalWords = null;
private int numberEntities = 0;
public ArrayList<ArrayList<String>> getChemicalClassNames() {
return chemicalWords;
}
public int getNumberEntities() {
return numberEntities;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
try {
if (qName.equals("class-name")) {
if ((localChemicalWords != null) && (localChemicalWords.size() > 0)) {
// we need to keep only the first and last word id for a given sequence
                    // note that the order of the word ids in this file does not respect the original word order
String idd1 = null;
String idd2 = null;
                    for (String idd : localChemicalWords) {
                        if (idd1 == null)
                            idd1 = idd;
                        if (idd2 == null)
                            idd2 = idd;
                        // compare ids by length first, then lexicographically, so that
                        // numeric suffixes order as numbers (word_9 < word_10)
                        if (idd.length() < idd1.length()
                            || (idd.length() == idd1.length() && idd.compareToIgnoreCase(idd1) < 0)) {
                            idd1 = idd;
                        }
                        if (idd.length() > idd2.length()
                            || (idd.length() == idd2.length() && idd.compareToIgnoreCase(idd2) > 0)) {
                            idd2 = idd;
                        }
                    }
localChemicalWords = new ArrayList<String>();
localChemicalWords.add(idd1);
localChemicalWords.add(idd2);
chemicalWords.add(localChemicalWords);
//System.out.println(localChemicalWords);
localChemicalWords = null;
}
numberEntities++;
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
try {
if (qName.equals("nite:root")) {
chemicalWords = new ArrayList<ArrayList<String>>();
} else if (qName.equals("class-name")) {
localChemicalWords = new ArrayList<String>();
} else if (qName.equals("nite:child")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("href")) {
// there are two notations to handle, one compact with .., one with one child per word
int ind = value.indexOf("..");
if (ind != -1) {
// we have a sequence with a first and last word id
int ind1 = value.indexOf("(");
int ind2 = value.indexOf(")");
String idd1 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd1);
ind1 = value.indexOf("(", ind1 + 1);
ind2 = value.indexOf(")", ind2 + 1);
String idd2 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd2);
} else {
ind = value.indexOf("(");
String idd = value.substring(ind + 1, value.length() - 1);
localChemicalWords.add(idd);
}
}
}
}
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
}
}
| 5,017 | 40.131148 | 114 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEICitationOpenEdition.java
|
package org.grobid.trainer.sax;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
/**
* SAX parser for the TEI format for OpenEdition citation data. Convert the data into Grobid TEI format for inclusion
* into the training set.
* <p/>
*
* @author Patrice Lopez
*/
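/*
 * Summary of the main mappings applied below:
 *   <distributor> / <sponsor>  -> <publisher>
 *   <meeting>                  -> <title level="m">
 *   <extent>                   -> <biblScope type="pp">
 *   <ref> without attributes   -> <ptr type="url">
 *   <title level="u">          -> <title level="m"> (unpublished works)
 */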
public class TEICitationOpenEdition extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private StringBuffer buffer = null;
private boolean isUrl = false;
private boolean isIssue = false;
public TEICitationOpenEdition() {
buffer = new StringBuffer();
}
public String getTEI() {
return buffer.toString();
}
public void cleanBuffer() {
buffer = new StringBuffer();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString();
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
String text = getText();
if (text.length() > 0) {
buffer.append(text);
}
accumulator.setLength(0);
if (qName.equals("bibl")) {
buffer.append("</bibl>\n");
}
else if (qName.equals("title")) {
buffer.append("</title>");
}
else if (qName.equals("author")) {
buffer.append("</author>");
}
else if (qName.equals("editor")) {
buffer.append("</editor>");
}
else if (qName.equals("date")) {
buffer.append("</date>");
}
else if (qName.equals("biblScope")) {
buffer.append("</biblScope>");
if (isIssue) {
isIssue = false;
if (text.indexOf("(") != -1) {
System.err.println("warning, issue mixed with volume: " + text);
}
}
}
else if (qName.equals("publisher") || qName.equals("distributor") || qName.equals("sponsor")) {
            // for us distributor = publisher = sponsor
buffer.append("</publisher>");
}
else if (qName.equals("pubPlace")) {
buffer.append("</pubPlace>");
}
else if (qName.equals("orgName")) {
buffer.append("</orgName>");
}
else if (qName.equals("meeting")) {
            // for us a meeting name is encoded as a monograph title
buffer.append("</title>");
}
else if (qName.equals("ref")) {
// the ref element is a little bit difficult to process.
            // in OpenEdition, it seems that plain text urls are encoded with a <ref> element without any
// attributes
// Otherwise, when the url applies to another element, the attribute target is used.
// A <ref> can apply to different fields, but sometimes no field at all is present, for example:
// <bibl>CDU : <ref target="http://www.hannover2007.cdu.de/download/071203-beschluss-grundsatzprogramm-4.pdf">
// <hi rend="italic">Grundsätze für Deutschland</hi></ref> </bibl>
// -> we would encode "Grundsätze für Deutschland" as <title level="m">.
if (isUrl) {
buffer.append("</ptr>");
isUrl = false;
}
}
else if (qName.equals("extent")) {
// so far we also tagged the page extent as usual page scope
buffer.append("</biblScope>");
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
String text = getText();
if (text.length() > 0) {
buffer.append(text);
}
accumulator.setLength(0);
if (qName.equals("bibl")) {
buffer.append("/n/t/t<bibl>");
}
else if (qName.equals("title")) {
int length = atts.getLength();
buffer.append("<title");
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("level")) {
if (value.equals("u")) {
// unpublished works in OpenEdition are usually master thesis or reports
buffer.append(" level=\"m\">");
}
else if (value.equals("s")) {
// we note series as journals because they are process the same in OpenURL
buffer.append(" level=\"j\">");
}
else
buffer.append(" level=\""+ value + "\">");
}
}
}
buffer.append(">");
}
else if (qName.equals("author")) {
buffer.append("<author>");
}
else if (qName.equals("editor")) {
buffer.append("<editor>");
}
else if (qName.equals("date")) {
buffer.append("<date>");
}
else if (qName.equals("meeting")) {
            // for us a meeting name is encoded as a monograph title
buffer.append("<title level=\"m\">");
}
else if (qName.equals("publisher") || qName.equals("distributor") || qName.equals("sponsor")) {
// for us distributor = publisher = sponsor
buffer.append("<publisher>");
}
else if (qName.equals("pubPlace")) {
buffer.append("<pubPlace>");
}
else if (qName.equals("orgName")) {
buffer.append("<orgName>");
}
else if (qName.equals("biblScope")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("type")) {
if (value.equals("vol")) {
buffer.append("<biblScope type=\"vol\">");
}
else if (value.equals("issue")) {
buffer.append("<biblScope type=\"issue\">");
                            // note: combinations volume(issue) are often badly encoded as just issue, ex:
// <biblScope type="issue">82(3)</biblScope>
// which is volume 82 and issue 3
// we print a warning in these case to correct this by hand
isIssue = true;
}
else if (value.equals("pp")) {
buffer.append("<biblScope type=\"pp\">");
}
else {
System.err.println("warning, unexpected attribute value: " + name + "=" + value);
}
}
}
}
}
else if (qName.equals("extent")) {
buffer.append("<biblScope type=\"pp\">");
}
else if (qName.equals("lb")) {
buffer.append(" ");
}
else if (qName.equals("surname") || qName.equals("forename") || qName.equals("c") || qName.equals("edition")
|| qName.equals("abbr") || qName.equals("hi") || qName.equals("nameLink")
|| qName.equals("settlement") || qName.equals("country") || qName.equals("region") ) {
// we do nothing
}
else if (qName.equals("ref")) {
// Process each attribute
int length = atts.getLength();
if (length == 0) {
buffer.append("<ptr type=\"url\">");
isUrl = true;
}
}
else {
System.err.println("warning, unexpected element value: " + qName);
}
}
}
| 7,072 | 30.29646 | 117 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/MPDL_METS_SaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.data.Person;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.lang.Language;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.io.*;
import java.util.ArrayList;
/**
* SAX parser for the MPDL METS catalogue data. The actual format for the bibliographical metadata is MODS.
*
* @author Patrice Lopez
*/
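/*
 * The handler consumes MODS records of roughly this shape (illustrative values):
 *
 *   <mods:mods>
 *     <mods:title>Zur Quantentheorie ...</mods:title>
 *     <mods:identifier>ZNA-1950-0001</mods:identifier>
 *     <mods:name>
 *       <mods:namePart type="family">Heisenberg</mods:namePart>
 *       <mods:namePart type="given">W.</mods:namePart>
 *       <mods:displayForm>W. Heisenberg</mods:displayForm>
 *     </mods:name>
 *     <mods:language>De</mods:language>
 *   </mods:mods>
 */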
public class MPDL_METS_SaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private ArrayList<Person> authors = null;
private ArrayList<String> subjects = null;
private ArrayList<String> files = null;
private ArrayList<String> keywords = null;
private String mods_title = null;
private String mods_identifier = null;
private String mods_start = null; // start page
private String mods_end = null; // end page
private String mods_language = Language.DE;
private String mods_genre = null; // type of paper/communication
private String author_family = null;
private String author_given = null;
private String mods_displayForm = null;
private boolean author = false;
private boolean family = false;
private boolean given = false;
private boolean displayForm = false; // name as displayed in the paper
private boolean outputFile = false;
ArrayList<String> titles = new ArrayList<String>();
ArrayList<String> ids = new ArrayList<String>();
//private Writer writer = null;
private String output = null;
public MPDL_METS_SaxParser() {
}
/*public MPDL_METS_SaxParser(Writer writ) {
writer = writ;
}*/
public MPDL_METS_SaxParser(String outp) {
output = outp;
outputFile = true;
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public void setOutputFile(boolean b) {
outputFile = b;
}
public ArrayList<String> getModsTitles() {
return titles;
}
public ArrayList<String> getIds() {
return ids;
}
public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException {
if (qName.equals("mods:title")) {
// we register in the DB the new entry
mods_title = getText();
titles.add(mods_title);
ids.add(mods_identifier);
mods_title = mods_title.replace("\n", "<lb/>");
accumulator.setLength(0);
} else if (qName.equals("mods:identifier")) {
mods_identifier = getText();
accumulator.setLength(0);
} else if (qName.equals("mods:start")) {
mods_start = getText();
accumulator.setLength(0);
} else if (qName.equals("mods:language")) {
mods_language = getText();
// the language must be formatted
if (mods_language.equals("De"))
mods_language = Language.DE;
else if (mods_language.equals("En"))
mods_language = Language.EN;
else
mods_language = Language.DE;
accumulator.setLength(0);
} else if (qName.equals("mods:end")) {
mods_end = getText();
accumulator.setLength(0);
} else if (qName.equals("mods:name")) {
            // build the author from the accumulated name parts (note: currently not added to any list)
Person aut = new Person();
aut.setFirstName(author_given);
aut.setLastName(author_family);
//aut.setDisplayName(mods_displayForm);
//authors.add(aut);
accumulator.setLength(0);
} else if (qName.equals("mods:namePart")) {
if (family) {
author_family = getText();
family = false;
} else if (given) {
author_given = getText();
given = false;
}
accumulator.setLength(0);
} else if (qName.equals("mods:displayForm")) {
mods_displayForm = getText();
accumulator.setLength(0);
} else if (qName.equals("mods:mods")) {
// end of bibliographical entry
try {
if (outputFile) {
File outFile = new File(output + "/" + mods_identifier + "-train.tei");
OutputStream os = new FileOutputStream(outFile);
Writer writer = new OutputStreamWriter(os, "UTF-8");
writer.write("<tei>\n\t<teiHeader>\n\t<fileDesc xml:id=\"" + mods_identifier +
"\"/>\n\t</teiHeader>\n\t<text xml:lang=\"" + mods_language + "\">\n");
// we can write the title section
writer.write("\t\t<front>\n\t\t\t<titlePage>\n\t\t\t\t<docTitle>\n");
writer.write("\t\t\t\t\t<titlePart type=\"main\">" + mods_title + "</titlePart>\n");
writer.write("\t\t\t\t</docTitle>\n");
writer.write("\t\t\t\t<byline><docAuthor>" + mods_displayForm + "</docAuthor><lb/></byline>\n");
writer.write("\t\t\t\t<byline><affiliation><lb/></affiliation></byline>\n");
writer.write("\t\t\t\t<docImprint>(<title level=\"j\">Z. Naturforschg.</title> <biblScope type=\"vol\"></biblScope>, <biblScope type=\"pp\"></biblScope> [<date></date>]; <note>eingegangen am</note>)<lb/></docImprint>\n");
writer.write("\t\t\t</titlePage>\n");
writer.write("\t\t\t<div type=\"abstract\"><lb/></div>\n");
writer.write("\t\t\t<div type=\"intro\"></div>\n");
writer.write("\t\t</front>\n\t</text>\n</tei>\n");
writer.close();
os.close();
}
} catch (Exception e) {
// e.printStackTrace();
throw new GrobidException("An exception occured while running Grobid.", e);
}
accumulator.setLength(0);
}
accumulator.setLength(0);
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("mods:namePart")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("family")) {
family = true;
given = false;
} else if (value.equals("given")) {
given = true;
family = false;
}
}
}
}
accumulator.setLength(0);
} else if (qName.equals("mods:mods")) {
// new bibliographical entry
// tei file is opened when this tag is closed
} else {
accumulator.setLength(0);
}
}
}
| 7,506 | 35.79902 | 241 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/ChemicalNameSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
 * This SAX parser simply records the chemical name stand-off annotations and their corresponding word
* identifiers.
*
* @author Patrice Lopez
*/
public class ChemicalNameSaxParser extends DefaultHandler {
private ArrayList<ArrayList<String>> chemicalWords = null;
private ArrayList<String> localChemicalWords = null;
private int numberEntities = 0;
public ArrayList<ArrayList<String>> getChemicalAnnotations() {
return chemicalWords;
}
public int getNumberEntities() {
return numberEntities;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
try {
if (qName.equals("chem-name")) {
if ((localChemicalWords != null) && (localChemicalWords.size() > 0)) {
// we need to keep only the first and last word id for a given sequence
                    // note that the order of the word ids in this file does not respect the original word order
String idd1 = null;
String idd2 = null;
for (String idd : localChemicalWords) {
if (idd1 == null) {
idd1 = idd;
}
if (idd2 == null) {
idd2 = idd;
}
if (idd.length() < idd1.length()) {
idd1 = idd;
} else if (idd.length() > idd2.length()) {
idd2 = idd;
} else if (idd.compareToIgnoreCase(idd1) < 0) {
idd1 = idd;
} else if (idd.compareToIgnoreCase(idd2) > 0) {
idd2 = idd;
}
}
localChemicalWords = new ArrayList<String>();
localChemicalWords.add(idd1);
localChemicalWords.add(idd2);
chemicalWords.add(localChemicalWords);
//System.out.println(localChemicalWords);
localChemicalWords = null;
}
numberEntities++;
}
} catch (Exception e) {
// e.printStackTrace();
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
try {
if (qName.equals("nite:root")) {
chemicalWords = new ArrayList<ArrayList<String>>();
} else if (qName.equals("chem-name")) {
localChemicalWords = new ArrayList<String>();
} else if (qName.equals("nite:child")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("href")) {
// there are two notations to handle, one compact with .., one with one child per word
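                            // e.g. href="doc.xml#id(w12)..id(w15)" for a compact range, versus one
                            // nite:child per word with href="doc.xml#id(w12)" (ids shown are illustrative)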
int ind = value.indexOf("..");
if (ind != -1) {
// we have a sequence with a first and last word id
int ind1 = value.indexOf("(");
int ind2 = value.indexOf(")");
String idd1 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd1);
ind1 = value.indexOf("(", ind1 + 1);
ind2 = value.indexOf(")", ind2 + 1);
String idd2 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd2);
} else {
ind = value.indexOf("(");
String idd = value.substring(ind + 1, value.length() - 1);
localChemicalWords.add(idd);
}
}
}
}
}
} catch (Exception e) {
// e.printStackTrace();
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
}
| 5,005 | 39.699187 | 114 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/ChemicalSubstancesSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
 * This SAX parser simply records the chemical substance name stand-off annotations and their corresponding word
* identifiers.
*
* @author Patrice Lopez
*/
public class ChemicalSubstancesSaxParser extends DefaultHandler {
private ArrayList<ArrayList<String>> chemicalWords = null;
private ArrayList<String> localChemicalWords = null;
private int numberEntities = 0;
public ArrayList<ArrayList<String>> getChemicalSubstances() {
return chemicalWords;
}
public int getNumberEntities() {
return numberEntities;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
try {
if (qName.equals("substance-name")) {
if ((localChemicalWords != null) && (localChemicalWords.size() > 0)) {
// we need to keep only the first and last word id for a given sequence
                    // note that the order of the word ids in this file does not respect the original word order
String idd1 = null;
String idd2 = null;
for (String idd : localChemicalWords) {
if (idd1 == null) {
idd1 = idd;
}
if (idd2 == null) {
idd2 = idd;
}
if (idd.length() < idd1.length()) {
idd1 = idd;
} else if (idd.length() > idd2.length()) {
idd2 = idd;
} else if (idd.compareToIgnoreCase(idd1) < 0) {
idd1 = idd;
} else if (idd.compareToIgnoreCase(idd2) > 0) {
idd2 = idd;
}
}
localChemicalWords = new ArrayList<String>();
localChemicalWords.add(idd1);
localChemicalWords.add(idd2);
chemicalWords.add(localChemicalWords);
//System.out.println(localChemicalWords);
localChemicalWords = null;
}
numberEntities++;
}
} catch (Exception e) {
// e.printStackTrace();
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts) throws SAXException {
try {
if (qName.equals("nite:root")) {
chemicalWords = new ArrayList<ArrayList<String>>();
} else if (qName.equals("substance-name")) {
localChemicalWords = new ArrayList<String>();
} else if (qName.equals("nite:child")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if ((name != null) && (value != null)) {
if (name.equals("href")) {
// there are two notations to handle, one compact with .., one with one child per word
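                            // e.g. href="doc.xml#id(w3)..id(w7)" for a compact range, versus one
                            // nite:child per word with href="doc.xml#id(w3)" (ids shown are illustrative)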
int ind = value.indexOf("..");
if (ind != -1) {
// we have a sequence with a first and last word id
int ind1 = value.indexOf("(");
int ind2 = value.indexOf(")");
String idd1 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd1);
ind1 = value.indexOf("(", ind1 + 1);
ind2 = value.indexOf(")", ind2 + 1);
String idd2 = value.substring(ind1 + 1, ind2);
localChemicalWords.add(idd2);
} else {
ind = value.indexOf("(");
String idd = value.substring(ind + 1, value.length() - 1);
localChemicalWords.add(idd);
}
}
}
}
}
} catch (Exception e) {
// e.printStackTrace();
            throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
}
| 5,029 | 40.229508 | 114 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/WikiTextExtractSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.exceptions.GrobidException;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.io.*;
import java.util.StringTokenizer;
/**
 * SAX parser for XML Wikipedia page articles (file .hgw.xml). Extracts the page text, cleans out residual wiki markup, and dumps it into rolling corpus files.
*
* @author Patrice Lopez
*/
public class WikiTextExtractSaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String PageID = null;
private String lang = null;
private Writer writer = null;
private boolean textBegin = false;
private int page = 0;
private String path = null;
private int fileCount = 0;
public WikiTextExtractSaxParser() {
}
public WikiTextExtractSaxParser(String p) {
path = p;
}
public void characters(char[] buffer, int start, int length) {
if (textBegin)
accumulator.append(buffer, start, length);
}
//static final String INSERT_PAGEDEF_SQL =
// "UPDATE wiki_page SET def = ? WHERE PageID=?";
public void endElement(java.lang.String uri, java.lang.String localName, java.lang.String qName) throws SAXException {
if (qName.equals("text")) {
textBegin = false;
// we have a big piece of accumulated text, we dump it in the corpus file after some cleaning
String blabla = accumulator.toString();
StringTokenizer st = new StringTokenizer(blabla, "\n");
while (st.hasMoreTokens()) {
String line = st.nextToken();
//System.out.println(line);
if (line.length() == 0)
continue;
if (line.startsWith("__")) {
continue;
}
if (line.startsWith("PMID")) {
continue;
}
if (line.startsWith("#")) {
continue;
}
String line0 = "";
boolean end = false;
int pos = 0;
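                // strip bracketed spans such as "[...]", e.g. "foo [12] bar" -> "foo bar"
                // (example input is illustrative)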
while (!end) {
int ind = line.indexOf("[", pos);
//System.out.println("ind: " + ind);
if (ind != -1) {
int inde = line.indexOf(']', pos);
//System.out.println("inde: " + inde);
if (inde != -1) {
line0 += line.substring(pos, ind);
pos = inde + 2;
                        } else {
                            // unmatched '[': skip the bracket and keep the rest of the line
                            line0 += line.substring(pos, ind) + line.substring(ind + 1);
                            end = true;
}
} else {
//System.out.println("pos: " + pos);
if (pos < line.length() - 1)
line0 += line.substring(pos, line.length());
end = true;
}
}
line = line0.trim();
if (line.indexOf("|") != -1)
continue;
if (line.startsWith("poly"))
continue;
for (int i = 0; i < 5; i++) {
                    if ((line.startsWith(".")) || (line.startsWith("*")) || (line.startsWith(":")) ||
                            (line.startsWith("\"")) || (line.startsWith(";"))) {
line = line.substring(1, line.length());
line = line.trim();
}
}
//System.out.println(line);
                if ((line.length() > 0) && (!line.startsWith("Help")) && (!line.startsWith("NONE"))
                        && (!line.startsWith("beg")) && (!line.startsWith(": See also")) && (!line.startsWith(": \"See also"))
                        && (!line.startsWith(":See also")) && (!line.startsWith("Wiktionary")) && (!line.startsWith("subgroup"))
) {
// do we need some more cleaning ?
try {
writer.write(line);
writer.write("\n");
writer.flush();
} catch (Exception e) {
// e.printStackTrace();
                        throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
}
// reinit
PageID = null;
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("page")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("id")) {
PageID = value;
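                        // roll over to a new output file every 4000 pages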
if (page > 4000) {
// reinit
page = 0;
}
if (page == 0) {
try {
// open a new file
if (writer != null)
writer.close();
File file = new File(path + "text-" + fileCount + ".txt");
System.out.println(path + "text-" + fileCount + ".txt");
OutputStream os = new FileOutputStream(file, false);
writer = new OutputStreamWriter(os, "UTF-8");
fileCount++;
} catch (Exception e) {
// e.printStackTrace();
                                throw new GrobidException("An exception occurred while running Grobid.", e);
}
}
page++;
}
}
}
} else if (qName.equals("text")) {
textBegin = true;
accumulator.setLength(0); // we start to buffer text, no need to buffer the rest
} else {
            // other elements are ignored
}
}
}
| 6,585 | 35.588889 | 125 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/FieldExtractSaxHandler.java
|
package org.grobid.trainer.sax;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
/**
* Utility SAX parser which extracts all the text content under a given TEI tag name or
* under a specified xml path.
*
* @author Patrice Lopez
*/
public class FieldExtractSaxHandler extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private String field = null;
private String xmlPath = null;
private ArrayList<String> values = null; // store the content values for each tag occurrence
public FieldExtractSaxHandler() {
values = new ArrayList<String>();
}
public void setField(String f) {
field = f;
}
public void setXmlPath(String path) {
xmlPath = path;
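        // note: matching below only uses the plain tag name (field); xmlPath is stored
        // but not yet used to constrain the match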
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
return accumulator.toString().trim();
}
public ArrayList<String> getValues() {
return values;
}
public void endElement(String uri,
String localName,
String qName) throws SAXException {
if (field != null) {
if (qName.equals(field)) {
values.add(getText());
accumulator.setLength(0);
}
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (field != null) {
if (qName.equals(field)) {
accumulator.setLength(0);
}
}
}
}
| 1,833 | 24.123288 | 96 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIFigureSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Stack;
import java.util.StringTokenizer;
/**
* SAX parser for the TEI format for figure and table data encoded for training.
*
* @author Patrice Lopez
*/
public class TEIFigureSaxParser extends DefaultHandler {
private StringBuffer accumulator = null; // current accumulated text
private String output = null;
private Stack<String> currentTags = null;
private String currentTag = null;
private boolean figureBlock = false;
private boolean tableBlock = false;
private ArrayList<String> labeled = null; // store line by line the labeled data
    private List<String> allTags = Arrays.asList("<figure_head>", "<figDesc>", "<content>", "<label>", "<note>", "<other>");
public TEIFigureSaxParser() {
labeled = new ArrayList<String>();
currentTags = new Stack<String>();
accumulator = new StringBuffer();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
if (accumulator != null) {
//System.out.println(accumulator.toString().trim());
return accumulator.toString().trim();
} else {
return null;
}
}
public List<String> getLabeledResult() {
return labeled;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ( (!qName.equals("lb")) && (!qName.equals("pb")) ) {
if (!currentTags.empty()) {
currentTag = currentTags.peek();
}
writeData(currentTag, true);
}
if (qName.equals("figure")) {
figureBlock = false;
tableBlock = false;
labeled.add("");
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("lb")) {
accumulator.append(" +L+ ");
}
else if (qName.equals("pb")) {
accumulator.append(" +PAGE+ ");
}
else if (qName.equals("space")) {
accumulator.append(" ");
}
else {
// we have to write first what has been accumulated yet with the upper-level tag
String text = getText();
if (text != null) {
if (text.length() > 0) {
writeData(currentTag, false);
}
}
accumulator.setLength(0);
if (qName.equals("head")) {
if (figureBlock || tableBlock) {
currentTags.push("<figure_head>");
currentTag = "<figure_head>";
}
}
else if (qName.equals("figDesc")) {
currentTags.push("<figDesc>");
currentTag = "<figDesc>";
}
else if (qName.equals("table")) {
currentTags.push("<content>");
currentTag = "<content>";
}
else if (qName.equals("trash") || qName.equals("content")) {
currentTags.push("<content>");
currentTag = "<content>";
}
else if (qName.equals("label")) {
currentTags.push("<label>");
currentTag = "<label>";
}
else if (qName.equals("note")) {
currentTags.push("<note>");
currentTag = "<note>";
}
else if (qName.equals("figure")) {
figureBlock = true;
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("table")) {
tableBlock = true;
}
}
}
}
if (tableBlock) {
figureBlock = false;
}
currentTags.push("<other>");
currentTag = "<other>";
}
else {
qName = qName.toLowerCase();
if (!qName.equals("tei") && !qName.equals("teiheader") && !qName.equals("text") && !qName.equals("filedesc"))
System.out.println("Warning, unknown xml tag in training file: " + qName);
}
}
}
private void writeData(String currentTag, boolean pop) {
if (currentTag == null) {
return;
}
if (allTags.contains(currentTag)) {
if (pop) {
if (!currentTags.empty()) {
currentTags.pop();
}
}
String text = getText();
// we segment the text
StringTokenizer st = new StringTokenizer(text, " \n\t" + TextUtilities.fullPunctuations, true);
boolean begin = true;
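            // IOB-style output: the first token of a field gets the "I-" prefix
            // (e.g. "Figure I-<figure_head>"), the following tokens carry the plain label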
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0)
continue;
if (tok.equals("+L+")) {
labeled.add("@newline\n");
}
else if (tok.equals("+PAGE+")) {
// page break should be a distinct feature
labeled.add("@newpage\n");
}
else {
String content = tok;
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(content + " " + currentTag + "\n");
}
}
}
begin = false;
}
accumulator.setLength(0);
} else {
System.out.println("Warning, unknown tag in training file: " + currentTag);
}
}
}
| 6,482 | 30.470874 | 125 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIFulltextSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.util.ArrayList;
import java.util.List;
import java.util.Stack;
import java.util.StringTokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* SAX parser for the TEI format for fulltext data encoded for training. Normally all training data should
* be in this unique format for the fulltext model.
 * The segmentation of tokens must be identical to the one from pdf2xml files so that
* training and online input tokens are aligned.
*
* @author Patrice Lopez
*/
public class TEIFulltextSaxParser extends DefaultHandler {
private static final Logger logger = LoggerFactory.getLogger(TEIFulltextSaxParser.class);
private StringBuffer accumulator = null; // current accumulated text
private String output = null;
private Stack<String> currentTags = null;
private String currentTag = null;
private boolean figureBlock = false;
private boolean tableBlock = false;
private ArrayList<String> labeled = null; // store line by line the labeled data
public TEIFulltextSaxParser() {
labeled = new ArrayList<String>();
currentTags = new Stack<String>();
accumulator = new StringBuffer();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
}
public String getText() {
if (accumulator != null) {
//System.out.println(accumulator.toString().trim());
return accumulator.toString().trim();
} else {
return null;
}
}
public List<String> getLabeledResult() {
return labeled;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ( (!qName.equals("lb")) && (!qName.equals("pb")) && (!qName.equals("space")) ) {
writeData(qName, true);
if (!currentTags.empty()) {
currentTag = currentTags.peek();
}
}
if (qName.equals("figure") || qName.equals("table")) {
figureBlock = false;
tableBlock = false;
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (qName.equals("lb")) {
//accumulator.append(" +LINE+ ");
accumulator.append(" ");
}
else if (qName.equals("space")) {
accumulator.append(" ");
}
else {
// we have to write first what has been accumulated yet with the upper-level tag
String text = getText();
if (text != null) {
if (text.length() > 0) {
writeData(qName, false);
}
}
accumulator.setLength(0);
if (qName.equals("div")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("paragraph")) {
currentTags.push("<paragraph>");
currentTag = "<paragraph>";
} else {
logger.error("Invalid attribute value for element div: " + name + "=" + value);
}
} else {
logger.error("Invalid attribute name for element div: " + name);
}
}
}
}
else if (qName.equals("p") ) {
currentTags.push("<paragraph>");
currentTag = "<paragraph>";
}
else if (qName.equals("other")) {
currentTags.push("<other>");
currentTag = "<other>";
}
else if (qName.equals("ref")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("biblio")) {
currentTags.push("<citation_marker>");
currentTag = "<citation_marker>";
} else if (value.equals("figure")) {
currentTags.push("<figure_marker>");
currentTag = "<figure_marker>";
} else if (value.equals("table")) {
currentTags.push("<table_marker>");
currentTag = "<table_marker>";
} else if (value.equals("formula") || value.equals("equation")) {
currentTags.push("<equation_marker>");
currentTag = "<equation_marker>";
} else if (value.equals("section")) {
currentTags.push("<section_marker>");
currentTag = "<section_marker>";
} else {
logger.error("Invalid attribute value for element ref: " + name + "=" + value);
}
} else {
logger.error("Invalid attribute name for element ref: " + name);
}
}
}
}
else if (qName.equals("formula")) {
currentTags.push("<equation>");
currentTag = "<equation>";
} else if (qName.equals("label")) {
currentTags.push("<equation_label>");
currentTag = "<equation_label>";
} else if (qName.equals("head")) {
{
currentTags.push("<section>");
currentTag = "<section>";
}
}
else if (qName.equals("table")) {
currentTags.push("<table>");
currentTag = "<table>";
tableBlock = true;
figureBlock = false;
}
else if (qName.equals("item")) {
currentTags.push("<paragraph>");
currentTag = "<paragraph>";
//currentTags.push("<item>");
//currentTag = "<item>";
}
else if (qName.equals("figure")) {
figureBlock = true;
tableBlock = false;
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
if (value.equals("table")) {
tableBlock = true;
} else {
logger.error("Invalid attribute value for element figure: " + name + "=" + value);
}
} else {
logger.error("Invalid attribute name for element figure: " + name);
}
}
}
if (tableBlock) {
figureBlock = false;
currentTags.push("<table>");
currentTag = "<table>";
}
else {
currentTags.push("<figure>");
currentTag = "<figure>";
}
}
else if (qName.equals("other")) {
currentTags.push("<other>");
currentTag = "<other>";
} else if (qName.equals("text")) {
currentTags.push("<other>");
currentTag = "<other>";
} else {
if (!qName.equals("tei") && !qName.equals("teiHeader") && !qName.equals("fileDesc") && !qName.equals("list")) {
logger.error("Invalid element name: " + qName + " - it will be mapped to the label <other>");
currentTags.push("<other>");
currentTag = "<other>";
}
}
}
}
private void writeData(String qName, boolean pop) {
if ( (qName.equals("other")) || (qName.equals("p")) ||
(qName.equals("ref")) || (qName.equals("head")) || (qName.equals("figure")) ||
(qName.equals("paragraph")) ||
(qName.equals("div")) || //(qName.equals("figDesc")) ||
(qName.equals("table")) || //(qName.equals("trash")) ||
(qName.equals("formula")) || (qName.equals("item")) || (qName.equals("label"))
) {
if (currentTag == null) {
return;
}
if (pop) {
if (!currentTags.empty()) {
currentTags.pop();
}
}
// adjust tag (conservative)
if (tableBlock) {
currentTag = "<table>";
}
else if (figureBlock) {
currentTag = "<figure>";
}
String text = getText();
// we segment the text
StringTokenizer st = new StringTokenizer(text, TextUtilities.delimiters, true);
boolean begin = true;
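            // the first token of a labeled span gets the "I-" begin prefix, the rest the plain label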
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0)
continue;
/*if (tok.equals("+LINE+")) {
labeled.add("@newline\n");
} else*/ {
String content = tok;
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag + "\n");
begin = false;
} else {
labeled.add(content + " " + currentTag + "\n");
}
}
}
begin = false;
}
accumulator.setLength(0);
}
}
}
| 10,585 | 35.006803 | 127 |
java
|
grobid
|
grobid-master/grobid-trainer/src/main/java/org/grobid/trainer/sax/TEIAffiliationAddressSaxParser.java
|
package org.grobid.trainer.sax;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.TextUtilities;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
/**
* SAX parser for affiliation+address sequences encoded in the TEI format data.
 * The segmentation of tokens must be identical to the one from pdf2xml files so that
 * training and online input tokens are aligned.
*
* @author Patrice Lopez
*/
public class TEIAffiliationAddressSaxParser extends DefaultHandler {
private StringBuffer accumulator = new StringBuffer(); // Accumulate parsed text
private StringBuffer allContent = new StringBuffer();
private String output = null;
private String currentTag = null;
private List<String> labeled = null; // store line by line the labeled data
public List<List<OffsetPosition>> placesPositions = null; // list of offset positions of place names
public List<List<LayoutToken>> allTokens = null;
//private Writer writerAddress = null; // writer for the address model
private Writer writerCORA = null; // writer for conversion into TEI header model
public int n = 0;
public Lexicon lexicon = Lexicon.getInstance();
public void setTEIHeaderOutput(Writer writ) {
writerCORA = writ;
}
public TEIAffiliationAddressSaxParser() {
labeled = new ArrayList<String>();
placesPositions = new ArrayList<List<OffsetPosition>>();
allTokens = new ArrayList<List<LayoutToken>>();
}
public void characters(char[] buffer, int start, int length) {
accumulator.append(buffer, start, length);
//if (allContent != null) {
// allContent.append(buffer, start, length);
//}
}
public String getText() {
return accumulator.toString().trim();
}
public List<String> getLabeledResult() {
return labeled;
}
public List<List<OffsetPosition>> getPlacesPositions() {
return placesPositions;
}
public List<List<LayoutToken>> getAllTokens() {
return allTokens;
}
public void endElement(java.lang.String uri,
java.lang.String localName,
java.lang.String qName) throws SAXException {
if ((
(qName.equals("addrLine")) ||
(qName.equals("settlement")) ||
(qName.equals("region")) ||
(qName.equals("postCode")) ||
(qName.equals("postBox")) ||
(qName.equals("marker")) ||
(qName.equals("country") ||
(qName.equals("orgName")))
)) {
String text = getText();
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
accumulator.setLength(0);
} else if (qName.equals("lb") || qName.equals("pb")) {
// we note a line break
accumulator.append(" @newline ");
} else if (qName.equals("affiliation")) {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
}
accumulator.setLength(0);
} else if (qName.equals("author")) {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
}
labeled.add("\n \n");
String allString = allContent.toString().trim();
//allString = allString.replace("@newline", "");
allString = allString.replace("@newline", "\n");
//List<OffsetPosition> toto = lexicon.tokenPositionsCityNames(allString);
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(allString);
List<OffsetPosition> toto = lexicon.tokenPositionsCityNames(tokens);
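            // gazetteer lookup: offset positions of known city names, presumably used
            // downstream as lexicon features for the affiliation-address model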
placesPositions.add(toto);
allTokens.add(tokens);
allContent = null;
allString = null;
accumulator.setLength(0);
} else {
accumulator.setLength(0);
}
}
public void startElement(String namespaceURI,
String localName,
String qName,
Attributes atts)
throws SAXException {
if (!qName.equals("lb") && !qName.equals("pb")) {
String text = getText();
if (text.length() > 0) {
currentTag = "<other>";
writeField(text);
if (allContent != null) {
if (allContent.length() != 0) {
allContent.append(" ");
}
allContent.append(text);
}
}
accumulator.setLength(0);
}
//else {
// writeField("+++");
//}
if (qName.equals("orgName")) {
int length = atts.getLength();
// Process each attribute
for (int i = 0; i < length; i++) {
// Get names and values for each attribute
String name = atts.getQName(i);
String value = atts.getValue(i);
if (name != null) {
if (name.equals("type")) {
value = value.toLowerCase();
if (value.equals("department") || value.equals("departement")) {
currentTag = "<department>";
} else if (value.equals("institution") || value.equals("institute")) {
currentTag = "<institution>";
} else if (value.equals("laboratory")) {
currentTag = "<laboratory>";
} else if (value.equals("consortium")) {
currentTag = "<institution>";
} else {
currentTag = null;
}
}
}
}
} else if (qName.equals("affiliation")) {
currentTag = null;
accumulator.setLength(0);
n++;
} else if (qName.equals("addrLine") || qName.equals("addrline")) {
currentTag = "<addrLine>";
} else if (qName.equals("settlement")) {
currentTag = "<settlement>";
} else if (qName.equals("region")) {
currentTag = "<region>";
} else if (qName.equals("postCode") || qName.equals("postcode")) {
currentTag = "<postCode>";
} else if (qName.equals("postBox") || qName.equals("postbox")) {
currentTag = "<postBox>";
} else if (qName.equals("country")) {
currentTag = "<country>";
} else if (qName.equals("marker")) {
currentTag = "<marker>";
} else if (qName.equals("author")) {
accumulator = new StringBuffer();
allContent = new StringBuffer();
} else {
//currentTag = null;
}
}
private void writeField(String text) {
// we segment the text
StringTokenizer st = new StringTokenizer(text, " \n\t" + TextUtilities.fullPunctuations, true);
boolean begin = true;
while (st.hasMoreTokens()) {
String tok = st.nextToken().trim();
if (tok.length() == 0) {
continue;
}
if (tok.equals("@newline")) {
labeled.add("@newline");
} else if (tok.equals("+PAGE+")) {
// page break - no influence here
labeled.add("@newline");
} else {
String content = tok;
if (content.length() > 0) {
if (begin) {
labeled.add(content + " I-" + currentTag);
begin = false;
} else {
labeled.add(content + " " + currentTag);
}
}
}
begin = false;
}
}
}
| 9,098 | 35.396 | 104 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/GrobidModelsTest.java
|
package org.grobid.core;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class GrobidModelsTest {
@BeforeClass
public static void setInitialContext() throws Exception {
GrobidProperties.getInstance();
}
@Test
public void testGrobidModelsEnum_StandardModel_affiliation() throws Exception {
GrobidModel model = GrobidModels.AFFILIATION_ADDRESS;
assertThat(model.getFolderName(), is("affiliation-address"));
assertThat(model.getModelName(), is("affiliation-address"));
assertThat(model.getTemplateName(), is("affiliation-address.template"));
String[] splittedPath = model.getModelPath().split("[/\\\\]");
//assertThat(splittedPath[splittedPath.length - 1], is("model.wapiti"));
assertThat(splittedPath[splittedPath.length - 2], is("affiliation-address"));
assertThat(splittedPath[splittedPath.length - 3], is("models"));
}
@Test
public void testGrobidModelsEnum_StandardModel_name() throws Exception {
GrobidModel model = GrobidModels.HEADER;
assertThat(model.getFolderName(), is("header"));
assertThat(model.getModelName(), is("header"));
assertThat(model.getTemplateName(), is("header.template"));
String[] splittedPath = model.getModelPath().split("[/\\\\]");
//assertThat(splittedPath[splittedPath.length - 1], is("model.wapiti"));
assertThat(splittedPath[splittedPath.length - 2], is("header"));
assertThat(splittedPath[splittedPath.length - 4], is("grobid-home"));
}
//@Test
public void testGrobidModelsEnum_CustomModel() throws Exception {
GrobidModel model = GrobidModels.modelFor("myDreamModel");
assertThat(model.getFolderName(), is("myDreamModel"));
assertThat(model.getModelName(), is("myDreamModel"));
assertThat(model.getTemplateName(), is("myDreamModel.template"));
String[] tokenizePath = model.getModelPath().split("[/\\\\]");
//assertThat(tokenizePath[tokenizePath.length - 1], is("model.wapiti"));
assertThat(tokenizePath[tokenizePath.length - 2], is("myDreamModel"));
assertThat(tokenizePath[tokenizePath.length - 3], is("models"));
assertThat(tokenizePath[tokenizePath.length - 4], is("grobid-home"));
GrobidModel model2 = GrobidModels.modelFor("AnotherDreamModel");
assertThat(model2.equals(model), is(false));
GrobidModel model3 = GrobidModels.modelFor("myDreamModel");
assertThat(model3.equals(model), is(true));
}
}
| 2,691 | 39.787879 | 85 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/factory/GrobidPoolingFactoryTest.java
|
package org.grobid.core.factory;
import org.apache.commons.pool.impl.GenericObjectPool;
import org.apache.commons.pool.impl.StackObjectPool;
import org.grobid.core.engines.Engine;
import org.junit.Test;
public class GrobidPoolingFactoryTest {
@Test
public void testnewPoolInstance() throws Exception {
// GrobidPoolingFactory factory = GrobidPoolingFactory.newInstance();
// StackObjectPool pool = new StackObjectPool(factory);
// GenericObjectPool genericPool = GrobidPoolingFactory.newPoolInstance();
// Engine engine = (Engine)genericPool.borrowObject();
// engine.processDate("10 November 2012");
// engine.close();
}
}
| 643 | 29.666667 | 76 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/features/TestFeatures.java
|
package org.grobid.core.features;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
public class TestFeatures {
FeatureFactory target;
@BeforeClass
public static void setInitialContext() throws Exception {
GrobidProperties.getInstance();
Lexicon.getInstance();
}
@AfterClass
public static void destroyInitialContext() throws Exception {
}
@Before
public void setUp() throws Exception {
target = FeatureFactory.getInstance();
}
@Test
public void testDiscretizeLinearScale_10Nbins_100total_30val() {
int val = 30;
int total = 100;
int nbBins = 10;
// System.out.println("linear " + nbBins);
assertThat(target.linearScaling(0, total, nbBins), is(0));
assertThat(target.linearScaling(10, total, nbBins), is(1));
assertThat(target.linearScaling(20, total, nbBins), is(2));
assertThat(target.linearScaling(50, total, nbBins), is(5));
assertThat(target.linearScaling(70, total, nbBins), is(7));
assertThat(target.linearScaling(100, total, nbBins), is(10));
assertEquals("Discretized value is not the expected one", 3, target.linearScaling(val, total, nbBins));
}
@Test
public void testDiscretizeLinearScale_7Nbins_100total_30val() {
int val = 30;
int total = 100;
int nbBins = 7;
// System.out.println("linear " + nbBins);
assertThat(target.linearScaling(10, total, nbBins), is(0));
assertThat(target.linearScaling(20, total, nbBins), is(1));
assertThat(target.linearScaling(50, total, nbBins), is(3));
assertThat(target.linearScaling(70, total, nbBins), is(4));
assertEquals("Discretized value is not the expected one", 2, target.linearScaling(val, total, nbBins));
}
@Test
public void testDiscretizeLinearScale_10Nbins_1total_03val() {
double valD = 0.3;
double totalD = 1.0;
int nbBins = 10;
// System.out.println("linear (double) " + nbBins);
assertThat(target.linearScaling(0.1, totalD, nbBins), is(1));
assertThat(target.linearScaling(0.2, totalD, nbBins), is(2));
assertThat(target.linearScaling(0.5, totalD, nbBins), is(5));
assertThat(target.linearScaling(0.7, totalD, nbBins), is(7));
assertEquals("Discretized value is not the expected one", 3, target.linearScaling(valD, totalD, nbBins));
nbBins = 8;
assertEquals("Discretized value is not the expected one", 2, target.linearScaling(valD, totalD, nbBins));
}
@Test
public void testDiscretizeLogScale_12Nbins_1total_03val() {
double valD = 0.3;
double totalD = 1.0;
int nbBins = 12;
// System.out.println("log (double) " + nbBins);
assertThat(target.logScaling(0.0, totalD, nbBins), is(0));
assertThat(target.logScaling(0.1, totalD, nbBins), is(1));
assertThat(target.logScaling(0.2, totalD, nbBins), is(3));
assertThat(target.logScaling(0.5, totalD, nbBins), is(7));
assertThat(target.logScaling(0.7, totalD, nbBins), is(9));
assertThat(target.logScaling(1.0, totalD, nbBins), is(12));
assertEquals("Discretized value is not the expected one", 4, target.logScaling(valD, totalD, nbBins));
}
@Test
public void testDiscretizeLogScale_8Nbins_1total_03val() {
double valD = 0.3;
double totalD = 1.0;
int nbBins = 8;
// System.out.println("log (double) " + nbBins);
assertThat(target.logScaling(0.0, totalD, nbBins), is(0));
assertThat(target.logScaling(0.1, totalD, nbBins), is(1));
assertThat(target.logScaling(0.2, totalD, nbBins), is(2));
assertThat(target.logScaling(0.5, totalD, nbBins), is(4));
assertThat(target.logScaling(0.7, totalD, nbBins), is(6));
assertThat(target.logScaling(1.0, totalD, nbBins), is(8));
assertEquals("Discretized value is not the expected one", 3, target.logScaling(valD, totalD, nbBins));
}
}
| 4,325 | 35.352941 | 113 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/lang/LanguageTest.java
|
package org.grobid.core.lang;
import org.grobid.core.exceptions.GrobidException;
import org.junit.Test;
import static org.junit.Assert.assertNotNull;
public class LanguageTest {
@Test
public void testLanguagesAvailableInLangdetect() {
String[] langList = new String[] {
"af",
"ar",
"bg",
"bn",
"cs",
"da",
"de",
"el",
"en",
"es",
"et",
"fa",
"fi",
"fr",
"gu",
"he",
"hi",
"hr",
"hu",
"id",
"it",
"ja",
"kn",
"ko",
"lt",
"lv",
"mk",
"ml",
"mr",
"ne",
"nl",
"no",
"pa",
"pl",
"pt",
"ro",
"ru",
"sk",
"sl",
"so",
"sq",
"sv",
"sw",
"ta",
"te",
"th",
"tl",
"tr",
"uk",
"ur",
"vi",
"zh-cn",
"zh-tw"
};
// Should not throw an exception
for (String lang : langList) {
assertNotNull(new Language(lang, 1d));
}
}
@Test(expected = GrobidException.class)
public void testLanguagesWithInvalidLang_shouldThrowException() {
new Language("baomiao", 1d);
}
@Test
    public void testLanguagesWithUnknownLang_3chars_shouldNotThrowException() {
assertNotNull(new Language("bao", 1d));
}
@Test
    public void testLanguagesWithUnknownLang_2chars_shouldNotThrowException() {
assertNotNull(new Language("aa", 1d));
}
@Test(expected = GrobidException.class)
public void testLanguagesWithNullLang_shouldThrowException() {
new Language(null, 1d);
}
}
| 2,002 | 20.084211 | 76 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/sax/PDFALTOAnnotationSaxParserTest.java
|
package org.grobid.core.sax;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.layout.PDFAnnotation;
import org.grobid.core.layout.LayoutToken;
import org.junit.Before;
import org.junit.Test;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import static org.easymock.EasyMock.createMock;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public class PDFALTOAnnotationSaxParserTest {
SAXParserFactory spf = SAXParserFactory.newInstance();
PDFALTOAnnotationSaxHandler target;
DocumentSource mockDocumentSource;
Document document;
@Before
public void setUp() throws Exception {
mockDocumentSource = createMock(DocumentSource.class);
document = Document.createFromText("");
target = new PDFALTOAnnotationSaxHandler(document, new ArrayList<PDFAnnotation>());
}
@Test
public void testParsing_pdf2XMLAnnotations_ShouldWork() throws Exception {
InputStream is = this.getClass().getResourceAsStream("pdfalto.xml_annot.xml");
SAXParser p = spf.newSAXParser();
p.parse(is, target);
List<PDFAnnotation> pdfAnnotations = target.getPDFAnnotations();
// System.out.println(pdfAnnotations.size());
assertTrue(pdfAnnotations.size() > 0);
assertThat(pdfAnnotations, hasSize(520));
}
}
| 1,574 | 28.716981 | 91 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/sax/PDFALTOOutlineSaxHandlerTest.java
|
package org.grobid.core.sax;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.document.DocumentNode;
import org.junit.Before;
import org.junit.Test;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.InputStream;
import static org.easymock.EasyMock.createMock;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public class PDFALTOOutlineSaxHandlerTest {
SAXParserFactory spf = SAXParserFactory.newInstance();
PDFALTOOutlineSaxHandler target;
DocumentSource mockDocumentSource;
Document document;
@Before
public void setUp() throws Exception {
mockDocumentSource = createMock(DocumentSource.class);
document = Document.createFromText("");
target = new PDFALTOOutlineSaxHandler(document);
}
@Test
public void testParsing_pdf2XMLOutline_ShouldWork() throws Exception {
InputStream is = this.getClass().getResourceAsStream("pdfalto.xml_outline.xml");
SAXParser p = spf.newSAXParser();
p.parse(is, target);
DocumentNode root = target.getRootNode();
assertTrue(root.getChildren().size() > 0);
assertThat(root.getChildren(), hasSize(9));
assertThat(root.getChildren().get(0).getLabel(), is("Abstract"));
assertThat(root.getChildren().get(0).getChildren(), is(nullValue()));
assertThat(root.getChildren().get(0).getBoundingBox().getPage(), is(1));
//<LINK page="1" top="592.00" bottom="0.00" left="0.00" right="0.00"/>
// assertThat(root.getChildren().get(0).getBoundingBox().getY(), is(0.0));
// assertThat(root.getChildren().get(0).getBoundingBox().getHeight(), is(-1.0));
// assertThat(root.getChildren().get(0).getBoundingBox().getX(), is(0.0));
// assertThat(root.getChildren().get(0).getBoundingBox().getWidth(), is(0.0));
}
@Test
public void testParsing_pdf2XMLOutline_errorcase_ShouldWork() throws Exception {
InputStream is = this.getClass().getResourceAsStream("test_outline.xml");
SAXParser p = spf.newSAXParser();
p.parse(is, target);
DocumentNode root = target.getRootNode();
assertThat(root.getChildren(), hasSize(5));
assertThat(root.getChildren().get(0).getLabel(), is("A Identification"));
assertThat(root.getChildren().get(0).getChildren(), is(nullValue()));
//<LINK page="2" top="71.0000" bottom="0.0000" left="68.0000" right="0.0000"/>
assertThat(root.getChildren().get(0).getBoundingBox().getPage(), is(2));
// assertThat(root.getChildren().get(0).getBoundingBox().getY(), is(71.000));
// assertThat(root.getChildren().get(0).getBoundingBox().getHeight(), is(0.0));
// assertThat(root.getChildren().get(0).getBoundingBox().getX(), is(68.000));
// assertThat(root.getChildren().get(0).getBoundingBox().getWidth(), is(0.0));
assertThat(root.getChildren().get(1).getLabel(), is("B Résumé consolidé public."));
assertThat(root.getChildren().get(1).getChildren(), hasSize(1));
//<LINK page="2" top="377.000" bottom="0.0000" left="68.0000" right="0.0000"/>
assertThat(root.getChildren().get(1).getBoundingBox().getPage(), is(2));
// assertThat(root.getChildren().get(1).getBoundingBox().getY(), is(377.000));
// assertThat(root.getChildren().get(1).getBoundingBox().getHeight(), is(0.0));
// assertThat(root.getChildren().get(1).getBoundingBox().getX(), is(68.000));
// assertThat(root.getChildren().get(1).getBoundingBox().getWidth(), is(0.0));
assertThat(root.getChildren().get(1).getChildren(), hasSize(1));
assertThat(root.getChildren().get(1).getChildren().get(0).getLabel(), is("B.1 Résumé consolidé public en français"));
//<LINK page="2" top="412.000" bottom="0.0000" left="68.0000" right="0.0000"/>
assertThat(root.getChildren().get(1).getChildren().get(0).getBoundingBox().getPage(), is(2));
// assertThat(root.getChildren().get(1).getChildren().get(0).getBoundingBox().getY(), is(412.000));
// assertThat(root.getChildren().get(1).getChildren().get(0).getBoundingBox().getHeight(), is(0.0));
// assertThat(root.getChildren().get(1).getChildren().get(0).getBoundingBox().getX(), is(68.000));
// assertThat(root.getChildren().get(1).getChildren().get(0).getBoundingBox().getWidth(), is(0.0));
assertThat(root.getChildren().get(2).getLabel(), is("C Mémoire scientifique en français"));
assertThat(root.getChildren().get(2).getChildren(), hasSize(6));
assertThat(root.getChildren().get(2).getChildren().get(2).getLabel(), is("C.3 Approche scientifique et technique"));
assertThat(root.getChildren().get(3).getLabel(), is("D Liste des livrables"));
assertThat(root.getChildren().get(3).getChildren(), is(nullValue()));
assertThat(root.getChildren().get(4).getLabel(), is("E Impact du projet"));
assertThat(root.getChildren().get(4).getChildren(), hasSize(4));
assertThat(root.getChildren().get(4).getChildren().get(1).getLabel(), is("E.2 Liste des publications et communications"));
assertThat(root.getChildren().get(4).getChildren().get(2).getLabel(), is("E.3 Liste des autres valorisations scientifiques"));
//<LINK page="1" top="170.000" bottom="0.0000" left="68.0000" right="0.0000"/>
assertThat(root.getChildren().get(4).getChildren().get(2).getBoundingBox().getPage(), is(1));
// assertThat(root.getChildren().get(4).getChildren().get(2).getBoundingBox().getY(), is(170.000));
// assertThat(root.getChildren().get(4).getChildren().get(2).getBoundingBox().getHeight(), is(0.0));
// assertThat(root.getChildren().get(4).getChildren().get(2).getBoundingBox().getX(), is(68.000));
// assertThat(root.getChildren().get(4).getChildren().get(2).getBoundingBox().getWidth(), is(0.0));
}
}
| 6,146 | 54.378378 | 134 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/sax/CrossrefUnixrefSaxParserTest.java
|
package org.grobid.core.sax;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.main.LibraryLoader;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.InputStream;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class CrossrefUnixrefSaxParserTest {
SAXParserFactory spf = SAXParserFactory.newInstance();
CrossrefUnixrefSaxParser target;
BiblioItem item;
@BeforeClass
public static void init() throws Exception {
LibraryLoader.load();
}
@Before
public void setUp() throws Exception {
item = new BiblioItem();
target = new CrossrefUnixrefSaxParser(item);
}
@Test
public void testParseCrossrefDoi() throws Exception {
InputStream inputStream = this.getClass().getResourceAsStream("crossref_response.doi.xml");
SAXParser p = spf.newSAXParser();
p.parse(inputStream, target);
assertThat(item.getDOI(), is("10.1007/s00005-009-0056-3"));
}
@Test
public void testParseCrossrefDoi_References() throws Exception {
InputStream inputStream = this.getClass().getResourceAsStream("crossref_response.doi.2.xml");
SAXParser p = spf.newSAXParser();
p.parse(inputStream, target);
assertThat(item.getDOI(), is("10.1111/j.1467-8659.2007.01100.x"));
}
}
| 1,474 | 26.314815 | 101 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/sax/PDFALTOSaxHandlerTest.java
|
package org.grobid.core.sax;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.layout.GraphicObject;
import org.grobid.core.layout.LayoutToken;
import org.junit.Before;
import org.junit.Test;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import static org.easymock.EasyMock.createMock;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public class PDFALTOSaxHandlerTest {
SAXParserFactory spf = SAXParserFactory.newInstance();
PDFALTOSaxHandler target;
DocumentSource mockDocumentSource;
Document document;
private List<GraphicObject> images;
@Before
public void setUp() throws Exception {
mockDocumentSource = createMock(DocumentSource.class);
document = Document.createFromText("");
images = new ArrayList<>();
target = new PDFALTOSaxHandler(document, images);
}
@Test
public void testParsing_pdf2XMLwithNoIMages_ShouldWork() throws Exception {
InputStream is = this.getClass().getResourceAsStream("pdfalto_noImages.xml");
SAXParser p = spf.newSAXParser();
p.parse(is, target);
List<LayoutToken> tokenList = target.getTokenization();
assertTrue(tokenList.size() > 0);
assertTrue(document.getImages().size() == 0);
assertTrue(images.size() == 0);
assertTrue(document.getPages().size() == 4);
assertTrue(document.getBlocks().size() == 26);
}
@Test
public void testParsing_pdf2XMLwithIMages_ShouldWork() throws Exception {
InputStream inputStream = this.getClass().getResourceAsStream("pdfalto_Images.xml");
SAXParser p = spf.newSAXParser();
p.parse(inputStream, target);
List<LayoutToken> tokenList = target.getTokenization();
assertTrue(tokenList.size() > 0);
assertThat(images.size(), is(16));
assertThat(document.getImages().size(), is(16));
assertTrue(document.getPages().size() == 4);
assertThat(document.getBlocks().size(), is(26));
}
@Test
public void testParsing_shouldWork() throws Exception {
InputStream inputStream = this.getClass().getResourceAsStream("JPS081033701-CC.xml");
SAXParser p = spf.newSAXParser();
p.parse(inputStream, target);
List<LayoutToken> tokenList = target.getTokenization();
assertThat(tokenList.stream().filter(t -> t.getText().equals("newly")).count(), is(1L));
assertThat(tokenList.get(0).getText(), is("Microscopic"));
assertThat(tokenList.get(0).getBold(), is(true));
assertThat(tokenList.get(25).getText(), is("BaFe"));
assertThat(tokenList.get(25).isSubscript(), is(false));
assertThat(tokenList.get(27).getText(), is("2"));
assertThat(tokenList.get(27).isSubscript(), is(true));
}
@Test
public void testParsing_BoldItalic_shouldWork() throws Exception {
InputStream inputStream = this.getClass().getResourceAsStream("s3xKQzHmBR.xml");
SAXParser p = spf.newSAXParser();
p.parse(inputStream, target);
List<LayoutToken> tokenList = target.getTokenization();
assertThat(tokenList.stream().filter(LayoutToken::isSuperscript).count(), is(4L));
assertThat(tokenList.stream().filter(LayoutToken::isSubscript).count(), is(3L));
assertThat(tokenList, hasSize(greaterThan(0)));
assertThat(tokenList.get(0).getText(), is("We"));
assertThat(tokenList.get(0).isSubscript(), is(false));
assertThat(tokenList.get(0).isSuperscript(), is(false));
assertThat(tokenList.get(0).getBold(), is(false));
assertThat(tokenList.get(0).getItalic(), is(false));
assertThat(tokenList.get(14).getText(), is("CO"));
assertThat(tokenList.get(14).isSubscript(), is(false));
assertThat(tokenList.get(14).isSuperscript(), is(false));
assertThat(tokenList.get(14).getBold(), is(false));
assertThat(tokenList.get(14).getItalic(), is(false));
assertThat(tokenList.get(16).getText(), is("2"));
assertThat(tokenList.get(16).isSubscript(), is(true));
assertThat(tokenList.get(16).isSuperscript(), is(false));
assertThat(tokenList.get(16).getBold(), is(false));
assertThat(tokenList.get(16).getItalic(), is(false));
assertThat(tokenList.get(35).getText(), is("Ur"));
assertThat(tokenList.get(35).isSubscript(), is(false));
assertThat(tokenList.get(35).isSuperscript(), is(false));
assertThat(tokenList.get(35).getBold(), is(true));
assertThat(tokenList.get(35).getItalic(), is(true));
assertThat(tokenList.get(37).getText(), is("123"));
assertThat(tokenList.get(37).isSubscript(), is(true));
assertThat(tokenList.get(37).isSuperscript(), is(false));
assertThat(tokenList.get(37).getBold(), is(true));
assertThat(tokenList.get(37).getItalic(), is(true));
assertThat(tokenList.get(39).getText(), is("6a"));
assertThat(tokenList.get(39).isSubscript(), is(false));
assertThat(tokenList.get(39).isSuperscript(), is(true));
assertThat(tokenList.get(39).getBold(), is(false));
assertThat(tokenList.get(39).getItalic(), is(true));
}
}
| 5,549 | 37.541667 | 96 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/tokenization/TaggingTokenSynchronizerTest.java
|
package org.grobid.core.tokenization;
import org.grobid.core.GrobidModels;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.Pair;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.Test;
import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.*;
/**
* Testing synchronization
*/
public class TaggingTokenSynchronizerTest {
public static final String P = "<paragraph>";
public static final String F = "<figure>";
@BeforeClass
public static void init() {
GrobidProperties.getInstance();
}
@Test
public void testBasic() {
TaggingTokenSynchronizer synchronizer = new TaggingTokenSynchronizer(GrobidModels.modelFor("fulltext"),
generateResult(p("This", P), p("Figure", F)), toks("This", " ", "Figure")
);
int cnt = 0;
boolean spacesPresent = false;
for (LabeledTokensContainer el : synchronizer) {
String text = LayoutTokensUtil.toText(el.getLayoutTokens());
assertFalse(text.startsWith(" "));
if (text.contains(" ")) {
spacesPresent = true;
}
cnt++;
}
assertThat(cnt, is(2));
assertThat(spacesPresent, is(true));
}
@Test(expected = IllegalStateException.class)
public void testFailure() {
TaggingTokenSynchronizer synchronizer = new TaggingTokenSynchronizer(GrobidModels.modelFor("fulltext"),
generateResult(p("This", P), p("Figure", F)), toks("This", " ", "Fig")
);
for (LabeledTokensContainer el : synchronizer) {
LayoutTokensUtil.toText(el.getLayoutTokens());
}
}
private static String generateResult(Pair<String, String>... tokens) {
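        // builds the raw tagger output consumed by the synchronizer: one
        // "token<TAB>label" line per pair, e.g. "This\t<paragraph>"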
StringBuilder res = new StringBuilder();
for (Pair<String, String> p : tokens) {
res.append(p.a).append("\t").append(p.b).append("\n");
}
return res.toString();
}
private static List<LayoutToken> toks(String... toks) {
List<LayoutToken> res = new ArrayList<>();
for (String t : toks) {
res.add(new LayoutToken(t));
}
return res;
}
private static Pair<String, String> p(String tok, String label) {
return new Pair<>(tok, label);
}
}
| 2,464 | 29.432099 | 111 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/tokenization/TaggingTokenClusterorTest.java
|
package org.grobid.core.tokenization;
import org.apache.commons.io.IOUtils;
import org.grobid.core.GrobidModels;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.engines.label.TaggingLabelImpl;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.lang.Language;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.Test;
import org.junit.BeforeClass;
import java.io.InputStream;
import java.util.List;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
import static org.junit.Assert.assertThat;
public class TaggingTokenClusterorTest {
@BeforeClass
public static void init() {
GrobidProperties.getInstance();
}
@Test
public void testExclusion_notPresent_shouldReturnTrue() throws Exception {
final TaggingTokenClusteror.LabelTypeExcludePredicate labelTypeExcludePredicate =
new TaggingTokenClusteror.LabelTypeExcludePredicate(TaggingLabels.EQUATION, TaggingLabels.HEADER_KEYWORD);
assertThat(labelTypeExcludePredicate.apply(new TaggingTokenCluster(TaggingLabels.FIGURE)),
is(true));
}
@Test
public void testExclusion_shouldReturnFalse() throws Exception {
final TaggingTokenClusteror.LabelTypeExcludePredicate labelTypeExcludePredicate =
new TaggingTokenClusteror.LabelTypeExcludePredicate(TaggingLabels.EQUATION, TaggingLabels.FIGURE);
assertThat(labelTypeExcludePredicate.apply(new TaggingTokenCluster(TaggingLabels.FIGURE)),
is(false));
}
@Test
public void testInclusion_notPresent_shouldReturnFalse() throws Exception {
final TaggingTokenClusteror.LabelTypePredicate labelTypePredicate =
new TaggingTokenClusteror.LabelTypePredicate(TaggingLabels.HEADER_KEYWORD);
assertThat(labelTypePredicate.apply(new TaggingTokenCluster(TaggingLabels.FIGURE)),
is(false));
}
@Test
public void testInclusion_present_shouldReturnTrue() throws Exception {
final TaggingTokenClusteror.LabelTypePredicate labelTypePredicate =
new TaggingTokenClusteror.LabelTypePredicate(TaggingLabels.FIGURE);
assertThat(labelTypePredicate.apply(new TaggingTokenCluster(TaggingLabels.FIGURE)),
is(true));
}
    /**
     * In the NER output the beginning labels start with B-,
     * while in GROBID the beginning labels start with I-.
     **/
@Test
public void testCluster_mixedBeginningLabels_shouldWork() throws Exception {
final InputStream is = this.getClass().getResourceAsStream("example.wapiti.output.2.txt");
List<LayoutToken> tokenisation = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken("Austria invaded and fought the Serbian army at the Battle of Cer and Battle of Kolubara beginning on 12 August.",
new Language(Language.EN));
final String s = IOUtils.toString(is, UTF_8);
TaggingTokenClusteror target = new TaggingTokenClusteror(GrobidModels.ENTITIES_NER, s, tokenisation);
List<TaggingTokenCluster> clusters = target.cluster();
assertThat(clusters, hasSize(10));
assertThat(clusters.get(0).getTaggingLabel().getLabel(), is("LOCATION"));
assertThat(LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(clusters.get(0).concatTokens())), is("Austria"));
assertThat(clusters.get(2).getTaggingLabel().getLabel(), is("ORGANISATION"));
assertThat(LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(clusters.get(2).concatTokens())), is("Serbian army"));
assertThat(clusters.get(4).getTaggingLabel().getLabel(), is("EVENT"));
assertThat(LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(clusters.get(4).concatTokens())), is("Battle of Cer"));
assertThat(clusters.get(6).getTaggingLabel().getLabel(), is("EVENT"));
assertThat(LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(clusters.get(6).concatTokens())), is("Battle of Kolubara"));
assertThat(clusters.get(8).getTaggingLabel().getLabel(), is("PERIOD"));
assertThat(LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(clusters.get(8).concatTokens())), is("12 August"));
}
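    // A hypothetical fragment (not taken from example.wapiti.output.2.txt)
    // illustrating the mixed conventions exercised above: NER output may tag
    // the first token of an entity as "B-LOCATION", while GROBID models tag
    // it as "I-LOCATION"; the clusteror is expected to open a new cluster on
    // either prefix, so "Austria" ends up in a single LOCATION cluster
    // regardless of the prefix style.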
@Test
public void testCluster_longFile() throws Exception {
final InputStream is = this.getClass().getResourceAsStream("example.wapiti.output.1.txt");
List<LayoutToken> tokenisation = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken("Austria invaded and fought the Serbian army at the Battle of Cer and Battle of Kolubara beginning on 12 August. \n" +
"\n" +
"The army, led by general Paul von Hindenburg defeated Russia in a series of battles collectively known as the First Battle of Tannenberg (17 August - 2 September). But the failed Russian invasion, causing the fresh German troops to move to the east, allowed the tactical Allied victory at the First Battle of the Marne. \n" +
"\n" +
"Unfortunately for the Allies, the pro-German King Constantine I dismissed the pro-Allied government of E. Venizelos before the Allied expeditionary force could arrive. Beginning in 1915, the Italians under Cadorna mounted eleven offensives on the Isonzo front along the Isonzo River, northeast of Trieste.\n" +
"\n" +
" At the Siege of Maubeuge about 40000 French soldiers surrendered, at the battle of Galicia Russians took about 100-120000 Austrian captives, at the Brusilov Offensive about 325 000 to 417 000 Germans and Austrians surrendered to Russians, at the Battle of Tannenberg 92,000 Russians surrendered.\n" +
"\n" +
" After marching through Belgium, Luxembourg and the Ardennes, the German Army advanced, in the latter half of August, into northern France where they met both the French army, under Joseph Joffre, and the initial six divisions of the British Expeditionary Force, under Sir John French. A series of engagements known as the Battle of the Frontiers ensued. Key battles included the Battle of Charleroi and the Battle of Mons. In the former battle the French 5th Army was almost destroyed by the German 2nd and 3rd Armies and the latter delayed the German advance by a day. A general Allied retreat followed, resulting in more clashes such as the Battle of Le Cateau, the Siege of Maubeuge and the Battle of St. Quentin (Guise). \n" +
"\n" +
"The German army came within 70 km (43 mi) of Paris, but at the First Battle of the Marne (6-12 September), French and British troops were able to force a German retreat by exploiting a gap which appeared between the 1st and 2nd Armies, ending the German advance into France. The German army retreated north of the Aisne River and dug in there, establishing the beginnings of a static western front that was to last for the next three years. Following this German setback, the opposing forces tried to outflank each other in the Race for the Sea, and quickly extended their trench systems from the North Sea to the Swiss frontier. The resulting German-occupied territory held 64% of France's pig-iron production, 24% of its steel manufacturing, dealing a serious, but not crippling setback to French industry.\n" +
" ", new Language(Language.EN));
final String s = IOUtils.toString(is, UTF_8);
TaggingTokenClusteror target = new TaggingTokenClusteror(GrobidModels.ENTITIES_NER, s, tokenisation);
List<TaggingTokenCluster> clusters = target.cluster();
assertThat(clusters, hasSize(164));
// for (TaggingTokenCluster cluster : clusters) {
// String clusterContent = LayoutTokensUtil.normalizeText(LayoutTokensUtil.toText(cluster.concatTokens()));
// System.out.println(clusterContent + " --> " + cluster.getTaggingLabel().getLabel());
// }
}
}
| 8,042 | 59.022388 | 826 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/Runner.java
|
package org.grobid.core.test;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.lexicon.LexiconIntegrationTest;
import org.junit.runner.JUnitCore;
import org.junit.runner.Result;
import org.junit.runner.notification.Failure;
public class Runner {
public static void main(String[] args) {
GrobidFactory.getInstance();
int totalTestRuns = 0;
int totalFailures = 0;
// test date parser
Result result = JUnitCore.runClasses(TestDate.class);
totalTestRuns++;
System.out.print("test Date: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
// test name parser for headers
result = JUnitCore.runClasses(TestNameParser.class);
totalTestRuns++;
System.out.print("test NameParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
// test lexicon fast matcher
result = JUnitCore.runClasses(LexiconIntegrationTest.class);
totalTestRuns++;
System.out.print("test Lexicon Fast Matcher: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
// test affiliation parser
result = JUnitCore.runClasses(TestAffiliationAddressParser.class);
totalTestRuns++;
System.out.print("test AffiliationAddressParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
// test header parser
result = JUnitCore.runClasses(TestHeaderParser.class);
totalTestRuns++;
System.out.print("test HeaderParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
result = JUnitCore.runClasses(TestCitationParser.class);
totalTestRuns++;
System.out.print("test CitationParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
result = JUnitCore.runClasses(TestReferencesParser.class);
totalTestRuns++;
System.out.print("test ReferencesParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
result = JUnitCore.runClasses(TestFullTextParser.class);
totalTestRuns++;
System.out.print("test FullTextParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
result = JUnitCore.runClasses(TestCitationPatentParser.class);
totalTestRuns++;
System.out.print("test CitationPatentParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
/*result = JUnitCore.runClasses(TestChemicalNameParser.class);
totalTestRuns++;
System.out.print("test ChemicalNameParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}
*/
/*result = JUnitCore.runClasses(TestEbookParser.class);
totalTestRuns++;
System.out.print("test EbookParser: ");
if (result.getFailures().size() == 0) {
System.out.println("OK");
}
for (Failure failure : result.getFailures()) {
System.out.println(failure.toString());
totalFailures++;
}*/
System.out.println("Test run: " + totalTestRuns + ", Failures: " + totalFailures);
}
}
| 4,230 | 23.177143 | 84 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/EngineTest.java
|
package org.grobid.core.test;
import org.grobid.core.engines.Engine;
import org.grobid.core.factory.GrobidFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public abstract class EngineTest {
protected static Engine engine;
@BeforeClass
public static void setUpClass() throws Exception {
engine = GrobidFactory.getInstance().getEngine();
}
@AfterClass
public static void closeResources() throws Exception {
engine.close();
}
}
| 492 | 22.47619 | 58 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestHeaderParser.java
|
package org.grobid.core.test;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.AfterClass;
import org.junit.Test;
import java.io.File;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
public class TestHeaderParser extends EngineTest {
private String testPath = null;
private String newTrainingPath = null;
public static final String TEST_RESOURCES_PATH = "./src/test/resources/test";
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
private void getTestResourcePath() {
testPath = TEST_RESOURCES_PATH;
GrobidProperties.getInstance();
newTrainingPath = GrobidProperties.getTempPath().getAbsolutePath();
}
@Test
public void testHeaderHeader() throws Exception {
getTestResourcePath();
String pdfPath = testPath + File.separator + "Wang-paperAVE2008.pdf";
File pdfFile = new File(pdfPath);
BiblioItem resHeader = new BiblioItem();
String tei = engine.processHeader(pdfFile.getAbsolutePath(), 0, resHeader);
assertNotNull(resHeader);
assertThat(resHeader.getTitle(), is("Information Synthesis for Answer Validation"));
assertThat(resHeader.getKeyword(),
is("Answer Validation, Recognizing Textual Entailment, Information Synthesis"));
assertNotNull(resHeader.getFullAuthors());
pdfPath = testPath + File.separator + "ZFN-A-054-0304-0272.pdf";
resHeader = new BiblioItem();
tei = engine.processHeader(pdfPath, 0, resHeader);
assertNotNull(resHeader);
//System.out.println(tei);
pdfPath = testPath + File.separator + "ZNC-1988-43c-0034.pdf";
resHeader = new BiblioItem();
tei = engine.processHeader(pdfPath, 0, resHeader);
//System.out.println(tei);
//assertNotNull(resHeader);
pdfPath = testPath + File.separator + "ZNC-1988-43c-0065.pdf";
resHeader = new BiblioItem();
tei = engine.processHeader(pdfPath, 0, resHeader);
assertNotNull(resHeader);
//System.out.println(tei);
}
/*@Test
public void testSegmentationHeader() throws Exception {
getTestResourcePath();
File pdfPath = new File(testPath + File.separator + "Wang-paperAVE2008.pdf");
BiblioItem resHeader = new BiblioItem();
String tei = engine.segmentAndProcessHeader(pdfPath, 0, resHeader);
assertNotNull(resHeader);
assertThat(resHeader.getTitle(), is("Information Synthesis for Answer Validation"));
assertThat(resHeader.getKeyword(),
is("Answer Validation, Recognizing Textual Entailment, Information Synthesis"));
assertNotNull(resHeader.getFullAuthors());
}*/
}
| 2,927 | 32.655172 | 96 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestFullTextParser.java
|
package org.grobid.core.test;
import org.apache.commons.io.FileUtils;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentPiece;
import org.grobid.core.document.DocumentPointer;
import org.grobid.core.document.xml.XmlBuilderUtils;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.label.SegmentationLabels;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.layout.Block;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.*;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.List;
import java.util.SortedSet;
import nu.xom.Element;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class TestFullTextParser extends EngineTest {
@BeforeClass
public static void init() {
GrobidProperties.getInstance();
}
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
@Test
public void testFullTextParser_1() throws Exception {
File inputTmpFile = getInputDocument("/test/Wang-paperAVE2008.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
assertTei(tei);
}
private File getInputDocument(String inputPath) throws IOException {
InputStream is = this.getClass().getResourceAsStream(inputPath);
File inputTmpFile = File.createTempFile("tmpFileTest", "testFullTextParser");
inputTmpFile.deleteOnExit();
FileUtils.copyToFile(is, inputTmpFile);
return inputTmpFile;
}
@Test
public void testFullTextParser_2() throws Exception {
File inputTmpFile = getInputDocument("/test/two_pages.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
assertTei(tei);
}
@Test
public void testFullTextParser_3() throws Exception {
File inputTmpFile = getInputDocument("/test/MullenJSSv18i03.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
assertTei(tei);
}
@Test
public void testFullTextParser_4() throws Exception {
File inputTmpFile = getInputDocument("/test/1001._0908.0054.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
assertTei(tei);
}
@Test
public void testFullTextParser_5() throws Exception {
File inputTmpFile = getInputDocument("/test/submission_161.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
assertTei(tei);
}
@Test
public void testFullTextParser_6() throws Exception {
File inputTmpFile = getInputDocument("/test/submission_363.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
assertTei(tei);
}
private void assertTei(Document doc) {
assertDocAndBlockTokenizationSync(doc);
assertNotNull(doc.getTei());
//check that XML is valid
XmlBuilderUtils.fromString(doc.getTei());
}
private void assertDocAndBlockTokenizationSync(Document doc) {
List<Block> blocks = doc.getBlocks();
for (Block block : blocks) {
if (block.getNbTokens() == 0)
continue;
int start = block.getStartToken();
int end = block.getEndToken();
if (start == -1) {
continue;
}
for (int i = start; i < end; i++) {
assertEquals(doc.getTokenizations().get(i), block.getTokens().get(i - start));
}
// assertTrue(endPtr.getTokenBlockPos() < endBlock.getTokens().size());
}
for (TaggingLabel l : Arrays.asList(SegmentationLabels.BODY, SegmentationLabels.REFERENCES, SegmentationLabels.HEADER, SegmentationLabels.ACKNOWLEDGEMENT, SegmentationLabels.ANNEX,
SegmentationLabels.FOOTNOTE, SegmentationLabels.HEADNOTE, SegmentationLabels.TOC)) {
SortedSet<DocumentPiece> parts = doc.getDocumentPart(l);
if (parts == null) {
continue;
}
for (DocumentPiece p : parts) {
DocumentPointer startPtr = p.getLeft();
DocumentPointer endPtr = p.getRight();
Block endBlock = doc.getBlocks().get(endPtr.getBlockPtr());
assertTrue(endPtr.getTokenBlockPos() < endBlock.getTokens().size());
}
}
}
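    // In short, the checks above assert two invariants: every block token must
    // be the very same object as the document-level tokenization entry at the
    // corresponding global index, and every document part must end at a token
    // position that actually exists inside its end block.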
}
| 4,817 | 32.929577 | 188 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestDate.java
|
package org.grobid.core.test;
import org.grobid.core.data.Date;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
public class TestDate extends EngineTest {
@BeforeClass
public static void setUp() {
GrobidProperties.getInstance();
}
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
@Test
public void testDateCompare_completeVSYearMonth_moreSpecifiedWins() {
Date date1 = new Date();
date1.setYear(2000);
date1.setMonth(10);
date1.setDay(2);
Date date2 = new Date();
date2.setYear(2000);
date2.setMonth(10);
assertThat(date1.compareTo(date2), is(-1));
}
@Test
public void testDateCompare_YearMonthVsYear_moreSpecifiedWins() {
Date date1 = new Date();
date1.setYear(2007);
Date date2 = new Date();
date2.setYear(2007);
date2.setMonth(9);
assertThat(date1.compareTo(date2), is(1));
}
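    // As exercised above: Date.compareTo orders the more fully specified date
    // first when both cover the same period, so year-month-day compares as -1
    // against year-month, and a bare year compares as 1 against year-month.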
@Test
public void testDateParser_cleanInput() throws Exception {
String dateSequence1 = "10 January 2001";
List<Date> res = engine.processDate(dateSequence1);
assertNotNull(res);
assertThat(res, hasSize(1));
Date date = res.get(0);
assertThat(date.toTEI(), is("<date when=\"2001-1-10\" />"));
assertThat(date.getDayString(), is("10"));
assertThat(date.getMonthString(), is("January"));
assertThat(date.getYearString(), is("2001"));
assertThat(date.getDay(), is(10));
assertThat(date.getMonth(), is(1));
assertThat(date.getYear(), is(2001));
}
@Test
public void testDateParser_inputWithSpaces() throws Exception {
String dateSequence2 = "19 November 1 999";
List<Date> res = engine.processDate(dateSequence2);
assertNotNull(res);
assertThat(res, hasSize(1));
Date date = res.get(0);
assertThat(date.toTEI(), is("<date when=\"1999-11-19\" />"));
assertThat(date.getDayString(), is("19"));
assertThat(date.getMonthString(), is("November"));
assertThat(date.getYearString(), is("1999"));
assertThat(date.getDay(), is(19));
assertThat(date.getMonth(), is(11));
assertThat(date.getYear(), is(1999));
}
@Test
public void testDateParser_inputWithSpecialFormat() throws Exception {
String dateSequence3 = "15-08-2007";
List<Date> res = engine.processDate(dateSequence3);
assertNotNull(res);
assertThat(res, hasSize(1));
Date date = res.get(0);
assertThat(date.toTEI(), is("<date when=\"2007-8-15\" />"));
assertThat(date.getDayString(), is("15"));
assertThat(date.getMonthString(), is("08"));
assertThat(date.getYearString(), is("2007"));
assertThat(date.getDay(), is(15));
assertThat(date.getMonth(), is(8));
assertThat(date.getYear(), is(2007));
}
@Test
public void testDateParser_DifferentOrdering() throws Exception {
String dateSequence4 = "November 14 1999";
List<Date> res = engine.processDate(dateSequence4);
assertNotNull(res);
assertThat(res, hasSize(1));
Date date = res.get(0);
assertThat(date.toTEI(), is("<date when=\"1999-11-14\" />"));
assertThat(date.getDayString(), is("14"));
assertThat(date.getMonthString(), is("November"));
assertThat(date.getYearString(), is("1999"));
assertThat(date.getDay(), is(14));
assertThat(date.getMonth(), is(11));
assertThat(date.getYear(), is(1999));
}
}
| 4,031 | 29.315789 | 74 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestCitationParser.java
|
package org.grobid.core.test;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.main.LibraryLoader;
import org.junit.*;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public class TestCitationParser extends EngineTest {
@BeforeClass
public static void beforeClass() throws Exception {
LibraryLoader.load();
}
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
@Test
@Ignore("Check this test")
public void processingReferenceSection() throws Exception {
String text = "(1) Ahrens, M. Home fires that began with upholstered furniture; National Fire Protection Association: Quincy, Massachusetts, 2011.\n" +
"(2) Evarts, B. Home fires that began with matresses and bedding; National Fire Protection Association: Quincy, Massachusetts, 2011.\n" +
"(3) Kramer, R. H.; Zammarano, M.; Linteris, G. T.; Gedde, U. W.; Gilman, J. W. Polym. Degrad. Stab. 2010, 95, 1115−1122.\n" +
"(4) Underwriter Laboratories, UL test: Legacy home contents and new content fires, YouTube online video clip, 2011.\n" +
"(5) Gallagher, S.; Campbell, J. In Siloxane-Phosphonate Finishes on Cellulose: Thermal Characterization and Flammability Data; Proceed-ings of the Beltwide Cotton Conference, San Antonio, TX, 2004; pp 2443-2847.\n" +
"(6) Watanabe, I.; Sakai, S. Environ. Int. 2003, 29, 665−682. (7) Babrauskas, V.; Blum,\n";
List<BibDataSet> res = engine.getParsers().getCitationParser().processingReferenceSection(text, engine.getParsers().getReferenceSegmenterParser());
assertNotNull(res);
assertTrue(res.size() > 2);
}
@Test
public void testCitationParser1_withoutConsolidation() throws Exception {
String citation1 = "A. Cau, R. Kuiper, and W.-P. de Roever. Formalising Dijkstra's development " +
"strategy within Stark's formalism. In C. B. Jones, R. C. Shaw, and " +
"T. Denvir, editors, Proc. 5th. BCS-FACS Refinement Workshop, London, UK, 1992.";
BiblioItem resCitation = engine.processRawReference(citation1, 0);
assertNotNull(resCitation);
assertThat(resCitation.getTitle(),
is("Formalising Dijkstra's development strategy within Stark's formalism"));
assertNotNull(resCitation.getFullAuthors());
}
@Test
public void testCitationParser2_withoutConsolidation() throws Exception {
String citation2 = "Sanda M. Harabagiu, Steven J. Maiorano and Marius A. Pasca. Open-Domain Textual " +
"Question Answering Techniques. Natural Language Engineering, 9 (3):1-38, 2003.";
BiblioItem resCitation = engine.processRawReference(citation2, 0);
assertNotNull(resCitation);
assertThat(resCitation.getTitle(),
is("Open-Domain Textual Question Answering Techniques"));
assertNotNull(resCitation.getFullAuthors());
}
//@Test
public void testCitationParser3_withConsolidation() throws Exception {
String citation3 = "Graff, Expert. Opin. Ther. Targets (2002) 6(1): 103-113";
BiblioItem resCitation = engine.processRawReference(citation3, 1);
assertNotNull(resCitation);
assertNotNull(resCitation.getNormalizedPublicationDate());
assertThat(resCitation.getNormalizedPublicationDate().getYear(),
is(2002));
}
//@Test
public void testCitationParser4_withConsolidation() throws Exception {
String citation4 = "Zholudev Vyacheslav, Kohlhase Michael, Rabe Florian. A [insert XML Format] " +
"Database for [insert cool application] (extended version); Technical Report , Jacobs " +
"University Bremen 2010.";
BiblioItem resCitation = engine.processRawReference(citation4, 1);
assertNotNull(resCitation);
assertNotNull(resCitation.getNormalizedPublicationDate());
assertThat(resCitation.getNormalizedPublicationDate().getYear(),
is(2010));
assertNotNull(resCitation.getFullAuthors());
}
@Test
public void testCitationParser5_withoutConsolidation() throws Exception {
String citation5 = "Altschul SF, Madden TL, Schäffer AA, Zhang J, Zhang Z, Miller W, Lipman DJ: Gapped BLAST and PSI-BLAST: a new generation of protein database search programs. Nucleic Acid Res 1997 25:3389-3402";
BiblioItem resCitation = engine.processRawReference(citation5, 0);
assertNotNull(resCitation);
assertNotNull(resCitation.getNormalizedPublicationDate());
assertThat(resCitation.getNormalizedPublicationDate().getYear(),
is(1997));
assertNotNull(resCitation.getFullAuthors());
}
}
| 4,953 | 46.180952 | 229 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestReferencesParser.java
|
package org.grobid.core.test;
import org.grobid.core.data.BibDataSet;
import org.junit.Test;
import java.io.File;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
public class TestReferencesParser extends EngineTest {
public static final String TEST_RESOURCES_PATH = "./src/test/resources/test";
//@Test
public void testReferences() throws Exception {
String testPath = TEST_RESOURCES_PATH;
String pdfPath = testPath + File.separator + "Wang-paperAVE2008.pdf";
List<BibDataSet> resRefs = engine.processReferences(new File(pdfPath), 1);
assertNotNull(resRefs);
assertThat(resRefs.size(), is(12));
}
}
| 771 | 26.571429 | 82 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestEbookParser.java
|
package org.grobid.core.test;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.Ignore;
@Ignore
public class TestEbookParser extends EngineTest {
private String newTrainingPath = null;
private void getTestResourcePath() {
newTrainingPath = GrobidProperties.getInstance().getTempPath().getAbsolutePath();
}
//@Test
public void testEbookParser() throws Exception {
/*engine = new BookStructureParser();
getTestResourcePath();
String pdfPath = testPath + "/littleessaysoflo00elliuoft.pdf";
engine.createTrainingFullTextEbook(pdfPath, newTrainingPath, newTrainingPath, 0);*/
}
}
| 621 | 23.88 | 85 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestNameParser.java
|
package org.grobid.core.test;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import java.util.List;
import org.grobid.core.data.Person;
import org.grobid.core.factory.GrobidFactory;
import org.junit.AfterClass;
import org.junit.Test;
public class TestNameParser extends EngineTest{
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
@Test
public void testNameParserHeader() throws Exception {
String authorSequence1 = "José-María Carazo, Alberto Pascual-Montano";
List<Person> res = engine.processAuthorsHeader(authorSequence1);
assertNotNull(res);
assertEquals(2, res.size());
if (res.size() > 0) {
assertThat(res.get(0).getFirstName(), is("José-María"));
assertThat(res.get(0).getLastName(), is("Carazo"));
}
if (res.size() > 1) {
assertThat(res.get(1).getFirstName(), is("Alberto"));
assertThat(res.get(1).getLastName(), is("Pascual-Montano"));
}
String authorSequence2 =
"Farzaneh Sarafraz*, James M. Eales*, Reza Mohammadi, Jonathan Dickerson, David Robertson, Goran Nenadic*";
res = engine.processAuthorsHeader(authorSequence2);
assertNotNull(res);
assertEquals(6, res.size());
if (res.size() > 0) {
assertThat(res.get(0).getFirstName(), is("Farzaneh"));
assertThat(res.get(0).getLastName(), is("Sarafraz"));
}
if (res.size() > 1) {
assertThat(res.get(1).getFirstName(), is("James"));
assertThat(res.get(1).getMiddleName(), is("M"));
assertThat(res.get(1).getLastName(), is("Eales"));
}
if (res.size() > 2) {
assertThat(res.get(2).getFirstName(), is("Reza"));
assertThat(res.get(2).getLastName(), is("Mohammadi"));
}
String authorSequence3 = "KARL-HEINZ HÖCKER";
res = engine.processAuthorsHeader(authorSequence3);
assertNotNull(res);
if (res != null) {
//assertEquals(1, res.size());
if (res.size() > 0) {
//assertThat(res.get(0).getFirstName(), is("SF"));
assertThat(res.get(0).getLastName(), is("Höcker"));
assertThat(res.get(0).getFirstName(), is("Karl-Heinz"));
}
}
}
@Test
public void testNameParserCitation() throws Exception {
String authorSequence1 = "Tsuruoka Y. et al.";
List<Person> res = engine.processAuthorsCitation(authorSequence1);
assertNotNull(res);
assertEquals(1, res.size());
if (res.size() > 0) {
assertThat(res.get(0).getFirstName(), is("Y"));
assertThat(res.get(0).getLastName(), is("Tsuruoka"));
}
String authorSequence2 = "Altschul SF, Madden TL, Schäffer AA, Zhang J, Zhang Z, Miller W, Lipman DJ";
res = engine.processAuthorsCitation(authorSequence2);
assertNotNull(res);
if (res != null) {
//assertEquals(1, res.size());
if (res.size() > 0) {
//assertThat(res.get(0).getFirstName(), is("SF"));
assertThat(res.get(0).getLastName(), is("Altschul"));
}
}
}
}
| 2,940 | 30.623656 | 111 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestAffiliationAddressParser.java
|
package org.grobid.core.test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.util.List;
import org.grobid.core.data.Affiliation;
import org.grobid.core.engines.Engine;
import org.grobid.core.factory.GrobidFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
public class TestAffiliationAddressParser extends EngineTest{
@Before
public void init(){
engine = new Engine(false);
}
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
@Test
public void testParser() throws Exception {
String affiliationSequence1 = "Atomic Physics Division, Department of Atomic Physics and Luminescence, " +
"Faculty of Applied Physics and Mathematics, Gdansk University of " +
"Technology, Narutowicza 11/12, 80-233 Gdansk, Poland";
List<Affiliation> res = engine.processAffiliation(affiliationSequence1);
assertEquals(1, res.size());
if (res.size() > 0) {
assertNotNull(res.get(0).getInstitutions());
assertEquals(1, res.get(0).getInstitutions().size());
assertEquals(res.get(0).getInstitutions().get(0), "Gdansk University of Technology");
assertEquals(res.get(0).getCountry(), "Poland");
assertEquals(res.get(0).getAddrLine(), "Narutowicza 11/12");
}
}
@Test
public void testParser2() throws Exception {
String affiliationSequence2 = "Faculty of Health, School of Biomedical Sciences, " +
"University of Newcastle, New South Wales, Australia.";
List<Affiliation> res = engine.processAffiliation(affiliationSequence2);
if (res.size() > 0) {
assertNotNull(res.get(0).getInstitutions());
}
}
}
| 1,720 | 30.290909 | 109 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestCitationPatentParser.java
|
package org.grobid.core.test;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertEquals;
import org.apache.commons.io.FileUtils;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.PatentItem;
import org.grobid.core.exceptions.GrobidException;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.AfterClass;
import org.junit.Ignore;
import org.junit.Test;
//@Ignore
public class TestCitationPatentParser extends EngineTest {
private String newTrainingPath = null;
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
public File getResourceDir(String resourceDir) {
File file = new File(resourceDir);
if (!file.exists()) {
if (!file.mkdirs()) {
throw new GrobidException("Cannot start test, because test resource folder is not correctly set.");
}
}
return(file);
}
private void getTestResourcePath() {
newTrainingPath = GrobidProperties.getInstance().getTempPath().getAbsolutePath();
}
//@Test
public void testCitationPatentParser() throws Exception {
File textFile = new File(this.getResourceDir("./src/test/resources/").getAbsoluteFile()+"/patents/sample1.txt");
if (!textFile.exists()) {
throw new GrobidException("Cannot start test, because test resource folder is not correctly set.");
}
String text = FileUtils.readFileToString(textFile, "UTF-8");
List<BibDataSet> nplResults = new ArrayList<BibDataSet>();
List<PatentItem> patentResults = new ArrayList<PatentItem>();
boolean consolidateCitations = false;
/*engine.processAllCitationsInPatent(text, nplResults, patentResults, consolidateCitations);
assertThat(patentResults.size(), is(26));
assertThat(nplResults.size(), is(0));*/
textFile = new File(this.getResourceDir("./src/test/resources/").getAbsoluteFile()+"/patents/sample2.txt");
if (!textFile.exists()) {
throw new GrobidException("Cannot start test, because test resource folder is not correctly set.");
}
text = FileUtils.readFileToString(textFile, "UTF-8");
nplResults = new ArrayList<BibDataSet>();
patentResults = new ArrayList<PatentItem>();
/*engine.processAllCitationsInPatent(text, nplResults, patentResults, consolidateCitations);
assertThat(patentResults.size(), is(420));
assertThat(nplResults.size(), is(80));*/
File xmlFile = new File(this.getResourceDir("./src/test/resources/").getAbsoluteFile()
+ "/patents/EP1059354A2.xml");
if (!xmlFile.exists()) {
throw new GrobidException("Cannot start test, because test resource folder is not correctly set.");
}
/*engine.processAllCitationsInXMLPatent(xmlFile.getPath(), nplResults, patentResults, consolidateCitations);
System.out.println("Patent references: " + patentResults.size());
System.out.println("Non patent references: " + nplResults.size());*/
}
//@Test
public void testTrainingPatent() throws Exception {
getTestResourcePath();
String xmlPath = this.getResourceDir("./src/test/resources/").getAbsoluteFile()+"/patents/sample1.xml";
engine.createTrainingPatentCitations(xmlPath, newTrainingPath);
xmlPath = this.getResourceDir("./src/test/resources/").getAbsoluteFile()+"/patents/sample2.xml";
engine.createTrainingPatentCitations(xmlPath, newTrainingPath);
}
@Test
public void testCitationPatentParserFromText() throws Exception {
String text = "this patent refers to US-8303618, and filed in continuation of US patent 8153667 and European Patent publications 1000000 and 1000001. ";
System.out.println("text to parse: " + text);
List<BibDataSet> articles = new ArrayList<BibDataSet>();
List<PatentItem> patents = new ArrayList<PatentItem>();
int consolidateCitations = 0;
boolean includeRawCitations = false;
engine.processAllCitationsInPatent(text, articles, patents, consolidateCitations, includeRawCitations);
assertEquals(4, patents.size());
assertEquals(0, articles.size());
PatentItem patent = patents.get(0);
assertEquals("8303618", patent.getNumberEpoDoc());
System.out.println("context=" + patent.getContext());
System.out.println("offset start/end/raw=" + patent.getOffsetBegin() + "/"+ patent.getOffsetEnd()+"/"+patent.getOffsetRaw());
System.out.println("corresponding span: " + text.substring(patent.getOffsetBegin(), patent.getOffsetEnd()+1));
patent = patents.get(1);
assertEquals("8153667", patent.getNumberEpoDoc());
System.out.println("context=" + patent.getContext());
System.out.println("offset start/end/raw=" + patent.getOffsetBegin() + "/"+ patent.getOffsetEnd()+"/"+patent.getOffsetRaw());
System.out.println("corresponding span: " + text.substring(patent.getOffsetBegin(), patent.getOffsetEnd()+1));
patent = patents.get(2);
assertEquals("1000000", patent.getNumberEpoDoc());
System.out.println("context=" + patent.getContext());
System.out.println("offset start/end/raw=" + patent.getOffsetBegin() + "/"+ patent.getOffsetEnd()+"/"+patent.getOffsetRaw());
System.out.println("corresponding span: " + text.substring(patent.getOffsetBegin(), patent.getOffsetEnd()+1));
patent = patents.get(3);
assertEquals("1000001", patent.getNumberEpoDoc());
System.out.println("context=" + patent.getContext());
System.out.println("offset start/end/raw=" + patent.getOffsetBegin() + "/"+ patent.getOffsetEnd()+"/"+patent.getOffsetRaw());
System.out.println("corresponding span: " + text.substring(patent.getOffsetBegin(), patent.getOffsetEnd()+1));
}
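    // Note on the spans printed above: PatentItem offsets are inclusive
    // character positions, which is why the matching text is recovered with
    // text.substring(patent.getOffsetBegin(), patent.getOffsetEnd() + 1).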
}
| 5,679 | 42.030303 | 159 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/test/TestChemicalNameParser.java
|
package org.grobid.core.test;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.grobid.core.data.ChemicalEntity;
import org.grobid.core.exceptions.GrobidException;
import org.junit.Ignore;
@Ignore
public class TestChemicalNameParser extends EngineTest{
public File getResourceDir(String resourceDir) {
File file = new File(resourceDir);
if (!file.exists()) {
if (!file.mkdirs()) {
throw new GrobidException("Cannot start test, because test resource folder is not correctly set.");
}
}
return(file);
}
//@Test
public void testChemicalNameParser() throws Exception {
File textFile = new File(this.getResourceDir("./src/test/resources/").getAbsoluteFile()+"/patents/sample3.txt");
if (!textFile.exists()) {
throw new GrobidException("Cannot start test, because test resource folder is not correctly set.");
}
String text = FileUtils.readFileToString(textFile, StandardCharsets.UTF_8);
List<ChemicalEntity> chemicalResults = engine.extractChemicalEntities(text);
if (chemicalResults != null) {
System.out.println(chemicalResults.size() + " extracted chemical entities");
for(ChemicalEntity entity : chemicalResults) {
System.out.println(entity.toString());
}
}
else {
System.out.println("no extracted chemical entities");
}
}
}
| 1,382 | 29.733333 | 114 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/SHA1Test.java
|
package org.grobid.core.utilities;
import org.grobid.core.utilities.SHA1;
import org.junit.Assert;
import org.junit.Test;
public class SHA1Test {
@Test
public void testgetSHA1() {
Assert.assertEquals("Hashed value is not the expected one",
"9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684",
SHA1.getSHA1("pass"));
}
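    // The expected value is the lowercase hex encoding of the SHA-1 digest of
    // the ASCII string "pass".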
}
| 328 | 18.352941 | 61 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/UnicodeUtilTest.java
|
package org.grobid.core.utilities;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.test.EngineTest;
import org.junit.Ignore;
import org.junit.Test;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.startsWith;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@Ignore
public class UnicodeUtilTest extends EngineTest {
@Test
public void testNormaliseToken() throws Exception {
String test = "´\rÓÑÔÙØØØ";
String result = UnicodeUtil.normaliseText(test);
assertThat("´\nÓÑÔÙØØØ", is(result));
ArrayList<String> tokens = Lists.newArrayList(
"½ºº´\r",
"½ºº´\n",
"½ºº´\t",
"½ºº´\f",
"½ºº´ ",
"½ºº´\f\n",
"½ºº´\r\t");
for (String token : tokens) {
assertEquals("½ºº´", UnicodeUtil.normaliseText(token.replace(" ", "").replace("\n", "")));
}
}
}
| 1,182 | 27.166667 | 102 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/UtilitiesTest.java
|
package org.grobid.core.utilities;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import org.junit.Test;
import static org.junit.Assert.*;
public class UtilitiesTest {
@Test
public void testStringToBooleanTrue() {
assertEquals(
"stringToBoolean value does not match expected result", true,
Utilities.stringToBoolean("true"));
}
@Test
public void testStringToBooleanTrue2() {
assertEquals(
"stringToBoolean value does not match expected result", true,
Utilities.stringToBoolean(" TruE "));
}
@Test
public void testStringToBooleanFalse() {
assertEquals(
"stringToBoolean value does not match expected result", false,
Utilities.stringToBoolean("false"));
}
@Test
public void testStringToBooleanFalse2() {
assertEquals(
"stringToBoolean value does not match expected result", false,
Utilities.stringToBoolean(" fAlSe "));
}
@Test
public void testStringToBooleanFalse3() {
assertEquals(
"stringToBoolean value does not match expected result", false,
Utilities.stringToBoolean(" non boolean value"));
}
@Test
public void testStringToBooleanBlank() {
assertEquals(
"stringToBoolean value does not match expected result", false,
Utilities.stringToBoolean(""));
}
@Test
public void testStringToBooleanBlank2() {
assertEquals(
"stringToBoolean value does not match expected result", false,
Utilities.stringToBoolean(null));
}
@Test
public void testMergePositions1() throws IOException {
List<OffsetPosition> position1 = new ArrayList<OffsetPosition>();
List<OffsetPosition> position2 = new ArrayList<OffsetPosition>();
position1.add(new OffsetPosition(0,2));
position2.add(new OffsetPosition(3,5));
List<OffsetPosition> positions = Utilities.mergePositions(position1,position2);
assertEquals(positions.size(), 2);
assertEquals(positions.get(0).start, 0);
assertEquals(positions.get(0).end, 2);
assertEquals(positions.get(1).start, 3);
assertEquals(positions.get(1).end, 5);
}
@Test
public void testMergePositions2() throws IOException {
List<OffsetPosition> position1 = new ArrayList<OffsetPosition>();
List<OffsetPosition> position2 = new ArrayList<OffsetPosition>();
position1.add(new OffsetPosition(0,2));
position1.add(new OffsetPosition(4,5));
position2.add(new OffsetPosition(3,4));
position2.add(new OffsetPosition(8,10));
List<OffsetPosition> positions = Utilities.mergePositions(position1,position2);
assertEquals(positions.size(), 3);
assertEquals(positions.get(0).start, 0);
assertEquals(positions.get(0).end, 2);
assertEquals(positions.get(1).start, 3);
assertEquals(positions.get(1).end, 5);
assertEquals(positions.get(2).start, 8);
assertEquals(positions.get(2).end, 10);
}
@Test
public void testMergePositionsOverlap() throws IOException {
List<OffsetPosition> position1 = new ArrayList<OffsetPosition>();
List<OffsetPosition> position2 = new ArrayList<OffsetPosition>();
position1.add(new OffsetPosition(0,3));
position1.add(new OffsetPosition(5,6));
position1.add(new OffsetPosition(8,9));
position2.add(new OffsetPosition(1,2));
position2.add(new OffsetPosition(3,6));
position2.add(new OffsetPosition(7,10));
List<OffsetPosition> positions= Utilities.mergePositions(position1,position2);
assertEquals(positions.size(), 2);
assertEquals(positions.get(0).start, 0);
assertEquals(positions.get(0).end, 6);
assertEquals(positions.get(1).start, 7);
assertEquals(positions.get(1).end, 10);
}
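    // Worked through: [0,3], [1,2], [3,6] and [5,6] all touch or overlap
    // (offset 3 is shared, and [5,6] lies inside [3,6]), so they collapse
    // into [0,6]; likewise [8,9] lies inside [7,10], giving the second
    // merged span [7,10].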
}
| 3,547 | 29.586207 | 81 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/LayoutTokensUtilIntegrationTest.java
|
package org.grobid.core.utilities;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.main.LibraryLoader;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class LayoutTokensUtilIntegrationTest {
@BeforeClass
public static void setUp() throws Exception {
LibraryLoader.load();
GrobidProperties.getInstance();
}
@Test
public void testDoesRequireDehyphenization2() throws Exception {
DocumentSource documentSource = DocumentSource.fromPdf(new File("src/test/resources/org/grobid/core/utilities/dehypenisation1.pdf"));
Document result = Engine.getEngine(false).getParsers().getSegmentationParser().processing(documentSource, GrobidAnalysisConfig.defaultInstance());
assertThat(LayoutTokensUtil.doesRequireDehypenisation(result.getTokenizations(), 7), is(true));
}
@Test
public void testDoesRequireDehyphenization() throws Exception {
DocumentSource documentSource = DocumentSource.fromPdf(new File("src/test/resources/org/grobid/core/utilities/dehypenisation2.pdf"));
Document result = Engine.getEngine(false).getParsers().getSegmentationParser().processing(documentSource, GrobidAnalysisConfig.defaultInstance());
assertThat(LayoutTokensUtil.doesRequireDehypenisation(result.getTokenizations(), 7), is(true));
}
}
| 1,604 | 35.477273 | 154 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/GrobidHomeFinderTest.java
|
package org.grobid.core.utilities;
import org.grobid.core.exceptions.GrobidPropertyException;
import org.grobid.core.main.GrobidHomeFinder;
import org.junit.Test;
import org.junit.After;
import java.io.File;
import java.util.Collections;
import static org.junit.Assert.assertTrue;
/**
* Testing location of grobid home
*/
public class GrobidHomeFinderTest {
@After
public void tearDown() throws Exception {
GrobidProperties.reset();
}
@Test
public void testDefault() {
GrobidProperties.getInstance();
}
/*@Test
public void testViaProp() {
System.setProperty(GrobidPropertyKeys.PROP_GROBID_HOME, "../grobid-home");
assertPath(new GrobidHomeFinder(Collections.<String>emptyList()).findGrobidHomeOrFail());
}*/
@Test(expected = GrobidPropertyException.class)
public void testNoDefaultLocations() {
assertPath(new GrobidHomeFinder(Collections.<String>emptyList()).findGrobidHomeOrFail());
}
private void assertPath(File p) {
assertTrue("Not exists or not a directory " + p, p.exists() && p.isDirectory());
}
}
| 1,122 | 24.522727 | 97 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/DataSetContextExtractorTest.java
|
package org.grobid.core.utilities;
import org.apache.commons.io.IOUtils;
import org.junit.Test;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
public class DataSetContextExtractorTest {
@Test
public void testRefEscapes() throws Exception {
InputStream is = this.getClass().getResourceAsStream("/test/tei-escape.xml");
String tei = IOUtils.toString(is, StandardCharsets.UTF_8);
is.close();
DataSetContextExtractor.getCitationReferences(tei);
}
}
| 517 | 24.9 | 85 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/ConsolidationIntegrationTest.java
|
package org.grobid.core.utilities;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.main.LibraryLoader;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.*;
public class ConsolidationIntegrationTest {
private Consolidation target = null;
public static String[] DOIs = {
"10.1086/107043",
"10.1086/102351",
"10.1086/100853",
"10.1086/105172"
};
@Before
public void setUp() {
LibraryLoader.load();
GrobidProperties.getInstance();
target = Consolidation.getInstance();
}
@Test
@Ignore("Crossref API not realiable enough")
public void testConsolidationDOISimple() throws Exception {
BiblioItem biblio = new BiblioItem();
biblio.setDOI(DOIs[0]);
BiblioItem bib = target.consolidate(biblio, null, 1);
boolean found = false;
if (bib != null)
found = true;
assertEquals("The consolidation has not the expected outcome", true, found);
}
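    // A sketch of what is exercised here (assuming the CrossRef-backed lookup
    // mentioned in the @Ignore reason): the BiblioItem carries only a DOI,
    // and consolidate() is expected to return an enriched record when the
    // lookup succeeds, hence the simple non-null check.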
}
| 1,204 | 24.104167 | 84 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/LayoutTokensUtilTest.java
|
package org.grobid.core.utilities;
import com.google.common.collect.Iterables;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.layout.LayoutToken;
import org.junit.Test;
import java.util.List;
import java.util.stream.IntStream;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
public class LayoutTokensUtilTest {
    /**
     * We fake the line break, either with an explicit newline in the input
     * or through the layout token coordinates.
     */
@Test
public void testDoesRequireDehyphenization_shouldReturnTrue() throws Exception {
String input = "The study of iron-based supercondu- \n" +
"ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 11), is(true));
}
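    // For reference, the behaviour under test: a token sequence ending in
    // "supercondu", "-", "\n", "ctors" should be flagged for dehypenisation
    // (rejoined as "superconductors"), whereas a genuine composed word such
    // as "iron-based" must be left alone -- hence the shouldReturnFalse
    // cases below.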
@Test
public void testDoesRequireDehyphenization2_shouldReturnTrue() throws Exception {
String input = "The study of iron-based supercondu - \n" +
"ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 12), is(true));
}
@Test
public void testDoesRequireDehyphenization_composedWords_shouldReturnFalse() throws Exception {
String input = "The study of iron-based supercondu - \n" +
"ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 7), is(false));
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 24), is(false));
}
@Test
public void testDoesRequireDehyphenization2_composedWords_shouldReturnFalse() throws Exception {
String input = "The study of iron- based supercondu - \n" +
"ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 7), is(false));
}
@Test
public void testDoesRequireDehyphenization3_composedWords_shouldReturnFalse() throws Exception {
String input = "The study of iron - based supercondu - \n" +
"ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 8), is(false));
}
@Test
public void testDoesRequireDehyphenization_usingCoordinates_shouldReturnTrue() throws Exception {
String input = "The study of iron-based supercondu - " +
"ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
IntStream.range(0, 15).forEach(i -> layoutTokens.get(i).setY(10));
IntStream.range(15, layoutTokens.size()).forEach(i -> layoutTokens.get(i).setY(30));
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 12), is(true));
}
// @Test
// public void testDoesRequireDehyphenization_withoutNewLine() throws Exception {
// String input = "The study of iron-based supercondu - " +
// "ctors superconductivity in the iron-pnictide LaFeAsO 1-x F x has been expanding and has \n";
//
// List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
//
// IntStream.range(0, 15).forEach(i -> layoutTokens.get(i).setY(10));
// IntStream.range(15, layoutTokens.size()).forEach(i -> layoutTokens.get(i).setY(30));
//
// assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 12), is(true));
// }
@Test
public void testDoesRequireDehyphenization_hypenAtEndOfString_shouldReturnFalse() throws Exception {
String input = "The study of iron-based supercondu-";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
assertThat(LayoutTokensUtil.doesRequireDehypenisation(layoutTokens, 11), is(false));
}
@Test
public void testSubList() throws Exception {
String text = "This is a simple text that I'm making up just for fun... or well for the sake of the test!";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
layoutTokens.stream().forEach(layoutToken -> layoutToken.setOffset(layoutToken.getOffset() + 1000));
List<LayoutToken> result = LayoutTokensUtil.subListByOffset(layoutTokens, 1005, 1008);
assertThat(result, hasSize(2));
assertThat(result.get(0).getText(), is("is"));
assertThat(result.get(1).getText(), is(" "));
}
@Test
public void testSubList_noEnd() throws Exception {
String text = "This is a simple text that I'm making up just for fun... or well for the sake of the test!";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
layoutTokens.stream().forEach(layoutToken -> layoutToken.setOffset(layoutToken.getOffset() + 1000));
List<LayoutToken> result = LayoutTokensUtil.subListByOffset(layoutTokens, 1005);
assertThat(result, hasSize(43));
assertThat(result.get(0).getText(), is("is"));
assertThat(Iterables.getLast(result).getText(), is("!"));
}
@Test
public void testMaterialNameWithHypenInFormula_shouldNotDehypenise() throws Exception {
String text = "based \n" +
"(Nd 1-x Ce x ) 2 Fe 14-y Co y B nanostructured magnets";
List<LayoutToken> layoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
layoutTokens.stream().forEach(layoutToken -> layoutToken.setOffset(layoutToken.getOffset() + 1000));
layoutTokens.get(4).setY(406.746);
layoutTokens.get(4).setX(55.754000000000005);
layoutTokens.get(6).setSubscript(true);
layoutTokens.get(6).setY(410.506);
layoutTokens.get(6).setX(65.2255);
layoutTokens.get(7).setSubscript(true);
layoutTokens.get(7).setY(410.506);
layoutTokens.get(7).setX(67.66675);
layoutTokens.get(8).setSubscript(true);
layoutTokens.get(8).setY(410.538);
layoutTokens.get(8).setX(70.108);
layoutTokens.get(10).setY(406.76);
layoutTokens.get(10).setX(73.3461);
layoutTokens.get(12).setSubscript(true);
layoutTokens.get(21).setSubscript(true);
List<LayoutToken> result = LayoutTokensUtil.dehyphenize(layoutTokens);
assertThat(result.get(7).getText(), is("-"));
System.out.println(result);
}
}
| 7,288 | 39.049451 | 115 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/GrobidPropertiesTest.java
|
package org.grobid.core.utilities;
import org.apache.commons.lang3.StringUtils;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.engines.tagging.GrobidCRFEngine;
import org.grobid.core.exceptions.GrobidPropertyException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.Ignore;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@Ignore
public class GrobidPropertiesTest {
@Before
public void setUp() {
GrobidProperties.getInstance();
}
@After
public void tearDown() throws Exception {
GrobidProperties.reset();
}
@Test(expected = GrobidPropertyException.class)
public void testSet_GROBID_HOME_PATH_NullPath_shouldThrowException() {
GrobidProperties.setGrobidHome(null);
}
@Test(expected = GrobidPropertyException.class)
public void testSet_GROBID_HOME_PATH_FileNotExisting_shouldThrowException() {
GrobidProperties.setGrobidHome("/NotExistingPath");
}
@Test
public void testNativeLibraryPath() throws IOException {
// File expectedFile = new File(MockContext.GROBID_HOME_PATH
// + File.separator + "/lib");
assertNotNull(GrobidProperties
.getNativeLibraryPath().getCanonicalFile());
}
@Test
public void testgetsetNativeLibraryPath() {
String value = "value";
GrobidProperties.setNativeLibraryPath(value);
assertEquals("The parameter has not the value expected", value,
GrobidProperties.getNativeLibraryPath().getPath());
}
@Test
public void testsetgetProxyHost() {
String value = "host";
GrobidProperties.setProxyHost(value);
assertEquals("The parameter has not the value expected", value,
GrobidProperties.getProxyHost());
}
@Test
public void testsetgetProxyPort() {
int value = 1;
GrobidProperties.setProxyPort(value);
assertEquals("The parameter has not the value expected", value,
GrobidProperties.getProxyPort().intValue());
}
@Test
public void testsetgetWapitiNbThreads() {
int value = 1;
GrobidProperties.setWapitiNbThreads(value);
assertEquals("The parameter has not the value expected", value,
GrobidProperties.getWapitiNbThreads().intValue());
}
@Test
public void testgetNBThreadsShouldReturnAvailableProcessorsIfZero() {
int value = 0;
GrobidProperties.setWapitiNbThreads(value);
assertEquals("The parameter has not the value expected",
String.valueOf(Runtime.getRuntime().availableProcessors()),
GrobidProperties.getWapitiNbThreads());
assertTrue("The parameter is not greater than zero",
GrobidProperties.getWapitiNbThreads().intValue() > 0);
}
@Test
public void testShouldReturnModelPathWithExtension() {
GrobidModels model = GrobidModels.DATE;
String extension = GrobidProperties.getGrobidCRFEngine(model).getExt();
assertEquals(
"model path for " + model.name(),
new File(GrobidProperties.getGrobidHome(),
GrobidProperties.FOLDER_NAME_MODELS
+ File.separator
+ model.getFolderName()
+ File.separator
+ GrobidProperties.FILE_NAME_MODEL
+ "."
+ extension
).getAbsoluteFile(),
GrobidProperties.getModelPath(model).getAbsoluteFile()
);
}
@Test
public void testgetLanguageDetectorFactory() {
assertEquals("The parameter has not the value expected",
"org.grobid.core.lang.impl.CybozuLanguageDetectorFactory",
GrobidProperties.getLanguageDetectorFactory());
}
@Test
public void testgetPdfaltoPath() throws Exception {
assertNotNull("The parameter has not the value expected", GrobidProperties
.getPdfaltoPath().getAbsolutePath());
}
@Test
public void testGetInstance() throws Exception {
GrobidProperties.reset();
GrobidProperties.getInstance();
// test the reuse of the instance created previously
GrobidProperties.reset();
assertTrue("GrobidProperties.getInstance() does not return an instance of GrobidProperties",
GrobidProperties.getInstance() != null);
}
}
| 4,774 | 33.107143 | 100 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/SentenceUtilitiesTest.java
|
package org.grobid.core.utilities;
import org.grobid.core.GrobidModels;
import org.grobid.core.engines.DateParser;
import org.grobid.core.lang.SentenceDetector;
import org.grobid.core.lang.SentenceDetectorFactory;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.main.LibraryLoader;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.PowerMock;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.reflect.Whitebox;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import static org.easymock.EasyMock.expect;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertNull;
import static org.powermock.api.easymock.PowerMock.*;
@RunWith(PowerMockRunner.class)
@SuppressStaticInitializationFor("org.grobid.core.lang.SentenceDetectorFactory")
@PrepareForTest({SentenceUtilities.class})
public class SentenceUtilitiesTest {
SentenceDetectorFactory sentenceDetectorFactoryMock;
SentenceDetector sentenceDetectorMock;
SentenceUtilities target;
@Before
public void setUp() {
GrobidProperties.getInstance();
GrobidConfig.ModelParameters modelParameters = new GrobidConfig.ModelParameters();
modelParameters.name = "bao";
GrobidProperties.addModel(modelParameters);
sentenceDetectorFactoryMock = createMock(SentenceDetectorFactory.class);
sentenceDetectorMock = createMock(SentenceDetector.class);
target = SentenceUtilities.getInstance();
Whitebox.setInternalState(target, sentenceDetectorFactoryMock);
}
@Test
public void testNullText() throws Exception {
String text = null;
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(text);
assertNull(theSentences);
}
@Test
public void testEmptyText() throws Exception {
String text = "";
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(text)).andReturn(new ArrayList<>());
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(text);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(0));
}
@Test
public void testOneSentenceText() throws Exception {
String text = "Bla bla bla.";
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(text)).andReturn(Arrays.asList(new OffsetPosition(0, 12)));
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(text);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(1));
}
@Test
public void testTwoSentencesText() throws Exception {
String text = "Bla bla bla. Bli bli bli.";
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(text)).andReturn(Arrays.asList(new OffsetPosition(0, 12), new OffsetPosition(13, 24)));
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(text);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(2));
}
@Test
public void testTwoSentencesTextWithUselessForbidden() throws Exception {
String text = "Bla bla bla. Bli bli bli.";
List<OffsetPosition> forbidden = new ArrayList<>();
forbidden.add(new OffsetPosition(2, 8));
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(text, null)).andReturn(Arrays.asList(new OffsetPosition(0, 12), new OffsetPosition(13, 24)));
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(text, forbidden);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(2));
}
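// In the next test the second forbidden span (9,15) straddles the sentence boundary
// at offsets 12-13, so the two detected sentences must be merged into a single one.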
@Test
public void testTwoSentencesTextWithUsefulForbidden() throws Exception {
String text = "Bla bla bla. Bli bli bli.";
List<OffsetPosition> forbidden = new ArrayList<>();
forbidden.add(new OffsetPosition(2, 8));
forbidden.add(new OffsetPosition(9, 15));
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(text, null)).andReturn(Arrays.asList(new OffsetPosition(0, 12), new OffsetPosition(13, 24)));
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(text, forbidden);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(1));
}
@Test
public void testCorrectSegmentation_shouldNotCancelSegmentation() throws Exception {
String paragraph = "This is a sentence. [3] Another sentence.";
List<String> refs = Arrays.asList("[3]");
List<String> sentences = Arrays.asList("This is a sentence.", "Another sentence.");
List<OffsetPosition> refSpans = getPositions(paragraph, refs);
List<OffsetPosition> sentenceSpans = getPositions(paragraph, sentences);
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(paragraph, null)).andReturn(sentenceSpans);
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(paragraph, refSpans);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(2));
}
@Test
public void testCorrectSegmentation_shouldNotCancelSegmentation2() throws Exception {
String paragraph = "This is a sentence [3] and the continuing sentence.";
List<String> refs = Arrays.asList("[3]");
List<String> sentences = Arrays.asList("This is a sentence", "and the continuing sentence.");
List<OffsetPosition> refSpans = getPositions(paragraph, refs);
List<OffsetPosition> sentenceSpans = getPositions(paragraph, sentences);
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(paragraph, null)).andReturn(sentenceSpans);
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(paragraph, refSpans);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(2));
}
@Test
public void testCorrectSegmentation_shouldCancelWrongSegmentation() throws Exception {
String paragraph = "(Foppiano and al. 2021) explains what he's thinking.";
List<String> refs = Arrays.asList("(Foppiano and al. 2021)");
List<String> sentences = Arrays.asList("(Foppiano and al.", "2021) explains what he's thinking.");
List<OffsetPosition> refSpans = getPositions(paragraph, refs);
List<OffsetPosition> sentenceSpans = getPositions(paragraph, sentences);
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(paragraph, null)).andReturn(sentenceSpans);
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(paragraph, refSpans);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(1));
}
@Test
public void testCorrectSegmentation_shouldCancelWrongSegmentation2() throws Exception {
String paragraph = "What we claim corresponds with what (Foppiano and al. 2021) explains what he's thinking.";
List<String> refs = Arrays.asList("(Foppiano and al. 2021)");
List<String> sentences = Arrays.asList("What we claim corresponds with what (Foppiano and al.", "2021) explains what he's thinking.");
List<OffsetPosition> refSpans = getPositions(paragraph, refs);
List<OffsetPosition> sentenceSpans = getPositions(paragraph, sentences);
expect(sentenceDetectorFactoryMock.getInstance()).andReturn(sentenceDetectorMock);
expect(sentenceDetectorMock.detect(paragraph, null)).andReturn(sentenceSpans);
replay(sentenceDetectorFactoryMock, sentenceDetectorMock);
List<OffsetPosition> theSentences = SentenceUtilities.getInstance().runSentenceDetection(paragraph, refSpans);
verify(sentenceDetectorFactoryMock, sentenceDetectorMock);
assertThat(theSentences.size(), is(1));
}
private List<OffsetPosition> getPositions(String paragraph, List<String> refs) {
List<OffsetPosition> positions = new ArrayList<>();
int previousRefEnd = 0;
for (String ref : refs) {
int startRef = paragraph.indexOf(ref, previousRefEnd);
int endRef = startRef + ref.length();
positions.add(new OffsetPosition(startRef, endRef));
previousRefEnd = endRef;
}
return positions;
}
}
| 10,000 | 45.516279 | 142 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/GrobidTestUtils.java
|
package org.grobid.core.utilities;
import org.apache.commons.lang3.tuple.Triple;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
import static org.junit.Assert.assertThat;
public class GrobidTestUtils {
/**
* Utility method to generate a hypothetical result from wapiti.
* Useful for testing the extraction of the sequence labeling.
*
* @param features the feature lines, one per token
* @param labels label map: a list of Triples containing the label (left), start index (middle) and exclusive end index (right)
* @return a string containing the resulting features + labels returned by wapiti
*/
public static String getWapitiResult(List<String> features, List<Triple<String, Integer, Integer>> labels) {
List<String> labeled = new ArrayList<>();
int idx = 0;
for (Triple<String, Integer, Integer> label : labels) {
if (idx < label.getMiddle()) {
for (int i = idx; i < label.getMiddle(); i++) {
labeled.add("<other>");
idx++;
}
}
for (int i = label.getMiddle(); i < label.getRight(); i++) {
labeled.add(label.getLeft());
idx++;
}
}
if (idx < features.size()) {
for (int i = idx; i < features.size(); i++) {
labeled.add("<other>");
idx++;
}
}
assertThat(features, hasSize(labeled.size()));
StringBuilder sb = new StringBuilder();
for (int i = 0; i < features.size(); i++) {
if (features.get(i) == null || features.get(i).startsWith(" ")) {
continue;
}
sb.append(features.get(i)).append(" ").append(labeled.get(i)).append("\n");
}
return sb.toString();
}
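// Usage sketch (hypothetical feature lines): label the middle token as <title>
// and let the surrounding tokens default to <other>:
//
// List<String> features = Arrays.asList("Foo foo F", "Bar bar B", "Baz baz B");
// String res = getWapitiResult(features, Arrays.asList(Triple.of("<title>", 1, 2)));
// // -> "Foo foo F <other>\nBar bar B <title>\nBaz baz B <other>\n"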
}
| 1,877 | 29.786885 | 129 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/TextUtilitiesTest.java
|
package org.grobid.core.utilities;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.analyzers.GrobidDefaultAnalyzer;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.test.EngineTest;
import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.startsWith;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.*;
public class TextUtilitiesTest extends EngineTest {
@Test
public void testHTMLEncode_complete() throws Exception {
String test = "Dé&s, C & Bidule, D.;";
String result = TextUtilities.HTMLEncode(test);
assertThat("Dé&amps, C & Bidule, D.;", is(result));
}
@Test
public void testHTMLEncode_partial() throws Exception {
String test = "Dé&s, C &";
String result = TextUtilities.HTMLEncode(test);
assertThat("Dé&amps, C &", is(result));
}
@Test
public void testDehyphenization_withoutSpaces() {
assertThat(TextUtilities.dehyphenize("This is hype-\nnized.We are here."),
is("This is hypenized.We are here."));
assertThat(TextUtilities.dehyphenize("This is hype-\nnized. We are here."),
is("This is hypenized. We are here."));
}
@Test
public void testDehyphenization_withSpaces() {
assertThat(TextUtilities.dehyphenize("This is hype- \n nized. We are here."), is("This is hypenized. We are here."));
assertThat(TextUtilities.dehyphenize("This is hype- \nnized. We are here."), is("This is hypenized. We are here."));
assertThat(TextUtilities.dehyphenize("This is hype - \n nized. We are here."), is("This is hypenized. We are here."));
}
@Test
public void testDehyphenization_withDigits_shouldNotDehyphenize() {
// assertThat(TextUtilities.dehyphenize("This is 1234-\n44A. Patent."), is("This is 123444A. Patent."));
// assertThat(TextUtilities.dehyphenize("This is 1234 - \n44A. Patent."), is("This is 123444A. Patent."));
assertThat(TextUtilities.dehyphenize("This is 1234-44A. Patent."), is("This is 1234-44A. Patent."));
assertThat(TextUtilities.dehyphenize("This is 1234 - 44A. Patent."), is("This is 1234 - 44A. Patent."));
}
@Test
public void testDehyphenization_citation() {
assertThat(TextUtilities.dehyphenize("Anonymous. Runtime process infection. Phrack, 11(59):ar-\n" +
" ticle 8 of 18, December 2002."),
is("Anonymous. Runtime process infection. Phrack, 11(59):article 8 of 18, December 2002."));
}
@Test
public void testDehyphenization_falseTruncation_shouldReturnSameString() {
assertThat(TextUtilities.dehyphenize("sd. Linux on-the-fly kernel patching without lkm. Phrack, 11(58):article 7 of 15, December 2001."),
is("sd. Linux on-the-fly kernel patching without lkm. Phrack, 11(58):article 7 of 15, December 2001."));
// assertThat(TextUtilities.dehyphenize("sd. Linux on-the-fly kernel patching without lkm. Phrack, \n" +
// "11(58):article 7 of 15, December 2001. \n" +
// "[41] K. Seifried. \n" +
// "Honeypotting with VMware: basics. \n" +
// "http://www.seifried.org/security/ids/ \n" +
// "20020107-honeypot-vmware-basics.ht%ml. \n" +
// "[42] Silvio Cesare. \n" +
// "Runtime Kernel Kmem Patch-\n" +
// "ing. \n" +
// "http://www.big.net.au/˜silvio/ \n" +
// "runtime-kernel-kmem-patching.txt."), startsWith("sd. Linux on-the-fly kernel"));
}
@Test
public void testDehyphenization_FalseTruncation_shouldReturnSameString() {
assertThat(TextUtilities.dehyphenize("Nettop also relies on VMware Workstation for its VMM. Ultimately, since VMware is a closed-source product, it is impossible to verify this claim through open review."),
is("Nettop also relies on VMware Workstation for its VMM. Ultimately, since VMware is a closed-source product, it is impossible to verify this claim through open review."));
}
@Test
public void testDehyphenization_NormalCase() {
assertThat(TextUtilities.dehyphenize("Implementation bugs in the VMM can compromise its ability to provide secure isolation, and modify-\n ing the VMM presents the risk of introducing bugs."),
is("Implementation bugs in the VMM can compromise its ability to provide secure isolation, and modifying the VMM presents the risk of introducing bugs."));
}
@Test
public void testGetLastToken_spaceParenthesis() {
assertThat(TextUtilities.getLastToken("secure isolation, and modify"),
is("modify"));
assertThat(TextUtilities.getLastToken("secure isolation, (and modify"),
is("modify"));
assertThat(TextUtilities.getLastToken("secure isolation, and) modify"),
is("modify"));
assertThat(TextUtilities.getLastToken("secure isolation, and (modify"),
is("(modify"));
assertThat(TextUtilities.getLastToken("secure isolation, .and modify"),
is("modify"));
}
@Test
public void testGetFirstToken_spaceParenthesis() {
assertThat(TextUtilities.getFirstToken("Secure isolation, and modify"),
is("Secure"));
assertThat(TextUtilities.getFirstToken(" secure isolation, (and modify"),
is("secure"));
assertThat(TextUtilities.getFirstToken("\n secure isolation, and) modify"),
is("\n"));
assertThat(TextUtilities.getFirstToken(" \nsecure isolation, and (modify"),
is("\nsecure"));
assertThat(TextUtilities.getFirstToken("\nsecure isolation, and (modify"),
is("\nsecure"));
}
@Ignore
@Test
public void testDehyphenizationHard_withoutSpaces() {
assertThat(TextUtilities.dehyphenizeHard("This is hype-\nnized.We are here."),
is("This is hypenized.We are here."));
assertThat(TextUtilities.dehyphenizeHard("This is hype-\nnized. We are here."),
is("This is hypenized. We are here."));
}
@Ignore
@Test
public void testDehyphenizationHard_withSpaces() {
assertThat(TextUtilities.dehyphenizeHard("This is hype- \n nized. We are here."), is("This is hypenized. We are here."));
assertThat(TextUtilities.dehyphenizeHard("This is hype- \nnized. We are here."), is("This is hypenized. We are here."));
assertThat(TextUtilities.dehyphenizeHard("This is hype - \n nized. We are here."), is("This is hypenized. We are here."));
}
@Ignore
@Test
public void testDehyphenizationHard_withDigits_shouldNotDehyphenize() {
assertThat(TextUtilities.dehyphenizeHard("This is 1234-\n44A. Patent."), is("This is 1234-44A. Patent."));
assertThat(TextUtilities.dehyphenizeHard("This is 1234 - \n44A. Patent."), is("This is 1234 - 44A.Patent."));
}
@Ignore
@Test
public void testDehyphenizationHard_citation() {
assertThat(TextUtilities.dehyphenizeHard("Anonymous. Runtime process infection. Phrack, 11(59):ar-\n" +
" ticle 8 of 18, December 2002."),
is("Anonymous. Runtime process infection. Phrack, 11(59):article 8 of 18, December 2002."));
}
@Test
public void testDehyphenizationWithLayoutTokens() throws Exception {
List<String> tokens = GrobidAnalyzer.getInstance().tokenize("This is hype-\n nized.");
List<LayoutToken> layoutTokens = new ArrayList<>();
for (String token : tokens) {
if (token.equals("\n")) {
layoutTokens.get(layoutTokens.size() - 1).setNewLineAfter(true);
}
layoutTokens.add(new LayoutToken(token));
}
List<LayoutToken> output = TextUtilities.dehyphenize(layoutTokens);
assertNotNull(output);
assertThat(LayoutTokensUtil.toText(output), is("This is hypenized."));
}
@Test
public void testPrefix() {
String word = "Grobid";
assertEquals("", TextUtilities.prefix(word, 0));
assertEquals("G", TextUtilities.prefix(word, 1));
assertEquals("Gr", TextUtilities.prefix(word, 2));
assertEquals("Gro", TextUtilities.prefix(word, 3));
assertEquals("Grob", TextUtilities.prefix(word, 4));
assertEquals("Grobid", TextUtilities.prefix(word, 6));
assertEquals("Grobid", TextUtilities.prefix(word, 100));
assertEquals(null, TextUtilities.prefix(null, 0));
assertEquals(null, TextUtilities.prefix(null, 1));
}
@Test
public void testSuffixes() {
String word = "Grobid";
assertEquals("", TextUtilities.suffix(word, 0));
assertEquals("d", TextUtilities.suffix(word, 1));
assertEquals("id", TextUtilities.suffix(word, 2));
assertEquals("bid", TextUtilities.suffix(word, 3));
assertEquals("obid", TextUtilities.suffix(word, 4));
assertEquals("Grobid", TextUtilities.suffix(word, 6));
assertEquals("Grobid", TextUtilities.suffix(word, 100));
assertEquals(null, TextUtilities.suffix(null, 0));
assertEquals(null, TextUtilities.suffix(null, 1));
}
@Test
public void testWordShape() {
testWordShape("This", "Xxxx", "Xx");
testWordShape("Equals", "Xxxx", "Xx");
testWordShape("O'Conor", "X'Xxxx", "X'Xx");
testWordShape("McDonalds", "XxXxxx", "XxXx");
testWordShape("any-where", "xx-xxx", "x-x");
testWordShape("1.First", "d.Xxxx", "d.Xx");
testWordShape("ThisIsCamelCase", "XxXxXxXxxx", "XxXxXxXx");
testWordShape("This:happens", "Xx:xxx", "Xx:x");
testWordShape("ABC", "XXX", "X");
testWordShape("AC", "XX", "X");
testWordShape("A", "X", "X");
testWordShape("Ab", "Xx", "Xx");
testWordShape("AbA", "XxX", "XxX");
testWordShape("uü", "xx", "x");
testWordShape("Üwe", "Xxx", "Xx");
testWordShape(" ", " ", " ");
testWordShape("Tes9t99", "Xxdxdd", "Xxdxd");
testWordShape("T", "X", "X");
}
private void testWordShape(String orig, String expected, String expectedTrimmed) {
assertThat(TextUtilities.wordShape(orig), is(expected));
assertThat(TextUtilities.wordShapeTrimmed(orig), is(expectedTrimmed));
}
@Test
public void testFormat4Digits() throws Exception {
assertThat(TextUtilities.formatFourDecimals(0.0002), is("0.0002"));
assertThat(TextUtilities.formatFourDecimals(20000), is("20000"));
assertThat(TextUtilities.formatFourDecimals(2000.00234434), is("2000.0023"));
assertThat(TextUtilities.formatFourDecimals(0.00234434), is("0.0023"));
}
@Test
public void testFormat2Digits() throws Exception {
assertThat(TextUtilities.formatTwoDecimals(0.0002), is("0"));
assertThat(TextUtilities.formatTwoDecimals(20000), is("20000"));
assertThat(TextUtilities.formatTwoDecimals(2000.00234434), is("2000"));
assertThat(TextUtilities.formatTwoDecimals(0.01234434), is("0.01"));
}
@Test
public void testDoesRequireDehypenisation_standard_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample dehypen-\nyzation text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(true));
}
@Test
public void testDoesRequireDehypenisation_withSpaceAfter_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample dehypen- \nyzation text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(true));
}
@Test
public void testDoesRequireDehypenisation_withSpacesAfter_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample dehypen- \n yzation text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(true));
}
@Test
public void testDoesRequireDehypenisation_withSpaceBefore_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample dehypen -\nyzation text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 10), is(true));
}
@Test
public void testDoesRequireDehypenisation_withSpacesBefore_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample dehypen    -\nyzation text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 13), is(true));
}
@Test
public void testDoesRequireDehypenisation_usualWord_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample open-source text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_usualWordWithSpace_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample open- source text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_usualWordWith2Space_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample open - source text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_sequence_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample ABC123-3434 text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_sequence2_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample ABC123 - 3434 text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_sequence3_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a sample ABC123 - 3434 text");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence1_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a bad sample -\n\n");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 10), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence2_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a bad sample -");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 10), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence3_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a bad sample - \n\n ");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 10), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence4_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a bad sample-\n\n");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence5_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a bad sample-");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence6_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("This is a bad sample- \n\n ");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 9), is(false));
}
@Test
public void testDoesRequireDehypenisation_trickySequence7_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("-\ncore is a bad sample.");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 0), is(true));
}
@Test
public void testDoesRequireDehypenisation_trickySequence8_shouldReturnTrue() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("- \n\n core is a bad sample.");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 0), is(true));
}
@Test
public void testDoesRequireDehypenisation_falseFriend1_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("which was mediated through the inhibition of expression of α2-\n integrin (1,2). ");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 19), is(false));
}
@Test
public void testDoesRequireDehypenisation_falseFriend2_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("which was mediated through the inhibition of expression of α2 -\n integrin (1,2). ");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 19), is(false));
}
@Test
public void testDoesRequireDehypenisation_falseFriend3_shouldReturnFalse() {
List<LayoutToken> tokens = GrobidDefaultAnalyzer.getInstance().tokenizeWithLayoutToken("which was mediated through the inhibition of expression of α 2 - \n integrin (1,2). ");
assertThat(TextUtilities.doesRequireDehypenisation(tokens, 19), is(false));
}
@Test
public void testIsAllUpperCaseOrDigitOrDot() throws Exception {
assertThat(TextUtilities.isAllUpperCaseOrDigitOrDot("this"), is(false));
assertThat(TextUtilities.isAllUpperCaseOrDigitOrDot("UPPERCASE"), is(true));
assertThat(TextUtilities.isAllUpperCaseOrDigitOrDot("."), is(true));
assertThat(TextUtilities.isAllUpperCaseOrDigitOrDot("123456"), is(true));
assertThat(TextUtilities.isAllUpperCaseOrDigitOrDot("P.C.T."), is(true));
assertThat(TextUtilities.isAllUpperCaseOrDigitOrDot("P.C,T."), is(false));
}
@Test
public void testOrcidPattern() {
String[] falseOrcids = {"1234", "1234-5698-137X", "0000-0001-9877-137Y","http://orcid.fr/0000-0001-9877-137X"};
String[] trueOrcids = {"0000-0001-9877-137X", "http://orcid.org/0000-0001-9877-137X", "orcid.org/0000-0001-9877-137X"};
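// ORCIDPattern captures the identifier as four separate groups; the second loop
// below reassembles them with hyphens to compare against the canonical form.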
for(String falseOrcid : falseOrcids) {
Matcher orcidMatcher = TextUtilities.ORCIDPattern.matcher(falseOrcid);
assertFalse(orcidMatcher.find());
}
for(String trueOrcid : trueOrcids) {
Matcher orcidMatcher = TextUtilities.ORCIDPattern.matcher(trueOrcid);
if (orcidMatcher.find()) {
assertThat(orcidMatcher.group(1) + "-"
+ orcidMatcher.group(2) + "-" + orcidMatcher.group(3) + "-" + orcidMatcher.group(4) , is("0000-0001-9877-137X"));
}
}
}
}
| 19,804 | 47.187348 | 214 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/IOUtilitiesTest.java
|
package org.grobid.core.utilities;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import org.junit.Test;
import static org.junit.Assert.*;
public class IOUtilitiesTest {
@Test
public void testwriteInFileANDreadFile() throws IOException {
File file = File.createTempFile("temp", "test");
IOUtilities.writeInFile(file.getAbsolutePath(), getString());
assertEquals("Not expected value", getString(), IOUtilities.readFile(file.getAbsolutePath()));
}
private static String getString() {
return "1 \" ' A \n \t \r test\n\\n \n M";
}
}
| 639 | 25.666667 | 102 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/counters/GrobidTimerTest.java
|
package org.grobid.core.utilities.counters;
import static org.junit.Assert.*;
import java.util.Map;
import org.junit.Test;
public class GrobidTimerTest {
/**
* The Grobid timer.
*/
GrobidTimer timer;
@Test
public void testGrobidTimerEmptyConstructor() {
timer = new GrobidTimer();
assertNotNull("The Map should be initiated.", timer.getAllSavedTimes());
assertTrue("The Map should be empty", timer.getAllSavedTimes()
.isEmpty());
}
@Test
public void testGrobidTimerBoolConstructorTrue() {
timer = new GrobidTimer(true);
assertNotNull("The Map should be initiated.", timer.getAllSavedTimes());
assertEquals("The Map should have 1 element (START)", 1, timer
.getAllSavedTimes().size());
assertNotNull("The START time should not be null",
timer.getTime(GrobidTimer.START));
}
@Test
public void testGrobidTimerBoolConstructorFalse() {
timer = new GrobidTimer(false);
assertNotNull("The Map should be initiated.", timer.getAllSavedTimes());
assertTrue("The Map should be empty", timer.getAllSavedTimes()
.isEmpty());
assertNull("The START time should be null",
timer.getTime(GrobidTimer.START));
}
//@Test
public void testStartStop() {
timer = new GrobidTimer();
timer.start();
timer.stop("STOP");
Map<String, Long> times = timer.getAllSavedTimes();
long elapsedTime = times.get("STOP") - times.get(GrobidTimer.START);
assertEquals("2 times should be saved in Grobid Timer", 2, times.size());
assertEquals("Not matching times", elapsedTime,
(long) timer.getElapsedTimeFromStart("STOP"));
assertEquals("Not matching times", elapsedTime,
(long) timer.getElapsedTime(GrobidTimer.START, "STOP"));
}
}
| 1,686 | 27.59322 | 75 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/utilities/counters/impl/CntManagerImplTest.java
|
package org.grobid.core.utilities.counters.impl;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.engines.counters.CitationParserCounters;
import org.grobid.core.engines.counters.Countable;
import org.grobid.core.engines.counters.FigureCounters;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.Map;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
public class CntManagerImplTest {
CntManagerImpl target;
@Before
public void setUp() throws Exception {
GrobidProperties.getInstance();
target = new CntManagerImpl();
}
@Test
public void testCountSingleGroup() throws Exception {
target.i(TaggingLabels.EQUATION);
assertThat(target.getCounter(TaggingLabels.EQUATION).cnt(), is(1l));
}
@Test
public void testGetAllCounters() throws Exception {
target.i(TaggingLabels.ITEM);
target.i(TaggingLabels.OTHER);
final Map<String, Map<String, Long>> allCounters = target.getAllCounters();
assertThat(allCounters.size(), is(1));
final Map<String, Long> stringLongMap = allCounters.get("org.grobid.core.engines.label.TaggingLabelImpl");
assertThat(stringLongMap.size(), is(2));
assertThat(stringLongMap.get(TaggingLabels.OTHER.getName()), is(1l));
assertThat(stringLongMap.get(TaggingLabels.ITEM.getName()), is(1l));
}
@Test
public void testGetAllCounters2() throws Exception {
target.i(TaggingLabels.ITEM);
target.i(FigureCounters.SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS);
final Map<String, Map<String, Long>> allCounters = target.getAllCounters();
assertThat(allCounters.size(), is(2));
final Map<String, Long> taggingLabelMap = allCounters.get("org.grobid.core.engines.label.TaggingLabelImpl");
assertThat(taggingLabelMap.size(), is(1));
final Map<String, Long> countersMap = allCounters.get("org.grobid.core.engines.counters.FigureCounters");
assertThat(countersMap.size(), is(1));
}
@Test
public void testGetAllCounters_checkGroupName() throws Exception {
target.i(FigureCounters.TOO_MANY_FIGURES_PER_PAGE);
target.i(FigureCounters.SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS);
target.i(CitationParserCounters.EMPTY_REFERENCES_BLOCKS);
target.i(CitationParserCounters.SEGMENTED_REFERENCES);
target.i(CitationParserCounters.SEGMENTED_REFERENCES);
target.i(CitationParserCounters.SEGMENTED_REFERENCES);
target.i(CitationParserCounters.SEGMENTED_REFERENCES);
final Map<String, Map<String, Long>> allCounters = target.getAllCounters();
assertThat(allCounters.size(), is(2));
final Map<String, Long> taggingLabelMap = allCounters.get("org.grobid.core.engines.label.TaggingLabelImpl");
assertNull(taggingLabelMap);
final Map<String, Long> counterFigures = allCounters.get("org.grobid.core.engines.counters.FigureCounters");
assertThat(counterFigures.size(), is(2));
final Map<String, Long> counterCitations = allCounters.get("org.grobid.core.engines.counters.CitationParserCounters");
assertThat(counterCitations.size(), is(2));
assertThat(counterCitations.get("SEGMENTED_REFERENCES"), is(4l));
assertThat(counterCitations.get("EMPTY_REFERENCES_BLOCKS"), is(1l));
}
@Test
public void testCnt_withClass() throws Exception {
assertThat(target.cnt(TaggingLabels.ITEM), is(0l));
target.i(TaggingLabels.ITEM);
assertThat(target.cnt(TaggingLabels.ITEM), is(1l));
assertThat(target.cnt(FigureCounters.SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS), is(0l));
target.i(FigureCounters.SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS);
assertThat(target.cnt(FigureCounters.SKIPPED_DUE_TO_MISMATCH_OF_CAPTIONS_AND_VECTOR_AND_BITMAP_GRAPHICS), is(1l));
}
@Test
public void testCnt_withExplicitValues() throws Exception {
assertThat(target.cnt("figures", "element"), is(0l));
target.i("figures", "element");
assertThat(target.cnt("figures", "element"), is(1l));
assertThat(target.cnt("figures", "item"), is(0l));
target.i("figures", "item");
assertThat(target.cnt("figures", "item"), is(1l));
assertThat(target.cnt("tables", "item"), is(0l));
target.i("tables", "item");
assertThat(target.cnt("tables", "item"), is(1l));
target.i("figures", "item");
assertThat(target.cnt("figures", "item"), is(2l));
}
@Test
public void getCounter_shouldWork() throws Exception {
target.i("figures", "element", 2);
assertThat(target.getCounter("figures", "element").cnt(), is(2l));
target.i(TaggingLabels.CITATION_MARKER, 20);
assertThat(target.getCounter(TaggingLabels.CITATION_MARKER).cnt(), is(20l));
}
@Test
public void getCounters_shouldWork() throws Exception {
target.i("figures", "element", 2);
target.i("table", "john", 4);
target.i("table", "miao", 2);
assertThat(target.getCounters("figures").size(), is(1));
assertThat(target.getCounters("table").size(), is(2));
assertThat(target.getCounter("table", "john").cnt(), is(4l));
target.i(TaggingLabels.CITATION_MARKER, 20);
assertThat(target.getCounter(TaggingLabels.CITATION_MARKER).cnt(), is(20l));
final Class<? extends Countable> countableClass = (Class<? extends Countable>) Class.forName(TaggingLabels.CITATION_MARKER.getClass().getName());
assertThat(target.getCounters(countableClass).size(), is(1));
final String[] tables = target.getCounters("table").keySet().toArray(new String[0]);
Arrays.sort(tables);
assertThat(tables, is(new String[]{"john", "miao"}));
}
@Test
public void getCounterEnclosingClass_NoEnclosingClass_shouldWork() throws Exception {
assertThat(target.getCounterEnclosingName(TaggingLabels.CITATION_MARKER), is("org.grobid.core.engines.label.TaggingLabelImpl"));
}
@Test
public void getCounterEnclosingClass_EnclosingClass_shouldWork() throws Exception {
assertThat(target.getCounterEnclosingName(FigureCounters.TOO_MANY_FIGURES_PER_PAGE), is("org.grobid.core.engines.counters.FigureCounters"));
}
}
| 6,584 | 41.211538 | 153 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/document/TEIFormatterTest.java
|
package org.grobid.core.document;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.data.Note;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.LayoutTokensUtil;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class TEIFormatterTest {
@BeforeClass
public static void setInitialContext() throws Exception {
GrobidProperties.getInstance();
}
@Test
public void testMakeFootNote() throws Exception {
String text = "1 This is a footnote";
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
List<Note> footnotes = new TEIFormatter(null, null).makeNotes(tokens, text, Note.NoteType.FOOT, 0);
assertThat(footnotes.size(), is(1));
Note footnote = footnotes.get(0);
assertThat(footnote.getText(), is("This is a footnote"));
assertThat(LayoutTokensUtil.toText(footnote.getTokens()), is("This is a footnote"));
assertThat(footnote.getLabel(), is("1"));
}
}
| 1,211 | 28.560976 | 107 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/document/TEIFormatterIntegrationTest.java
|
package org.grobid.core.document;
import org.grobid.core.data.Note;
import org.grobid.core.engines.EngineParsers;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
public class TEIFormatterIntegrationTest {
@BeforeClass
public static void setInitialContext() throws Exception {
GrobidProperties.getInstance();
LibraryLoader.load();
}
@Test
public void testGetTeiNotes() throws Exception {
EngineParsers engine = new EngineParsers();
File input = new File(this.getClass().getResource("/footnotes/test.pdf").toURI());
Document doc = engine.getSegmentationParser().processing(DocumentSource.fromPdf(input), GrobidAnalysisConfig.defaultInstance());
List<Note> teiNotes = new TEIFormatter(null, null).getTeiNotes(doc);
/*assertThat(teiNotes, hasSize(1));
assertThat(teiNotes.get(0).getText(), is(" http://wikipedia.org "));
assertThat(teiNotes.get(0).getLabel(), is("1"));
assertThat(teiNotes.get(0).getPageNumber(), is(1));*/
}
}
| 1,372 | 33.325 | 136 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/document/DocumentTest.java
|
package org.grobid.core.document;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.xml.sax.SAXParseException;
import org.xml.sax.helpers.DefaultHandler;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ByteArrayInputStream;
import javax.xml.parsers.SAXParserFactory;
/**
* Tests that XML parsing does not fail on invalid UTF-8 byte sequences.
* See https://github.com/kermitt2/grobid/pull/475
*/
public class DocumentTest {
@BeforeClass
public static void setInitialContext() throws Exception {
GrobidProperties.getInstance();
}
@Before
public void setUp() throws Exception {
GrobidProperties.getInstance();
}
private static byte[] getValidXmlBytes() {
return "<xml>test</xml>".getBytes();
}
private static byte[] getXmlBytesWithInvalidUtf8Sequence() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write("<xml>".getBytes());
out.write(0xe0);
out.write(0xd8);
out.write(0x35);
out.write("</xml>".getBytes());
return out.toByteArray();
}
@Test
public void shouldNotFailToParseValidXml() throws Exception {
Document.parseInputStream(
new ByteArrayInputStream(getValidXmlBytes()),
SAXParserFactory.newInstance(),
new DefaultHandler()
);
}
@Test
public void shouldNotFailToParseInvalidUtf8ByteSequenceXmlByDefault() throws Exception {
Document.parseInputStream(
new ByteArrayInputStream(getXmlBytesWithInvalidUtf8Sequence()),
SAXParserFactory.newInstance(),
new DefaultHandler()
);
}
}
| 1,735 | 25.707692 | 92 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/visualization/TestCitationsVisualizer.java
|
package org.grobid.core.visualization;
import org.apache.commons.io.FileUtils;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.grobid.core.document.Document;
import org.grobid.core.engines.Engine;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.factory.GrobidFactory;
import org.junit.AfterClass;
import org.junit.Test;
import java.io.File;
import java.io.InputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import com.fasterxml.jackson.core.Versioned;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
public class TestCitationsVisualizer {
static final ObjectMapper mapper = new ObjectMapper();
@AfterClass
public static void tearDown(){
GrobidFactory.reset();
}
@Test
public void testJSONAnnotationStructure() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
File inputTmpFile = getInputDocument("/test/test_Grobid_1_05452615.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
String refURL = "http://example.com/xyz";
List<String> refURLs = Arrays.asList(refURL);
String json = CitationsVisualizer.getJsonAnnotations(tei, refURLs);
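// Rough shape of the JSON checked below (field names taken from the assertions):
// { "pages": [ { "page_height": ..., "page_width": ... }, ... ],
// "refBibs": [ { "id", "url", "pos": [ { "p", "x", "y", "w", "h" }, ... ] }, ... ],
// "refMarkers": [ { "id", "p", "x", "y", "w", "h" }, ... ] }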
JsonNode root = mapper.readTree(json);
assertTrue(root.has("pages"));
assertTrue(root.has("refBibs"));
assertTrue(root.has("refMarkers"));
JsonNode pages = root.get("pages");
assertTrue(pages.isArray());
assertEquals(tei.getPages().size(), pages.size());
assertTrue(pages.size() > 0);
JsonNode firstPage = pages.get(0);
assertTrue(firstPage.has("page_height"));
assertTrue(firstPage.has("page_width"));
JsonNode bibs = root.get("refBibs");
JsonNode firstBib = bibs.get(0);
assertTrue(firstBib.has("id"));
assertTrue(firstBib.has("url"));
assertEquals(refURL, firstBib.get("url").asText());
assertTrue(firstBib.has("pos"));
JsonNode fbPos = firstBib.get("pos");
assertTrue(fbPos.isArray());
assertTrue(fbPos.size() > 0);
for (JsonNode bbox : fbPos) {
assertTrue(bbox.isObject());
assertTrue(bbox.has("p"));
assertTrue(bbox.has("x"));
assertTrue(bbox.has("y"));
assertTrue(bbox.has("w"));
assertTrue(bbox.has("h"));
}
// XXX: this isn't working, not sure if it needs a different
// test document or some extra processing step
/*
JsonNode markers = root.get("refMarkers");
JsonNode firstMarker = markers.get(0);
assertTrue(firstMarker.has("id"));
assertTrue(firstMarker.has("p"));
assertTrue(firstMarker.has("x"));
assertTrue(firstMarker.has("y"));
assertTrue(firstMarker.has("w"));
assertTrue(firstMarker.has("h"));
*/
}
@Test
public void testJSONAnnotationEscaping() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
File inputTmpFile = getInputDocument("/test/test_Grobid_1_05452615.pdf");
Document tei = engine.fullTextToTEIDoc(inputTmpFile, GrobidAnalysisConfig.defaultInstance());
// check that this embedded backslash is escaped properly
String refURL = "http://example.com/xyz?a=ab\\c123";
List<String> refURLs = Arrays.asList(refURL);
String json = CitationsVisualizer.getJsonAnnotations(tei, refURLs);
JsonNode root = mapper.readTree(json);
}
// XXX: copied from TestFullTextParser
private File getInputDocument(String inputPath) throws IOException {
InputStream is = this.getClass().getResourceAsStream(inputPath);
File inputTmpFile = File.createTempFile("tmpFileTest", "testFullTextParser");
inputTmpFile.deleteOnExit();
FileUtils.copyToFile(is, inputTmpFile);
return inputTmpFile;
}
}
| 4,080 | 36.1 | 101 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/EngineTest.java
|
package org.grobid.core.engines;
import com.google.common.collect.Lists;
import fr.limsi.wapiti.SWIGTYPE_p_mdl_t;
import fr.limsi.wapiti.Wapiti;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.grobid.core.data.BibDataSet;
import org.grobid.core.data.BiblioItem;
import org.grobid.core.data.Date;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.features.FeaturesVectorDate;
import org.grobid.core.jni.WapitiModel;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.TextUtilities;
import org.grobid.core.visualization.CitationsVisualizer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import java.awt.*;
import java.io.File;
import java.io.FileFilter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.StringTokenizer;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
@Ignore
public class EngineTest {
private static Engine engine;
@BeforeClass
public static void init() {
LibraryLoader.load();
}
@Test
public void testGetNewModel() {
// assertEquals("Wrong value of getModel", "-m "+GrobidModels.CITATION.getModelPath()+" ", GrobidModels.CITATION.getModelPath());
}
@BeforeClass
public static void setUpClass() throws Exception {
// MockContext.setInitialContext();
engine = GrobidFactory.getInstance().createEngine();
}
@AfterClass
public static void destroyInitialContext() throws Exception {
// MockContext.destroyInitialContext();
}
@Test
public void testWapiti() {
String s = "References references R Re Ref Refe s es ces nces LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"1 1 1 1 1 1 1 1 1 1 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"Bar bar B Ba Bar Bar r ar Bar Bar LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 1 <reference-block>\n" +
"Haim haim H Ha Hai Haim m im aim Haim LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Dagan dagan D Da Dag Daga n an gan agan LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Dolan dolan D Do Dol Dola n an lan olan LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Ferro ferro F Fe Fer Ferr o ro rro erro LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"L l L L L L L L L L LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Giampiccolo giampiccolo G Gi Gia Giam o lo olo colo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Magnini magnini M Ma Mag Magn i ni ini nini LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Szpektor szpektor S Sz Szp Szpe r or tor ktor LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"2006 2006 2 20 200 2006 6 06 006 2006 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Second second S Se Sec Seco d nd ond cond LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEEND ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Recognising recognising R Re Rec Reco g ng ing sing LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Challenge challenge C Ch Cha Chal e ge nge enge LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Second second S Se Sec Seco d nd ond cond LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Challenges challenges C Ch Cha Chal s es ges nges LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"on on o on on on n on on on LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Recognising recognising R Re Rec Reco g ng ing sing LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"Venice venice V Ve Ven Veni e ce ice nice LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"Italy italy I It Ita Ital y ly aly taly LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"2 2 2 2 2 2 2 2 2 2 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"Bunescu bunescu B Bu Bun Bune u cu scu escu LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Mooney mooney M Mo Moo Moon y ey ney oney LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"2006 2006 2 20 200 2006 6 06 006 2006 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"Subsequence subsequence S Su Sub Subs e ce nce ence LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Kernels kernels K Ke Ker Kern s ls els nels LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Relation relation R Re Rel Rela n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Extraction extraction E Ex Ext Extr n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Advances advances A Ad Adv Adva s es ces nces LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"in in i in in in n in in in LINEIN NOCAPS NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Neural neural N Ne Neu Neur l al ral ural LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Information information I In Inf Info n on ion tion LINEEND INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Processing processing P Pr Pro Proc g ng ing sing LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Systems systems S Sy Sys Syst s ms ems tems LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"18 18 1 18 18 18 8 18 18 18 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"MIT mit M MI MIT MIT T IT MIT MIT LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Press press P Pr Pre Pres s ss ess ress LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"3 3 3 3 3 3 3 3 3 3 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"Dagan dagan D Da Dag Daga n an gan agan LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"Glickman glickman G Gl Gli Glic n an man kman LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"O o O O O O O O O O LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Magnini magnini M Ma Mag Magn i ni ini nini LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"2006 2006 2 20 200 2006 6 06 006 2006 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Recognising recognising R Re Rec Reco g ng ing sing LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Challenge challenge C Ch Cha Chal e ge nge enge LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Quiñonero quiñonero Q Qu Qui Quiñ o ro ero nero LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"- - - - - - - - - - LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 3 <reference-block>\n" +
"Candela candela C Ca Can Cand a la ela dela LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"et et e et et et t et et et LINEIN NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"al al a al al al l al al al LINEIN NOCAPS NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"editors editors e ed edi edit s rs ors tors LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"MLCW mlcw M ML MLC MLCW W CW LCW MLCW LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"2005 2005 2 20 200 2005 5 05 005 2005 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"LNAI lnai L LN LNA LNAI I AI NAI LNAI LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Volume volume V Vo Vol Volu e me ume lume LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"3944 3944 3 39 394 3944 4 44 944 3944 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 4 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"177 177 1 17 177 177 7 77 177 177 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 4 <reference-block>\n" +
"190 190 1 19 190 190 0 90 190 190 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Springer springer S Sp Spr Spri r er ger nger LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 4 <reference-block>\n" +
"Verlag verlag V Ve Ver Verl g ag lag rlag LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"4 4 4 4 4 4 4 4 4 4 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Jenny jenny J Je Jen Jenn y ny nny enny LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Rose rose R Ro Ros Rose e se ose Rose LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Finkel finkel F Fi Fin Fink l el kel nkel LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 4 <reference-block>\n" +
"Trond trond T Tr Tro Tron d nd ond rond LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Grenager grenager G Gr Gre Gren r er ger ager LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 4 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Christopher christopher C Ch Chr Chri r er her pher LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Manning manning M Ma Man Mann g ng ing ning LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"2005 2005 2 20 200 2005 5 05 005 2005 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Incorporating incorporating I In Inc Inco g ng ing ting LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Non non N No Non Non n on Non Non LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 4 <reference-block>\n" +
"local local l lo loc loca l al cal ocal LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Information information I In Inf Info n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"into into i in int into o to nto into LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Information information I In Inf Info n on ion tion LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Extraction extraction E Ex Ext Extr n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Systems systems S Sy Sys Syst s ms ems tems LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"by by b by by by y by by by LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Gibbs gibbs G Gi Gib Gibb s bs bbs ibbs LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Sampling sampling S Sa Sam Samp g ng ing ling LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"43nd 43nd 4 43 43n 43nd d nd 3nd 43nd LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Annual annual A An Ann Annu l al ual nual LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Meeting meeting M Me Mee Meet g ng ing ting LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Association association A As Ass Asso n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"for for f fo for for r or for for LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Computational computational C Co Com Comp l al nal onal LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Linguistics linguistics L Li Lin Ling s cs ics tics LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"( ( ( ( ( ( ( ( ( ( LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 OPENBRACKET 5 <reference-block>\n" +
"ACL acl A AC ACL ACL L CL ACL ACL LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"2005 2005 2 20 200 2005 5 05 005 2005 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 5 <reference-block>\n" +
") ) ) ) ) ) ) ) ) ) LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 ENDBRACKET 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"pp pp p pp pp pp p pp pp pp LINEIN NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"363 363 3 36 363 363 3 63 363 363 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 5 <reference-block>\n" +
"370 370 3 37 370 370 0 70 370 370 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"5 5 5 5 5 5 5 5 5 5 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"Giampiccolo giampiccolo G Gi Gia Giam o lo olo colo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"Magnini magnini M Ma Mag Magn i ni ini nini LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"Dagan dagan D Da Dag Daga n an gan agan LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Dolan dolan D Do Dol Dola n an lan olan LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Third third T Th Thi Thir d rd ird hird LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEEND INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Challenge challenge C Ch Cha Chal e ge nge enge LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Paraphrasing paraphrasing P Pa Par Para g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"1 1 1 1 1 1 1 1 1 1 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"– – – – – – – – – – LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"9 9 9 9 9 9 9 9 9 9 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"Prague prague P Pr Pra Prag e ue gue ague LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"June june J Ju Jun June e ne une June LINEIN INITCAP NODIGIT 0 1 0 0 0 0 1 0 0 NOPUNCT 6 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"6 6 6 6 6 6 6 6 6 6 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"Gildea gildea G Gi Gil Gild a ea dea ldea LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Palmer palmer P Pa Pal Palm r er mer lmer LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"M m M M M M M M M M LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Necessity necessity N Ne Nec Nece y ty ity sity LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Parsing parsing P Pa Par Pars g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Predicate predicate P Pr Pre Pred e te ate cate LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Argument argument A Ar Arg Argu t nt ent ment LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Recognition recognition R Re Rec Reco n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"the the t th the the e he the the LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"40th 40th 4 40 40t 40th h th 0th 40th LINESTART NOCAPS CONTAINSDIGITS 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Meeting meeting M Me Mee Meet g ng ing ting LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Association association A As Ass Asso n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Computational computational C Co Com Comp l al nal onal LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Linguistics linguistics L Li Lin Ling s cs ics tics LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"( ( ( ( ( ( ( ( ( ( LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 OPENBRACKET 7 <reference-block>\n" +
"ACL acl A AC ACL ACL L CL ACL ACL LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 7 <reference-block>\n" +
") ) ) ) ) ) ) ) ) ) LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 ENDBRACKET 7 <reference-block>\n" +
": : : : : : : : : : LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 PUNCT 7 <reference-block>\n" +
"239 239 2 23 239 239 9 39 239 239 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 7 <reference-block>\n" +
"246 246 2 24 246 246 6 46 246 246 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 7 <reference-block>\n" +
"Philadelphia philadelphia P Ph Phi Phil a ia hia phia LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 7 <reference-block>\n" +
"PA pa P PA PA PA A PA PA PA LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"7 7 7 7 7 7 7 7 7 7 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"Lin lin L Li Lin Lin n in Lin Lin LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 7 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"1998 1998 1 19 199 1998 8 98 998 1998 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"Dependency dependency D De Dep Depe y cy ncy ency LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 7 <reference-block>\n" +
"based based b ba bas base d ed sed ased LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Evaluation evaluation E Ev Eva Eval n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"MINIPAR minipar M MI MIN MINI R AR PAR IPAR LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Evaluation evaluation E Ev Eva Eval n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Parsing parsing P Pa Par Pars g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Systems systems S Sy Sys Syst s ms ems tems LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"8 8 8 8 8 8 8 8 8 8 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Piskorski piskorski P Pi Pis Pisk i ki ski rski LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"J j J J J J J J J J LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"A a A A A A A A A A LINEIN ALLCAP NODIGIT 1 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Shallow shallow S Sh Sha Shal w ow low llow LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Text text T Te Tex Text t xt ext Text LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Processing processing P Pr Pro Proc g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Core core C Co Cor Core e re ore Core LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Engine engine E En Eng Engi e ne ine gine LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"Journal journal J Jo Jou Jour l al nal rnal LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Computational computational C Co Com Comp l al nal onal LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Intelligence intelligence I In Int Inte e ce nce ence LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"Volume volume V Vo Vol Volu e me ume lume LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"18 18 1 18 18 18 8 18 18 18 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"Number number N Nu Num Numb r er ber mber LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"3 3 3 3 3 3 3 3 3 3 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"451 451 4 45 451 451 1 51 451 451 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 8 <reference-block>\n" +
"476 476 4 47 476 476 6 76 476 476 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"9 9 9 9 9 9 9 9 9 9 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"Anselmo anselmo A An Ans Anse o mo lmo elmo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Peñas peñas P Pe Peñ Peña s as ñas eñas LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"Álvaro álvaro Á Ál Álv Álva o ro aro varo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Rodrigo rodrigo R Ro Rod Rodr o go igo rigo LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"Felisa felisa F Fe Fel Feli a sa isa lisa LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Verdejo verdejo V Ve Ver Verd o jo ejo dejo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"Overview overview O Ov Ove Over w ew iew view LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Answer answer A An Ans Answ r er wer swer LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Validation validation V Va Val Vali n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Exercise exercise E Ex Exe Exer e se ise cise LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"CLEF clef C CL CLE CLEF F EF LEF CLEF LINEEND ALLCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Working working W Wo Wor Work g ng ing king LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Notes notes N No Not Note s es tes otes LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"10 10 1 10 10 10 0 10 10 10 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"Wang wang W Wa Wan Wang g ng ang Wang LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"2007a 2007a 2 20 200 2007 a 7a 07a 007a LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Using using U Us Usi Usin g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"a a a a a a a a a a LINEIN NOCAPS NODIGIT 1 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Subsequence subsequence S Su Sub Subs e ce nce ence LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Kernel kernel K Ke Ker Kern l el nel rnel LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Method method M Me Met Meth d od hod thod LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Proc proc P Pr Pro Proc c oc roc Proc LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"of of o of of of f of of of LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"AAAI aaai A AA AAA AAAI I AI AAI AAAI LINESTART ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"11 11 1 11 11 11 1 11 11 11 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"Wang wang W Wa Wan Wang g ng ang Wang LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 10 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 10 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"2007b 2007b 2 20 200 2007 b 7b 07b 007b LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 1 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Using using U Us Usi Usin g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Sentence sentence S Se Sen Sent e ce nce ence LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Similarity similarity S Si Sim Simi y ty ity rity LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"based based b ba bas base d ed sed ased LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Dependency dependency D De Dep Depe y cy ncy ency LINEEND INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Tree tree T Tr Tre Tree e ee ree Tree LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Skeletons skeletons S Sk Ske Skel s ns ons tons LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Paraphrasing paraphrasing P Pa Par Para g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 10 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"36 36 3 36 36 36 6 36 36 36 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"– – – – – – – – – – LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"41 41 4 41 41 41 1 41 41 41 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"Prague prague P Pr Pra Prag e ue gue ague LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"June june J Ju Jun June e ne une June LINEEND INITCAP NODIGIT 0 1 0 0 0 0 1 0 0 NOPUNCT 11 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"12 12 1 12 12 12 2 12 12 12 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"Wang wang W Wa Wan Wang g ng ang Wang LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"2007c 2007c 2 20 200 2007 c 7c 07c 007c LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"DFKI dfki D DF DFK DFKI I KI FKI DFKI LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"– – – – – – – – – – LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"LT lt L LT LT LT T LT LT LT LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"at at a at at at t at at at LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"AVE ave A AV AVE AVE E VE AVE AVE LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
": : : : : : : : : : LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 PUNCT 11 <reference-block>\n" +
"Using using U Us Usi Usin g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Answer answer A An Ans Answ r er wer swer LINEEND INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Validation validation V Va Val Vali n on ion tion LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"online online o on onl onli e ne ine line LINEIN NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"proceedings proceedings p pr pro proc s gs ngs ings LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"CLEF clef C CL CLE CLEF F EF LEF CLEF LINEIN ALLCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Working working W Wo Wor Work g ng ing king LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Notes notes N No Not Note s es tes otes LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"ISBN isbn I IS ISB ISBN N BN SBN ISBN LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
": : : : : : : : : : LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 PUNCT 12 <reference-block>\n" +
"2 2 2 2 2 2 2 2 2 2 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 12 <reference-block>\n" +
"912335 912335 9 91 912 9123 5 35 335 2335 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 12 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 12 <reference-block>\n" +
"31 31 3 31 31 31 1 31 31 31 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 12 <reference-block>\n" +
"0 0 0 0 0 0 0 0 0 0 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 12 <reference-block>\n" +
"September september S Se Sep Sept r er ber mber LINEIN INITCAP NODIGIT 0 1 0 0 0 0 1 0 0 NOPUNCT 12 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 12 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 12 <reference-block>\n" +
"Budapest budapest B Bu Bud Buda t st est pest LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
", , , , , , , , , , LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 12 <reference-block>\n" +
"Hungary hungary H Hu Hun Hung y ry ary gary LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 12 <reference-block>\n" +
"\n";
s = s + s + s + s;
// Engine engine = GrobidFactory.getInstance().getEngine();
// WapitiTagger t = new WapitiTagger(GrobidModels.REFERENCE_SEGMENTER);
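// load the reference-segmenter model once, then label the same input repeatedly
// to stress the native Wapiti binding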
SWIGTYPE_p_mdl_t mod = Wapiti.loadModel("label -m /Work/workspace/grobid-rg/grobid-home/models/reference-segmenter/model.wapiti");
for (int i = 0; i < 1000000; i++) {
if (i % 100 == 0) {
System.out.println("Processed: " + i);
}
Wapiti.labelFromModel(mod, s);
}
}
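// Tokenizes a raw date string, tags every non-space token with <date> and
// returns the feature-vector block expected by the date model.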
private static String getDateStr(String input) throws Exception {
if (input == null)
return null;
ArrayList<String> dateBlocks = new ArrayList<String>();
StringTokenizer st = new StringTokenizer(input, "([" + TextUtilities.punctuations, true);
if (st.countTokens() == 0)
return null;
while (st.hasMoreTokens()) {
String tok = st.nextToken();
if (!tok.equals(" ")) {
dateBlocks.add(tok + " <date>");
}
}
return FeaturesVectorDate.addFeaturesDate(dateBlocks);
}
/*@Test
public void testFromText() {
// String text = "David Green et al 2015 Nanoscale DOI:10.1039/C6NR05046H recently demonstrated that gecko microspinules (hairs) and " +
// "their equivalent replicas, bearing nanoscale tips, can kill or impair surface associating oral pathogenic " +
// "bacteria with high efficiency even after 7 days of repeated attacks. " +
// "Scanning Electron Microscopy suggests that there is more than one mechanism contributing to " +
// "cell death which appears to be related to the scaling of the bacteria type with the hair arrays " +
// "and accessibility to the underlying nano-topography of the hierarchical surfaces.";
//
final Engine engine = GrobidFactory.getInstance().getEngine();
// GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder().build();
//
//// Document doc = Document.createFromText(text);
//
// List<LabeledReferenceResult> segRes = engine.getParsers().getReferenceSegmenterParser().extract(text);
String text = "Physics and test";
engine.getParsers().getCitationParser().processingReferenceSection(text, engine.getParsers().getReferenceSegmenterParser());
}*/
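// Stress-tests concurrent labeling: ten threads share a single WapitiModel
// instance and label the same input 100 times each to check thread-safety.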
private void testWap(final String forTest, File modelFile) throws InterruptedException {
final WapitiModel wm = new WapitiModel(modelFile);
Thread[] threads = new Thread[10];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread() {
@Override
public void run() {
for (int i = 0; i < 100; i++) {
String res = wm.label(forTest);
System.out.println("RES: " + res.trim());
}
}
};
threads[i].start();
}
for (int i = 0; i < threads.length; i++) {
threads[i].join();
}
wm.close();
}
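// Sanity check: the date parser should split "12 August, 1985" into day,
// month and year strings.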
@Test
public void testDateParser() throws Exception {
String d = "12 August, 1985";
List<Date> processedDates = new DateParser().processing(d);
assertThat(processedDates.size(), is(1));
assertThat(processedDates.get(0).getDayString(), is("12"));
assertThat(processedDates.get(0).getMonthString(), is("August"));
assertThat(processedDates.get(0).getYearString(), is("1985"));
}
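// End-to-end check: converts a local PDF into a TEI document and prints the
// resulting TEI serialization.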
@Test
public void testPDF() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
// BiblioItem resHeader = new BiblioItem();
File input = new File("/Users/zholudev/Downloads/AS-454757820178434@1485434121902_content_1.pdf");
// engine.getParsers().getHeaderParser().processing(input, resHeader, GrobidAnalysisConfig.defaultInstance());
// System.out.println(resHeader.getAbstract());
//
Document d =
engine.fullTextToTEIDoc(input, GrobidAnalysisConfig.defaultInstance());
d.getBlocks();
System.out.println(d.getTei());
// System.out.println(d.getResHeader());
// System.out.println(engine.fullTextToTEI(new File("//Work/temp/1.pdf"), GrobidAnalysisConfig.defaultInstance()));
}
/*@Test
public void testEmailPDF() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
BiblioItem resHeader = new BiblioItem();
engine.getParsers().getHeaderParser().processing(new File("/Work/temp/1.pdf"), resHeader, GrobidAnalysisConfig.defaultInstance());
System.out.println(resHeader);
// System.out.println(engine.fullTextToTEI("/tmp/2.pdf", false, false));
}*/
/*@Test
public void stress() throws Exception {
for (int i = 0; i < 1000000; i++) {
testReferenceSegmenter();
}
}*/
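// Segments a local PDF, extracts the labeled reference section and prints
// title and authors for each parsed citation.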
@Test
public void extractCitationsFromPDF() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
// String input = "/Work/workspace/data/pdf2xmlreflow/1.pdf";
// String input = "/Users/zholudev/Downloads/ttt.pdf";
// String input = "/Users/zholudev/Downloads/stem.pdf";
// String input = "/Work/workspace/data/elsevier_pdfs/8.pdf";
// String input = "/tmp/1.pdf";
// for (int i = 0; i < 10000; i++) {
// String input = "/Work/workspace/pdf-analysis/pdf-analysis-service/scripts/grobid/pdfs/grobid-input-1072141691733992581.pdf";
// String input = "/Work/workspace/pdf-analysis/pdf-analysis-service/scripts/grobid/pdfs/grobid-input-2086711400313078388.pdf";
// String input = "/Work/workspace/pdf-analysis/pdf-analysis-service/scripts/grobid/AS_190528951947270_1422437050969.pdf";
String input = "/Work/temp/1.pdf";
DocumentSource documentSource = DocumentSource.fromPdf(new File(input));
Document doc = engine.getParsers().getSegmentationParser().processing(documentSource, GrobidAnalysisConfig.defaultInstance());
//Document doc = engine.getParsers().getSegmentationParser().processing(new File(input), GrobidAnalysisConfig.defaultInstance());
System.out.println("Extracting citations");
List<BibDataSet> cits = engine.getParsers().getCitationParser().processingReferenceSection(doc, engine.getParsers().getReferenceSegmenterParser(), 0);
for (BibDataSet c : cits) {
System.out.println(c.getResBib().getTitle() + "--->" + c.getResBib().getAuthors());
}
System.out.println("CITATIONS: " + cits.size());
// }
}
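// Runs only the segmentation model on a local PDF and prints the resulting
// Document (high-level zones such as header, body and references).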
@Test
public void testSegmentation() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
//Document result = engine.getParsers().getSegmentationParser().processing(new File("/Work/workspace/data/pdf2xmlreflow/1.pdf"),
// GrobidAnalysisConfig.defaultInstance());
DocumentSource documentSource = DocumentSource.fromPdf(new File("/Work/workspace/data/pdf2xmlreflow/1.pdf"));
Document result = engine.getParsers().getSegmentationParser().processing(documentSource, GrobidAnalysisConfig.defaultInstance());
System.out.println(result);
}
/* @Test
public void testAuthorExtraction() throws Exception {
Engine engine = GrobidFactory.getInstance().getEngine();
String a = "Amelia Kenner Brininger, MPH, CPH , Emergency Medical Services, County of San Diego, San Diego, CA\n" +
"Barbara M. Stepanski, MPH , Emergency Medical Services, County of San Diego Health and Human Services Agency, San Diego, CA\n" +
"Diane Royer, RN, BSN , County of San Diego, Emergency Medical Services, San Diego, CA\n" +
"Bruce Haynes, MD , Emergency Medical Services, County of San Diego Health and Human Services Agency, San Diego, CA\n" +
"Leslie Ray, MPH, MPPA, MA , Emergency Medical Services, County of San Diego Health and Human Services Agency, San Diego, CA\n" +
"Sanaa Abedin, MPH , Community Health Statistics Unit, Health Care Policy Administration, County of San Diego Health and Human Services Agency, San Diego, CA\n" +
"Alicia Sampson, MPH, CPH , Health & Human Services Agency, Public Health Services, County of San Diego, San Diego, CA\n" +
"Joshua Smith, PhD, MPH , Emergency Medical Services, County of San Diego Health and Human Services Agency, San Diego, CA\n" +
"Isabel Corcos, PhD, MPH , County of San Diego, Emergency Medical Services, County of San Diego, San Diego, CA\n" +
"Ryan Smith, MPH , Emergency Medical Services, County of San Diego, San Diego, CA";
// a = "M. Yoshida, T. Yomogida, T. Mineo (Keio University), K. Nitta, K. Kato (JASRI), T. Masuda (National Institute for Materials Science), H. Nitani, H. Abe (KEK), S. Takakusagi (Hokkaido University), T. Uruga (JASRI/SPring-8), K. Asakura (Hokkaido University), K. Uosaki (National Institute for Materials Science), and H. Kondoh (Keio University)";
List<LabeledReferenceResult> references = engine.getParsers().getReferenceSegmenterParser().extract(a);
BiblioItem res = engine.getParsers().getCitationParser().processing("Amelia Kenner Brininger, MPH, CPH , Emergency Medical Services, County of San Diego, San Diego, CA", false);
System.out.println(res);
// List<BibDataSet> results = Lists.newArrayList();
// for (LabeledReferenceResult ref : references) {
// BiblioItem bib = engine.getParsers().getCitationParser().processing(ref.getReferenceText(), false);
// BibDataSet bds = new BibDataSet();
// bds.setResBib(bib);
// bds.setRefSymbol(ref.getLabel());
// bds.setRawBib(ref.getReferenceText());
// results.add(bds);
// }
List<Person> authors = engine.getParsers().getAuthorParser().processing(Arrays.asList(a.split("\n")), false);
for (Person p : authors) {
System.out.println(p);
}
authors = engine.getParsers().getAuthorParser().processing(Arrays.asList(a.split("\n")), false);
System.out.println("+++++++++++++++++++++");
for (Person p : authors) {
System.out.println(p);
}
// for (Object r : results) {
// System.out.println(r);
// }
// Pair<String, Document> result = engine.getParsers().getHeaderParser().pro
// BiblioItem res =
// engine.getParsers().getCitationParser().processingReferenceSection(a, false);
// System.out.println("--------------------");
//// for (Person p : res) {
//// System.out.println(p);
//// }
// System.out.println(res);
}*/
/*@Test
public void testReferenceSegmenter() throws Exception {
String block = "Adelman, J. S., Marquis, S. J., & Sabatos-DeVito, M. G. (2010). Letters in words are read simultaneously, not in left-to-right sequence. Psychological Science, 21, 1799–1801. Arditi, A., Knoblauch, K., & Grunwald, I. (1990). Reading with fixed and variable character pitch. Journal of the Optical Society of America, 7, 2011–2015. Bernard, J. -B., & Chung, S. T. L. (2011). The dependence of crowding on flanker complex- ity and target–flanker similarity. Journal of Vision, 11(8), 1–16 (1). Chanceaux, M., & Grainger, J. (2012). Serial position effects in the identification of letters, digits, symbols, and shapes in peripheral vision. Acta Psychologica, 141, 149–158. Chanceaux, M., Mathôt, S., & Grainger, J. (2013). Flank to the left, flank to the right: Testing the modified receptive field hypothesis of letter-specific crowding. Journal of Cognitive Psychology, 25, 774–780. Chung, S. T. L. (2002). The effect of letter spacing on reading speed in central and periph- eral vision. Investigative Ophthalmology & Visual Science, 43, 1270–1276. Grainger, J. (2008). Cracking the orthographic code: An introduction. Language and Cognitive Processes, 23, 1–35. Grainger, J., Tydgat, I., & Isselé, J. (2010). Crowding affects letters and symbols differ- ently. Journal of Experimental Psychology: Human Perception and Performance, 36, 673–688. Grainger, J., & Van Heuven, W. (2003). Modeling letter position coding in printed word perception. In P. Bonin (Ed.), The mental lexicon (pp. 1–24). New York: Nova Science Publishers. Johnson, R. L., & Eisler, M. E. (2012). The importance of the first and last letter in words during sentence reading. Acta Psychologica, 141, 336–351.\n" +
"\n" +
"Legge, G. E., Pelli, D.G., Rubin, G. S., & Schleske, M. M. (1985). Psychophysics of reading. I. Normal vision. Vision Research, 25, 239–252. Perea, M., Moret-Tatay, C., & Gomez, P. (2011). The effects of inter letter spacing in visual-word recognition. Acta Psychologica, 137, 345–351. Scaltritti, M., & Balota, D. A. (2013). Are all letters processed equally in parallel? Further evidence of a robust first-letter advantage. Acta Psychologica, 144, 397–410. Stevens, M., & Grainger, J. (2003). Letter visibility and the viewing position effect in visual word recognition. Perception & Psychophysics, 65, 133–151. Tripathy, S. P., & Cavanagh, P. (2002). The extent of crowding in peripheral vision does not scale with target size. Vision Research, 42, 2357–2369. Tripathy, S., Cavanagh, P., & Bedell, H. (2013, May). Large interaction zones for visual crowding for briefly presented peripheral stimuli. Poster session presented at 13th Annual Meeting of Vision Science Society, Naples, Florida. Tydgat, I., & Grainger, J. (2009). Serial position effects in the identification of letters, digits, and symbols. Journal of Experimental Psychology: Human Perception and Performance, 35, 480–498. Vinkcier, F., Qiao, E., Pallier, C., Dehaene, S., & Cohen, L. (2011). The impact of letter spacing on reading: A test of the bigram coding hypothesis. Journal of Vision, 11, 1–21. Whitney, C. (2008). Supporting the serial in the SERIOL model. Language & Cognitive Processes, 23, 824–865. Yu, D., Cheung, S. -H., Legge, G. E., & Chung, S. T. L. (2007). Effect of letter spacing on visual span and reading speed. Journal of Vision, 7, 1–10. Zorzi, M., Barbiero, C., Facoetti, A., Lonciari, L., Carrozzi, M., Montico, M., et al. (2012). Extra-large letter spacing improves reading in dyslexia. Proceedings of the National Academy of Sciences, 109, 11455–11459.";
block = "[1] C.P. Wild, Environmental exposure measurement in cancer epidemiology, Mutagenesis 24 (2009) 117–125. \n" +
"[2] G.N. Wogan, T.W. Kensler, J.D. Groopman, Present and future directions of translational research on aflatoxin and hepatocellular carcinoma. A review, Food Addit. Contam. Part A: Chem. Anal. Control Expos. Risk Assess. 29 (2012) 249–257. \n" +
"[3] H.M. Shen, C.N. Ong, Mutations of the p53 tumor suppressor gene and ras onco- genes in aflatoxin hepatocarcinogenesis, Mutat. Res. Rev. Genet. Toxicol. 366 (1996) 23–44. \n" +
"[4] K.A. McGlynn, W.T. London, The global epidemiology of hepatocellular carci- noma: present and future, Clin. Liver Dis. 15 (2011) 223–243. \n" +
"[5] J.F. Solus, B.J. Arietta, J.R. Harris, D.P. Sexton, J.Q. Steward, C. McMunn, P. Ihrie, J.M. Mehall, T.L. Edwards, E.P. Dawson, Genetic variation in eleven phase I drug metabolism genes in an ethnically diverse population, Pharmacogenomics 5 (2004) 895–931.\n" +
"\n" +
"[6] C. Ioannides, D.F. Lewis, Cytochromes P450 in the bioactivation of chemicals, Curr. Top. Med. Chem. 4 (2004) 1767–1788. \n" +
"[7] T. Omura, Forty years of cytochrome P450, Biochem. Biophys. Res. Commun. 266 (1999) 690–698. \n" +
"[8] C.N. Martin, R.C. Garner, Aflatoxin B1-oxide generated by chemical or enzy- matic oxidation of aflatoxin B1 causes guanine substitution in nucleic acids, Nature 267 (1977) 863–865. \n" +
"[9] M.E. Smela, M.L. Hamm, P.T. Henderson, C.M. Harris, T.M. Harris, J.M. Essig- mann, The aflatoxin B(1) formamidopyrimidine adduct plays a major role in causing the types of mutations observed in human hepatocellular carcinoma, PNAS 99 (2002) 6655–6660. \n" +
"[10] D.W. Nebert, T.P. Dalton, The role of cytochrome P450 enzymes in endoge- nous signalling pathways and environmental carcinogenesis, Nat. Rev. Cancer 6 (2006) 947–960. \n" +
"[11] A. Gunes, M.L. Dahl, Variation in CYP1A2 activity and its clinical implications: influence of environmental factors and genetic polymorphisms, Pharmacoge- nomics 9 (2008) 625–637. \n" +
"[12] T. Shimada, Xenobiotic-metabolizing enzymes involved in activation and detoxification of carcinogenic polycyclic aromatic hydrocarbons, Drug Metab. Pharmacokinet. 21 (2006) 257–276. \n" +
"[13] A.R. Boobis, N.J. Gooderham, K.J. Rich, K. Zhao, R.J. Edwards, B.P. Murray, A.M. Lynch, S. Murray, D.S. Davies, Enzymatic studies of the activation of heterocyclic food mutagens in man, Princess Takamatsu Symp. 23 (1995) 134–144. \n" +
"[14] D.L. Eaton, E.P. Gallagher, Mechanisms of aflatoxin carcinogenesis, Ann. Rev. Pharmacol. Toxicol. 34 (1994) 135–172. \n" +
"[15] D. Kim, F.P. Guengerich, Cytochrome P450 activation of arylamines and hete- rocyclic amines, Annu. Rev. Pharmacol. Toxicol. 45 (2005) 27–49. \n" +
"[16] E.P. Gallagher, K.L. Kunze, P.L. Stapleton, D.L. Eaton, The kinetics of afla- toxin B1 oxidation by human cDNA-expressed and human liver microsomal cytochromes P450 1A2 and 3A4, Toxicol. Appl. Pharmacol. 141 (1996) 595–606. \n" +
"[17] F.P. Guengerich, A. Parikh, R.J. Turesky, P.D. Josephy, Inter-individual differ- ences in the metabolism of environmental toxicants: cytochrome P450 1A2 as a prototype, Mutat. Res. 428 (1999) 115–124. \n" +
"[18] H.C. Liang, H. Li, R.A. McKinnon, J.J. Duffy, S.S. Potter, A. Puga, D.W. Nebert, Cyp1a2(−/−) null mutant mice develop normally but show deficient drug metabolism, PNAS 93 (1996) 1671–1676. \n" +
"[19] N. Dragin, S. Uno, B. Wang, T.P. Dalton, D.W. Nebert, Generation of ‘humanized’ hCYP1A1 1A2 Cyp1a1/1a2(−/−) mouse line, Biochem. Biophys. Res. Commun. 359 (2007) 635–642. \n" +
"[20] M.T. Landi, R. Sinha, N.P. Lang, F.F. Kadlubar, Human cytochrome P4501A2JT IARC Sci. Publ (1999) 173–195. \n" +
"[21] W. Kalow, B.K. Tang, Caffeine as a metabolic probe: exploration of the enzyme- inducing effect of cigarette smoking, Clin. Pharmacol. Ther. 49 (1991) 44–48. \n" +
"[22] D.W. Nebert, T.P. Dalton, A.B. Okey, F.J. Gonzalez, Role of aryl hydrocarbon receptor-mediated induction of the CYP1 enzymes in environmental toxicity and cancer, J. Biol. Chem. 279 (2004) 23847–23850. \n" +
"[23] B.B. Rasmussen, T.H. Brix, K.O. Kyvik, K. Brøsen, The interindividual differences in the 3-demthylation of caffeine alias CYP1A2 is determined by both genetic and environmental factors, Pharmacogenetics 12 (2002) 473–478. \n" +
"[24] K. Klein, S. Winter, M. Turpeinen, M. Schwab, U.M. Zanger, Pathway-targeted pharmacogenomics of CYP1A2 in human liver, Front. Pharmacol. 1 (2010) 129. \n" +
"[25] K. Ikeya, A.K. Jaiswal, R.A. Owens, J.E. Jones, D.W. Nebert, S. Kimura, Human CYP1A2: sequence, gene structure, comparison with the mouse and rat orthol- ogous gene, and differences in liver 1A2 mRNA expression, Mol. Endocrinol. 3 (1989) 1399–1408. ";
block = "References \n" +
"\n" +
"1. Bar-Haim, R., Dagan, I., Dolan, B., Ferro, L., Giampiccolo, D., Magnini, B. and Szpektor, I. 2006. The Second PASCAL \n" +
"Recognising Textual Entailment Challenge. In Proceedings of the Second PASCAL Challenges Workshop on \n" +
"Recognising Textual Entailment, Venice, Italy. \n" +
"2. Bunescu, R. and Mooney, R. 2006. Subsequence Kernels for Relation Extraction. In Advances in Neural Information \n" +
"Processing Systems 18. MIT Press. \n" +
"3. Dagan, I., Glickman, O., and Magnini, B. 2006. The PASCAL Recognising Textual Entailment Challenge. In Quiñonero-\n" +
"Candela et al., editors, MLCW 2005, LNAI Volume 3944, pages 177-190. Springer-Verlag. \n" +
"4. Jenny Rose Finkel, Trond Grenager, and Christopher Manning. 2005. Incorporating Non-local Information into \n" +
"Information Extraction Systems by Gibbs Sampling. Proceedings of the 43nd Annual Meeting of the Association for \n" +
"Computational Linguistics (ACL 2005), pp. 363-370. \n" +
"5. Giampiccolo, D., Magnini, B., Dagan, I., and Dolan, B. 2007. The Third PASCAL Recognizing Textual Entailment \n" +
"Challenge. In Proceedings of the Workshop on Textual Entailment and Paraphrasing, pages 1–9, Prague, June 2007. \n" +
"6. Gildea, D. and Palmer, M. 2002. The Necessity of Parsing for Predicate Argument Recognition. In Proceedings of the \n" +
"40th Meeting of the Association for Computational Linguistics (ACL 2002):239-246, Philadelphia, PA. \n" +
"7. Lin, D. 1998. Dependency-based Evaluation of MINIPAR. In Workshop on the Evaluation of Parsing Systems. \n" +
"8. Neumann, G. and Piskorski, J. 2002. A Shallow Text Processing Core Engine. Journal of Computational Intelligence, \n" +
"Volume 18, Number 3, 2002, pages 451-476. \n" +
"9. Anselmo Peñas, Álvaro Rodrigo, Felisa Verdejo. 2007. Overview of the Answer Validation Exercise 2007. In the CLEF \n" +
"2007 Working Notes. \n" +
"10. Wang, R. and Neumann, G. 2007a. Recognizing Textual Entailment Using a Subsequence Kernel Method. In Proc. of \n" +
"AAAI 2007. \n" +
"11. Wang, R. and Neumann, G. 2007b. Recognizing Textual Entailment Using Sentence Similarity based on Dependency \n" +
"Tree Skeletons. In Proceedings of the Workshop on Textual Entailment and Paraphrasing, pages 36–41, Prague, June \n" +
"2007. \n" +
"12. Wang, R. and Neumann, G. 2007c. DFKI–LT at AVE 2007: Using Recognizing Textual Entailment for Answer \n" +
"Validation. In online proceedings of CLEF 2007 Working Notes, ISBN: 2-912335-31-0, September 2007, Budapest, \n" +
"Hungary.\n";
// block = "Jacobsen, S., \n2013. Serum amyloid A and haptoglobin ";
Engine engine = GrobidFactory.getInstance().getEngine();
ReferenceSegmenterParser p = new ReferenceSegmenterParser();
System.out.println("Testing block: " + block);
for (int i = 0; i < 10000000; i++)
for (LabeledReferenceResult pair : p.extract(block)) {
if (pair.getLabel() != null) {
System.out.println(pair.getLabel() + " ---> " + pair.getReferenceText());
} else {
System.out.println("---> " + pair.getReferenceText());
}
}
// System.out.println("Training data:");
// System.out.println("--------------");
// System.out.println(p.createTrainingData(block));
// System.out.println("--------------");
}*/
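    /**
     * Ad-hoc smoke test: runs the full-text cascade on one hardcoded local PDF and
     * prints the resulting TEI plus the engine counters. The path below is
     * machine-specific and has to be adapted before running.
     */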
@Test
public void testFulltext() throws Exception {
final Engine engine = GrobidFactory.getInstance().getEngine();
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder().build();
// System.out.println(engine.fullTextToTEI(new File("/Work/temp/context/coords/2.pdf"), config));
// engine.fullTextToTEI(new File("/Work/temp/pub_citation_styles/1996PRBAConfProc00507417Vos.pdf"), GrobidAnalysisConfig.defaultInstance());
// System.out.println(engine.fullTextToTEI(new File("/Work/temp/pub_citation_styles/SicamSnellenburgPFRT_OptomVisSci84E915_923.pdf"), config)); //footnote citations
// System.out.println(engine.fullTextToTEI(new File("/Work/temp/pub_citation_styles/MullenJSSv18i03.pdf"), config)); //long author style citations
// System.out.println(engine.fullTextToTEI(new File("/Work/temp/pub_citation_styles/1996ParPrecConfProc00507369.pdf"), config)); // simple numbered
// System.out.println(engine.fullTextToTEI(new File("/Work/temp/context/1000k/AS_200548461617156_1424825887720.pdf"), config)); // numbered
// File pdf = new File("/Users/zholudev/Downloads/AS-454757820178434@1485434121902_content_1.pdf");
// File pdf = new File("/Users/zholudev/Downloads/AS-99907918630920@1400831312313_content_1.pdf");
File pdf = new File("/Users/zholudev/Downloads/9908107.pdf");
Document doc = engine.getParsers().getFullTextParser().processing(DocumentSource.fromPdf(pdf, -1, -1, false, true, true), config);
System.out.println(doc.getTei());
// System.out.println(engine.fullTextToTEI(inputFile, config)); // numbered
// System.out.println(engine.fullTextToTEI(new File("/Work/temp/pub_citation_styles/MullenJSSv18i03.pdf"), GrobidAnalysisConfig.defaultInstance()));
// engine.fullTextToTEI(new File("/Work/temp/pub_citation_styles/1994FEBSLett350_235Hadden.pdf"), GrobidAnalysisConfig.defaultInstance());
// System.out.println(engine.fullTextToTEI(new File("/Users/zholudev/Work/workspace/pdf-analysis/pdf-analysis-service/src/test/resources/net/researchgate/pdfanalysisservice/papers.bad.input/40th_Conf_unprotected.pdf"), GrobidAnalysisConfig.defaultInstance()));
// System.out.println(engine.fullTextToTEI(new File("/var/folders/h4/np1lg7256q3c3s6b2lhm9w0r0000gn/T/habibi-pdf996586749219753040.pdf"), GrobidAnalysisConfig.defaultInstance()));
// System.out.println(engine.fullTextToTEI("/tmp/x1.pdf", true, true, null, -1, -1, true));
System.out.println(Engine.getCntManager());
}
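    /**
     * Batch smoke test over a local directory of PDFs: each file is converted to TEI
     * with coordinates for "ref" and "biblStruct" elements, and successes/failures are
     * tallied in the counter manager. The final Thread.sleep presumably keeps the JVM
     * alive so the counters (or an attached profiler) can still be inspected.
     */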
@Test
public void testFulltexts() throws Exception {
final Engine engine = GrobidFactory.getInstance().getEngine();
// GrobidAnalysisConfig config = GrobidAnalysisConfig.defaultInstance();
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder().generateTeiCoordinates(Lists.newArrayList("ref", "biblStruct")).build();
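        // Coordinates are requested for "ref" and "biblStruct" so the produced TEI can be
        // mapped back onto the PDF pages (the same config is used in visualizeCitations below).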
int cnt = 0;
// for (File f : new File("/Work/temp/pub_citation_styles").listFiles(new FileFilter() {
// @Override
// public boolean accept(File pathname) {
for (File f : new File("/Work/temp/context/1000k")
// for (File f : new File("/Work/temp/timeout") // bad PDF that produces dozens of files
.listFiles(new FileFilter() {
@Override
public boolean accept(File pathname) {
return pathname.getName().endsWith(".pdf");
}
})) {
try {
Engine.getCntManager().i("PDFS", "INPUT_CNT");
System.out.println("Processing: " + f);
String tei = engine.fullTextToTEI(f, config);
System.out.println(tei.length());
} catch (Exception e) {
e.printStackTrace();
Engine.getCntManager().i("FAILED", e.getClass().getSimpleName());
}
if (++cnt % 10 == 0) {
System.out.println("Processed: " + cnt);
System.out.println(Engine.getCntManager());
}
}
System.out.println(Engine.getCntManager());
Thread.sleep(100000);
System.out.println("DONE!");
}
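    /**
     * Runs full-text extraction on one local PDF, overlays the recognized citation
     * callouts and bibliographical references as PDF annotations, saves the result to
     * /tmp/citAnn.pdf and opens it in the system viewer when the platform allows it.
     */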
@Test
public void visualizeCitations() throws Exception {
// File f = new File("/Users/zholudev/Downloads/The_planetary_system_Web_30_active_documents_for_S.pdf");
// File f = new File("/Users/zholudev/Downloads/Lack_of_in_vitro_constitutive_activity_for_four_pr.pdf");
File f = new File("/Users/zholudev/Downloads/AS-432836994965504@1480207789262_content_1.pdf");
// File f = new File("/Work/temp/figureExtraction/5.pdf");
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder().generateTeiCoordinates(Lists.newArrayList("ref", "biblStruct")).build();
Document doc = engine.getParsers().getFullTextParser().processing(DocumentSource.fromPdf(f, -1, -1, false, true, true), config);
PDDocument document = PDDocument.load(f);
document = CitationsVisualizer.annotatePdfWithCitations(document, doc, Collections.<String>emptyList());
File out = new File("/tmp/citAnn.pdf");
        document.save(out);
        document.close();
if (Desktop.getDesktop().isSupported(Desktop.Action.OPEN)) {
Desktop.getDesktop().open(out);
}
}
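    /**
     * Header-only extraction on one local PDF with consolidation switched off
     * (consolidateHeader(0)); prints the extracted header TEI and the engine counters.
     */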
@Test
public void testHeaders() throws Exception {
final Engine engine = GrobidFactory.getInstance().getEngine();
// GrobidAnalysisConfig config = GrobidAnalysisConfig.defaultInstance();
GrobidAnalysisConfig config = new GrobidAnalysisConfig.GrobidAnalysisConfigBuilder().consolidateHeader(0).build();
// File f = new File("/Users/zholudev/Downloads/Publications sample/AS-292290007453702@1446698776063_content_1.pdf");
File f = new File("/Users/zholudev/Downloads/Publications sample/AS-395712329207812@1471356579731_content_1.pdf");
BiblioItem res = new BiblioItem();
System.out.println(engine.processHeader(f.getAbsolutePath(), config, res));
        System.out.println(Engine.getCntManager());
}
/*@Test
public void testReferenceString() {
// String ref = "Agharahimi, M.R., LeBel, N.A., 1995. Synthesis of (–)-monoterpenylmagnolol and \n" +
// "magnolol. J. Org. Chem. 60, 1856–1863. ";
// String ref = "Lipsitch M, 1997, ANTIMICROB AGENTS CH, V41, P363";
final Engine engine = GrobidFactory.getInstance().getEngine();
// BiblioItem x = engine.processRawReference(ref, false);
// System.out.println(x.getTitle() + "; " + x.getAuthors());
// System.out.println(x.getJournal());
// System.out.println(x.getPublicationDate());
// System.out.println(x);
String text = "Below are the papers published within BIOMEX. They are organized by topic:\n" +
"\n" +
"I ñ Overview (Refs 1 and 2)\n" +
"II ñ Habitability of extraterrestrial environments, and lifeís limits (Refs 3-11)\n" +
"III ñ Biosignatures (Refs 12-20)\n" +
"IV - Water sorption (Refs 21-23)\n" +
"V - Astronaut support (Refs 24 and 25)\n" +
"\n" +
"\n" +
"\n" +
"I - Overview\n" +
"\n" +
"1.\tde Vera, J.-P., et al.. (2012). Supporting Mars exploration: BIOMEX in Low Earth Orbit and further astrobiological studies on the Moon using Raman and PanCam technology. Planetary and Space Science 74 (1): 103-110.\n" +
"\n" +
"2.\tWagner, D., de Vera, J.-P, Joshi, J., Leya T., Schulze-Makuch, D., 2015. Astrobiologie ñ dem Leben im Universum auf der Spur. System Erde 5 (1), 40-47, | DOI: 10.2312/GFZ.syserde.05.01.7.\n" +
"\n" +
"\n" +
"II ñ Habitability of extraterrestrial environments, and lifeís limits\n" +
"\n" +
"3.\tBackhaus, T., de la Torre, R., Lyhme, K., de Vera, J.-P. and Meeflen, J. (2014). Desiccation and low temperature attenuate the effect of UVC254 nm in the photobiont of the astrobiologically relevant lichens Circinaria gyrosa and Buellia frigida. International Journal of Astrobiology, doi:10.1017/S1473550414000470.\n" +
"\n" +
"4.\tBaquÈ M., de Vera, J.-P., Rettberg, P., Billi, D., 2013. The BOSS and BIOMEX space experiments on the EXPOSE-R2 mission: Endurance of the desert cyanobacterium Chroococcidiopsis under simulated space vacuum, Martian atmosphere, UVC radiation and temperature extremes. Acta Astronautica 91 (2013) 180ñ186.\n" +
"\n" +
"5.\tKukharenko, O., Podolich, O., Rybitska, A., Reshetnyak, G., Burlak, L., Ovcharenko, L., Voznyuk, T., Moshynets, O., Rogutskyi, I., Zaets, I., Yaneva, O., Reva, O., Pidgorskiy, V., Rabbow, E., de Vera, J.P., Yatsenko, V., Kozyrovska, N. (2012). Robust symbiotic microbial communities in space research. Report to COSPAR, Space Research in Ukraine, National Academy of Science of Ukraine, State Agency of Ukraine.\n" +
"\n" +
"6.\tMeesen, J., Wuthenow, P., Schille, P., Rabbow, E., de Vera, J.-P.P. and Ott, S. (2015). Resistance of the lichen Buellia frigida to simulated space conditions during the pre-flight tests for BIOMEX ñ viability assay and morphological stability. Astrobiology 15 (8): 601-615.\n" +
"\n" +
"7.\tMeeflen, J., Backhaus, T., Sadowsky, A., Mrkalj, M., S·nchez, F.J., de la Torre, R. and Ott, S. (2014). Effects of UVC254 nm on the photosynthetic activity of photobionts from the astrobiologically relevant lichens Buellia frigida and Circinaria gyrosa. International Journal of Astrobiology 13 (4): 340ñ352, doi:10.1017/S1473550414000275\n" +
"\n" +
"8.\tMeeflen, J., S·nchez, F. J., Brandt, A., Balzer, E.-M., de la Torre, R., Sancho, L. G., de Vera, J.-P. and Ott , S., 2013. Extremotolerance and Resistance of Lichens: Comparative Studies on Five Species Used in Astrobiological Research I. Morphological and Anatomical Characteristics. Orig Life Evol Biosph 43: 283ñ303.\n" +
"\n" +
"9.\tMeeflen, J., S·nchez, F. J., Sadowsky, A., de la Torre, R., Ott, S., de Vera, J.-P. (2013). Extremotolerance and Resistance of Lichens: Comparative Studies on Five Species Used in Astrobiological Research II. Secondary Lichen Compounds. Orig Life Evol Biosph (2013) 43:501ñ526.\n" +
"\n" +
"10.\tPacelli, C., Selbmann, L., Zucconi, L., de Vera, J.-P., Rabbow, E., Horneck, G., de la Torre, R. and Onofri, S. (2016). BIOMEX Experiment: Ultrastructural Alterations, Molecular Damage and Survival of the Fungus Cryomyces antarcticus after the Experiment Verification Tests. Orig Life Evol Biosph, DOI 10.1007/s11084-016-9485-2.\n" +
"\n" +
"11.\tS·nchez, F.J., Mateo-MartÌ, E., Raggio, J., Meeflen, J., MartÌnez-FrÌas, J., Sancho, L. Ga., Ott, S., de la Torre, R., 2012. The resistance of the lichen Circinaria gyrosa (nom. provis.) towards simulated Mars conditionsóa model test for the survival capacity of an eukaryotic extremophile. Planetary and Space Science 72, 102ñ110.\n" +
"\n" +
"\n" +
"III ñ Biosignatures \n" +
"\n" +
"12.\tBaquÈ, M., Verseux, C., Rabbow, E., de Vera, J.P.P., Billi, D., 2014. Detection of macromolecules in desert cyanobacteria mixed with a lunar mineral analogue after space simulations. Orig Life Evol Biosph, DOI 10.007/s11084-014-9367-4.\n" +
"\n" +
"13.\tBaquÈ, M., Verseux, C., Bˆttger, U., Rabbow, E., de Vera, J.-P.P. and Billi, D. 2015. Biosignature preservation of cyanobacteria mixed with phyllosilicatic and sulfatic Martian regoliths under simulated Martian atmosphere and UV flux. Orig Life Evol Biosph, Volume 46 (2), 289-310, DOI 10.1007/s11084-015-9467-9.\n" +
"\n" +
"14.\tBˆttger, U., de Vera, J.-P., Fritz, J., Weber, I., H¸bers, H.-W., Schulze-Makuch, D., 2012. Optimizing the detection of carotene in cyanobacteria in a Martian regolith analogue with a Raman spectrometer for the ExoMars mission. Planetary and Space Science 60 (2012) 356ñ362.\n" +
"\n" +
"15.\tBˆttger, U., de la Torre, R., Frias, J.-M., Rull, F., Meessen, J., S·nchez ÕÒigo, F.J., H¸bers, H.-W., de Vera, J.P. (2014). Raman spectroscopic analysis of the oxalate producing extremophile Circinaria Gyrosa. International Journal of Astrobiology, 13 (1): 19ñ27.\n" +
"\n" +
"16.\tBˆttger, U., de Vera, J.P., Hermelink, A., Fritz, J., Weber, I., Schulze-Makuch, D., H¸bers, H.-W. (2013). Application of Raman spectroscopy, as in situ technology for the search for life. In de Vera, J.P. and Seckbach, J. (eds.), Cellular origins, life in extreme habitats and astrobiology 28: Habitability of other planets and satellitesì, 333-345.\n" +
"\n" +
"17.\tPodolich, O., et al. (2016). The First Space-Related Study of a Kombucha Multimicrobial Cellulose-Forming Community: Preparatory Laboratory Experiments. Orig Life Evol Biosph, DOI 10.1007/s11084-016-9483-4.\n" +
"\n" +
"18.\tSerrano, P., Hermelink, A., Boettger, U., de Vera, J.-P., Wagner, D., 2014. Biosignature detection of methanogenic archaea from Siberian permafrost using confocal Raman spectroscopy. Planetary and Space Science 98, 191ñ197.\n" +
"\n" +
"19.\tSerrano, P., Hermelink, A., Lasch, P., de Vera, J.-P., Kˆnig, N., Burckhardt, O. and Wagner, D. (2015). Confocal Raman microspectroscopy reveals a convergence of the chemical composition in methanogenic archaea from a Siberian permafrost-affected soil. FEMS Microbiology Ecology, 91, 2015, fiv126.\n" +
"\n" +
"20.\tZaets, I., Podolich, O., Kukharenko, O., Reshetnyak, G., Shpylova, S., Sosnin, M., Khirunenko, L., Kozyrovska, N., de Vera, J.-P. (2014). Bacterial cellulose may provide the microbial-life biosignature in the rock records. Advances in Space Research 53: 828ñ835.\n" +
"\n" +
"\n" +
"IV - Water sorption\n" +
"\n" +
"21.\tJ‰nchen, J., Bauermeister, A., Feyh, N., de Vera, J.-P., Rettberg, P., Flemming, H.-C., Szewzyk, U. (2014). Water retention of selected microorganisms and Martian soil simulants under close to Martian environmental conditions. Planetary and Space Science 98, 163-168.\n" +
"\n" +
"22.\tJ‰nchen, J., Meeflen, J., Herzog, T.H., Feist, M., de la Torre, R. and deVera, J.-P.P., 2015. Humidity interaction of lichens under astrobiological aspects: the impact of UVC exposure on their water retention properties. International Journal of Astrobiology, 14 (3): 445-456.\n" +
"\n" +
"23.\tJ‰nchen, J., Feyh, N., Szewzyk, U., and de Vera, J.-P.P. (2016). Provision of water by halite deliquescence for Nostoc commune biofilms under Mars relevant surface conditions. International Journal of Astrobiology 15 (2), 107ñ118.\n" +
"\n" +
"\n" +
"V - Astronaut support\n" +
"\n" +
"24.\tKozyrovska, N.O., Reva1, O.M., Goginyan, V., de Vera, J.P. (2012). Kombucha microbiome as a probiotic: a view from the perspective of post-genomics and synthetic ecology. Biopolymers and Cell, 28(2): 103-113.\n" +
"\n" +
"25.\tReva, O.N., Zaets, I.E., Ovcharenko, L.P., Kukharenko, O.E., Shpylova, S.P., Podolich, O.V., de Vera, J.-P. and Kozyrovska N.O. (2015). Metabarcoding of the kombucha microbial community grown in different microenvironments. AMB Expr 5:35, DOI 10.1186/s13568-015-0124-5.\n";
// text = "Aaker, J. L. (1997). Dimensions of Brand Personality. Journal of Marketing Research, 34(3), 347. http://doi.org/10.2307/3151897";;
// text = "Meyer, F. et al. The metagenomics RAST server -a public resource for the automatic phylogenetic and functional analysis of metagenomes. BMC bioinformatics 9, 386, doi: 10.1186/1471-2105-9-386 (2008).";
text = "Lowe, R. K. (2004). Interrogation of a dynamic visualization during learning. Learning and Instruction, 14, 257e274. http://dx.doi.org/10.1016/j.learninstruc.2004.06.003.";
Document res = engine.getParsers().getSegmentationParser().processing(text);
// SortedSet<DocumentPiece> part = res.getDocumentPart(SegmentationLabel.REFERENCES);
BiblioItem item = engine.getParsers().getCitationParser().processing(text, false);
List<BibDataSet> citResults = engine.getParsers().getCitationParser().processingReferenceSection(text, engine.getParsers().getReferenceSegmenterParser());
for (BibDataSet bds: citResults) {
BiblioItem bib = bds.getResBib();
if ((bib != null) && !bib.rejectAsReference()) {
if (bib.getTitle() != null && bib.getFullAuthors() != null) {
System.out.println("\n-------------\n" + bib.getTitle() + "\n" + bib.getAuthors());
}
}
}
int i = 0;
}*/
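    /**
     * Rough thread-safety check: three threads concurrently push the same raw reference
     * string 100 times through the shared Engine; a concurrency issue in the underlying
     * models would typically surface here as an exception or a hang.
     */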
@Test
public void testMultiThreading() throws Exception {
final Engine engine = GrobidFactory.getInstance().getEngine();
// String res = engine.fullTextToTEI("/tmp/planetary-moc.pdf", false, false);
// List<BibDataSet> citRes = engine.processReferences("/tmp/planetary-moc.pdf", false);
// System.out.println(res);
final String cit = " M. Kitsuregawa, H. Tanaka, and T. Moto-oka. Application of hash to data base machine and its architecture. New Generation Computing, 1 (1), 1983.";
long t = System.currentTimeMillis();
int n = 3;
Thread[] threads = new Thread[n];
for (int i = 0; i < n; i++) {
threads[i] = new Thread() {
@Override
public void run() {
int cnt = 0;
for (int i = 0; i < 100; i++) {
try {
engine.processRawReference(cit, 0);
} catch (Exception e) {
//no op
}
if (++cnt % 10 == 0) {
System.out.println(cnt);
}
}
}
};
}
for (int i = 0; i < n; i++) {
threads[i].start();
// threads[i].join();
}
for (int i = 0; i < n; i++) {
threads[i].join();
}
System.out.println("Took ms: " + (System.currentTimeMillis() - t));
}
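    /**
     * Scratch entry point for eyeballing reference-segmenter training data: the string
     * below is one tagged example in the raw CRF feature format described inline.
     */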
public static void main(String[] args) {
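        // One token per line in the reference-segmenter CRF format. The columns appear to be:
        // token, lowercased token, 1-4 char prefixes, 1-4 char suffixes, position in the line
        // (LINESTART/LINEIN/LINEEND), capitalisation class, digit class, a series of binary
        // lexicon/shape flags (single char, name lists, year, month, email, http), the
        // punctuation type, a coarse position bin within the block, and the target label.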
String s = "References references R Re Ref Refe s es ces nces LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"1 1 1 1 1 1 1 1 1 1 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"Bar bar B Ba Bar Bar r ar Bar Bar LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 1 <reference-block>\n" +
"Haim haim H Ha Hai Haim m im aim Haim LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Dagan dagan D Da Dag Daga n an gan agan LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Dolan dolan D Do Dol Dola n an lan olan LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Ferro ferro F Fe Fer Ferr o ro rro erro LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"L l L L L L L L L L LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Giampiccolo giampiccolo G Gi Gia Giam o lo olo colo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"Magnini magnini M Ma Mag Magn i ni ini nini LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Szpektor szpektor S Sz Szp Szpe r or tor ktor LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 1 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"2006 2006 2 20 200 2006 6 06 006 2006 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 1 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 1 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Second second S Se Sec Seco d nd ond cond LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEEND ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Recognising recognising R Re Rec Reco g ng ing sing LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 1 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Challenge challenge C Ch Cha Chal e ge nge enge LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Second second S Se Sec Seco d nd ond cond LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Challenges challenges C Ch Cha Chal s es ges nges LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"on on o on on on n on on on LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Recognising recognising R Re Rec Reco g ng ing sing LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"Venice venice V Ve Ven Veni e ce ice nice LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"Italy italy I It Ita Ital y ly aly taly LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"2 2 2 2 2 2 2 2 2 2 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"Bunescu bunescu B Bu Bun Bune u cu scu escu LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Mooney mooney M Mo Moo Moon y ey ney oney LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 2 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"2006 2006 2 20 200 2006 6 06 006 2006 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"Subsequence subsequence S Su Sub Subs e ce nce ence LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Kernels kernels K Ke Ker Kern s ls els nels LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Relation relation R Re Rel Rela n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Extraction extraction E Ex Ext Extr n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 2 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Advances advances A Ad Adv Adva s es ces nces LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"in in i in in in n in in in LINEIN NOCAPS NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Neural neural N Ne Neu Neur l al ral ural LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Information information I In Inf Info n on ion tion LINEEND INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Processing processing P Pr Pro Proc g ng ing sing LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"Systems systems S Sy Sys Syst s ms ems tems LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 2 <reference-block>\n" +
"18 18 1 18 18 18 8 18 18 18 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"MIT mit M MI MIT MIT T IT MIT MIT LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Press press P Pr Pre Pres s ss ess ress LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"3 3 3 3 3 3 3 3 3 3 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"Dagan dagan D Da Dag Daga n an gan agan LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"Glickman glickman G Gl Gli Glic n an man kman LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"O o O O O O O O O O LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Magnini magnini M Ma Mag Magn i ni ini nini LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"2006 2006 2 20 200 2006 6 06 006 2006 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Recognising recognising R Re Rec Reco g ng ing sing LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Challenge challenge C Ch Cha Chal e ge nge enge LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Quiñonero quiñonero Q Qu Qui Quiñ o ro ero nero LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"- - - - - - - - - - LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 3 <reference-block>\n" +
"Candela candela C Ca Can Cand a la ela dela LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"et et e et et et t et et et LINEIN NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"al al a al al al l al al al LINEIN NOCAPS NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"editors editors e ed edi edit s rs ors tors LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"MLCW mlcw M ML MLC MLCW W CW LCW MLCW LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"2005 2005 2 20 200 2005 5 05 005 2005 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 3 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 3 <reference-block>\n" +
"LNAI lnai L LN LNA LNAI I AI NAI LNAI LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 3 <reference-block>\n" +
"Volume volume V Vo Vol Volu e me ume lume LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"3944 3944 3 39 394 3944 4 44 944 3944 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 4 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"177 177 1 17 177 177 7 77 177 177 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 4 <reference-block>\n" +
"190 190 1 19 190 190 0 90 190 190 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Springer springer S Sp Spr Spri r er ger nger LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 4 <reference-block>\n" +
"Verlag verlag V Ve Ver Verl g ag lag rlag LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"4 4 4 4 4 4 4 4 4 4 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Jenny jenny J Je Jen Jenn y ny nny enny LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Rose rose R Ro Ros Rose e se ose Rose LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Finkel finkel F Fi Fin Fink l el kel nkel LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 4 <reference-block>\n" +
"Trond trond T Tr Tro Tron d nd ond rond LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Grenager grenager G Gr Gre Gren r er ger ager LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 4 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Christopher christopher C Ch Chr Chri r er her pher LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Manning manning M Ma Man Mann g ng ing ning LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"2005 2005 2 20 200 2005 5 05 005 2005 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Incorporating incorporating I In Inc Inco g ng ing ting LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Non non N No Non Non n on Non Non LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 4 <reference-block>\n" +
"local local l lo loc loca l al cal ocal LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Information information I In Inf Info n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"into into i in int into o to nto into LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Information information I In Inf Info n on ion tion LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Extraction extraction E Ex Ext Extr n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Systems systems S Sy Sys Syst s ms ems tems LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"by by b by by by y by by by LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Gibbs gibbs G Gi Gib Gibb s bs bbs ibbs LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Sampling sampling S Sa Sam Samp g ng ing ling LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 4 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"43nd 43nd 4 43 43n 43nd d nd 3nd 43nd LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Annual annual A An Ann Annu l al ual nual LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"Meeting meeting M Me Mee Meet g ng ing ting LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 4 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Association association A As Ass Asso n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"for for f fo for for r or for for LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Computational computational C Co Com Comp l al nal onal LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Linguistics linguistics L Li Lin Ling s cs ics tics LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"( ( ( ( ( ( ( ( ( ( LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 OPENBRACKET 5 <reference-block>\n" +
"ACL acl A AC ACL ACL L CL ACL ACL LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"2005 2005 2 20 200 2005 5 05 005 2005 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 5 <reference-block>\n" +
") ) ) ) ) ) ) ) ) ) LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 ENDBRACKET 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"pp pp p pp pp pp p pp pp pp LINEIN NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"363 363 3 36 363 363 3 63 363 363 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 5 <reference-block>\n" +
"370 370 3 37 370 370 0 70 370 370 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"5 5 5 5 5 5 5 5 5 5 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"Giampiccolo giampiccolo G Gi Gia Giam o lo olo colo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"Magnini magnini M Ma Mag Magn i ni ini nini LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"Dagan dagan D Da Dag Daga n an gan agan LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"I i I I I I I I I I LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Dolan dolan D Do Dol Dola n an lan olan LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 5 <reference-block>\n" +
"B b B B B B B B B B LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 5 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 5 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Third third T Th Thi Thir d rd ird hird LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"PASCAL pascal P PA PAS PASC L AL CAL SCAL LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 5 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEEND INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Challenge challenge C Ch Cha Chal e ge nge enge LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Paraphrasing paraphrasing P Pa Par Para g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"1 1 1 1 1 1 1 1 1 1 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"– – – – – – – – – – LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"9 9 9 9 9 9 9 9 9 9 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"Prague prague P Pr Pra Prag e ue gue ague LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"June june J Ju Jun June e ne une June LINEIN INITCAP NODIGIT 0 1 0 0 0 0 1 0 0 NOPUNCT 6 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"6 6 6 6 6 6 6 6 6 6 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"Gildea gildea G Gi Gil Gild a ea dea ldea LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Palmer palmer P Pa Pal Palm r er mer lmer LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 6 <reference-block>\n" +
"M m M M M M M M M M LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 6 <reference-block>\n" +
"The the T Th The The e he The The LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Necessity necessity N Ne Nec Nece y ty ity sity LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Parsing parsing P Pa Par Pars g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Predicate predicate P Pr Pre Pred e te ate cate LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Argument argument A Ar Arg Argu t nt ent ment LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
"Recognition recognition R Re Rec Reco n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 6 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"the the t th the the e he the the LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"40th 40th 4 40 40t 40th h th 0th 40th LINESTART NOCAPS CONTAINSDIGITS 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Meeting meeting M Me Mee Meet g ng ing ting LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Association association A As Ass Asso n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Computational computational C Co Com Comp l al nal onal LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Linguistics linguistics L Li Lin Ling s cs ics tics LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"( ( ( ( ( ( ( ( ( ( LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 OPENBRACKET 7 <reference-block>\n" +
"ACL acl A AC ACL ACL L CL ACL ACL LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 7 <reference-block>\n" +
") ) ) ) ) ) ) ) ) ) LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 ENDBRACKET 7 <reference-block>\n" +
": : : : : : : : : : LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 PUNCT 7 <reference-block>\n" +
"239 239 2 23 239 239 9 39 239 239 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 7 <reference-block>\n" +
"246 246 2 24 246 246 6 46 246 246 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 7 <reference-block>\n" +
"Philadelphia philadelphia P Ph Phi Phil a ia hia phia LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 7 <reference-block>\n" +
"PA pa P PA PA PA A PA PA PA LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"7 7 7 7 7 7 7 7 7 7 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"Lin lin L Li Lin Lin n in Lin Lin LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 7 <reference-block>\n" +
"D d D D D D D D D D LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"1998 1998 1 19 199 1998 8 98 998 1998 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"Dependency dependency D De Dep Depe y cy ncy ency LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 7 <reference-block>\n" +
"based based b ba bas base d ed sed ased LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Evaluation evaluation E Ev Eva Eval n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"MINIPAR minipar M MI MIN MINI R AR PAR IPAR LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 7 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"Evaluation evaluation E Ev Eva Eval n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 7 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Parsing parsing P Pa Par Pars g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Systems systems S Sy Sys Syst s ms ems tems LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"8 8 8 8 8 8 8 8 8 8 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Piskorski piskorski P Pi Pis Pisk i ki ski rski LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"J j J J J J J J J J LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"A a A A A A A A A A LINEIN ALLCAP NODIGIT 1 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Shallow shallow S Sh Sha Shal w ow low llow LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Text text T Te Tex Text t xt ext Text LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Processing processing P Pr Pro Proc g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Core core C Co Cor Core e re ore Core LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Engine engine E En Eng Engi e ne ine gine LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"Journal journal J Jo Jou Jour l al nal rnal LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Computational computational C Co Com Comp l al nal onal LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Intelligence intelligence I In Int Inte e ce nce ence LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"Volume volume V Vo Vol Volu e me ume lume LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"18 18 1 18 18 18 8 18 18 18 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"Number number N Nu Num Numb r er ber mber LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"3 3 3 3 3 3 3 3 3 3 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"2002 2002 2 20 200 2002 2 02 002 2002 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 8 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"451 451 4 45 451 451 1 51 451 451 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 8 <reference-block>\n" +
"476 476 4 47 476 476 6 76 476 476 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"9 9 9 9 9 9 9 9 9 9 LINESTART NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 8 <reference-block>\n" +
"Anselmo anselmo A An Ans Anse o mo lmo elmo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
"Peñas peñas P Pe Peñ Peña s as ñas eñas LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 8 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"Álvaro álvaro Á Ál Álv Álva o ro aro varo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Rodrigo rodrigo R Ro Rod Rodr o go igo rigo LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"Felisa felisa F Fe Fel Feli a sa isa lisa LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Verdejo verdejo V Ve Ver Verd o jo ejo dejo LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"Overview overview O Ov Ove Over w ew iew view LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Answer answer A An Ans Answ r er wer swer LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Validation validation V Va Val Vali n on ion tion LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Exercise exercise E Ex Exe Exer e se ise cise LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"CLEF clef C CL CLE CLEF F EF LEF CLEF LINEEND ALLCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Working working W Wo Wor Work g ng ing king LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Notes notes N No Not Note s es tes otes LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"10 10 1 10 10 10 0 10 10 10 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"Wang wang W Wa Wan Wang g ng ang Wang LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 9 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"2007a 2007a 2 20 200 2007 a 7a 07a 007a LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 1 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 9 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Using using U Us Usi Usin g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"a a a a a a a a a a LINEIN NOCAPS NODIGIT 1 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Subsequence subsequence S Su Sub Subs e ce nce ence LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Kernel kernel K Ke Ker Kern l el nel rnel LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
"Method method M Me Met Meth d od hod thod LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 9 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Proc proc P Pr Pro Proc c oc roc Proc LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"of of o of of of f of of of LINEEND NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"AAAI aaai A AA AAA AAAI I AI AAI AAAI LINESTART ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"11 11 1 11 11 11 1 11 11 11 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"Wang wang W Wa Wan Wang g ng ang Wang LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 10 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 10 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"2007b 2007b 2 20 200 2007 b 7b 07b 007b LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 1 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Using using U Us Usi Usin g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Sentence sentence S Se Sen Sent e ce nce ence LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Similarity similarity S Si Sim Simi y ty ity rity LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"based based b ba bas base d ed sed ased LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Dependency dependency D De Dep Depe y cy ncy ency LINEEND INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Tree tree T Tr Tre Tree e ee ree Tree LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Skeletons skeletons S Sk Ske Skel s ns ons tons LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 10 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Proceedings proceedings P Pr Pro Proc s gs ngs ings LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"the the t th the the e he the the LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Workshop workshop W Wo Wor Work p op hop shop LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"on on o on on on n on on on LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"Paraphrasing paraphrasing P Pa Par Para g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 10 <reference-block>\n" +
"pages pages p pa pag page s es ges ages LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"36 36 3 36 36 36 6 36 36 36 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 10 <reference-block>\n" +
"– – – – – – – – – – LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"41 41 4 41 41 41 1 41 41 41 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"Prague prague P Pr Pra Prag e ue gue ague LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"June june J Ju Jun June e ne une June LINEEND INITCAP NODIGIT 0 1 0 0 0 0 1 0 0 NOPUNCT 11 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"12 12 1 12 12 12 2 12 12 12 LINESTART NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"Wang wang W Wa Wan Wang g ng ang Wang LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"R r R R R R R R R R LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"and and a an and and d nd and and LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Neumann neumann N Ne Neu Neum n nn ann mann LINEIN INITCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"G g G G G G G G G G LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"2007c 2007c 2 20 200 2007 c 7c 07c 007c LINEIN NOCAPS CONTAINSDIGITS 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"DFKI dfki D DF DFK DFKI I KI FKI DFKI LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"– – – – – – – – – – LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"LT lt L LT LT LT T LT LT LT LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"at at a at at at t at at at LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"AVE ave A AV AVE AVE E VE AVE AVE LINEIN ALLCAP NODIGIT 0 1 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
": : : : : : : : : : LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 PUNCT 11 <reference-block>\n" +
"Using using U Us Usi Usin g ng ing sing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Recognizing recognizing R Re Rec Reco g ng ing zing LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Textual textual T Te Tex Text l al ual tual LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Entailment entailment E En Ent Enta t nt ent ment LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"for for f fo for for r or for for LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Answer answer A An Ans Answ r er wer swer LINEEND INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Validation validation V Va Val Vali n on ion tion LINESTART INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
". . . . . . . . . . LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 11 <reference-block>\n" +
"In in I In In In n In In In LINEIN INITCAP NODIGIT 0 1 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"online online o on onl onli e ne ine line LINEIN NOCAPS NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"proceedings proceedings p pr pro proc s gs ngs ings LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"of of o of of of f of of of LINEIN NOCAPS NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"CLEF clef C CL CLE CLEF F EF LEF CLEF LINEIN ALLCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Working working W Wo Wor Work g ng ing king LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
"Notes notes N No Not Note s es tes otes LINEIN INITCAP NODIGIT 0 0 1 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 11 <reference-block>\n" +
"ISBN isbn I IS ISB ISBN N BN SBN ISBN LINEIN ALLCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 11 <reference-block>\n" +
": : : : : : : : : : LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 PUNCT 12 <reference-block>\n" +
"2 2 2 2 2 2 2 2 2 2 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 12 <reference-block>\n" +
"912335 912335 9 91 912 9123 5 35 335 2335 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 12 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 12 <reference-block>\n" +
"31 31 3 31 31 31 1 31 31 31 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
"- - - - - - - - - - LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 HYPHEN 12 <reference-block>\n" +
"0 0 0 0 0 0 0 0 0 0 LINEIN NOCAPS ALLDIGIT 1 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 12 <reference-block>\n" +
"September september S Se Sep Sept r er ber mber LINEIN INITCAP NODIGIT 0 1 0 0 0 0 1 0 0 NOPUNCT 12 <reference-block>\n" +
"2007 2007 2 20 200 2007 7 07 007 2007 LINEIN NOCAPS ALLDIGIT 0 0 0 0 0 1 0 0 0 NOPUNCT 12 <reference-block>\n" +
", , , , , , , , , , LINEIN ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 12 <reference-block>\n" +
"Budapest budapest B Bu Bud Buda t st est pest LINEIN INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
", , , , , , , , , , LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 COMMA 12 <reference-block>\n" +
"Hungary hungary H Hu Hun Hung y ry ary gary LINESTART INITCAP NODIGIT 0 0 0 0 0 0 0 0 0 NOPUNCT 12 <reference-block>\n" +
". . . . . . . . . . LINEEND ALLCAP NODIGIT 1 0 0 0 0 0 0 0 0 DOT 12 <reference-block>\n" +
"\n";
System.out.println(s.length());
System.out.println(s);
}
}
| 172,771 | 100.690406 | 1,865 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/TaggingLabelsTestImpl.java
|
package org.grobid.core.engines;
import org.grobid.core.GrobidModel;
import org.grobid.core.GrobidModels;
import org.grobid.core.engines.label.TaggingLabel;
import org.grobid.core.engines.label.TaggingLabels;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.Test;
import org.junit.BeforeClass;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
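// Checks that TaggingLabels resolves labels correctly: pre-registered standard labels, "I-" prefixed labels, and labels created on demand for custom models.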
public class TaggingLabelsTestImpl {
@BeforeClass
public static void init() {
GrobidProperties.getInstance();
}
@Test
public void testTaggingLabel_StandardLabelSection() throws Exception {
TaggingLabel label = TaggingLabels.SECTION;
assertNotNull(label);
assertThat(label.getLabel(), is("<section>"));
assertThat(label.getGrobidModel(), is((GrobidModel) GrobidModels.FULLTEXT));
assertThat(label.getName(), is("FULLTEXT_SECTION"));
}
@Test
public void testModelFor_StandardLabelStartingSection() throws Exception {
TaggingLabel label = TaggingLabels.labelFor(GrobidModels.FULLTEXT, "I-<section>");
assertNotNull(label);
assertThat(label.getLabel(), is("<section>"));
assertThat(label.getGrobidModel(), is((GrobidModel) GrobidModels.FULLTEXT));
assertThat(label.getName(), is("FULLTEXT_SECTION"));
}
@Test
public void testModelFor_LabelNotPresentInCache_shouldRemovePrefix() throws Exception {
TaggingLabel label = TaggingLabels.labelFor(GrobidModels.FULLTEXT, "I-<sectionsLabel>");
assertNotNull(label);
assertThat(label.getLabel(), is("<sectionsLabel>"));
assertThat(label.getGrobidModel(), is((GrobidModel) GrobidModels.FULLTEXT));
assertThat(label.getName(), is("FULLTEXT_SECTIONSLABEL"));
}
@Test
public void testModelFor_StandardLabelMiddleSection() throws Exception {
TaggingLabel label = TaggingLabels.labelFor(GrobidModels.FULLTEXT, "<section>");
assertNotNull(label);
assertThat(label.getLabel(), is("<section>"));
assertThat(label.getGrobidModel(), is((GrobidModel) GrobidModels.FULLTEXT));
assertThat(label.getName(), is("FULLTEXT_SECTION"));
}
@Test
public void testTaggingLabel_CustomLabel() throws Exception {
TaggingLabel label = TaggingLabels.labelFor(GrobidModels.DICTIONARIES_LEXICAL_ENTRIES, "<lemma>");
assertNotNull(label);
assertThat(label.getLabel(), is("<lemma>"));
assertThat(label.getGrobidModel(), is((GrobidModel) GrobidModels.DICTIONARIES_LEXICAL_ENTRIES));
assertThat(label.getName(), is("dictionaries-lexical-entries_LEMMA".toUpperCase()));
}
}
| 2,724 | 32.641975 | 106 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/DateParserTest.java
|
package org.grobid.core.engines;
import org.apache.commons.lang3.tuple.Triple;
import org.grobid.core.GrobidModels;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.data.Date;
import org.grobid.core.features.FeaturesVectorDate;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.lexicon.Lexicon;
import org.grobid.core.utilities.GrobidConfig;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.GrobidTestUtils;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.easymock.PowerMock;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
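// Unit tests for DateParser normalization and cleaning; Lexicon is mocked statically so no lexicon resources need to be loaded.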
@RunWith(PowerMockRunner.class)
@PrepareForTest(Lexicon.class)
public class DateParserTest {
private DateParser target;
@Before
public void setUp() throws Exception {
PowerMock.mockStatic(Lexicon.class);
GrobidConfig.ModelParameters modelParameters = new GrobidConfig.ModelParameters();
modelParameters.name = "bao";
GrobidProperties.addModel(modelParameters);
target = new DateParser(GrobidModels.DUMMY);
}
@Test
public void testPostValidation_invalidYear_onlyString_shouldSetNull() {
Date inputDate = new Date();
inputDate.setYearString("10360 10370");
Date outputDate = DateParser.cleaning(inputDate);
assertThat(outputDate.getYearString(), is(nullValue()));
}
@Test
public void testPostValidation_invalidYear2_onlyString_shouldSetNull() {
Date inputDate = new Date();
inputDate.setYearString("10360 10370 10380 10390 10400");
Date outputDate = DateParser.cleaning(inputDate);
assertThat(outputDate.getYearString(), is(nullValue()));
}
@Test
public void testPostValidation_invalidYear_bothStringAndInt_shouldSetNull() {
Date inputDate = new Date();
inputDate.setYear(1036010370);
inputDate.setYearString("10360 10370");
Date outputDate = DateParser.cleaning(inputDate);
assertThat(outputDate.getYearString(), is(nullValue()));
assertThat(outputDate.getYear(), is(-1));
}
@Test
public void testPostValidation_invalidMonth_bothStringAndInt_shouldSetNull() {
Date inputDate = new Date();
inputDate.setMonth(1234);
inputDate.setMonthString("1234");
Date outputDate = DateParser.cleaning(inputDate);
assertThat(outputDate.getMonthString(), is(nullValue()));
assertThat(outputDate.getMonth(), is(-1));
}
@Test
public void testPostValidation_invalidDay_bothStringAndInt_shouldSetNull() {
Date inputDate = new Date();
inputDate.setDay(12345);
inputDate.setDayString("12345");
Date outputDate = DateParser.cleaning(inputDate);
assertThat(outputDate.getDayString(), is(nullValue()));
assertThat(outputDate.getDay(), is(-1));
}
@Test
public void testNormalize_yearContainsWholeDate_shouldReconstructCorrectly() {
Date inputDate = new Date();
inputDate.setYear(20021212);
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(12));
assertThat(outputDate.getDay(), is(12));
assertThat(outputDate.getYear(), is(2002));
}
@Test
public void testNormalize_dayContainsWholeDate_shouldReturnEmptyDate() {
Date inputDate = new Date();
inputDate.setDay(20021212);
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(-1));
assertThat(outputDate.getDay(), is(-1));
assertThat(outputDate.getYear(), is(-1));
}
@Test
public void testNormalize_monthContainsWholeDate_shouldReturnEmptyDate() {
Date inputDate = new Date();
inputDate.setMonth(20021212);
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(-1));
assertThat(outputDate.getDay(), is(-1));
assertThat(outputDate.getYear(), is(-1));
}
@Test
public void testNormalize_yearOnly_validValue_shouldParseYearCorrectly() {
Date inputDate = new Date();
inputDate.setYearString("2002");
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(-1));
assertThat(outputDate.getDay(), is(-1));
assertThat(outputDate.getYear(), is(2002));
assertThat(outputDate.getYearString(), is("2002"));
}
@Test
public void testNormalize_wholeDate_invalidYearValue_shouldRemoveValue() {
Date inputDate = new Date();
inputDate.setDayString("12");
inputDate.setMonthString("12");
inputDate.setYearString("2222222012");
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getDay(), is(12));
assertThat(outputDate.getDayString(), is("12"));
assertThat(outputDate.getMonth(), is(12));
assertThat(outputDate.getMonthString(), is("12"));
assertThat(outputDate.getYear(), is(-1));
assertThat(outputDate.getYearString(), is(nullValue()));
}
@Test
public void testNormalize_monthOnly_validValue_shouldParseMonthCorrectly() {
Date inputDate = new Date();
inputDate.setMonthString("12");
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(12));
assertThat(outputDate.getDay(), is(-1));
assertThat(outputDate.getYear(), is(-1));
assertThat(outputDate.getMonthString(), is("12"));
}
@Test
public void testNormalize_wholeDate_invalidMonthValue_shouldRemoveValue() {
Date inputDate = new Date();
inputDate.setDayString("12");
inputDate.setMonthString("1222222222");
inputDate.setYearString("2012");
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(-1));
assertThat(outputDate.getMonthString(), is(nullValue()));
assertThat(outputDate.getDay(), is(12));
assertThat(outputDate.getDayString(), is("12"));
assertThat(outputDate.getYear(), is(2012));
assertThat(outputDate.getYearString(), is("2012"));
}
@Test
public void testNormalize_dayOnly_validValue_shouldParseDayCorrectly() {
Date inputDate = new Date();
inputDate.setDayString("12");
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getMonth(), is(-1));
assertThat(outputDate.getDay(), is(12));
assertThat(outputDate.getYear(), is(-1));
assertThat(outputDate.getDayString(), is("12"));
}
@Test
public void testNormalize_wholeDate_invalidDayValue_shouldRemoveValue() {
Date inputDate = new Date();
inputDate.setDayString("1221");
inputDate.setMonthString("12");
inputDate.setYearString("2012");
Date outputDate = target.normalizeAndClean(inputDate);
assertThat(outputDate.getDay(), is(-1));
assertThat(outputDate.getDayString(), is(nullValue()));
assertThat(outputDate.getMonth(), is(12));
assertThat(outputDate.getMonthString(), is("12"));
assertThat(outputDate.getYear(), is(2012));
assertThat(outputDate.getYearString(), is("2012"));
}
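// The tests below feed hand-crafted label sequences through the Wapiti-style result parser instead of running a real model.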
@Test
public void testResultExtraction_StandardDate_shouldWork() throws Exception {
String input = "1983-1-1";
List<LayoutToken> inputAsLayoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
List<String> features = generateFeatures(inputAsLayoutTokens);
// Each triple is built as: label, start index (inclusive), end index (exclusive)
List<Triple<String, Integer, Integer>> labels = Arrays.asList(
Triple.of("<year>", 0, 1),
Triple.of("<month>", 2, 3),
Triple.of("<day>", 4, 5)
);
String result = GrobidTestUtils.getWapitiResult(features, labels);
List<Date> dates = target.resultExtraction(result, inputAsLayoutTokens);
assertThat(dates, hasSize(1));
assertThat(dates.get(0).getYearString(), is("1983"));
assertThat(dates.get(0).getMonthString(), is("1"));
assertThat(dates.get(0).getDayString(), is("1"));
}
@Test
public void testResultExtraction_DoubleDate_shouldWork() throws Exception {
String input = "1983-1-1 1982-1-2";
List<LayoutToken> inputAsLayoutTokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(input);
List<String> features = generateFeatures(inputAsLayoutTokens);
// Each triple is built as: label, start index (inclusive), end index (exclusive)
List<Triple<String, Integer, Integer>> labels = Arrays.asList(
Triple.of("<year>", 0, 1),
Triple.of("<month>", 2, 3),
Triple.of("<day>", 4, 5),
Triple.of("<year>", 5, 6),
Triple.of("<month>", 7, 8),
Triple.of("<day>", 9, 10)
);
String result = GrobidTestUtils.getWapitiResult(features, labels);
List<Date> dates = target.resultExtraction(result, inputAsLayoutTokens);
assertThat(dates, hasSize(2));
assertThat(dates.get(0).getYearString(), is("1983"));
assertThat(dates.get(0).getMonthString(), is("1"));
assertThat(dates.get(0).getDayString(), is("1"));
assertThat(dates.get(1).getYearString(), is("1982"));
assertThat(dates.get(1).getMonthString(), is("1"));
assertThat(dates.get(1).getDayString(), is("2"));
}
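// Pairs each token with a dummy "<date>" label and runs the date feature extractor, returning one feature line per token.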
private List<String> generateFeatures(List<LayoutToken> layoutTokens) throws Exception {
List<String> tokensAsStrings = layoutTokens.stream()
.map(layoutToken -> layoutToken.getText() + " " + "<date>")
.collect(Collectors.toList());
String features = FeaturesVectorDate.addFeaturesDate(tokensAsStrings);
return Arrays.asList(features.split("\n"));
}
}
| 10,452 | 37.010909 | 108 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/SegmentationTest.java
|
package org.grobid.core.engines;
import org.grobid.core.document.Document;
import org.grobid.core.document.DocumentSource;
import org.grobid.core.engines.config.GrobidAnalysisConfig;
import org.grobid.core.factory.AbstractEngineFactory;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.assertThat;
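// Integration tests for the Segmentation engine: line-level feature generation and document preparation on a sample PDF.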
public class SegmentationTest {
Segmentation target;
@BeforeClass
public static void setInitialContext() throws Exception {
// MockContext.setInitialContext();
AbstractEngineFactory.init();
}
@AfterClass
public static void destroyInitialContext() throws Exception {
// MockContext.destroyInitialContext();
}
@Before
public void setUp() throws Exception {
target = new Segmentation();
}
@Test
public void testGetAllLinesFeatures_SimpleDocument_shouldWork() throws Exception {
File input = new File(this.getClass().getResource("samplePdf.segmentation.pdf").toURI());
DocumentSource doc = DocumentSource.fromPdf(input);
final Document document = new Document(doc);
document.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
String output = target.getAllLinesFeatured(document);
String[] splittedOutput = output.split("\n");
assertThat(splittedOutput.length, is(25));
assertThat(splittedOutput[0], startsWith("Title"));
assertThat(splittedOutput[0], is("Title Title title T Ti Tit Titl BLOCKSTART PAGESTART NEWFONT HIGHERFONT 1 0 INITCAP NODIGIT 0 0 1 0 0 0 0 0 12 12 no 0 10 0 1 0 0 1"));
doc.close(true, true, true);
}
@Test
public void testPrepareDocument_SimpleDocument_shouldWork() throws Exception {
File input = new File(this.getClass().getResource("samplePdf.segmentation.pdf").toURI());
DocumentSource doc = DocumentSource.fromPdf(input);
final Document document = new Document(doc);
document.addTokenizedDocument(GrobidAnalysisConfig.defaultInstance());
Document output = target.prepareDocument(document);
assertThat(output, notNullValue());
assertThat(output.getPages().size(), is(1));
// assertThat(output.getBody(), notNullValue());
assertThat(output.getBlocks().size(), is(3));
assertThat(output.getTokenizations().size(), is(344));
doc.close(true, true, true);
}
}
| 2,509 | 33.383562 | 177 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/AffiliationAddressParserTest.java
|
package org.grobid.core.engines;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
import static org.hamcrest.CoreMatchers.is;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.google.common.base.Joiner;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.data.Affiliation;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.features.FeaturesVectorAffiliationAddress;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.GrobidProperties;
import org.grobid.core.utilities.OffsetPosition;
import org.grobid.core.utilities.LayoutTokensUtil;
public class AffiliationAddressParserTest {
public static final Logger LOGGER = LoggerFactory.getLogger(AffiliationAddressParserTest.class);
private static final boolean NO_USE_PRELABEL = false;
private static final List<List<OffsetPosition>> NO_PLACES_POSITIONS = Arrays.asList(
Collections.emptyList()
);
private AffiliationAddressParser target;
private GrobidAnalyzer analyzer;
@Before
public void setUp() throws Exception {
this.target = new AffiliationAddressParser();
this.analyzer = GrobidAnalyzer.getInstance();
}
@BeforeClass
public static void init() {
LibraryLoader.load();
GrobidProperties.getInstance();
}
@AfterClass
public static void tearDown() {
GrobidFactory.reset();
}
@Test
public void shouldNotFailOnEmptyLabelResult() throws Exception {
String labelResult = "";
List<LayoutToken> tokenizations = Collections.emptyList();
List<Affiliation> result = this.target.resultBuilder(
labelResult,
tokenizations,
NO_USE_PRELABEL
);
assertThat("affiliations should be null", result, is(nullValue()));
}
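// Converts layout tokens to the affiliation block format expected by the feature extractor, emitting the special "@newline" marker for line breaks.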
private static List<String> getAffiliationBlocksWithLineFeed(List<LayoutToken> tokenizations) {
ArrayList<String> affiliationBlocks = new ArrayList<String>();
for (LayoutToken tok : tokenizations) {
if (tok.getText().length() == 0) continue;
if (!tok.getText().equals(" ")) {
if (tok.getText().equals("\n")) {
affiliationBlocks.add("@newline");
} else {
affiliationBlocks.add(tok + " <affiliation>");
}
}
}
return affiliationBlocks;
}
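// Appends a label column to each feature line, simulating the output of the sequence labelling model.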
private static String addLabelsToFeatures(String header, List<String> labels) {
String[] headerLines = header.split("\n");
if (headerLines.length != labels.size()) {
throw new IllegalArgumentException(String.format(
"number of header lines and labels must match, %d != %d",
headerLines.length, labels.size()
));
}
ArrayList<String> resultLines = new ArrayList<>(headerLines.length);
for (int i = 0; i < headerLines.length; i++) {
resultLines.add(headerLines[i] + " " + labels.get(i));
}
return Joiner.on("\n").join(resultLines);
}
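// End-to-end helper: tokenize the text, generate features, attach the given labels, and build Affiliation objects from the labelled result.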
private List<Affiliation> processLabelResults(
List<String> tokens,
List<String> labels
) throws Exception {
List<LayoutToken> tokenizations = LayoutTokensUtil.getLayoutTokensForTokenizedText(tokens);
LOGGER.debug("tokenizations: {}", tokenizations);
List<String> affiliationBlocks = getAffiliationBlocksWithLineFeed(tokenizations);
String header = FeaturesVectorAffiliationAddress.addFeaturesAffiliationAddress(
affiliationBlocks, Arrays.asList(tokenizations), NO_PLACES_POSITIONS
);
LOGGER.debug("header: {}", header);
String labelResult = addLabelsToFeatures(header, labels);
LOGGER.debug("labelResult: {}", labelResult);
return this.target.resultBuilder(
labelResult,
tokenizations,
NO_USE_PRELABEL
);
}
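// Variant taking (token, label) pairs; entries without a label (e.g. "\n") carry no label line and act as separators.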
private List<Affiliation> processLabelResults(String[][] tokenLabelPairs) throws Exception {
ArrayList<String> tokens = new ArrayList<>();
ArrayList<String> labels = new ArrayList<>();
boolean prevWhitespace = false;
for (String[] pair: tokenLabelPairs) {
if (!tokens.isEmpty() && (!prevWhitespace)) {
tokens.add(" ");
}
prevWhitespace = pair[0].trim().isEmpty();
tokens.add(pair[0]);
if (pair.length > 1) {
labels.add(pair[1]);
}
}
return this.processLabelResults(tokens, labels);
}
@Test
public void shouldExtractSimpleAffiliation() throws Exception {
List<Affiliation> affiliations = this.processLabelResults(new String[][] {
{"1", "I-<marker>"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Science", "<institution>"}
});
assertThat("should have one affiliation", affiliations, is(hasSize(1)));
Affiliation affiliation = affiliations.get(0);
assertThat("institution.marker", affiliation.getMarker(), is("1"));
assertThat(
"institution.institutions",
affiliation.getInstitutions(),
is(Arrays.asList("University of Science"))
);
assertThat(
"institution.rawAffiliationString",
affiliation.getRawAffiliationString(),
is("University of Science")
);
}
@Test
public void shouldExtractMultipleInstitutions() throws Exception {
List<Affiliation> affiliations = this.processLabelResults(new String[][] {
{"1", "I-<marker>"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Science", "<institution>"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Madness", "<institution>"}
});
assertThat("should have one affiliation", affiliations, is(hasSize(1)));
Affiliation affiliation = affiliations.get(0);
assertThat("institution.marker", affiliation.getMarker(), is("1"));
assertThat(
"institution.institutions",
affiliation.getInstitutions(),
is(Arrays.asList("University of Science", "University of Madness"))
);
assertThat(
"institution.rawAffiliationString",
affiliation.getRawAffiliationString(),
is("University of Science University of Madness")
);
}
@Test
public void shouldExtractSecondInstitutionAsSeparateAffiliationIfNewLine() throws Exception {
List<Affiliation> affiliations = this.processLabelResults(new String[][] {
{"1", "I-<marker>"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Science", "<institution>"},
{"\n"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Madness", "<institution>"}
});
assertThat("should have one affiliation", affiliations, is(hasSize(2)));
assertThat("(0).institution.marker", affiliations.get(0).getMarker(), is("1"));
assertThat(
"(0).institution.institutions",
affiliations.get(0).getInstitutions(),
is(Arrays.asList("University of Science"))
);
assertThat(
"(0).institution.rawAffiliationString",
affiliations.get(0).getRawAffiliationString(),
is("University of Science")
);
assertThat("(1).institution.marker", affiliations.get(1).getMarker(), is("1"));
assertThat(
"(1).institution.institutions",
affiliations.get(1).getInstitutions(),
is(Arrays.asList("University of Madness"))
);
assertThat(
"(1).institution.rawAffiliationString",
affiliations.get(1).getRawAffiliationString(),
is("University of Madness")
);
}
@Test
public void shouldExtractMultipleAffiliations() throws Exception {
List<Affiliation> affiliations = this.processLabelResults(new String[][] {
{"1", "I-<marker>"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Science", "<institution>"},
{"2", "I-<marker>"},
{"University", "I-<institution>"},
{"of", "<institution>"},
{"Madness", "<institution>"}
});
assertThat("should have one affiliation", affiliations, is(hasSize(2)));
assertThat("institution.marker", affiliations.get(0).getMarker(), is("1"));
assertThat(
"institution.institutions",
affiliations.get(0).getInstitutions(),
is(Arrays.asList("University of Science"))
);
assertThat(
"institution.rawAffiliationString",
affiliations.get(0).getRawAffiliationString(),
is("University of Science")
);
assertThat("institution.marker", affiliations.get(1).getMarker(), is("2"));
assertThat(
"institution.institutions",
affiliations.get(1).getInstitutions(),
is(Arrays.asList("University of Madness"))
);
assertThat(
"institution.rawAffiliationString",
affiliations.get(1).getRawAffiliationString(),
is("University of Madness")
);
}
}
| 9,834 | 36.681992 | 100 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/DateParserIntegrationTest.java
|
package org.grobid.core.engines;
import org.grobid.core.data.Date;
import org.grobid.core.factory.AbstractEngineFactory;
import org.junit.*;
import java.util.Arrays;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertEquals;
/**
* Created by lfoppiano on 02/01/17.
*/
public class DateParserIntegrationTest {
DateParser target;
@BeforeClass
public static void setInitialContext() throws Exception {
// MockContext.setInitialContext();
AbstractEngineFactory.init();
}
@AfterClass
public static void destroyInitialContext() throws Exception {
// MockContext.destroyInitialContext();
}
@Before
public void setUp() throws Exception {
target = new DateParser();
}
@Test
public void processing_englishStandardDate_shouldWork() throws Exception {
List<Date> output = target.process("19 January 1983");
assertThat(output, hasSize(1));
final Date date = output.get(0);
assertThat(date.getDay(), is(19));
assertThat(date.getMonth(), is(1));
assertThat(date.getYear(), is(1983));
assertThat(date.getDayString(), is("19"));
assertThat(date.getMonthString(), is("January"));
assertThat(date.getYearString(), is("1983"));
}
@Test
public void processing_englishStandardDate1_shouldWork() throws Exception {
List<Date> output = target.process("19. January 19 83");
assertThat(output, hasSize(1));
final Date date = output.get(0);
assertThat(date.getDay(), is(19));
assertThat(date.getMonth(), is(1));
assertThat(date.getYear(), is(1983));
assertThat(date.getDayString(), is("19"));
assertThat(date.getMonthString(), is("January"));
// TODO: With the clusteror the space is removed...
// assertThat(date.getYearString(), is("19 83"));
assertThat(date.getYearString(), is("1983"));
}
@Test
public void processing_englishStandardDate2_shouldWork() throws Exception {
List<Date> output = target.process("1918-1939");
assertThat(output, hasSize(2));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(-1));
assertThat(date1.getMonth(), is(-1));
assertThat(date1.getYear(), is(1918));
assertThat(date1.getDayString(), nullValue());
assertThat(date1.getMonthString(), nullValue());
assertThat(date1.getYearString(), is("1918"));
final Date date2 = output.get(1);
assertThat(date2.getDay(), is(-1));
assertThat(date2.getMonth(), is(-1));
assertThat(date2.getYear(), is(1939));
assertThat(date2.getDayString(), nullValue());
assertThat(date2.getMonthString(), nullValue());
assertThat(date2.getYearString(), is("1939"));
}
@Test
public void processing_englishStandardDate3_shouldWork() throws Exception {
List<Date> output = target.process("16.06.1942-28.04.1943");
assertThat(output, hasSize(2));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(16));
assertThat(date1.getMonth(), is(6));
assertThat(date1.getYear(), is(1942));
assertThat(date1.getDayString(), is("16"));
assertThat(date1.getMonthString(), is("06"));
assertThat(date1.getYearString(), is("1942"));
final Date date2 = output.get(1);
assertThat(date2.getDay(), is(28));
assertThat(date2.getMonth(), is(4));
assertThat(date2.getYear(), is(1943));
assertThat(date2.getDayString(), is("28"));
assertThat(date2.getMonthString(), is("04"));
assertThat(date2.getYearString(), is("1943"));
}
@Ignore("Need more training data, perhaps")
@Test
public void processing_englishStandardDate4_shouldWork() throws Exception {
List<Date> output = target.process("4.01.1943-21.10.1943");
assertThat(output, hasSize(2));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(4));
assertThat(date1.getMonth(), is(1));
assertThat(date1.getYear(), is(1943));
assertThat(date1.getDayString(), is("4"));
assertThat(date1.getMonthString(), is("1"));
assertThat(date1.getYearString(), is("1943"));
final Date date2 = output.get(1);
assertThat(date2.getDay(), is(21));
assertThat(date2.getMonth(), is(10));
assertThat(date2.getYear(), is(1943));
assertThat(date2.getDayString(), is("21"));
assertThat(date2.getMonthString(), is("04"));
assertThat(date2.getYearString(), is("1943"));
}
@Test
public void processing_englishStandardDate5_shouldWork() throws Exception {
List<Date> output = target.process("12.03.1942-10.1943");
assertThat(output, hasSize(2));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(12));
assertThat(date1.getMonth(), is(3));
assertThat(date1.getYear(), is(1942));
assertThat(date1.getDayString(), is("12"));
assertThat(date1.getMonthString(), is("03"));
assertThat(date1.getYearString(), is("1942"));
final Date date2 = output.get(1);
assertThat(date2.getDay(), is(-1));
assertThat(date2.getMonth(), is(10));
assertThat(date2.getYear(), is(1943));
assertThat(date2.getDayString(), nullValue());
assertThat(date2.getMonthString(), is("10"));
assertThat(date2.getYearString(), is("1943"));
}
@Ignore("Need more training data, perhaps")
@Test
public void processing_englishStandardDate6_shouldWork() throws Exception {
List<Date> output = target.process("1941-45");
assertThat(output, hasSize(2));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(-1));
assertThat(date1.getMonth(), is(-1));
assertThat(date1.getYear(), is(1941));
assertThat(date1.getDayString(), nullValue());
assertThat(date1.getMonthString(), nullValue());
assertThat(date1.getYearString(), is("1941"));
final Date date2 = output.get(1);
assertThat(date2.getDay(), is(-1));
assertThat(date2.getMonth(), is(-1));
assertThat(date2.getYear(), is(45));
assertThat(date2.getDayString(), nullValue());
assertThat(date2.getMonthString(), nullValue());
assertThat(date2.getYearString(), is("45"));
}
@Test
public void processing_englishStandardDate7_shouldWork() throws Exception {
List<Date> output = target.process("2015-10-21");
assertThat(output, hasSize(1));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(21));
assertThat(date1.getMonth(), is(10));
assertThat(date1.getYear(), is(2015));
assertThat(date1.getDayString(), is("" + date1.getDay()));
assertThat(date1.getMonthString(), is("" + date1.getMonth()));
assertThat(date1.getYearString(), is("" + date1.getYear()));
}
@Test
public void processing_englishStandardDate9_shouldWork() throws Exception {
List<Date> output = target.process("2015-10-21 10-12-2016");
assertThat(output, hasSize(2));
final Date date1 = output.get(0);
assertThat(date1.getDay(), is(21));
assertThat(date1.getMonth(), is(10));
assertThat(date1.getYear(), is(2015));
assertThat(date1.getDayString(), is("" + date1.getDay()));
assertThat(date1.getMonthString(), is("" + date1.getMonth()));
assertThat(date1.getYearString(), is("" + date1.getYear()));
final Date date2 = output.get(1);
assertThat(date2.getDay(), is(10));
assertThat(date2.getMonth(), is(12));
assertThat(date2.getYear(), is(2016));
assertThat(date2.getDayString(), is("" + date2.getDay()));
assertThat(date2.getMonthString(), is("" + date2.getMonth()));
assertThat(date2.getYearString(), is("" + date2.getYear()));
}
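// trainingExtraction produces TEI-style training snippets from raw date strings.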
@Test
public void testTrainingExtraction_simpleDate1() throws Exception {
List<String> input = Arrays.asList("December 1943");
StringBuilder sb = target.trainingExtraction(input);
String output = sb.toString();
assertThat(output, is("\t<date><month>December</month> <year>1943</year></date>\n"));
}
@Test
public void testTrainingExtraction_simpleDate2() throws Exception {
List<String> input = Arrays.asList("15 March 1942");
StringBuilder sb = target.trainingExtraction(input);
String output = sb.toString();
assertThat(output, is("\t<date><day>15</day> <month>March</month> <year>1942</year></date>\n"));
}
@Test
public void testTrainingExtraction_simpleDate3() throws Exception {
List<String> input = Arrays.asList("1943-1944");
StringBuilder sb = target.trainingExtraction(input);
String output = sb.toString();
assertThat(output, is("\t<date><year>1943</year>-</date>\n\t<date><year>1944</year></date>\n"));
}
@Test
public void testTrainingExtraction_emptyInput() throws Exception {
assertThat(target.trainingExtraction(null), nullValue());
}
@Test
public void testMayAndMarchOverlap_1() throws Exception {
List<Date> dates = target.process("Mar 2003");
assertEquals(1, dates.size());
Date date = dates.get(0);
assertEquals(2003, date.getYear());
assertEquals(3, date.getMonth());
}
@Test
public void testMayAndMarchOverlap_2() throws Exception {
List<Date> dates = target.process("May 2003");
assertEquals(1, dates.size());
Date date = dates.get(0);
assertEquals(2003, date.getYear());
assertEquals(5, date.getMonth());
}
}
| 10,038 | 33.737024 | 104 |
java
|
grobid
|
grobid-master/grobid-core/src/test/java/org/grobid/core/engines/FullTextParserTest.java
|
package org.grobid.core.engines;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.grobid.core.analyzers.GrobidAnalyzer;
import org.grobid.core.factory.GrobidFactory;
import org.grobid.core.layout.LayoutToken;
import org.grobid.core.main.LibraryLoader;
import org.grobid.core.utilities.GrobidProperties;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
public class FullTextParserTest {
private FullTextParser target;
@Before
public void setUp() throws Exception {
target = new FullTextParser(new EngineParsers());
}
@BeforeClass
public static void init() {
LibraryLoader.load();
GrobidProperties.getInstance();
}
@AfterClass
public static void tearDown() {
GrobidFactory.reset();
}
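
// The "rese" fixtures below imitate raw sequence-labelling output: one token per line, tab-separated features,
// with the predicted label in the last column ("I-" prefixes mark the start of a labelled field).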
@Test
public void testProcessTrainingDataFigures_single_figure() throws Exception {
String text = "The mechanism for superconductivity FIG. 1. λ(T) vs . T for YBCO";
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
String rese = "The\tthe\tT\tTh\tThe\tThe\te\the\tThe\tThe\tBLOCKSTART\tLINESTART\tALIGNEDLEFT\tNEWFONT\tHIGHERFONT\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\tI-<paragraph>\n" +
"mechanism\tmechanism\tm\tme\tmec\tmech\tm\tsm\tism\tnism\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"superconductivity\tsuperconductivity\ts\tsu\tsup\tsupe\ty\tty\tity\tvity\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"FIG\tfig\tF\tFI\tFIG\tFIG\tG\tIG\tFIG\tFIG\tBLOCKSTART\tLINESTART\tLINEINDENT\tNEWFONT\tHIGHERFONT\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\tI-<figure>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"1\t1\t1\t1\t1\t1\t1\t1\t1\t1\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tALLDIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t1\t0\t<figure>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"λ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"(\t(\t(\t(\t(\t(\t(\t(\t(\t(\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tOPENBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
")\t)\t)\t)\t)\t)\t)\t)\t)\t)\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tENDBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"vs\tvs\tv\tvs\tvs\tvs\ts\tvs\tvs\tvs\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEEND\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINESTART\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"YBCO\tybco\tY\tYB\tYBC\tYBCO\tO\tCO\tBCO\tYBCO\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n\n";
Pair<String, String> stringStringPair = target.processTrainingDataFigures(rese, tokens, "123");
String tei = stringStringPair.getLeft();
String tokenisation = stringStringPair.getRight();
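// The first tab-separated column of each line is the raw token; joining those tokens rebuilds the figure text.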
String reconstructedText = Arrays.stream(tokenisation.split("\n")).map(l -> l.split("\t")[0]).collect(Collectors.joining(" "));
assertThat(reconstructedText, is("FIG . 1 . λ ( T ) vs . T for YBCO"));
assertThat(tokenisation.split("\n").length, is(13));
}
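
// Same token stream, but "vs" now carries a second "I-<figure>" label, so the training data is expected to
// split into two figure blocks separated by blank lines.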
@Test
public void testProcessTrainingDataFigures_multiple_figures() throws Exception {
String text = "The mechanism for superconductivity FIG. 1. λ(T) vs . T for YBCO";
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
String rese = "The\tthe\tT\tTh\tThe\tThe\te\the\tThe\tThe\tBLOCKSTART\tLINESTART\tALIGNEDLEFT\tNEWFONT\tHIGHERFONT\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\tI-<paragraph>\n" +
"mechanism\tmechanism\tm\tme\tmec\tmech\tm\tsm\tism\tnism\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"superconductivity\tsuperconductivity\ts\tsu\tsup\tsupe\ty\tty\tity\tvity\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"FIG\tfig\tF\tFI\tFIG\tFIG\tG\tIG\tFIG\tFIG\tBLOCKSTART\tLINESTART\tLINEINDENT\tNEWFONT\tHIGHERFONT\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\tI-<figure>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"1\t1\t1\t1\t1\t1\t1\t1\t1\t1\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tALLDIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t1\t0\t<figure>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"λ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"(\t(\t(\t(\t(\t(\t(\t(\t(\t(\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tOPENBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
")\t)\t)\t)\t)\t)\t)\t)\t)\t)\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tENDBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"vs\tvs\tv\tvs\tvs\tvs\ts\tvs\tvs\tvs\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\tI-<figure>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEEND\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINESTART\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n" +
"YBCO\tybco\tY\tYB\tYBC\tYBCO\tO\tCO\tBCO\tYBCO\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<figure>\n\n";
Pair<String, String> stringStringPair = target.processTrainingDataFigures(rese, tokens, "123");
String tei = stringStringPair.getLeft();
String tokenisation = stringStringPair.getRight();
List<String> output = new ArrayList<>();
for (String block : tokenisation.split("\n\n\n")) {
String collect = Arrays.stream(block.split("\n")).map(l -> l.split("\t")[0]).collect(Collectors.joining(" "));
if (StringUtils.isNotBlank(collect)) {
output.add(collect);
}
}
assertThat(output, hasSize(2));
assertThat(output.get(0), is("FIG . 1 . λ ( T )"));
assertThat(output.get(1), is("vs . T for YBCO"));
assertThat(tokenisation.split("\n").length, is(15));
}
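
// Table counterpart of the figure tests above: the same token stream labelled <table> and routed through
// processTrainingDataTables.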
@Test
public void testProcessTrainingDataTables_single_table() throws Exception {
String text = "The mechanism for superconductivity FIG. 1. λ(T) vs . T for YBCO";
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
String rese = "The\tthe\tT\tTh\tThe\tThe\te\the\tThe\tThe\tBLOCKSTART\tLINESTART\tALIGNEDLEFT\tNEWFONT\tHIGHERFONT\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\tI-<paragraph>\n" +
"mechanism\tmechanism\tm\tme\tmec\tmech\tm\tsm\tism\tnism\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"superconductivity\tsuperconductivity\ts\tsu\tsup\tsupe\ty\tty\tity\tvity\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"FIG\tfig\tF\tFI\tFIG\tFIG\tG\tIG\tFIG\tFIG\tBLOCKSTART\tLINESTART\tLINEINDENT\tNEWFONT\tHIGHERFONT\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\tI-<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"1\t1\t1\t1\t1\t1\t1\t1\t1\t1\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tALLDIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t1\t0\t<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"λ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"(\t(\t(\t(\t(\t(\t(\t(\t(\t(\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tOPENBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
")\t)\t)\t)\t)\t)\t)\t)\t)\t)\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tENDBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"vs\tvs\tv\tvs\tvs\tvs\ts\tvs\tvs\tvs\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEEND\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINESTART\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"YBCO\tybco\tY\tYB\tYBC\tYBCO\tO\tCO\tBCO\tYBCO\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n\n";
Pair<String, String> stringStringPair = target.processTrainingDataTables(rese, tokens, "123");
String tei = stringStringPair.getLeft();
String tokenisation = stringStringPair.getRight();
String reconstructedText = Arrays.stream(tokenisation.split("\n")).map(l -> l.split("\t")[0]).collect(Collectors.joining(" "));
assertThat(reconstructedText, is("FIG . 1 . λ ( T ) vs . T for YBCO"));
assertThat(tokenisation.split("\n").length, is(13));
}
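
// As with the figures, the second "I-<table>" on "vs" should yield two separate table blocks.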
@Test
public void testProcessTrainingDataTable_multiple_tables() throws Exception {
String text = "The mechanism for superconductivity FIG. 1. λ(T) vs . T for YBCO";
List<LayoutToken> tokens = GrobidAnalyzer.getInstance().tokenizeWithLayoutToken(text);
String rese = "The\tthe\tT\tTh\tThe\tThe\te\the\tThe\tThe\tBLOCKSTART\tLINESTART\tALIGNEDLEFT\tNEWFONT\tHIGHERFONT\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\tI-<paragraph>\n" +
"mechanism\tmechanism\tm\tme\tmec\tmech\tm\tsm\tism\tnism\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"superconductivity\tsuperconductivity\ts\tsu\tsup\tsupe\ty\tty\tity\tvity\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t4\t0\tNUMBER\t0\t0\t<paragraph>\n" +
"FIG\tfig\tF\tFI\tFIG\tFIG\tG\tIG\tFIG\tFIG\tBLOCKSTART\tLINESTART\tLINEINDENT\tNEWFONT\tHIGHERFONT\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\tI-<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"1\t1\t1\t1\t1\t1\t1\t1\t1\t1\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tALLDIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t1\t0\t<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"λ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tλ\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"(\t(\t(\t(\t(\t(\t(\t(\t(\t(\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tOPENBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
")\t)\t)\t)\t)\t)\t)\t)\t)\t)\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tENDBRACKET\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"vs\tvs\tv\tvs\tvs\tvs\ts\tvs\tvs\tvs\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\tI-<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEEND\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"T\tt\tT\tT\tT\tT\tT\tT\tT\tT\tBLOCKIN\tLINESTART\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"for\tfor\tf\tfo\tfor\tfor\tr\tor\tfor\tfor\tBLOCKIN\tLINEIN\tLINEINDENT\tNEWFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n" +
"YBCO\tybco\tY\tYB\tYBC\tYBCO\tO\tCO\tBCO\tYBCO\tBLOCKIN\tLINEIN\tLINEINDENT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t0\tNOPUNCT\t10\t3\t0\tNUMBER\t0\t0\t<table>\n\n";
Pair<String, String> stringStringPair = target.processTrainingDataTables(rese, tokens, "123");
String tei = stringStringPair.getLeft();
String tokenisation = stringStringPair.getRight();
List<String> output = new ArrayList<>();
for (String block : tokenisation.split("\n\n\n")) {
String collect = Arrays.stream(block.split("\n")).map(l -> l.split("\t")[0]).collect(Collectors.joining(" "));
if (StringUtils.isNotBlank(collect)) {
output.add(collect);
}
}
assertThat(output, hasSize(2));
assertThat(output.get(0), is("FIG . 1 . λ ( T )"));
assertThat(output.get(1), is("vs . T for YBCO"));
assertThat(tokenisation.split("\n").length, is(15));
}
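
// An abstract wrongly labelled as <table> should be corrected: post-processing is expected to relabel every
// <table> line as <paragraph>, leaving no <table> labels behind.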
@Test
public void testPostProcessLabeledAbstract_shouldTransformTableLabelInParagraphLabel() {
String resultWithTables = "This\tthis\tT\tTh\tThi\tThis\ts\tis\this\tThis\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tNEWFONT\tHIGHERFONT\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t0\t10\t0\tNUMBER\t0\t0\tI-<table>\n" +
"study\tstudy\ts\tst\tstu\tstud\ty\tdy\tudy\ttudy\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"was\twas\tw\twa\twas\twas\ts\tas\twas\twas\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"supported\tsupported\ts\tsu\tsup\tsupp\td\ted\tted\trted\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"by\tby\tb\tby\tby\tby\ty\tby\tby\tby\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t0\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"the\tthe\tt\tth\tthe\tthe\te\the\tthe\tthe\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t1\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"South\tsouth\tS\tSo\tSou\tSout\th\tth\tuth\touth\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t1\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Asian\tasian\tA\tAs\tAsi\tAsia\tn\tan\tian\tsian\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t1\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Clinical\tclinical\tC\tCl\tCli\tClin\tl\tal\tcal\tical\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t1\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Toxicology\ttoxicology\tT\tTo\tTox\tToxi\ty\tgy\togy\tlogy\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t1\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Research\tresearch\tR\tRe\tRes\tRese\th\tch\trch\tarch\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t2\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Collaboration\tcollaboration\tC\tCo\tCol\tColl\tn\ton\tion\ttion\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t2\t10\t0\tNUMBER\t0\t0\t<table>\n" +
",\t,\t,\t,\t,\t,\t,\t,\t,\t,\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tCOMMA\t3\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"which\twhich\tw\twh\twhi\twhic\th\tch\tich\thich\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t3\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"is\tis\ti\tis\tis\tis\ts\tis\tis\tis\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t3\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"funded\tfunded\tf\tfu\tfun\tfund\td\ted\tded\tnded\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t3\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"by\tby\tb\tby\tby\tby\ty\tby\tby\tby\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t3\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"The\tthe\tT\tTh\tThe\tThe\te\the\tThe\tThe\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t3\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Wellcome\twellcome\tW\tWe\tWel\tWell\te\tme\tome\tcome\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t4\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Trust\ttrust\tT\tTr\tTru\tTrus\tt\tst\tust\trust\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t4\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"/\t/\t/\t/\t/\t/\t/\t/\t/\t/\tBLOCKIN\tLINEEND\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tNOPUNCT\t4\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"National\tnational\tN\tNa\tNat\tNati\tl\tal\tnal\tonal\tBLOCKIN\tLINESTART\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t4\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Health\thealth\tH\tHe\tHea\tHeal\th\tth\tlth\talth\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t5\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"and\tand\ta\tan\tand\tand\td\tnd\tand\tand\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t5\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Medical\tmedical\tM\tMe\tMed\tMedi\tl\tal\tcal\tical\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t5\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Research\tresearch\tR\tRe\tRes\tRese\th\tch\trch\tarch\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t5\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Council\tcouncil\tC\tCo\tCou\tCoun\tl\til\tcil\tncil\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t6\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"International\tinternational\tI\tIn\tInt\tInte\tl\tal\tnal\tonal\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t6\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Collaborative\tcollaborative\tC\tCo\tCol\tColl\te\tve\tive\ttive\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t6\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Research\tresearch\tR\tRe\tRes\tRese\th\tch\trch\tarch\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t7\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"Grant\tgrant\tG\tGr\tGra\tGran\tt\tnt\tant\trant\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t7\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"GR071669MA\tgr071669ma\tG\tGR\tGR0\tGR07\tA\tMA\t9MA\t69MA\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tCONTAINSDIGITS\t0\tNOPUNCT\t8\t10\t0\tNUMBER\t0\t0\t<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t8\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"The\tthe\tT\tTh\tThe\tThe\te\the\tThe\tThe\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tINITCAP\tNODIGIT\t0\tNOPUNCT\t8\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"funding\tfunding\tf\tfu\tfun\tfund\tg\tng\ting\tding\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t8\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"bodies\tbodies\tb\tbo\tbod\tbodi\ts\tes\ties\tdies\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t8\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"had\thad\th\tha\thad\thad\td\tad\thad\thad\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t9\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"no\tno\tn\tno\tno\tno\to\tno\tno\tno\tBLOCKIN\tLINEEND\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t9\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"role\trole\tr\tro\trol\trole\te\tle\tole\trole\tBLOCKIN\tLINESTART\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t9\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"in\tin\ti\tin\tin\tin\tn\tin\tin\tin\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t9\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"analyzing\tanalyzing\ta\tan\tana\tanal\tg\tng\ting\tzing\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t9\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"or\tor\to\tor\tor\tor\tr\tor\tor\tor\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"interpreting\tinterpreting\ti\tin\tint\tinte\tg\tng\ting\tting\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"the\tthe\tt\tth\tthe\tthe\te\the\tthe\tthe\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"data\tdata\td\tda\tdat\tdata\ta\tta\tata\tdata\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t10\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"or\tor\to\tor\tor\tor\tr\tor\tor\tor\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t11\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"writing\twriting\tw\twr\twri\twrit\tg\tng\ting\tting\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t11\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"the\tthe\tt\tth\tthe\tthe\te\the\tthe\tthe\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t11\t10\t0\tNUMBER\t0\t0\t<table>\n" +
"article\tarticle\ta\tar\tart\tarti\te\tle\tcle\ticle\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tNOCAPS\tNODIGIT\t0\tNOPUNCT\t11\t10\t0\tNUMBER\t0\t0\t<table>\n" +
".\t.\t.\t.\t.\t.\t.\t.\t.\t.\tBLOCKIN\tLINEIN\tALIGNEDLEFT\tSAMEFONT\tSAMEFONTSIZE\t0\t0\tALLCAP\tNODIGIT\t1\tDOT\t11\t10\t0\tNUMBER\t0\t0\t<table>";
String postprocessed = FullTextParser.postProcessFullTextLabeledText(resultWithTables);
assertThat(Arrays.stream(StringUtils.split(postprocessed, "\n"))
.filter(l -> l.endsWith("<table>"))
.count(), is(0L));
assertThat(Arrays.stream(StringUtils.split(postprocessed, "\n"))
.filter(l -> l.endsWith("<paragraph>"))
.count(), is(Arrays.stream(StringUtils.split(resultWithTables, "\n"))
.filter(l -> l.endsWith("<table>"))
.count()));
}
}
| 27,125 | 102.140684 | 215 | java |