repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
Janus | Janus-master/src/trashbin/minerful/io/encdec/declaremap/OldDeclareMapEncoderDecoderMethods.java | package trashbin.minerful.io.encdec.declaremap;
import java.io.File;
import java.util.ArrayList;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import minerful.concept.ProcessModel;
import minerful.concept.TaskCharArchive;
import minerful.concept.constraint.Constraint;
import minerful.concept.constraint.ConstraintsBag;
import minerful.concept.constraint.MetaConstraintUtils;
import minerful.concept.constraint.existence.AtMostOne;
import minerful.concept.constraint.existence.Init;
import minerful.concept.constraint.existence.Participation;
import minerful.concept.constraint.relation.AlternatePrecedence;
import minerful.concept.constraint.relation.AlternateResponse;
import minerful.concept.constraint.relation.AlternateSuccession;
import minerful.concept.constraint.relation.ChainPrecedence;
import minerful.concept.constraint.relation.ChainResponse;
import minerful.concept.constraint.relation.ChainSuccession;
import minerful.concept.constraint.relation.CoExistence;
import minerful.concept.constraint.relation.NotChainSuccession;
import minerful.concept.constraint.relation.NotCoExistence;
import minerful.concept.constraint.relation.NotSuccession;
import minerful.concept.constraint.relation.Precedence;
import minerful.concept.constraint.relation.RespondedExistence;
import minerful.concept.constraint.relation.Response;
import minerful.concept.constraint.relation.Succession;
import minerful.io.encdec.TaskCharEncoderDecoder;
import minerful.logparser.StringTaskClass;
import org.processmining.plugins.declareminer.visualizing.ActivityDefinition;
import org.processmining.plugins.declareminer.visualizing.AssignmentModel;
import org.processmining.plugins.declareminer.visualizing.AssignmentModelView;
import org.processmining.plugins.declareminer.visualizing.AssignmentViewBroker;
import org.processmining.plugins.declareminer.visualizing.ConstraintDefinition;
import org.processmining.plugins.declareminer.visualizing.Parameter;
import org.processmining.plugins.declareminer.visualizing.XMLBrokerFactory;
public class OldDeclareMapEncoderDecoderMethods {
public static final String IF_EXTRACTION_REG_EXP = ".*IF;([0-9\\.]+).*";
public static final String CONFIDENCE_EXTRACTION_REG_EXP = ".*confidence;([0-9\\.]+).*";
public static final String SUPPORT_EXTRACTION_REG_EXP = ".*support;([0-9\\.]+).*";
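// Illustrative only (the exact text emitted by the Declare miner may differ): a constraint whose
// text contains "support;0.95", "confidence;0.87" and "IF;0.42" would yield support = 0.95,
// confidence = 0.87 and interest factor = 0.42 via the patterns above; if a pattern does not
// match, the corresponding Constraint.DEFAULT_* value is used instead.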
@Deprecated
public static ProcessModel fromDeclareMapToMinerfulProcessModel(String declareMapFilePath) {
return fromDeclareMapToMinerfulProcessModel(declareMapFilePath, null);
}
@Deprecated
public static ProcessModel fromDeclareMapToMinerfulProcessModel(AssignmentModel declareMapModel) {
return fromDeclareMapToMinerfulProcessModel(declareMapModel, null);
}
@Deprecated
private static ProcessModel fromDeclareMapToMinerfulProcessModel(String declareMapFilePath, TaskCharArchive taskCharArchive) {
File inputFile = new File(declareMapFilePath);
if (!inputFile.canRead() || !inputFile.isFile()) {
throw new IllegalArgumentException("Unreadable file: " + declareMapFilePath);
}
AssignmentViewBroker broker = XMLBrokerFactory.newAssignmentBroker(declareMapFilePath);
AssignmentModel model = broker.readAssignment();
AssignmentModelView view = new AssignmentModelView(model);
broker.readAssignmentGraphical(model, view);
return fromDeclareMapToMinerfulProcessModel(model, taskCharArchive);
}
@Deprecated
private static ProcessModel fromDeclareMapToMinerfulProcessModel(AssignmentModel declareMapModel, TaskCharArchive taskCharArchive) {
ArrayList<String> params = new ArrayList<String>();
ArrayList<Constraint> minerFulConstraints = new ArrayList<Constraint>();
if (taskCharArchive == null) {
TaskCharEncoderDecoder encdec = new TaskCharEncoderDecoder();
for (ConstraintDefinition cd : declareMapModel.getConstraintDefinitions()) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
encdec.encode(new StringTaskClass(ad.getName()));
}
}
}
for (ActivityDefinition ad : declareMapModel.getActivityDefinitions()) {
encdec.encode(new StringTaskClass(ad.getName()));
}
taskCharArchive = new TaskCharArchive(encdec.getTranslationMap());
}
for (ConstraintDefinition cd : declareMapModel.getConstraintDefinitions()) {
String template = cd.getName().replace("-", "").replace(" ", "").toLowerCase();
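// e.g. a Declare template name such as "Alternate Precedence" or "Not Co-Existence"
// normalizes to "alternateprecedence" / "notcoexistence", matching the branches below.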
params = new ArrayList<String>();
Pattern
supPattern = Pattern.compile(OldDeclareMapEncoderDecoderMethods.SUPPORT_EXTRACTION_REG_EXP),
confiPattern = Pattern.compile(OldDeclareMapEncoderDecoderMethods.CONFIDENCE_EXTRACTION_REG_EXP),
inteFaPattern = Pattern.compile(OldDeclareMapEncoderDecoderMethods.IF_EXTRACTION_REG_EXP);
Matcher
supMatcher = supPattern.matcher(cd.getText().trim()),
confiMatcher = confiPattern.matcher(cd.getText().trim()),
inteFaMatcher = inteFaPattern.matcher(cd.getText().trim());
Double
support = (supMatcher.matches() && supMatcher.groupCount() > 0 ? Double.valueOf(supMatcher.group(1)) : Constraint.DEFAULT_SUPPORT),
confidence = (confiMatcher.matches() && confiMatcher.groupCount() > 0 ? Double.valueOf(confiMatcher.group(1)) : Constraint.DEFAULT_CONFIDENCE),
interestFact = (inteFaMatcher.matches() && inteFaMatcher.groupCount() > 0 ? Double.valueOf(inteFaMatcher.group(1)): Constraint.DEFAULT_INTEREST_FACTOR);
if (template.equals("alternateprecedence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
AlternatePrecedence minerConstr = new AlternatePrecedence(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("alternateresponse")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
AlternateResponse minerConstr = new AlternateResponse(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("alternatesuccession")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
AlternateSuccession minerConstr = new AlternateSuccession(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("chainprecedence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
ChainPrecedence minerConstr = new ChainPrecedence(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("chainresponse")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
ChainResponse minerConstr = new ChainResponse(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("chainsuccession")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
ChainSuccession minerConstr = new ChainSuccession(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("coexistence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
CoExistence minerConstr = new CoExistence(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("notchainsuccession")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
NotChainSuccession minerConstr = new NotChainSuccession(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("notcoexistence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
NotCoExistence minerConstr = new NotCoExistence(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("notsuccession")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
NotSuccession minerConstr = new NotSuccession(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("precedence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
Precedence minerConstr = new Precedence(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("response")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
Response minerConstr = new Response(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("succession")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
Succession minerConstr = new Succession(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("respondedexistence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
RespondedExistence minerConstr = new RespondedExistence(taskCharArchive.getTaskChar(params.get(0)),taskCharArchive.getTaskChar(params.get(1)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("init")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
Init minerConstr = new Init(taskCharArchive.getTaskChar(params.get(0)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("existence")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
Participation minerConstr = new Participation(taskCharArchive.getTaskChar(params.get(0)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
} else if (template.equals("absence2")) {
for (Parameter p : cd.getParameters()) {
for (ActivityDefinition ad : cd.getBranches(p)) {
params.add(ad.getName());
}
}
AtMostOne minerConstr = new AtMostOne(taskCharArchive.getTaskChar(params.get(0)),support);
minerConstr.setConfidence(confidence);
minerConstr.setInterestFactor(interestFact);
minerFulConstraints.add(minerConstr);
}
}
MetaConstraintUtils.createHierarchicalLinks(new TreeSet<Constraint>(minerFulConstraints));
ConstraintsBag constraintsBag = new ConstraintsBag(taskCharArchive.getTaskChars(), minerFulConstraints);
String processModelName = declareMapModel.getName();
return new ProcessModel(taskCharArchive, constraintsBag, processModelName);
}
} | 13,686 | 46.196552 | 157 | java |
nl2bash | nl2bash-master/bashlint/man_parser/ManParserInterface.java | package man_parser;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.core.sym.Name;
import com.fasterxml.jackson.databind.ObjectMapper;
import main.Config;
import man_parser.cmd.Cmd;
import javafx.util.Pair;
import man_parser.parser.*;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class ManParserInterface {
public static String parseSynopsisBNF() throws IOException, ParseException {
// parse the primitive command grammar from the synopsis grammar file and serialize it to JSON
List<Cmd.Command> commands = ManParserInterface.parseGrammarFile(Config.SynopsisGrammar).commandsGrammar;
ObjectMapper mapper = new ObjectMapper();
mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
return mapper.writeValueAsString(commands);
}
public static void parseManPage(boolean testSmallExamples) throws IOException {
String PREFIX = Config.ProjectFolder;
if (testSmallExamples) {
String[] targetFiles = {
PREFIX + "data/plain-man/find.1.txt",
PREFIX + "data/plain-man/mv.1.txt",
PREFIX + "data/plain-man/sort.1.txt",
PREFIX + "data/plain-man/grep.1.txt",
PREFIX + "data/plain-man/egrep.1.txt",
PREFIX + "data/plain-man/cp.1.txt",
PREFIX + "data/plain-man/ls.1.txt",
PREFIX + "data/plain-man/tar.1.txt",
PREFIX + "data/plain-man/xargs.1.txt",
PREFIX + "data/plain-man/sed.1.txt",
PREFIX + "data/plain-man/awk.1.txt",
PREFIX + "data/plain-man/rm.1.txt",
PREFIX + "data/plain-man/cd.1.txt",
PREFIX + "data/plain-man/wc.1.txt",
PREFIX + "data/plain-man/chmod.1.txt",
PREFIX + "data/plain-man/chgrp.1.txt",
PREFIX + "data/plain-man/head.1.txt",
PREFIX + "data/plain-man/tail.1.txt",
PREFIX + "data/plain-man/seq.1.txt",
PREFIX + "data/plain-man/unlink.1.txt",
PREFIX + "data/plain-man/cat.1.txt",
PREFIX + "data/plain-man/zip.1.txt",
PREFIX + "data/plain-man/unzip.1.txt",
PREFIX + "data/plain-man/du.1.txt",
PREFIX + "data/plain-man/echo.1.txt",
PREFIX + "data/plain-man/diff.1.txt",
PREFIX + "data/plain-man/comm.1.txt",
PREFIX + "data/plain-man/sh.1.txt"
};
List<Cmd.ManPage> manPages = new ArrayList<>();
for (String f : targetFiles) {
//System.out.println(f);
Cmd.ManPage mp = ManParserInterface.parseFile(new File(f));
if (mp.aliases.isEmpty())
System.out.println("???" + f);
manPages.add(mp);
}
ObjectMapper mapper = new ObjectMapper();
mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
String jsonInString = mapper.writeValueAsString(manPages);
System.out.println(jsonInString);
} else {
File[] files = new File(PREFIX + "data/plain-man").listFiles();
for (File f : files) {
if (! f.getName().matches("\\w*\\.\\d\\.txt"))
continue;
ManParserInterface.parseFile(f);
}
}
}
public static class GrammarFile {
public List<Cmd.Command> commandsGrammar = new ArrayList<>();
public Map<String, String> nameToTypeDict = new HashMap<>();
public Map<String, String> constantNameToSymbol = new HashMap<>();
public Map<String, List<Cmd.CmdOp>> nonTerminals = new HashMap<>();
public GrammarFile(List<Cmd.Command> commandsGrammar, Map<String, List<Cmd.CmdOp>> nonTerminals,
Map<String, String> nameToTypeDict, Map<String, String> constantNameToSymbol) {
this.commandsGrammar = commandsGrammar;
this.nameToTypeDict = nameToTypeDict;
this.constantNameToSymbol = constantNameToSymbol;
this.nonTerminals = nonTerminals;
}
}
// parse the grammar file
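// Illustrative sketch of the layout this parser expects (the concrete right-hand-side syntax is
// an assumption; only the section keywords and the ":=" convention are taken from the code below):
//   PrimitiveCmd
//       find [-H | -L] :find_expression:...
//   type
//       File(file, path)
//   constant
//       EXCLAMATION !
//   find_expression :=
//       -name pattern
// Unindented lines open a section ("PrimitiveCmd", "type", "constant") or define a non-terminal
// via "name := ..."; the indented lines that follow form the body of that section or non-terminal.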
public static GrammarFile parseGrammarFile(String path) throws IOException, ParseException {
List<String> lines = Files.readAllLines(Paths.get(path));
List<Cmd.Command> commands = new ArrayList<>();
Map<String, String> nameToTypeDict = new HashMap<>();
Map<String, String> constantNameToSymbol = new HashMap<>();
Map<String, List<Cmd.CmdOp>> nonTerminals = new HashMap<>();
List<String> rawCommands = new ArrayList<>();
List<String> rawNameToTypes = new ArrayList<>();
List<String> rawConstantToType = new ArrayList<>();
Map<String, List<String>> rawNonTerminals = new HashMap<>();
int i = 0;
while (i < lines.size()) {
if (lines.get(i).startsWith("PrimitiveCmd")) {
i ++;
int l = i;
while (i < lines.size()) {
if (indentCount(lines.get(i)) == 0 && !lines.get(i).trim().equals(""))
break;
i ++;
}
rawCommands = lines.subList(l, i)
.stream().filter(s -> !s.trim().equals("")).map(s -> s.trim()).collect(Collectors.toList());
} else if (lines.get(i).startsWith("type")) {
i++;
int l = i;
while (i < lines.size()) {
if (indentCount(lines.get(i)) == 0 && !lines.get(i).trim().equals(""))
break;
i++;
}
rawNameToTypes = lines.subList(l, i)
.stream().filter(s -> !s.trim().equals("")).map(s -> s.trim()).collect(Collectors.toList());
} else if (lines.get(i).startsWith("constant")) {
i++;
int l = i;
while (i < lines.size()) {
if (indentCount(lines.get(i)) == 0 && !lines.get(i).trim().equals(""))
break;
i++;
}
rawConstantToType = lines.subList(l, i)
.stream().filter(s -> !s.trim().equals("")).map(s -> s.trim()).collect(Collectors.toList());
} else {
if (! lines.get(i).contains(":=")) {
i ++;
continue;
}
String ntName = lines.get(i).substring(0, lines.get(i).indexOf(":=")).trim();
i ++;
int l = i;
while (i < lines.size()) {
if (indentCount(lines.get(i)) == 0 && !lines.get(i).trim().equals(""))
break;
i ++;
}
List<String> nonTerminalContents = lines.subList(l, i)
.stream().filter(s -> !s.trim().equals("")).map(s -> s.trim()).collect(Collectors.toList());
rawNonTerminals.put(ntName, nonTerminalContents);
}
}
for (String s : rawConstantToType) {
String[] p = s.split("\\s+");
constantNameToSymbol.put(p[0], p[1]);
}
for (String s : rawNameToTypes) {
String typeName = s.substring(0, s.indexOf("(")).trim();
String[] argnames = s.substring(s.indexOf("(") + 1, s.indexOf(")")).split(",");
for (String a : argnames) {
nameToTypeDict.put(a.trim(), typeName);
}
}
Cmd.ConstantArgDict = constantNameToSymbol;
Cmd.NameToTypeDict = nameToTypeDict;
commands = parsePrimitiveGrammar(rawCommands);
for (Map.Entry<String, List<String>> e : rawNonTerminals.entrySet()) {
nonTerminals.put(e.getKey(), parseNonTerminalContent(e.getValue()));
}
return new GrammarFile(commands, nonTerminals, nameToTypeDict, constantNameToSymbol);
}
public static List<Cmd.Command> parsePrimitiveGrammar(List<String> rawCommands) throws ParseException {
List<Cmd.Command> commands = new ArrayList<>();
for (String s : rawCommands) {
String name = s.trim().split("\\s+")[0];
String raw = s.substring(s.indexOf(name) + name.length()).trim();
commands.add(new Cmd.Command(name, parseSynopsisInstance(raw)));
}
return commands;
}
public static List<Cmd.CmdOp> parseNonTerminalContent(List<String> cmdOpContents) throws ParseException {
List<Cmd.CmdOp> options = new ArrayList<>();
for (String s : cmdOpContents) {
String raw = s.trim();
options.add(parseSynopsisInstance(raw));
}
return options;
}
public static Cmd.ManPage parseFile(File file) throws IOException {
// read the file
List<String> lines = Files.readAllLines(file.toPath());
Cmd.ManPage manpage = new Cmd.ManPage();
int i = 0;
while (i < lines.size()) {
if (lines.get(i).startsWith("NAME")) {
// segmenting the name section
int l = i + 1;
i ++;
while (i < lines.size() && indentCount(lines.get(i)) != 0) {
i ++;
}
Pair<List<String>, String> name = parseName(lines.subList(l, i));
manpage.setName(name.getKey(), name.getKey().get(0) + ": " + name.getValue() + "\n");
} else if (i < lines.size() && lines.get(i).startsWith("SYNOPSIS")) {
// segmenting the synopsis section
int l = i + 1;
i ++;
while (indentCount(lines.get(i)) != 0) {
i ++;
}
manpage.rawSynopsis = lines.subList(l, i).stream().reduce("", (x,y)->(x.trim() + "\n" + y.trim())).trim();
List<Pair<String, Cmd.CmdOp>> options = parseSynopsis(manpage.getName(), lines.subList(l,i));
for (Pair<String, Cmd.CmdOp> pair : options) {
manpage.optionLists.add(pair.getValue());
}
} else if (i < lines.size() && (lines.get(i).startsWith("DESCRIPTION"))) {
// segmenting the description section
int l = i + 1;
i ++;
while(indentCount(lines.get(i)) != 0 || lines.get(i).equals("")) {
i ++;
}
Pair<String, List<Pair<String, String>>> descSec = parseDescription(lines.subList(l, i));
manpage.description += descSec.getKey() + "\n";
for (Pair<String, String> desc : descSec.getValue()) {
String optionPart = desc.getKey();
int inOuterLevel = 0;
boolean added = false;
for (int k = 0; k < optionPart.length(); k ++) {
if (optionPart.charAt(k) == ',' && inOuterLevel == 0) {
try {
Cmd.DescriptionPair d = new Cmd.DescriptionPair(parseSynopsisInstance(optionPart.substring(0, k)), optionPart, desc.getValue());
manpage.optionDesc.add(d);
added = true;
} catch (ParseException e) {
continue;
}
} else if (optionPart.charAt(k) == '[') {
inOuterLevel ++;
} else if (optionPart.charAt(k) == ']') {
inOuterLevel --;
}
}
if (! added) {
try {
manpage.optionDesc.add(
new Cmd.DescriptionPair(parseSynopsisInstance(optionPart), optionPart, desc.getValue()));
} catch (ParseException e) {
continue;
}
}
}
i --;
} else if (i < lines.size() && (lines.get(i).startsWith("PRIMARIES")
|| lines.get(i).startsWith("USE")
|| lines.get(i).startsWith("OPTIONS"))) {
// segmenting the PRIMARIES section, specially for the find command
int l = i + 1;
i ++;
while(indentCount(lines.get(i)) != 0 || lines.get(i).equals("")) {
i ++;
}
Pair<String, List<Pair<String, String>>> descSec = parseDescription(lines.subList(l, i));
for (Pair<String, String> desc : descSec.getValue()) {
String optionPart = desc.getKey();
int inOuterLevel = 0;
boolean added = false;
for (int k = 0; k < optionPart.length(); k ++) {
if (optionPart.charAt(k) == ',' && inOuterLevel == 0) {
try {
manpage.optionDesc.add(
new Cmd.DescriptionPair(parseSynopsisInstance(optionPart.substring(0, k)), optionPart, desc.getValue()));
added = true;
} catch (ParseException e) {
continue;
}
} else if (optionPart.charAt(k) == '[') {
inOuterLevel ++;
} else if (optionPart.charAt(k) == ']') {
inOuterLevel --;
}
}
if (! added) {
try {
manpage.optionDesc.add(
new Cmd.DescriptionPair(parseSynopsisInstance(optionPart), optionPart, desc.getValue()));
} catch (ParseException e) {
continue;
}
}
}
i --;
} else if (i < lines.size() && lines.get(i).startsWith("EXAMPLES")) {
int l = i + 1;
i ++;
while (indentCount(lines.get(i)) != 0 || lines.get(i).equals("")) {
i ++;
}
parseExample(lines.subList(l, i));
}
i ++;
}
return manpage;
}
// return value: key is the list of parsed aliases, value is the cmd description
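// e.g. (illustrative) a NAME section reading "grep, egrep, fgrep -- file pattern searcher"
// yields aliases [grep, egrep, fgrep] and the description "file pattern searcher".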
private static Pair<List<String>, String> parseName(List<String> secContent) {
String content = secContent.stream().reduce("", String::concat).replaceAll("\\s+", " ");
List<String> aliases = new ArrayList<>();
String description = "";
String rawName = content.trim();
if (content.contains(" -- ")) {
rawName = content.substring(0, content.indexOf(" -- ")).trim();
description = content.substring(content.indexOf(" -- ") + 4).trim();
} else if (content.contains(" - ")) {
rawName = content.substring(0, content.indexOf(" - ")).trim();
description = content.substring(content.indexOf(" - ") + 3).trim();
}
String[] splits = rawName.split(",");
for (String s : splits) {
if (s.trim().equals("")) continue;
aliases.add(s.trim());
}
return new Pair<>(aliases, description);
}
// return value: a list of (command name, parsed synopsis option) pairs extracted from the SYNOPSIS section
private static List<Pair<String, Cmd.CmdOp>> parseSynopsis(String name, List<String> secContent) {
List<Pair<String, Cmd.CmdOp>> result = new ArrayList<>();
int i = 0;
while (i < secContent.size()) {
// dealing with the first indent
int l = i;
i ++;
while (i < secContent.size() && !secContent.get(i).trim().startsWith(name + " ")) {
i ++;
}
List<String> subContent = secContent.subList(l, i);
if (subContent.size() == 0)
System.err.println("[Error@ParseSynopsis] An empty synopsis.");
String cmdRaw = subContent.stream().reduce(" ", String::concat).replaceAll("\\s+", " ").trim();
String cmdName = cmdRaw.split("\\s+")[0];
cmdRaw = cmdRaw.substring(cmdRaw.indexOf(cmdName) + cmdName.length()).trim();
try {
result.add(new Pair(cmdName, parseSynopsisInstance(cmdRaw)));
} catch (ParseException e) {
continue;
}
}
return result;
}
private static Cmd.CmdOp parseSynopsisInstance(String line) throws ParseException {
Cmd.CmdOp op = new SynopParser(new java.io.StringReader(line)).compoundOp();
return op;
}
/**
* Parse the option descriptions in the DESCRIPTION (or PRIMARIES/OPTIONS) section of a man page.
* @param lines the lines forming the body of the description section
* @return a Pair:
* the key of the pair is an overview of the description,
* the value is a list of pairs, (optionName, optionDescription)
*/
private static Pair<String, List<Pair<String, String>>> parseDescription(List<String> lines) {
// parse descriptions
int i = 0, l = i;
String instrdesc = "";
int baseIndention = indentCount(lines.get(0));
while (i < lines.size()) {
if ((indentCount(lines.get(i)) == baseIndention) && lines.get(i).trim().startsWith("-"))
break;
else
i ++;
}
if (i != 0)
instrdesc = lines.subList(l, i-2).stream().reduce("", (x,y) -> x + "\n" + y).replaceFirst("\\s+$", "");
// start parsing options
List<Pair<String, String>> optionList = new ArrayList<>();
while (i < lines.size()) {
if (!(indentCount(lines.get(i)) == baseIndention && lines.get(i).trim().startsWith("-")))
break;
String optionName = lines.get(i).trim().split(" ")[0];
//System.out.println(optionName);
l = i;
i ++;
while (i < lines.size() && !(indentCount(lines.get(i)) == baseIndention)) {
i ++;
}
String optionDesc = lines.subList(l, i).stream().reduce("", (x,y) -> x + "\n" + y);
//System.out.println(optionDesc);
optionList.add(new Pair(optionName, optionDesc));
}
return new Pair(instrdesc, optionList);
}
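// e.g. (illustrative) for parseDescription above, an option block such as
//   "-a      Include directory entries whose names begin with a dot."
// at the base indentation produces the pair ("-a", <the full indented block, including the "-a"
// line itself>), while everything before the first such option block becomes the overview string
// returned as the key of the outer pair.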
private static List<Pair<String, String>> parseExample(List<String> lines) {
// TODO
return new ArrayList<>();
}
// number of leading whitespace characters, i.e. the index at which the trimmed content starts
private static int indentCount(String s) { return s.indexOf(s.trim()); }
}
| 19,346 | 40.517167 | 160 | java |
nl2bash | nl2bash-master/bashlint/man_parser/ManParserInterfaceTest.java | package man_parser;
import main.Main;
import man_parser.cmd.Cmd;
import man_parser.parser.ParseException;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.stream.Collectors;
import static org.junit.Assert.*;
/**
* Created by clwang on 12/11/16.
*/
public class ManParserInterfaceTest {
@Test
public void test() {
try {
ManParserInterface.parseManPage(true);
} catch (IOException e) {
e.printStackTrace();
}
}
@Test
public void fastMakeGrammar() throws IOException, ParseException {
String[] args = {"-gen-primitive-cmd-json", "../grammar/grammar.2.txt", "../grammar/optionwords.txt"};
Main.main(args);
}
@Test
public void fastTestGrammar() throws IOException, ParseException {
String[] args = {"-gen-primitive-cmd-json", "./testdata/test_grammar.txt", "../grammar/optionwords.txt"};
Main.main(args);
}
@Test
public void fastGenG4Grammar() throws IOException, ParseException {
String[] args = {"-make_grammar"};
Main.main(args);
}
@Test
public void fastParseManPage() throws IOException {
String manPage = "../data/gnu-man/man1/split.txt";
Cmd.ManPage mp = ManParserInterface.parseFile(new File(manPage));
System.out.println(mp.rawSynopsis);
String options = mp.optionDesc.stream().map(od -> od.pureOptions()).reduce("", (x,y)->(x + " " + y));
System.out.println(options);
System.out.println();
System.out.println("\n" + mp.rawSynopsis.replaceAll("\\[OPTION\\]...", options));
System.out.println("\n" + mp.rawSynopsis.replaceAll("\\[OPTION...\\]", options));
}
} | 1,773 | 27.612903 | 113 | java |
nl2bash | nl2bash-master/bashlint/man_parser/cmd/Cmd.java | package man_parser.cmd;
import javafx.util.Pair;
import man_parser.ManParserInterface;
import man_parser.parser.ParseException;
import man_parser.parser.SynopParser;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Created by clwang on 2/14/16.
*/
public class Cmd {
public static Map<String, String> ConstantArgDict = new HashMap<>();
public static Map<String, String> NameToTypeDict = new HashMap<>();
public static class ManPage {
public List<String> aliases = new ArrayList<>();
public String description = "";
public String rawSynopsis = "";
// one command may have more than one synopsis
public List<CmdOp> optionLists = new ArrayList<>();
public List<DescriptionPair> optionDesc = new ArrayList<>();
public void setName(List<String> aliases, String description) {
this.aliases = aliases;
this.description = description;
}
public String getName() { return this.aliases.get(0); }
public String toString() {
String s = " " + aliases.stream().reduce("", String::concat) + "\r\n";
s+="Synopsis\r\n";
for (CmdOp op : optionLists) {
s += op.toString() + " ";
s += "\r\n";
}
s += "Description\r\n";
for (DescriptionPair p : optionDesc) {
s += p.toString();
}
return s;
}
}
public static class Command {
public String type = "command";
public String name = "";
public CmdOp option;
public Command(String name, CmdOp op) {
this.name = name;
this.option = op;
}
public String toString() {
return name + " " + option.toString();
}
}
public interface CmdOp {
String getType();
}
// flag of form -flagname
public static class Fl implements CmdOp {
public String type = "flag_option";
public String flag_name;
public Fl(String s) {
this.flag_name = s;
if (this.flag_name.startsWith("-")) {
this.flag_name = this.flag_name.substring(1);
}
if (this.flag_name.equals("ONE")) this.flag_name = "1";
if (this.flag_name.equals("TWO")) this.flag_name = "2";
if (this.flag_name.equals("THREE")) this.flag_name = "3";
if (this.flag_name.equals("EXCLAMATION")) this.flag_name = "!";
if (this.flag_name.equals("DOLLAR")) this.flag_name = "$";
if (this.flag_name.equals("AT")) this.flag_name = "@";
}
public String toString() {
String flag = "-" + flag_name;
return flag;
}
@Override
public String getType() {
return type;
}
}
// another type of flag, with --flagname=arg
public static class Flv2 implements CmdOp {
public String type = "long_flag_option";
public String flag_name;
// whether there exists arg
public boolean arg_exists = false;
// whether the arg is of the form [=XXX] or =XXX
public boolean arg_optional = false;
public Ar argument = new Ar();
public Flv2(String flagname) {
this.flag_name = flagname;
}
public void setName(String arg, boolean arg_optional) {
this.arg_exists = true;
this.argument = new Ar(arg);
this.arg_optional = arg_optional;
}
public String toString() {
String result = "--" + flag_name.toString();
if (arg_exists) {
if (arg_optional)
result += "[=" + argument.toString() + "]";
else
result += "=" + argument.toString();
}
return result;
}
@Override
public String getType() {
return type;
}
}
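// e.g. (illustrative) a synopsis fragment "--color[=WHEN]" is represented as a Flv2 with
// flag_name = "color", argument = "WHEN" and arg_optional = true, and prints back as
// "--color[=WHEN]" via toString() above.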
public static class Opt implements CmdOp {
public String type = "optional_option";
public CmdOp cmd;
public Opt(CmdOp cmd) { this.cmd = cmd; }
public String toString() {
return "[" + cmd.toString() + "]";
}
@Override
public String getType() {
return type;
}
}
public static class Ar implements CmdOp {
public String type = "argument_option";
public String arg_name;
public String arg_type;
public boolean isList = false;
public Ar() {}
public Ar(String s) {
Pair<String, String> p = normalizeArgNameType(s);
this.arg_name = p.getKey();
this.arg_type = p.getValue();
}
public String toString() {
if (isList) return arg_name + "...";
else return arg_name;
}
@Override
public String getType() {
return type;
}
}
public static class NonTerminal implements CmdOp {
public String type = "argument_option";
public String name;
public boolean isList = false;
public NonTerminal(String s) {
name = s;
}
public String toString() {
if (isList) return ":" + name + ": " + "...";
else return ":" + name + ":";
}
@Override
public String getType() {
return type;
}
}
public static class Compound implements CmdOp {
public String type = "compound_options";
public List<CmdOp> commands = new ArrayList<>();
public Compound(List<CmdOp> cmds) { this.commands = cmds; }
public String toString() {
return commands.stream().map(cmd -> cmd.toString()).reduce(" ", (x,y) -> x + " " + y);
}
@Override
public String getType() {
return type;
}
}
public static class Exclusive implements CmdOp {
public String type = "exclusive_options";
public List<CmdOp> commands = new ArrayList<>();
public Exclusive(List<CmdOp> cmds) { this.commands = cmds; }
public String toString() {
String s = "";
for (CmdOp flg : commands) {
s += flg + " | ";
}
return s;
}
@Override
public String getType() {
return type;
}
}
public static class DescriptionPair {
String type = "option_description_pair";
public String name;
public CmdOp option;
public List<CmdOp> allOptions = new ArrayList<>();
public String description;
public DescriptionPair(CmdOp fst, String wholeOpPart, String desc) {
this.name = fst.toString().trim();
this.option = fst;
this.description = desc;
this.addAllOptions(wholeOpPart);
}
public void addAllOptions(String allOptions) {
for (String s : allOptions.split(",")) {
try {
this.allOptions.add(new SynopParser(new java.io.StringReader(s)).compoundOp());
} catch (ParseException e) {
e.printStackTrace();
}
}
}
public String toString() {
return option.toString() + " :: " + description;
}
public String pureOptions() {
String synOp = "[";
for (int i = 0; i < allOptions.size(); i ++) {
if (i != 0)
synOp += "|";
synOp += " " + allOptions.get(i).toString() + " ";
}
synOp += "]";
return synOp;
}
}
private static Pair<String, String> normalizeArgNameType(String argName) {
if (NameToTypeDict.containsKey(argName)) {
return new Pair<>(argName, NameToTypeDict.get(argName));
} else if (ConstantArgDict.containsKey(argName)) {
return new Pair<>(ConstantArgDict.get(argName), "Constant");
} else {
return new Pair<>(argName, "Unknown");
}
}
}
| 8,185 | 31.228346 | 99 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/ParseException.java | /* Generated By:JavaCC: Do not edit this line. ParseException.java Version 6.0 */
/* JavaCCOptions:KEEP_LINE_COL=null */
package man_parser.parser;
/**
* This exception is thrown when parse errors are encountered.
* You can explicitly create objects of this exception type by
* calling the method generateParseException in the generated
* parser.
*
* You can modify this class to customize your error reporting
* mechanisms so long as you retain the public fields.
*/
public class ParseException extends Exception {
/**
* The version identifier for this Serializable class.
* Increment only if the <i>serialized</i> form of the
* class changes.
*/
private static final long serialVersionUID = 1L;
/**
* This constructor is used by the method "generateParseException"
* in the generated parser. Calling this constructor generates
* a new object of this type with the fields "currentToken",
* "expectedTokenSequences", and "tokenImage" set.
*/
public ParseException(Token currentTokenVal,
int[][] expectedTokenSequencesVal,
String[] tokenImageVal
)
{
super(initialise(currentTokenVal, expectedTokenSequencesVal, tokenImageVal));
currentToken = currentTokenVal;
expectedTokenSequences = expectedTokenSequencesVal;
tokenImage = tokenImageVal;
}
/**
* The following constructors are for use by you for whatever
* purpose you can think of. Constructing the exception in this
* manner makes the exception behave in the normal way - i.e., as
* documented in the class "Throwable". The fields "errorToken",
* "expectedTokenSequences", and "tokenImage" do not contain
* relevant information. The JavaCC generated code does not use
* these constructors.
*/
public ParseException() {
super();
}
/** Constructor with message. */
public ParseException(String message) {
super(message);
}
/**
* This is the last token that has been consumed successfully. If
* this object has been created due to a parse error, the token
* followng this token will (therefore) be the first error token.
*/
public Token currentToken;
/**
* Each entry in this array is an array of integers. Each array
* of integers represents a sequence of tokens (by their ordinal
* values) that is expected at this point of the parse.
*/
public int[][] expectedTokenSequences;
/**
* This is a reference to the "tokenImage" array of the generated
* parser within which the parse error occurred. This array is
* defined in the generated ...Constants interface.
*/
public String[] tokenImage;
/**
* It uses "currentToken" and "expectedTokenSequences" to generate a parse
* error message and returns it. If this object has been created
* due to a parse error, and you do not catch it (it gets thrown
* from the parser) the correct error message
* gets displayed.
*/
private static String initialise(Token currentToken,
int[][] expectedTokenSequences,
String[] tokenImage) {
String eol = System.getProperty("line.separator", "\n");
StringBuffer expected = new StringBuffer();
int maxSize = 0;
for (int i = 0; i < expectedTokenSequences.length; i++) {
if (maxSize < expectedTokenSequences[i].length) {
maxSize = expectedTokenSequences[i].length;
}
for (int j = 0; j < expectedTokenSequences[i].length; j++) {
expected.append(tokenImage[expectedTokenSequences[i][j]]).append(' ');
}
if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
expected.append("...");
}
expected.append(eol).append(" ");
}
String retval = "Encountered \"";
Token tok = currentToken.next;
for (int i = 0; i < maxSize; i++) {
if (i != 0) retval += " ";
if (tok.kind == 0) {
retval += tokenImage[0];
break;
}
retval += " " + tokenImage[tok.kind];
retval += " \"";
retval += add_escapes(tok.image);
retval += " \"";
tok = tok.next;
}
retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
retval += "." + eol;
if (expectedTokenSequences.length == 1) {
retval += "Was expecting:" + eol + " ";
} else {
retval += "Was expecting one of:" + eol + " ";
}
retval += expected.toString();
return retval;
}
/**
* The end of line string for this machine.
*/
protected String eol = System.getProperty("line.separator", "\n");
/**
* Used to convert raw characters to their escaped version
* when these raw version cannot be used as part of an ASCII
* string literal.
*/
static String add_escapes(String str) {
StringBuffer retval = new StringBuffer();
char ch;
for (int i = 0; i < str.length(); i++) {
switch (str.charAt(i))
{
case 0 :
continue;
case '\b':
retval.append("\\b");
continue;
case '\t':
retval.append("\\t");
continue;
case '\n':
retval.append("\\n");
continue;
case '\f':
retval.append("\\f");
continue;
case '\r':
retval.append("\\r");
continue;
case '\"':
retval.append("\\\"");
continue;
case '\'':
retval.append("\\\'");
continue;
case '\\':
retval.append("\\\\");
continue;
default:
if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
String s = "0000" + Integer.toString(ch, 16);
retval.append("\\u" + s.substring(s.length() - 4, s.length()));
} else {
retval.append(ch);
}
continue;
}
}
return retval.toString();
}
}
/* JavaCC - OriginalChecksum=e48d0c792d76799514cc537b971e3bed (do not edit this line) */
| 6,175 | 31.851064 | 104 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/SimpleCharStream.java | /* Generated By:JavaCC: Do not edit this line. SimpleCharStream.java Version 6.0 */
/* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package man_parser.parser;
/**
* An implementation of interface CharStream, where the stream is assumed to
* contain only ASCII characters (without unicode processing).
*/
public class SimpleCharStream
{
/** Whether parser is static. */
public static final boolean staticFlag = false;
int bufsize;
int available;
int tokenBegin;
/** Position in buffer. */
public int bufpos = -1;
protected int bufline[];
protected int bufcolumn[];
protected int column = 0;
protected int line = 1;
protected boolean prevCharIsCR = false;
protected boolean prevCharIsLF = false;
protected java.io.Reader inputStream;
protected char[] buffer;
protected int maxNextCharInd = 0;
protected int inBuf = 0;
protected int tabSize = 8;
protected boolean trackLineColumn = true;
public void setTabSize(int i) { tabSize = i; }
public int getTabSize() { return tabSize; }
protected void ExpandBuff(boolean wrapAround)
{
char[] newbuffer = new char[bufsize + 2048];
int newbufline[] = new int[bufsize + 2048];
int newbufcolumn[] = new int[bufsize + 2048];
try
{
if (wrapAround)
{
System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin);
System.arraycopy(buffer, 0, newbuffer, bufsize - tokenBegin, bufpos);
buffer = newbuffer;
System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin);
System.arraycopy(bufline, 0, newbufline, bufsize - tokenBegin, bufpos);
bufline = newbufline;
System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin);
System.arraycopy(bufcolumn, 0, newbufcolumn, bufsize - tokenBegin, bufpos);
bufcolumn = newbufcolumn;
maxNextCharInd = (bufpos += (bufsize - tokenBegin));
}
else
{
System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin);
buffer = newbuffer;
System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin);
bufline = newbufline;
System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin);
bufcolumn = newbufcolumn;
maxNextCharInd = (bufpos -= tokenBegin);
}
}
catch (Throwable t)
{
throw new Error(t.getMessage());
}
bufsize += 2048;
available = bufsize;
tokenBegin = 0;
}
protected void FillBuff() throws java.io.IOException
{
if (maxNextCharInd == available)
{
if (available == bufsize)
{
if (tokenBegin > 2048)
{
bufpos = maxNextCharInd = 0;
available = tokenBegin;
}
else if (tokenBegin < 0)
bufpos = maxNextCharInd = 0;
else
ExpandBuff(false);
}
else if (available > tokenBegin)
available = bufsize;
else if ((tokenBegin - available) < 2048)
ExpandBuff(true);
else
available = tokenBegin;
}
int i;
try {
if ((i = inputStream.read(buffer, maxNextCharInd, available - maxNextCharInd)) == -1)
{
inputStream.close();
throw new java.io.IOException();
}
else
maxNextCharInd += i;
return;
}
catch(java.io.IOException e) {
--bufpos;
backup(0);
if (tokenBegin == -1)
tokenBegin = bufpos;
throw e;
}
}
/** Start. */
public char BeginToken() throws java.io.IOException
{
tokenBegin = -1;
char c = readChar();
tokenBegin = bufpos;
return c;
}
protected void UpdateLineColumn(char c)
{
column++;
if (prevCharIsLF)
{
prevCharIsLF = false;
line += (column = 1);
}
else if (prevCharIsCR)
{
prevCharIsCR = false;
if (c == '\n')
{
prevCharIsLF = true;
}
else
line += (column = 1);
}
switch (c)
{
case '\r' :
prevCharIsCR = true;
break;
case '\n' :
prevCharIsLF = true;
break;
case '\t' :
column--;
column += (tabSize - (column % tabSize));
break;
default :
break;
}
bufline[bufpos] = line;
bufcolumn[bufpos] = column;
}
/** Read a character. */
public char readChar() throws java.io.IOException
{
if (inBuf > 0)
{
--inBuf;
if (++bufpos == bufsize)
bufpos = 0;
return buffer[bufpos];
}
if (++bufpos >= maxNextCharInd)
FillBuff();
char c = buffer[bufpos];
UpdateLineColumn(c);
return c;
}
@Deprecated
/**
* @deprecated
* @see #getEndColumn
*/
public int getColumn() {
return bufcolumn[bufpos];
}
@Deprecated
/**
* @deprecated
* @see #getEndLine
*/
public int getLine() {
return bufline[bufpos];
}
/** Get token end column number. */
public int getEndColumn() {
return bufcolumn[bufpos];
}
/** Get token end line number. */
public int getEndLine() {
return bufline[bufpos];
}
/** Get token beginning column number. */
public int getBeginColumn() {
return bufcolumn[tokenBegin];
}
/** Get token beginning line number. */
public int getBeginLine() {
return bufline[tokenBegin];
}
/** Backup a number of characters. */
public void backup(int amount) {
inBuf += amount;
if ((bufpos -= amount) < 0)
bufpos += bufsize;
}
/** Constructor. */
public SimpleCharStream(java.io.Reader dstream, int startline,
int startcolumn, int buffersize)
{
inputStream = dstream;
line = startline;
column = startcolumn - 1;
available = bufsize = buffersize;
buffer = new char[buffersize];
bufline = new int[buffersize];
bufcolumn = new int[buffersize];
}
/** Constructor. */
public SimpleCharStream(java.io.Reader dstream, int startline,
int startcolumn)
{
this(dstream, startline, startcolumn, 4096);
}
/** Constructor. */
public SimpleCharStream(java.io.Reader dstream)
{
this(dstream, 1, 1, 4096);
}
/** Reinitialise. */
public void ReInit(java.io.Reader dstream, int startline,
int startcolumn, int buffersize)
{
inputStream = dstream;
line = startline;
column = startcolumn - 1;
if (buffer == null || buffersize != buffer.length)
{
available = bufsize = buffersize;
buffer = new char[buffersize];
bufline = new int[buffersize];
bufcolumn = new int[buffersize];
}
prevCharIsLF = prevCharIsCR = false;
tokenBegin = inBuf = maxNextCharInd = 0;
bufpos = -1;
}
/** Reinitialise. */
public void ReInit(java.io.Reader dstream, int startline,
int startcolumn)
{
ReInit(dstream, startline, startcolumn, 4096);
}
/** Reinitialise. */
public void ReInit(java.io.Reader dstream)
{
ReInit(dstream, 1, 1, 4096);
}
/** Constructor. */
public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline,
int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException
{
this(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize);
}
/** Constructor. */
public SimpleCharStream(java.io.InputStream dstream, int startline,
int startcolumn, int buffersize)
{
this(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize);
}
/** Constructor. */
public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline,
int startcolumn) throws java.io.UnsupportedEncodingException
{
this(dstream, encoding, startline, startcolumn, 4096);
}
/** Constructor. */
public SimpleCharStream(java.io.InputStream dstream, int startline,
int startcolumn)
{
this(dstream, startline, startcolumn, 4096);
}
/** Constructor. */
public SimpleCharStream(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException
{
this(dstream, encoding, 1, 1, 4096);
}
/** Constructor. */
public SimpleCharStream(java.io.InputStream dstream)
{
this(dstream, 1, 1, 4096);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream dstream, String encoding, int startline,
int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException
{
ReInit(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream dstream, int startline,
int startcolumn, int buffersize)
{
ReInit(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException
{
ReInit(dstream, encoding, 1, 1, 4096);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream dstream)
{
ReInit(dstream, 1, 1, 4096);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream dstream, String encoding, int startline,
int startcolumn) throws java.io.UnsupportedEncodingException
{
ReInit(dstream, encoding, startline, startcolumn, 4096);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream dstream, int startline,
int startcolumn)
{
ReInit(dstream, startline, startcolumn, 4096);
}
/** Get token literal value. */
public String GetImage()
{
if (bufpos >= tokenBegin)
return new String(buffer, tokenBegin, bufpos - tokenBegin + 1);
else
return new String(buffer, tokenBegin, bufsize - tokenBegin) +
new String(buffer, 0, bufpos + 1);
}
/** Get the suffix. */
public char[] GetSuffix(int len)
{
char[] ret = new char[len];
if ((bufpos + 1) >= len)
System.arraycopy(buffer, bufpos - len + 1, ret, 0, len);
else
{
System.arraycopy(buffer, bufsize - (len - bufpos - 1), ret, 0,
len - bufpos - 1);
System.arraycopy(buffer, 0, ret, len - bufpos - 1, bufpos + 1);
}
return ret;
}
/** Reset buffer when finished. */
public void Done()
{
buffer = null;
bufline = null;
bufcolumn = null;
}
/**
* Method to adjust line and column numbers for the start of a token.
*/
public void adjustBeginLineColumn(int newLine, int newCol)
{
int start = tokenBegin;
int len;
if (bufpos >= tokenBegin)
{
len = bufpos - tokenBegin + inBuf + 1;
}
else
{
len = bufsize - tokenBegin + bufpos + 1 + inBuf;
}
int i = 0, j = 0, k = 0;
int nextColDiff = 0, columnDiff = 0;
while (i < len && bufline[j = start % bufsize] == bufline[k = ++start % bufsize])
{
bufline[j] = newLine;
nextColDiff = columnDiff + bufcolumn[k] - bufcolumn[j];
bufcolumn[j] = newCol + columnDiff;
columnDiff = nextColDiff;
i++;
}
if (i < len)
{
bufline[j] = newLine++;
bufcolumn[j] = newCol + columnDiff;
while (i++ < len)
{
if (bufline[j = start % bufsize] != bufline[++start % bufsize])
bufline[j] = newLine++;
else
bufline[j] = newLine;
}
}
line = bufline[j];
column = bufcolumn[j];
}
boolean getTrackLineColumn() { return trackLineColumn; }
void setTrackLineColumn(boolean tlc) { trackLineColumn = tlc; }
}
/* JavaCC - OriginalChecksum=4eb5efc5cd97bde585d160fb6c2b1c66 (do not edit this line) */
| 11,856 | 23.962105 | 157 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/SynopParser.java | /* SynopParser.java */
/* Generated By:JavaCC: Do not edit this line. SynopParser.java */
package man_parser.parser;
import man_parser.cmd.*;
import main.Config;
import java.util.List;
import java.util.Optional;
import java.util.ArrayList;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.stream.Collectors;
public class SynopParser implements SynopParserConstants {
public static void main(String[] args) throws ParseException {
String testExample = "-a";
String testExample2 = "[-a] b --config[=id] --b";
Cmd.CmdOp tl = new SynopParser(new java.io.StringReader(testExample2)).compoundOp();
System.out.println(tl);
}
final public Cmd.Fl fl() throws ParseException {Token s;
s = jj_consume_token(FLAG);
System.out.println(s.toString()); {if ("" != null) return new Cmd.Fl(s.toString());}
throw new Error("Missing return statement in function");
}
final public Cmd.Ar ar() throws ParseException {Token s;
if (jj_2_1(2)) {
s = jj_consume_token(IDENTIFIER);
{if ("" != null) return new Cmd.Ar(s.toString());}
} else if (jj_2_2(2)) {
jj_consume_token(34);
s = jj_consume_token(IDENTIFIER);
jj_consume_token(35);
{if ("" != null) return new Cmd.Ar(s.toString());}
} else {
jj_consume_token(-1);
throw new ParseException();
}
throw new Error("Missing return statement in function");
}
final public Cmd.NonTerminal nt() throws ParseException {Token s;
jj_consume_token(36);
s = jj_consume_token(IDENTIFIER);
jj_consume_token(36);
{if ("" != null) return new Cmd.NonTerminal(s.toString());}
throw new Error("Missing return statement in function");
}
final public Cmd.CmdOp singleOp() throws ParseException {Cmd.CmdOp result; Token s; List<Cmd.CmdOp> temp = new ArrayList<Cmd.CmdOp>(); Cmd.Flv2 flagv2;
if (jj_2_8(2)) {
result = ar();
if (jj_2_3(2)) {
s = jj_consume_token(37);
Cmd.Ar ar = new Cmd.Ar(((Cmd.Ar)result).arg_name);
ar.isList = true; result = ar;
} else {
;
}
} else if (jj_2_9(2)) {
result = nt();
if (jj_2_4(2)) {
s = jj_consume_token(37);
Cmd.NonTerminal nt = new Cmd.NonTerminal(((Cmd.NonTerminal)result).name);
nt.isList = true; result = nt;
} else {
;
}
} else if (jj_2_10(2)) {
s = jj_consume_token(FLAG);
String flagName = s.toString().substring(s.toString().indexOf("-") + 1);
boolean belongsToWierdData = false;
try {
// load a hand crafted object
List<String> lines = Files.readAllLines(Paths.get(Config.OptionWordDictionary));
for (String u : lines) {
if (u.startsWith("-" + flagName)) {
belongsToWierdData = true;
}
}
} catch (IOException e) {
e.printStackTrace();
}
if (flagName.length() == 1 || belongsToWierdData)
result = new Cmd.Fl(s.toString());
else {
List<Cmd.CmdOp> flags = new ArrayList<Cmd.CmdOp>();
for (char c : flagName.toCharArray()) {
flags.add(new Cmd.Fl(String.valueOf(c)));
}
result = new Cmd.Compound(flags);
}
} else if (jj_2_11(2)) {
jj_consume_token(LBRACKET);
result = compoundOp();
jj_consume_token(RBRACKET);
boolean resultSet = false;
if (result instanceof Cmd.Compound) {
boolean allFlags = true;
for (Cmd.CmdOp cmd : ((Cmd.Compound)result).commands) {
if (! (cmd instanceof Cmd.Fl)) {
allFlags = false;
}
}
if (allFlags == true) {
List<Cmd.CmdOp> cmdops = new ArrayList<Cmd.CmdOp>();
for (Cmd.CmdOp c : ((Cmd.Compound)result).commands) {
cmdops.add(new Cmd.Opt(c));
}
result = new Cmd.Compound(cmdops);
resultSet = true;
}
}
if (!resultSet)
result = new Cmd.Opt(result);
} else if (jj_2_12(2)) {
jj_consume_token(38);
s = jj_consume_token(IDENTIFIER);
flagv2 = new Cmd.Flv2(s.toString());
if (jj_2_7(2)) {
if (jj_2_5(2)) {
jj_consume_token(LBRACKET);
jj_consume_token(39);
s = jj_consume_token(IDENTIFIER);
jj_consume_token(RBRACKET);
flagv2.setName(s.toString(), true);
} else if (jj_2_6(2)) {
jj_consume_token(39);
s = jj_consume_token(IDENTIFIER);
flagv2.setName(s.toString(), false);
} else {
jj_consume_token(-1);
throw new ParseException();
}
} else {
;
}
result = flagv2;
} else if (jj_2_13(2)) {
jj_consume_token(LBRACE);
result = compoundOp();
jj_consume_token(RBRACE);
} else {
jj_consume_token(-1);
throw new ParseException();
}
{if ("" != null) return result;}
throw new Error("Missing return statement in function");
}
final public Cmd.CmdOp compoundOp() throws ParseException {List<Cmd.CmdOp> cmds = new ArrayList<Cmd.CmdOp>();
Cmd.Compound result; Token t;
Cmd.CmdOp single;
boolean isExclusive = false;
single = singleOp();
cmds.add(single);
if (jj_2_18(2)) {
if (jj_2_16(2)) {
label_1:
while (true) {
single = singleOp();
cmds.add(single);
if (jj_2_14(2)) {
;
} else {
break label_1;
}
}
} else if (jj_2_17(2)) {
label_2:
while (true) {
jj_consume_token(BIT_OR);
single = singleOp();
cmds.add(single); isExclusive = true;
if (jj_2_15(2)) {
;
} else {
break label_2;
}
}
} else {
jj_consume_token(-1);
throw new ParseException();
}
} else {
;
}
if (cmds.size() == 1)
{if ("" != null) return cmds.get(0);}
else {
if (!isExclusive)
{if ("" != null) return new Cmd.Compound(cmds);}
else {
{if ("" != null) return new Cmd.Exclusive(cmds);}
}
}
throw new Error("Missing return statement in function");
}
private boolean jj_2_1(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_1(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(0, xla); }
}
private boolean jj_2_2(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_2(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(1, xla); }
}
private boolean jj_2_3(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_3(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(2, xla); }
}
private boolean jj_2_4(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_4(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(3, xla); }
}
private boolean jj_2_5(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_5(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(4, xla); }
}
private boolean jj_2_6(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_6(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(5, xla); }
}
private boolean jj_2_7(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_7(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(6, xla); }
}
private boolean jj_2_8(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_8(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(7, xla); }
}
private boolean jj_2_9(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_9(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(8, xla); }
}
private boolean jj_2_10(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_10(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(9, xla); }
}
private boolean jj_2_11(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_11(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(10, xla); }
}
private boolean jj_2_12(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_12(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(11, xla); }
}
private boolean jj_2_13(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_13(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(12, xla); }
}
private boolean jj_2_14(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_14(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(13, xla); }
}
private boolean jj_2_15(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_15(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(14, xla); }
}
private boolean jj_2_16(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_16(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(15, xla); }
}
private boolean jj_2_17(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_17(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(16, xla); }
}
private boolean jj_2_18(int xla)
{
jj_la = xla; jj_lastpos = jj_scanpos = token;
try { return !jj_3_18(); }
catch(LookaheadSuccess ls) { return true; }
finally { jj_save(17, xla); }
}
private boolean jj_3_4()
{
if (jj_scan_token(37)) return true;
return false;
}
private boolean jj_3_9()
{
if (jj_3R_4()) return true;
return false;
}
private boolean jj_3_3()
{
if (jj_scan_token(37)) return true;
return false;
}
private boolean jj_3_8()
{
if (jj_3R_3()) return true;
Token xsp;
xsp = jj_scanpos;
if (jj_3_3()) jj_scanpos = xsp;
return false;
}
private boolean jj_3R_6()
{
Token xsp;
xsp = jj_scanpos;
if (jj_3_8()) {
jj_scanpos = xsp;
if (jj_3_9()) {
jj_scanpos = xsp;
if (jj_3_10()) {
jj_scanpos = xsp;
if (jj_3_11()) {
jj_scanpos = xsp;
if (jj_3_12()) {
jj_scanpos = xsp;
if (jj_3_13()) return true;
}
}
}
}
}
return false;
}
private boolean jj_3R_4()
{
if (jj_scan_token(36)) return true;
if (jj_scan_token(IDENTIFIER)) return true;
return false;
}
private boolean jj_3_2()
{
if (jj_scan_token(34)) return true;
if (jj_scan_token(IDENTIFIER)) return true;
return false;
}
private boolean jj_3_1()
{
if (jj_scan_token(IDENTIFIER)) return true;
return false;
}
private boolean jj_3_11()
{
if (jj_scan_token(LBRACKET)) return true;
if (jj_3R_5()) return true;
return false;
}
private boolean jj_3R_3()
{
Token xsp;
xsp = jj_scanpos;
if (jj_3_1()) {
jj_scanpos = xsp;
if (jj_3_2()) return true;
}
return false;
}
private boolean jj_3_15()
{
if (jj_scan_token(BIT_OR)) return true;
if (jj_3R_6()) return true;
return false;
}
private boolean jj_3_17()
{
Token xsp;
if (jj_3_15()) return true;
while (true) {
xsp = jj_scanpos;
if (jj_3_15()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3_14()
{
if (jj_3R_6()) return true;
return false;
}
private boolean jj_3_18()
{
Token xsp;
xsp = jj_scanpos;
if (jj_3_16()) {
jj_scanpos = xsp;
if (jj_3_17()) return true;
}
return false;
}
private boolean jj_3_16()
{
Token xsp;
if (jj_3_14()) return true;
while (true) {
xsp = jj_scanpos;
if (jj_3_14()) { jj_scanpos = xsp; break; }
}
return false;
}
private boolean jj_3R_5()
{
if (jj_3R_6()) return true;
return false;
}
private boolean jj_3_6()
{
if (jj_scan_token(39)) return true;
if (jj_scan_token(IDENTIFIER)) return true;
return false;
}
private boolean jj_3_7()
{
Token xsp;
xsp = jj_scanpos;
if (jj_3_5()) {
jj_scanpos = xsp;
if (jj_3_6()) return true;
}
return false;
}
private boolean jj_3_5()
{
if (jj_scan_token(LBRACKET)) return true;
if (jj_scan_token(39)) return true;
return false;
}
private boolean jj_3_13()
{
if (jj_scan_token(LBRACE)) return true;
if (jj_3R_5()) return true;
return false;
}
private boolean jj_3_12()
{
if (jj_scan_token(38)) return true;
if (jj_scan_token(IDENTIFIER)) return true;
return false;
}
private boolean jj_3_10()
{
if (jj_scan_token(FLAG)) return true;
return false;
}
/** Generated Token Manager. */
public SynopParserTokenManager token_source;
SimpleCharStream jj_input_stream;
/** Current token. */
public Token token;
/** Next token. */
public Token jj_nt;
private int jj_ntk;
private Token jj_scanpos, jj_lastpos;
private int jj_la;
private int jj_gen;
final private int[] jj_la1 = new int[0];
static private int[] jj_la1_0;
static private int[] jj_la1_1;
static {
jj_la1_init_0();
jj_la1_init_1();
}
private static void jj_la1_init_0() {
jj_la1_0 = new int[] {};
}
private static void jj_la1_init_1() {
jj_la1_1 = new int[] {};
}
final private JJCalls[] jj_2_rtns = new JJCalls[18];
private boolean jj_rescan = false;
private int jj_gc = 0;
/** Constructor with InputStream. */
public SynopParser(java.io.InputStream stream) {
this(stream, null);
}
/** Constructor with InputStream and supplied encoding */
public SynopParser(java.io.InputStream stream, String encoding) {
try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
token_source = new SynopParserTokenManager(jj_input_stream);
token = new Token();
jj_ntk = -1;
jj_gen = 0;
for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
}
/** Reinitialise. */
public void ReInit(java.io.InputStream stream) {
ReInit(stream, null);
}
/** Reinitialise. */
public void ReInit(java.io.InputStream stream, String encoding) {
try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
token_source.ReInit(jj_input_stream);
token = new Token();
jj_ntk = -1;
jj_gen = 0;
for (int i = 0; i < 0; i++) jj_la1[i] = -1;
for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
}
/** Constructor. */
public SynopParser(java.io.Reader stream) {
jj_input_stream = new SimpleCharStream(stream, 1, 1);
token_source = new SynopParserTokenManager(jj_input_stream);
token = new Token();
jj_ntk = -1;
jj_gen = 0;
for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
}
/** Reinitialise. */
public void ReInit(java.io.Reader stream) {
jj_input_stream.ReInit(stream, 1, 1);
token_source.ReInit(jj_input_stream);
token = new Token();
jj_ntk = -1;
jj_gen = 0;
for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
}
/** Constructor with generated Token Manager. */
public SynopParser(SynopParserTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
jj_gen = 0;
for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
}
/** Reinitialise. */
public void ReInit(SynopParserTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
jj_gen = 0;
for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls();
}
private Token jj_consume_token(int kind) throws ParseException {
Token oldToken;
if ((oldToken = token).next != null) token = token.next;
else token = token.next = token_source.getNextToken();
jj_ntk = -1;
if (token.kind == kind) {
jj_gen++;
if (++jj_gc > 100) {
jj_gc = 0;
for (int i = 0; i < jj_2_rtns.length; i++) {
JJCalls c = jj_2_rtns[i];
while (c != null) {
if (c.gen < jj_gen) c.first = null;
c = c.next;
}
}
}
return token;
}
token = oldToken;
jj_kind = kind;
throw generateParseException();
}
@SuppressWarnings("serial")
static private final class LookaheadSuccess extends java.lang.Error { }
final private LookaheadSuccess jj_ls = new LookaheadSuccess();
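  // jj_ls is thrown from jj_scan_token as a cheap non-local exit once the lookahead limit
  // is reached with every token matching; the jj_2_N wrappers catch it and report success.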
private boolean jj_scan_token(int kind) {
if (jj_scanpos == jj_lastpos) {
jj_la--;
if (jj_scanpos.next == null) {
jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken();
} else {
jj_lastpos = jj_scanpos = jj_scanpos.next;
}
} else {
jj_scanpos = jj_scanpos.next;
}
if (jj_rescan) {
int i = 0; Token tok = token;
while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; }
if (tok != null) jj_add_error_token(kind, i);
}
if (jj_scanpos.kind != kind) return true;
if (jj_la == 0 && jj_scanpos == jj_lastpos) throw jj_ls;
return false;
}
/** Get the next Token. */
final public Token getNextToken() {
if (token.next != null) token = token.next;
else token = token.next = token_source.getNextToken();
jj_ntk = -1;
jj_gen++;
return token;
}
/** Get the specific Token. */
final public Token getToken(int index) {
Token t = token;
for (int i = 0; i < index; i++) {
if (t.next != null) t = t.next;
else t = t.next = token_source.getNextToken();
}
return t;
}
private int jj_ntk_f() {
if ((jj_nt=token.next) == null)
return (jj_ntk = (token.next=token_source.getNextToken()).kind);
else
return (jj_ntk = jj_nt.kind);
}
private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
private int[] jj_expentry;
private int jj_kind = -1;
private int[] jj_lasttokens = new int[100];
private int jj_endpos;
private void jj_add_error_token(int kind, int pos) {
if (pos >= 100) return;
if (pos == jj_endpos + 1) {
jj_lasttokens[jj_endpos++] = kind;
} else if (jj_endpos != 0) {
jj_expentry = new int[jj_endpos];
for (int i = 0; i < jj_endpos; i++) {
jj_expentry[i] = jj_lasttokens[i];
}
jj_entries_loop: for (java.util.Iterator<?> it = jj_expentries.iterator(); it.hasNext();) {
int[] oldentry = (int[])(it.next());
if (oldentry.length == jj_expentry.length) {
for (int i = 0; i < jj_expentry.length; i++) {
if (oldentry[i] != jj_expentry[i]) {
continue jj_entries_loop;
}
}
jj_expentries.add(jj_expentry);
break jj_entries_loop;
}
}
if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
}
}
/** Generate ParseException. */
public ParseException generateParseException() {
jj_expentries.clear();
boolean[] la1tokens = new boolean[40];
if (jj_kind >= 0) {
la1tokens[jj_kind] = true;
jj_kind = -1;
}
for (int i = 0; i < 0; i++) {
if (jj_la1[i] == jj_gen) {
for (int j = 0; j < 32; j++) {
if ((jj_la1_0[i] & (1<<j)) != 0) {
la1tokens[j] = true;
}
if ((jj_la1_1[i] & (1<<j)) != 0) {
la1tokens[32+j] = true;
}
}
}
}
for (int i = 0; i < 40; i++) {
if (la1tokens[i]) {
jj_expentry = new int[1];
jj_expentry[0] = i;
jj_expentries.add(jj_expentry);
}
}
jj_endpos = 0;
jj_rescan_token();
jj_add_error_token(0, 0);
int[][] exptokseq = new int[jj_expentries.size()][];
for (int i = 0; i < jj_expentries.size(); i++) {
exptokseq[i] = jj_expentries.get(i);
}
return new ParseException(token, exptokseq, tokenImage);
}
/** Enable tracing. */
final public void enable_tracing() {
}
/** Disable tracing. */
final public void disable_tracing() {
}
private void jj_rescan_token() {
jj_rescan = true;
for (int i = 0; i < 18; i++) {
try {
JJCalls p = jj_2_rtns[i];
do {
if (p.gen > jj_gen) {
jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
switch (i) {
case 0: jj_3_1(); break;
case 1: jj_3_2(); break;
case 2: jj_3_3(); break;
case 3: jj_3_4(); break;
case 4: jj_3_5(); break;
case 5: jj_3_6(); break;
case 6: jj_3_7(); break;
case 7: jj_3_8(); break;
case 8: jj_3_9(); break;
case 9: jj_3_10(); break;
case 10: jj_3_11(); break;
case 11: jj_3_12(); break;
case 12: jj_3_13(); break;
case 13: jj_3_14(); break;
case 14: jj_3_15(); break;
case 15: jj_3_16(); break;
case 16: jj_3_17(); break;
case 17: jj_3_18(); break;
}
}
p = p.next;
} while (p != null);
} catch(LookaheadSuccess ls) { }
}
jj_rescan = false;
}
private void jj_save(int index, int xla) {
JJCalls p = jj_2_rtns[index];
while (p.gen > jj_gen) {
if (p.next == null) { p = p.next = new JJCalls(); break; }
p = p.next;
}
p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
}
static final class JJCalls {
int gen;
Token first;
int arg;
JJCalls next;
}
}
| 22,291 | 25.569726 | 156 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/SynopParserConstants.java | /* Generated By:JavaCC: Do not edit this line. SynopParserConstants.java */
package man_parser.parser;
/**
* Token literal values and constants.
* Generated by org.javacc.parser.OtherFilesGen#start()
*/
public interface SynopParserConstants {
/** End of File. */
int EOF = 0;
/** RegularExpression Id. */
int LPAREN = 10;
/** RegularExpression Id. */
int RPAREN = 11;
/** RegularExpression Id. */
int LBRACE = 12;
/** RegularExpression Id. */
int RBRACE = 13;
/** RegularExpression Id. */
int LBRACKET = 14;
/** RegularExpression Id. */
int RBRACKET = 15;
/** RegularExpression Id. */
int SEMICOLON = 16;
/** RegularExpression Id. */
int COMMA = 17;
/** RegularExpression Id. */
int DOT = 18;
/** RegularExpression Id. */
int FLAG = 19;
/** RegularExpression Id. */
int CONSTANT = 20;
/** RegularExpression Id. */
int DIGIT = 21;
/** RegularExpression Id. */
int STRING = 22;
/** RegularExpression Id. */
int PLUS = 23;
/** RegularExpression Id. */
int MINUS = 24;
/** RegularExpression Id. */
int STAR = 25;
/** RegularExpression Id. */
int SLASH = 26;
/** RegularExpression Id. */
int BIT_AND = 27;
/** RegularExpression Id. */
int BIT_OR = 28;
/** RegularExpression Id. */
int XOR = 29;
/** RegularExpression Id. */
int REM = 30;
/** RegularExpression Id. */
int IDENTIFIER = 31;
/** RegularExpression Id. */
int LETTER = 32;
/** RegularExpression Id. */
int PART_LETTER = 33;
/** Lexical state. */
int DEFAULT = 0;
/** Lexical state. */
int IN_COMMENT = 1;
/** Literal token values. */
String[] tokenImage = {
"<EOF>",
"\" \"",
"\"\\r\"",
"\"\\t\"",
"\"\\n\"",
"\"/*\"",
"\"/*\"",
"\"*/\"",
"<token of kind 8>",
"<token of kind 9>",
"\"(\"",
"\")\"",
"\"{\"",
"\"}\"",
"\"[\"",
"\"]\"",
"\";\"",
"\",\"",
"\".\"",
"<FLAG>",
"<CONSTANT>",
"<DIGIT>",
"<STRING>",
"\"+\"",
"\"-\"",
"\"*\"",
"\"/\"",
"\"&\"",
"\"|\"",
"\"^\"",
"\"%\"",
"<IDENTIFIER>",
"<LETTER>",
"<PART_LETTER>",
"\"<\"",
"\">\"",
"\":\"",
"\"...\"",
"\"--\"",
"\"=\"",
};
}
| 2,227 | 18.892857 | 75 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/SynopParserTokenManager.java | /* SynopParserTokenManager.java */
/* Generated By:JavaCC: Do not edit this line. SynopParserTokenManager.java */
package man_parser.parser;
import man_parser.cmd.*;
import main.Config;
import java.util.List;
import java.util.Optional;
import java.util.ArrayList;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.stream.Collectors;
/** Token Manager. */
@SuppressWarnings("unused")public class SynopParserTokenManager implements SynopParserConstants {
/** Debug output. */
public java.io.PrintStream debugStream = System.out;
/** Set debug output. */
public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }
private final int jjStopStringLiteralDfa_0(int pos, long active0){
switch (pos)
{
case 0:
if ((active0 & 0x4000020L) != 0L)
return 0;
if ((active0 & 0x4001000000L) != 0L)
return 4;
return -1;
default :
return -1;
}
}
private final int jjStartNfa_0(int pos, long active0){
return jjMoveNfa_0(jjStopStringLiteralDfa_0(pos, active0), pos + 1);
}
private int jjStopAtPos(int pos, int kind)
{
jjmatchedKind = kind;
jjmatchedPos = pos;
return pos + 1;
}
private int jjMoveStringLiteralDfa0_0(){
switch(curChar)
{
case 37:
return jjStopAtPos(0, 30);
case 38:
return jjStopAtPos(0, 27);
case 40:
return jjStopAtPos(0, 10);
case 41:
return jjStopAtPos(0, 11);
case 42:
return jjStopAtPos(0, 25);
case 43:
return jjStopAtPos(0, 23);
case 44:
return jjStopAtPos(0, 17);
case 45:
jjmatchedKind = 24;
return jjMoveStringLiteralDfa1_0(0x4000000000L);
case 46:
jjmatchedKind = 18;
return jjMoveStringLiteralDfa1_0(0x2000000000L);
case 47:
jjmatchedKind = 26;
return jjMoveStringLiteralDfa1_0(0x20L);
case 58:
return jjStopAtPos(0, 36);
case 59:
return jjStopAtPos(0, 16);
case 60:
return jjStopAtPos(0, 34);
case 61:
return jjStopAtPos(0, 39);
case 62:
return jjStopAtPos(0, 35);
case 91:
return jjStopAtPos(0, 14);
case 93:
return jjStopAtPos(0, 15);
case 94:
return jjStopAtPos(0, 29);
case 123:
return jjStopAtPos(0, 12);
case 124:
return jjStopAtPos(0, 28);
case 125:
return jjStopAtPos(0, 13);
default :
return jjMoveNfa_0(2, 0);
}
}
private int jjMoveStringLiteralDfa1_0(long active0){
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(0, active0);
return 1;
}
switch(curChar)
{
case 42:
if ((active0 & 0x20L) != 0L)
return jjStopAtPos(1, 5);
break;
case 45:
if ((active0 & 0x4000000000L) != 0L)
return jjStopAtPos(1, 38);
break;
case 46:
return jjMoveStringLiteralDfa2_0(active0, 0x2000000000L);
default :
break;
}
return jjStartNfa_0(0, active0);
}
private int jjMoveStringLiteralDfa2_0(long old0, long active0){
if (((active0 &= old0)) == 0L)
return jjStartNfa_0(0, old0);
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(1, active0);
return 2;
}
switch(curChar)
{
case 46:
if ((active0 & 0x2000000000L) != 0L)
return jjStopAtPos(2, 37);
break;
default :
break;
}
return jjStartNfa_0(1, active0);
}
static final long[] jjbitVec0 = {
0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL
};
private int jjMoveNfa_0(int startState, int curPos)
{
int startsAt = 0;
jjnewStateCnt = 15;
int i = 1;
jjstateSet[0] = startState;
int kind = 0x7fffffff;
for (;;)
{
if (++jjround == 0x7fffffff)
ReInitRounds();
if (curChar < 64)
{
long l = 1L << curChar;
do
{
switch(jjstateSet[--i])
{
case 2:
if ((0x3ff000000000000L & l) != 0L)
{
if (kind > 20)
kind = 20;
{ jjCheckNAdd(6); }
}
else if (curChar == 34)
{ jjCheckNAddStates(0, 2); }
else if (curChar == 45)
jjstateSet[jjnewStateCnt++] = 4;
else if (curChar == 47)
jjstateSet[jjnewStateCnt++] = 0;
if ((0x1001000000000L & l) != 0L)
{
if (kind > 31)
kind = 31;
{ jjCheckNAdd(14); }
}
break;
case 0:
if (curChar != 47)
break;
if (kind > 9)
kind = 9;
{ jjCheckNAdd(1); }
break;
case 1:
if ((0xfffffffffffffbffL & l) == 0L)
break;
if (kind > 9)
kind = 9;
{ jjCheckNAdd(1); }
break;
case 3:
if (curChar == 45)
jjstateSet[jjnewStateCnt++] = 4;
break;
case 4:
if ((0x1001000000000L & l) == 0L)
break;
if (kind > 19)
kind = 19;
{ jjCheckNAdd(5); }
break;
case 5:
if ((0x3ff201000000000L & l) == 0L)
break;
if (kind > 19)
kind = 19;
{ jjCheckNAdd(5); }
break;
case 6:
if ((0x3ff000000000000L & l) == 0L)
break;
if (kind > 20)
kind = 20;
{ jjCheckNAdd(6); }
break;
case 7:
if (curChar == 34)
{ jjCheckNAddStates(0, 2); }
break;
case 8:
if ((0x1001000000000L & l) != 0L)
{ jjCheckNAddStates(3, 7); }
break;
case 9:
if ((0x3ff201000000000L & l) != 0L)
{ jjCheckNAddStates(3, 7); }
break;
case 10:
if ((0x3ff000000000000L & l) != 0L)
{ jjCheckNAddStates(8, 11); }
break;
case 11:
if (curChar == 32)
{ jjCheckNAddStates(8, 11); }
break;
case 12:
if (curChar == 34 && kind > 22)
kind = 22;
break;
case 13:
if ((0x1001000000000L & l) == 0L)
break;
if (kind > 31)
kind = 31;
{ jjCheckNAdd(14); }
break;
case 14:
if ((0x3ff201000000000L & l) == 0L)
break;
if (kind > 31)
kind = 31;
{ jjCheckNAdd(14); }
break;
default : break;
}
} while(i != startsAt);
}
else if (curChar < 128)
{
long l = 1L << (curChar & 077);
do
{
switch(jjstateSet[--i])
{
case 2:
case 14:
if ((0x7fffffe87ffffffL & l) == 0L)
break;
if (kind > 31)
kind = 31;
{ jjCheckNAdd(14); }
break;
case 1:
if (kind > 9)
kind = 9;
jjstateSet[jjnewStateCnt++] = 1;
break;
case 4:
case 5:
if ((0x7fffffe87ffffffL & l) == 0L)
break;
if (kind > 19)
kind = 19;
{ jjCheckNAdd(5); }
break;
case 8:
case 9:
if ((0x7fffffe87ffffffL & l) != 0L)
{ jjCheckNAddStates(3, 7); }
break;
default : break;
}
} while(i != startsAt);
}
else
{
int i2 = (curChar & 0xff) >> 6;
long l2 = 1L << (curChar & 077);
do
{
switch(jjstateSet[--i])
{
case 1:
if ((jjbitVec0[i2] & l2) == 0L)
break;
if (kind > 9)
kind = 9;
jjstateSet[jjnewStateCnt++] = 1;
break;
default : break;
}
} while(i != startsAt);
}
if (kind != 0x7fffffff)
{
jjmatchedKind = kind;
jjmatchedPos = curPos;
kind = 0x7fffffff;
}
++curPos;
if ((i = jjnewStateCnt) == (startsAt = 15 - (jjnewStateCnt = startsAt)))
return curPos;
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) { return curPos; }
}
}
private int jjMoveStringLiteralDfa0_1(){
switch(curChar)
{
case 42:
return jjMoveStringLiteralDfa1_1(0x80L);
case 47:
return jjMoveStringLiteralDfa1_1(0x40L);
default :
return 1;
}
}
private int jjMoveStringLiteralDfa1_1(long active0){
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
return 1;
}
switch(curChar)
{
case 42:
if ((active0 & 0x40L) != 0L)
return jjStopAtPos(1, 6);
break;
case 47:
if ((active0 & 0x80L) != 0L)
return jjStopAtPos(1, 7);
break;
default :
return 2;
}
return 2;
}
static final int[] jjnextStates = {
8, 10, 11, 8, 9, 10, 11, 12, 8, 10, 11, 12,
};
/** Token literal values. */
public static final String[] jjstrLiteralImages = {
"", null, null, null, null, null, null, null, null, null, "\50", "\51",
"\173", "\175", "\133", "\135", "\73", "\54", "\56", null, null, null, null, "\53",
"\55", "\52", "\57", "\46", "\174", "\136", "\45", null, null, null, "\74", "\76",
"\72", "\56\56\56", "\55\55", "\75", };
protected Token jjFillToken()
{
final Token t;
final String curTokenImage;
final int beginLine;
final int endLine;
final int beginColumn;
final int endColumn;
String im = jjstrLiteralImages[jjmatchedKind];
curTokenImage = (im == null) ? input_stream.GetImage() : im;
beginLine = input_stream.getBeginLine();
beginColumn = input_stream.getBeginColumn();
endLine = input_stream.getEndLine();
endColumn = input_stream.getEndColumn();
t = Token.newToken(jjmatchedKind, curTokenImage);
t.beginLine = beginLine;
t.endLine = endLine;
t.beginColumn = beginColumn;
t.endColumn = endColumn;
return t;
}
int curLexState = 0;
int defaultLexState = 0;
int jjnewStateCnt;
int jjround;
int jjmatchedPos;
int jjmatchedKind;
/** Get the next Token. */
public Token getNextToken()
{
Token matchedToken;
int curPos = 0;
EOFLoop :
for (;;)
{
try
{
curChar = input_stream.BeginToken();
}
catch(java.io.IOException e)
{
jjmatchedKind = 0;
jjmatchedPos = -1;
matchedToken = jjFillToken();
return matchedToken;
}
image = jjimage;
image.setLength(0);
jjimageLen = 0;
switch(curLexState)
{
case 0:
try { input_stream.backup(0);
while (curChar <= 32 && (0x100002600L & (1L << curChar)) != 0L)
curChar = input_stream.BeginToken();
}
catch (java.io.IOException e1) { continue EOFLoop; }
jjmatchedKind = 0x7fffffff;
jjmatchedPos = 0;
curPos = jjMoveStringLiteralDfa0_0();
break;
case 1:
jjmatchedKind = 0x7fffffff;
jjmatchedPos = 0;
curPos = jjMoveStringLiteralDfa0_1();
if (jjmatchedPos == 0 && jjmatchedKind > 8)
{
jjmatchedKind = 8;
}
break;
}
if (jjmatchedKind != 0x7fffffff)
{
if (jjmatchedPos + 1 < curPos)
input_stream.backup(curPos - jjmatchedPos - 1);
if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
{
matchedToken = jjFillToken();
if (jjnewLexState[jjmatchedKind] != -1)
curLexState = jjnewLexState[jjmatchedKind];
return matchedToken;
}
else
{
SkipLexicalActions(null);
if (jjnewLexState[jjmatchedKind] != -1)
curLexState = jjnewLexState[jjmatchedKind];
continue EOFLoop;
}
}
int error_line = input_stream.getEndLine();
int error_column = input_stream.getEndColumn();
String error_after = null;
boolean EOFSeen = false;
try { input_stream.readChar(); input_stream.backup(1); }
catch (java.io.IOException e1) {
EOFSeen = true;
error_after = curPos <= 1 ? "" : input_stream.GetImage();
if (curChar == '\n' || curChar == '\r') {
error_line++;
error_column = 0;
}
else
error_column++;
}
if (!EOFSeen) {
input_stream.backup(1);
error_after = curPos <= 1 ? "" : input_stream.GetImage();
}
throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR);
}
}
void SkipLexicalActions(Token matchedToken)
{
switch(jjmatchedKind)
{
case 7 :
image.append(input_stream.GetSuffix(jjimageLen + (lengthOfMatch = jjmatchedPos + 1)));
SwitchTo(DEFAULT);
break;
default :
break;
}
}
private void jjCheckNAdd(int state)
{
if (jjrounds[state] != jjround)
{
jjstateSet[jjnewStateCnt++] = state;
jjrounds[state] = jjround;
}
}
private void jjAddStates(int start, int end)
{
do {
jjstateSet[jjnewStateCnt++] = jjnextStates[start];
} while (start++ != end);
}
private void jjCheckNAddTwoStates(int state1, int state2)
{
jjCheckNAdd(state1);
jjCheckNAdd(state2);
}
private void jjCheckNAddStates(int start, int end)
{
do {
jjCheckNAdd(jjnextStates[start]);
} while (start++ != end);
}
/** Constructor. */
public SynopParserTokenManager(SimpleCharStream stream){
if (SimpleCharStream.staticFlag)
throw new Error("ERROR: Cannot use a static CharStream class with a non-static lexical analyzer.");
input_stream = stream;
}
/** Constructor. */
public SynopParserTokenManager (SimpleCharStream stream, int lexState){
ReInit(stream);
SwitchTo(lexState);
}
/** Reinitialise parser. */
public void ReInit(SimpleCharStream stream)
{
jjmatchedPos = jjnewStateCnt = 0;
curLexState = defaultLexState;
input_stream = stream;
ReInitRounds();
}
private void ReInitRounds()
{
int i;
jjround = 0x80000001;
for (i = 15; i-- > 0;)
jjrounds[i] = 0x80000000;
}
/** Reinitialise parser. */
public void ReInit(SimpleCharStream stream, int lexState)
{
ReInit(stream);
SwitchTo(lexState);
}
/** Switch to specified lex state. */
public void SwitchTo(int lexState)
{
if (lexState >= 2 || lexState < 0)
throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
else
curLexState = lexState;
}
/** Lexer state names. */
public static final String[] lexStateNames = {
"DEFAULT",
"IN_COMMENT",
};
/** Lex State array. */
public static final int[] jjnewLexState = {
-1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
};
static final long[] jjtoToken = {
0xfcffdffc01L,
};
static final long[] jjtoSkip = {
0x3feL,
};
protected SimpleCharStream input_stream;
private final int[] jjrounds = new int[15];
private final int[] jjstateSet = new int[2 * 15];
private final StringBuilder jjimage = new StringBuilder();
private StringBuilder image = jjimage;
private int jjimageLen;
private int lengthOfMatch;
protected char curChar;
}
| 16,758 | 26.655116 | 145 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/Token.java | /* Generated By:JavaCC: Do not edit this line. Token.java Version 6.0 */
/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COL=null,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */
package man_parser.parser;
/**
* Describes the input token stream.
*/
public class Token implements java.io.Serializable {
/**
* The version identifier for this Serializable class.
* Increment only if the <i>serialized</i> form of the
* class changes.
*/
private static final long serialVersionUID = 1L;
/**
* An integer that describes the kind of this token. This numbering
* system is determined by JavaCCParser, and a table of these numbers is
* stored in the file ...Constants.java.
*/
public int kind;
/** The line number of the first character of this Token. */
public int beginLine;
/** The column number of the first character of this Token. */
public int beginColumn;
/** The line number of the last character of this Token. */
public int endLine;
/** The column number of the last character of this Token. */
public int endColumn;
/**
* The string image of the token.
*/
public String image;
/**
* A reference to the next regular (non-special) token from the input
* stream. If this is the last token from the input stream, or if the
* token manager has not read tokens beyond this one, this field is
* set to null. This is true only if this token is also a regular
* token. Otherwise, see below for a description of the contents of
* this field.
*/
public Token next;
/**
* This field is used to access special tokens that occur prior to this
* token, but after the immediately preceding regular (non-special) token.
* If there are no such special tokens, this field is set to null.
* When there are more than one such special token, this field refers
* to the last of these special tokens, which in turn refers to the next
* previous special token through its specialToken field, and so on
* until the first special token (whose specialToken field is null).
* The next fields of special tokens refer to other special tokens that
* immediately follow it (without an intervening regular token). If there
* is no such token, this field is null.
*/
public Token specialToken;
/**
* An optional attribute value of the Token.
* Tokens which are not used as syntactic sugar will often contain
* meaningful values that will be used later on by the compiler or
* interpreter. This attribute value is often different from the image.
* Any subclass of Token that actually wants to return a non-null value can
* override this method as appropriate.
*/
public Object getValue() {
return null;
}
/**
* No-argument constructor
*/
public Token() {}
/**
* Constructs a new token for the specified Image.
*/
public Token(int kind)
{
this(kind, null);
}
/**
* Constructs a new token for the specified Image and Kind.
*/
public Token(int kind, String image)
{
this.kind = kind;
this.image = image;
}
/**
* Returns the image.
*/
public String toString()
{
return image;
}
/**
* Returns a new Token object, by default. However, if you want, you
* can create and return subclass objects based on the value of ofKind.
* Simply add the cases to the switch for all those special cases.
* For example, if you have a subclass of Token called IDToken that
* you want to create if ofKind is ID, simply add something like :
*
* case MyParserConstants.ID : return new IDToken(ofKind, image);
*
 * to the following switch statement. Then you can cast the matchedToken
 * variable to the appropriate type and use it in your lexical actions.
*/
public static Token newToken(int ofKind, String image)
{
switch(ofKind)
{
default : return new Token(ofKind, image);
}
}
public static Token newToken(int ofKind)
{
return newToken(ofKind, null);
}
}
/* JavaCC - OriginalChecksum=0bc1514d1a4b2ff16ede291d81c548f1 (do not edit this line) */
| 4,083 | 29.939394 | 90 | java |
nl2bash | nl2bash-master/bashlint/man_parser/parser/TokenMgrError.java | /* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 6.0 */
/* JavaCCOptions: */
package man_parser.parser;
/** Token Manager Error. */
public class TokenMgrError extends Error
{
/**
* The version identifier for this Serializable class.
* Increment only if the <i>serialized</i> form of the
* class changes.
*/
private static final long serialVersionUID = 1L;
/*
* Ordinals for various reasons why an Error of this type can be thrown.
*/
/**
* Lexical error occurred.
*/
static final int LEXICAL_ERROR = 0;
/**
* An attempt was made to create a second instance of a static token manager.
*/
static final int STATIC_LEXER_ERROR = 1;
/**
* Tried to change to an invalid lexical state.
*/
static final int INVALID_LEXICAL_STATE = 2;
/**
* Detected (and bailed out of) an infinite loop in the token manager.
*/
static final int LOOP_DETECTED = 3;
/**
* Indicates the reason why the exception is thrown. It will have
* one of the above 4 values.
*/
int errorCode;
/**
* Replaces unprintable characters by their escaped (or unicode escaped)
* equivalents in the given string
*/
protected static final String addEscapes(String str) {
StringBuffer retval = new StringBuffer();
char ch;
for (int i = 0; i < str.length(); i++) {
switch (str.charAt(i))
{
case 0 :
continue;
case '\b':
retval.append("\\b");
continue;
case '\t':
retval.append("\\t");
continue;
case '\n':
retval.append("\\n");
continue;
case '\f':
retval.append("\\f");
continue;
case '\r':
retval.append("\\r");
continue;
case '\"':
retval.append("\\\"");
continue;
case '\'':
retval.append("\\\'");
continue;
case '\\':
retval.append("\\\\");
continue;
default:
if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
String s = "0000" + Integer.toString(ch, 16);
retval.append("\\u" + s.substring(s.length() - 4, s.length()));
} else {
retval.append(ch);
}
continue;
}
}
return retval.toString();
}
/**
* Returns a detailed message for the Error when it is thrown by the
* token manager to indicate a lexical error.
* Parameters :
* EOFSeen : indicates if EOF caused the lexical error
* curLexState : lexical state in which this error occurred
* errorLine : line number when the error occurred
* errorColumn : column number when the error occurred
* errorAfter : prefix that was seen before this error occurred
* curchar : the offending character
* Note: You can customize the lexical error message by modifying this method.
*/
protected static String LexicalError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar) {
return("Lexical error at line " +
errorLine + ", column " +
errorColumn + ". Encountered: " +
(EOFSeen ? "<EOF> " : ("\"" + addEscapes(String.valueOf(curChar)) + "\"") + " (" + (int)curChar + "), ") +
"after : \"" + addEscapes(errorAfter) + "\"");
}
/**
* You can also modify the body of this method to customize your error messages.
   * For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are of no
   * concern to end users, so you can return something like:
*
* "Internal Error : Please file a bug report .... "
*
* from this method for such cases in the release version of your parser.
*/
public String getMessage() {
return super.getMessage();
}
/*
* Constructors of various flavors follow.
*/
/** No arg constructor. */
public TokenMgrError() {
}
/** Constructor with message and reason. */
public TokenMgrError(String message, int reason) {
super(message);
errorCode = reason;
}
/** Full Constructor. */
public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, char curChar, int reason) {
this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
}
}
/* JavaCC - OriginalChecksum=0c6283a20d3ff5ec6f54f1746097274f (do not edit this line) */
| 4,427 | 28.918919 | 136 | java |
null | orc-main/java/bench/core/src/java/org/apache/hadoop/fs/TrackingLocalFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
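/**
 * A RawLocalFileSystem whose input streams bump the FileSystem.Statistics
 * read-operation counter on every read, so callers (e.g. benchmarks) can observe
 * how many read calls were issued against the local file.
 */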
public class TrackingLocalFileSystem extends RawLocalFileSystem {
static final URI NAME = URI.create("track:///");
class TrackingFileInputStream extends RawLocalFileSystem.LocalFSFileInputStream {
TrackingFileInputStream(Path f) throws IOException {
super(f);
}
public int read() throws IOException {
statistics.incrementReadOps(1);
return super.read();
}
public int read(byte[] b, int off, int len) throws IOException {
statistics.incrementReadOps(1);
return super.read(b, off, len);
}
public int read(long position, byte[] b, int off, int len) throws IOException {
statistics.incrementReadOps(1);
return super.read(position, b, off, len);
}
}
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException(f.toString());
}
return new FSDataInputStream(new BufferedFSInputStream(
new TrackingFileInputStream(f), bufferSize));
}
@Override
public URI getUri() {
return NAME;
}
public FileSystem.Statistics getLocalStatistics() {
return statistics;
}
}
| 2,074 | 30.439394 | 83 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/BenchmarkOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
public class BenchmarkOptions {
public static final String HELP = "help";
public static final String ITERATIONS = "iterations";
public static final String WARMUP_ITERATIONS = "warmup-iterations";
public static final String FORK = "fork";
public static final String TIME = "time";
public static final String MIN_MEMORY = "min-memory";
public static final String MAX_MEMORY = "max-memory";
public static final String GC = "gc";
public static final String STACK_PROFILE = "stack";
public static CommandLine parseCommandLine(String[] args) {
Options options = new Options()
.addOption("h", HELP, false, "Provide help")
.addOption("i", ITERATIONS, true, "Number of iterations")
.addOption("I", WARMUP_ITERATIONS, true, "Number of warmup iterations")
.addOption("f", FORK, true, "How many forks to use")
.addOption("t", TIME, true, "How long each iteration is in seconds")
.addOption("m", MIN_MEMORY, true, "The minimum size of each JVM")
.addOption("M", MAX_MEMORY, true, "The maximum size of each JVM")
.addOption("p", STACK_PROFILE, false, "Should enable stack profiler in JMH")
.addOption("g", GC, false, "Should GC be profiled");
CommandLine result;
try {
result = new DefaultParser().parse(options, args, true);
} catch (ParseException pe) {
System.err.println("Argument exception - " + pe.getMessage());
result = null;
}
if (result == null || result.hasOption(HELP) || result.getArgs().length == 0) {
new HelpFormatter().printHelp("java -jar <jar> <command> <options> <data>",
options);
System.err.println();
System.exit(1);
}
return result;
}
}
| 2,783 | 41.181818 | 84 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/CompressionKind.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import io.airlift.compress.snappy.SnappyCodec;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
/**
* Enum for handling the compression codecs for the benchmark
*/
public enum CompressionKind {
NONE("none"),
ZLIB("gz"),
SNAPPY("snappy"),
ZSTD("zstd");
CompressionKind(String extension) {
this.extension = extension;
}
private final String extension;
public String getExtension() {
return extension;
}
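  // Illustrative use (the file name is an example only):
  //   OutputStream out = CompressionKind.ZLIB.create(new FileOutputStream("rows.json.gz"));
  //   CompressionKind kind = CompressionKind.fromPath(new Path("rows.json.gz")); // -> ZLIB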
public OutputStream create(OutputStream out) throws IOException {
switch (this) {
case NONE:
return out;
case ZLIB:
return new GZIPOutputStream(out);
case SNAPPY:
return new SnappyCodec().createOutputStream(out);
default:
throw new IllegalArgumentException("Unhandled kind " + this);
}
}
public InputStream read(InputStream in) throws IOException {
switch (this) {
case NONE:
return in;
case ZLIB:
return new GZIPInputStream(in);
case SNAPPY:
return new SnappyCodec().createInputStream(in);
default:
throw new IllegalArgumentException("Unhandled kind " + this);
}
}
public static CompressionKind fromPath(Path path) {
String name = path.getName();
int lastDot = name.lastIndexOf('.');
if (lastDot >= 0) {
String ext = name.substring(lastDot);
for (CompressionKind value : values()) {
if (ext.equals("." + value.getExtension())) {
return value;
}
}
}
return NONE;
}
public static CompressionKind fromExtension(String extension) {
for (CompressionKind value: values()) {
if (value.extension.equals(extension)) {
return value;
}
}
throw new IllegalArgumentException("Unknown compression " + extension);
}
}
| 2,772 | 27.295918 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/Driver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import java.util.Arrays;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.TreeMap;
/**
* A driver tool to call the various benchmark classes.
*/
public class Driver {
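  // OrcBenchmark implementations are discovered through the JDK ServiceLoader mechanism,
  // so each provider must be listed in a META-INF/services/org.apache.orc.bench.core.OrcBenchmark
  // resource on the classpath.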
private static final ServiceLoader<OrcBenchmark> loader =
ServiceLoader.load(OrcBenchmark.class);
private static Map<String, OrcBenchmark> getBenchmarks() {
Map<String, OrcBenchmark> result = new TreeMap<>();
for(OrcBenchmark bench: loader) {
result.put(bench.getName(), bench);
}
return result;
}
private static final String PATTERN = " %10s - %s";
private static void printUsageAndExit(Map<String, OrcBenchmark> benchmarks) {
System.err.println("Commands:");
for(OrcBenchmark bench: benchmarks.values()) {
System.err.println(String.format(PATTERN, bench.getName(),
bench.getDescription()));
}
System.exit(1);
}
public static void main(String[] args) throws Exception {
Map<String, OrcBenchmark> benchmarks = getBenchmarks();
if (args.length == 0) {
printUsageAndExit(benchmarks);
}
String command = args[0];
args = Arrays.copyOfRange(args, 1, args.length);
OrcBenchmark bench = benchmarks.get(command);
if (bench == null) {
printUsageAndExit(benchmarks);
System.exit(1);
}
bench.run(args);
}
}
| 2,172 | 31.432836 | 79 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/IOCounters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import org.openjdk.jmh.annotations.AuxCounters;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
/**
 * A class to track the number of rows and bytes that have been read or written,
 * and the number of read/write operations performed.
 * The counters are named "IO" rather than "RW" to avoid ambiguity: "RW" could suggest
 * totals that combine reads and writes, whereas these counters track either reads or
 * writes, depending on how they are used.
*/
@AuxCounters(AuxCounters.Type.EVENTS)
@State(Scope.Thread)
public class IOCounters {
long bytesIO;
long io;
RecordCounters recordCounters;
@Setup(Level.Iteration)
public void setup(RecordCounters records) {
bytesIO = 0;
io = 0;
recordCounters = records;
}
@TearDown(Level.Iteration)
public void print() {
if (recordCounters != null) {
recordCounters.print();
}
System.out.println("io: " + io);
System.out.println("Bytes: " + bytesIO);
}
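  /**
   * Average number of bytes transferred per record in the current iteration.
   */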
public double bytesPerRecord() {
return recordCounters == null || recordCounters.records == 0 ?
0 : ((double) bytesIO) / recordCounters.records;
}
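  /**
   * Average number of records processed per invocation.
   */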
public long records() {
return recordCounters == null || recordCounters.invocations == 0 ?
0 : recordCounters.records / recordCounters.invocations;
}
/**
   * Capture the average number of I/O operations per invocation.
*/
public long ops() {
return recordCounters == null || recordCounters.invocations == 0 ?
0 : io / recordCounters.invocations;
}
public void addRecords(long value) {
if (recordCounters != null) {
recordCounters.records += value;
}
}
public void addInvocation() {
if (recordCounters != null) {
recordCounters.invocations += 1;
}
}
public void addBytes(long newIOs, long newBytes) {
bytesIO += newBytes;
io += newIOs;
}
}
| 2,780 | 29.228261 | 102 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/NullFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
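/**
 * A FileSystem that goes nowhere: open() returns an immediately-empty stream and
 * create()/append() return streams that silently drop every byte written, which makes
 * it handy for exercising the write path without real disk I/O.
 */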
public class NullFileSystem extends FileSystem {
@Override
public URI getUri() {
try {
return new URI("null:///");
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Bad URL", e);
}
}
@Override
public FSDataInputStream open(Path path, int i) {
return new FSDataInputStream(new InputStream() {
@Override
public int read() {
return -1;
}
});
}
static class NullOutput extends OutputStream {
@Override
public void write(int b) {
// pass
}
public void write(byte[] buffer, int offset, int length) {
// pass
}
}
private static final OutputStream NULL_OUTPUT = new NullOutput();
@Override
public FSDataOutputStream create(Path path,
FsPermission fsPermission,
boolean b,
int i,
short i1,
long l,
Progressable progressable) throws IOException {
return new FSDataOutputStream(NULL_OUTPUT, null);
}
@Override
public FSDataOutputStream append(Path path,
int i,
Progressable progressable) throws IOException {
return new FSDataOutputStream(NULL_OUTPUT, null);
}
@Override
public boolean rename(Path path, Path path1) {
return false;
}
@Override
public boolean delete(Path path, boolean b) {
return false;
}
@Override
public FileStatus[] listStatus(Path path) {
return null;
}
@Override
public void setWorkingDirectory(Path path) {
// pass
}
@Override
public Path getWorkingDirectory() {
return null;
}
@Override
public boolean mkdirs(Path path, FsPermission fsPermission) {
return false;
}
@Override
public FileStatus getFileStatus(Path path) {
return null;
}
}
| 3,254 | 25.900826 | 82 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/OrcBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
/**
* API to support adding additional benchmarks to the Driver.
*/
public interface OrcBenchmark {
/**
* Get the name of the subcommand to invoke this benchmark.
* @return a simple string, hopefully lowercase
*/
String getName();
/**
   * The human readable description of this benchmark.
   * @return a short description of what this benchmark measures
*/
String getDescription();
/**
   * Run the benchmark.
   * @param args the arguments from the user
   * @throws Exception if the benchmark fails
*/
void run(String[] args) throws Exception;
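  // A minimal implementation sketch (the class name and strings are illustrative only,
  // not part of the benchmark suite):
  //   public class NoOpBenchmark implements OrcBenchmark {
  //     public String getName() { return "noop"; }
  //     public String getDescription() { return "placeholder that does nothing"; }
  //     public void run(String[] args) { }
  //   }
  // The Driver only finds implementations that are also registered as ServiceLoader providers.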
}
| 1,347 | 28.955556 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/RandomGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
public class RandomGenerator {
private final TypeDescription schema = TypeDescription.createStruct();
private final List<Field> fields = new ArrayList<>();
private final Random random;
public RandomGenerator(int seed) {
random = new Random(seed);
}
private abstract class ValueGenerator {
double nullProbability = 0;
abstract void generate(ColumnVector vector, int valueCount);
}
private class RandomBoolean extends ValueGenerator {
public void generate(ColumnVector v, int valueCount) {
LongColumnVector vector = (LongColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
vector.vector[r] = random.nextInt(2);
}
}
}
}
private class RandomList extends ValueGenerator {
private final int minSize;
private final int sizeRange;
private final Field child;
RandomList(int minSize, int maxSize, Field child) {
this.minSize = minSize;
this.sizeRange = maxSize - minSize + 1;
this.child = child;
}
public void generate(ColumnVector v, int valueCount) {
ListColumnVector vector = (ListColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
vector.offsets[r] = vector.childCount;
vector.lengths[r] = random.nextInt(sizeRange) + minSize;
vector.childCount += vector.lengths[r];
}
}
vector.child.ensureSize(vector.childCount, false);
child.generator.generate(vector.child, vector.childCount);
}
}
private class RandomStruct extends ValueGenerator {
private final Field[] children;
RandomStruct(Field[] children) {
this.children = children;
}
public void generate(ColumnVector v, int valueCount) {
StructColumnVector vector = (StructColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
}
}
for(int c=0; c < children.length; ++c) {
children[c].generator.generate(vector.fields[c], valueCount);
}
}
}
private abstract class IntegerGenerator extends ValueGenerator {
private final long sign;
private final long mask;
private IntegerGenerator(TypeDescription.Category kind) {
int bits = getIntegerLength(kind);
mask = bits == 64 ? 0 : -1L << bits;
sign = 1L << (bits - 1);
}
protected void normalize(LongColumnVector vector, int valueCount) {
// make sure the value stays in range by sign extending it
for(int r=0; r < valueCount; ++r) {
if ((vector.vector[r] & sign) == 0) {
vector.vector[r] &= ~mask;
} else {
vector.vector[r] |= mask;
}
}
}
}
private class AutoIncrement extends IntegerGenerator {
private long value;
private final long increment;
private AutoIncrement(TypeDescription.Category kind, long start,
long increment) {
super(kind);
this.value = start;
this.increment = increment;
}
public void generate(ColumnVector v, int valueCount) {
LongColumnVector vector = (LongColumnVector) v;
for(int r=0; r < valueCount; ++r) {
        // null out the value with probability nullProbability
        if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
vector.vector[r] = value;
value += increment;
}
}
normalize(vector, valueCount);
}
}
private class RandomInteger extends IntegerGenerator {
private RandomInteger(TypeDescription.Category kind) {
super(kind);
}
public void generate(ColumnVector v, int valueCount) {
LongColumnVector vector = (LongColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
vector.vector[r] = random.nextLong();
}
}
normalize(vector, valueCount);
}
}
private class IntegerRange extends IntegerGenerator {
private final long minimum;
private final long range;
private final long limit;
private IntegerRange(TypeDescription.Category kind, long minimum,
long maximum) {
super(kind);
this.minimum = minimum;
this.range = maximum - minimum + 1;
if (this.range < 0) {
throw new IllegalArgumentException("Can't support a negative range "
+ range);
}
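      // largest multiple of range that fits in a non-negative long; draws at or above this
      // limit are rejected in generate() so the modulo below it is unbiased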
limit = (Long.MAX_VALUE / range) * range;
}
public void generate(ColumnVector v, int valueCount) {
LongColumnVector vector = (LongColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
long rand;
do {
// clear the sign bit
rand = random.nextLong() & Long.MAX_VALUE;
} while (rand >= limit);
vector.vector[r] = (rand % range) + minimum;
}
}
normalize(vector, valueCount);
}
}
private class StringChooser extends ValueGenerator {
private final byte[][] choices;
private StringChooser(String[] values) {
choices = new byte[values.length][];
for(int e=0; e < values.length; ++e) {
choices[e] = values[e].getBytes(StandardCharsets.UTF_8);
}
}
public void generate(ColumnVector v, int valueCount) {
BytesColumnVector vector = (BytesColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
int val = random.nextInt(choices.length);
vector.setRef(r, choices[val], 0, choices[val].length);
}
}
}
}
private static byte[] concat(byte[] left, byte[] right) {
byte[] result = new byte[left.length + right.length];
System.arraycopy(left, 0, result, 0, left.length);
System.arraycopy(right, 0, result, left.length, right.length);
return result;
}
private static byte pickOne(byte[] choices, Random random) {
return choices[random.nextInt(choices.length)];
}
private static final byte[] LOWER_CONSONANTS =
"bcdfghjklmnpqrstvwxyz".getBytes(StandardCharsets.UTF_8);
private static final byte[] UPPER_CONSONANTS =
"BCDFGHJKLMNPQRSTVWXYZ".getBytes(StandardCharsets.UTF_8);
private static final byte[] CONSONANTS =
concat(LOWER_CONSONANTS, UPPER_CONSONANTS);
private static final byte[] LOWER_VOWELS = "aeiou".getBytes(StandardCharsets.UTF_8);
private static final byte[] UPPER_VOWELS = "AEIOU".getBytes(StandardCharsets.UTF_8);
private static final byte[] VOWELS = concat(LOWER_VOWELS, UPPER_VOWELS);
private static final byte[] LOWER_LETTERS =
concat(LOWER_CONSONANTS, LOWER_VOWELS);
private static final byte[] UPPER_LETTERS =
concat(UPPER_CONSONANTS, UPPER_VOWELS);
private static final byte[] LETTERS = concat(LOWER_LETTERS, UPPER_LETTERS);
private static final byte[] NATURAL_DIGITS = "123456789".getBytes(StandardCharsets.UTF_8);
private static final byte[] DIGITS = "0123456789".getBytes(StandardCharsets.UTF_8);
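  /**
   * Generates strings from a template. Each template character below is
   * replaced with a random pick from the corresponding alphabet; any other
   * character is copied through unchanged:
   *   C / c / E - upper, lower, or mixed-case consonant
   *   V / v / F - upper, lower, or mixed-case vowel
   *   L / l / D - upper, lower, or mixed-case letter
   *   x / X     - digit 1-9 or digit 0-9
   */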
private class StringPattern extends ValueGenerator {
private final byte[] buffer;
private final byte[][] choices;
private final int[] locations;
private StringPattern(String pattern) {
buffer = pattern.getBytes(StandardCharsets.UTF_8);
int locs = 0;
for(int i=0; i < buffer.length; ++i) {
switch (buffer[i]) {
case 'C':
case 'c':
case 'E':
case 'V':
case 'v':
case 'F':
case 'l':
case 'L':
case 'D':
case 'x':
case 'X':
locs += 1;
break;
default:
break;
}
}
locations = new int[locs];
choices = new byte[locs][];
locs = 0;
for(int i=0; i < buffer.length; ++i) {
switch (buffer[i]) {
case 'C':
locations[locs] = i;
choices[locs++] = UPPER_CONSONANTS;
break;
case 'c':
locations[locs] = i;
choices[locs++] = LOWER_CONSONANTS;
break;
case 'E':
locations[locs] = i;
choices[locs++] = CONSONANTS;
break;
case 'V':
locations[locs] = i;
choices[locs++] = UPPER_VOWELS;
break;
case 'v':
locations[locs] = i;
choices[locs++] = LOWER_VOWELS;
break;
case 'F':
locations[locs] = i;
choices[locs++] = VOWELS;
break;
case 'l':
locations[locs] = i;
choices[locs++] = LOWER_LETTERS;
break;
case 'L':
locations[locs] = i;
choices[locs++] = UPPER_LETTERS;
break;
case 'D':
locations[locs] = i;
choices[locs++] = LETTERS;
break;
case 'x':
locations[locs] = i;
choices[locs++] = NATURAL_DIGITS;
break;
case 'X':
locations[locs] = i;
choices[locs++] = DIGITS;
break;
default:
break;
}
}
}
public void generate(ColumnVector v, int valueCount) {
BytesColumnVector vector = (BytesColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
for(int m=0; m < locations.length; ++m) {
buffer[locations[m]] = pickOne(choices[m], random);
}
vector.setVal(r, buffer, 0, buffer.length);
}
}
}
}
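  /**
   * Generates timestamps uniformly distributed between two wall-clock values
   * (inclusive), using the same rejection-sampling trick as IntegerRange to
   * avoid modulo bias; the nanosecond field gets a random sub-millisecond part.
   */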
private class TimestampRange extends ValueGenerator {
private final long minimum;
private final long range;
private final long limit;
private TimestampRange(String min, String max) {
minimum = Timestamp.valueOf(min).getTime();
range = Timestamp.valueOf(max).getTime() - minimum + 1;
if (range < 0) {
throw new IllegalArgumentException("Negative range " + range);
}
limit = (Long.MAX_VALUE / range) * range;
}
public void generate(ColumnVector v, int valueCount) {
TimestampColumnVector vector = (TimestampColumnVector) v;
for(int r=0; r < valueCount; ++r) {
if (nullProbability != 0 && random.nextDouble() < nullProbability) {
v.noNulls = false;
v.isNull[r] = true;
} else {
long rand;
do {
// clear the sign bit
rand = random.nextLong() & Long.MAX_VALUE;
} while (rand >= limit);
vector.time[r] = (rand % range) + minimum;
vector.nanos[r] = random.nextInt(1000000);
}
}
}
}
private static int getIntegerLength(TypeDescription.Category kind) {
switch (kind) {
case BYTE:
return 8;
case SHORT:
return 16;
case INT:
return 32;
case LONG:
return 64;
default:
throw new IllegalArgumentException("Unhandled type " + kind);
}
}
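  /**
   * A single column of the generated table. The add* methods attach a value
   * generator to the column and return this Field so that calls can be
   * chained; non-primitive types expose their children via getChildField.
   */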
public class Field {
private final TypeDescription type;
private Field[] children;
private ValueGenerator generator;
private Field(TypeDescription type) {
this.type = type;
if (!type.getCategory().isPrimitive()) {
List<TypeDescription> childrenTypes = type.getChildren();
children = new Field[childrenTypes.size()];
for(int c=0; c < children.length; ++c) {
children[c] = new Field(childrenTypes.get(c));
}
}
}
public Field addAutoIncrement(long start, long increment) {
generator = new AutoIncrement(type.getCategory(), start, increment);
return this;
}
public Field addIntegerRange(long min, long max) {
generator = new IntegerRange(type.getCategory(), min, max);
return this;
}
public Field addRandomInt() {
generator = new RandomInteger(type.getCategory());
return this;
}
public Field addStringChoice(String... choices) {
if (type.getCategory() != TypeDescription.Category.STRING) {
throw new IllegalArgumentException("Must be string - " + type);
}
generator = new StringChooser(choices);
return this;
}
public Field addStringPattern(String pattern) {
if (type.getCategory() != TypeDescription.Category.STRING) {
throw new IllegalArgumentException("Must be string - " + type);
}
generator = new StringPattern(pattern);
return this;
}
public Field addTimestampRange(String start, String end) {
if (type.getCategory() != TypeDescription.Category.TIMESTAMP) {
throw new IllegalArgumentException("Must be timestamp - " + type);
}
generator = new TimestampRange(start, end);
return this;
}
public Field addBoolean() {
if (type.getCategory() != TypeDescription.Category.BOOLEAN) {
throw new IllegalArgumentException("Must be boolean - " + type);
}
generator = new RandomBoolean();
return this;
}
public Field hasNulls(double probability) {
generator.nullProbability = probability;
return this;
}
public Field addStruct() {
generator = new RandomStruct(children);
return this;
}
public Field addList(int minSize, int maxSize) {
generator = new RandomList(minSize, maxSize, children[0]);
return this;
}
public Field getChildField(int child) {
return children[child];
}
}
public Field addField(String name, TypeDescription.Category kind) {
TypeDescription type = new TypeDescription(kind);
return addField(name, type);
}
public Field addField(String name, TypeDescription type) {
schema.addField(name, type);
Field result = new Field(type);
fields.add(result);
return result;
}
public void generate(VectorizedRowBatch batch, int rowCount) {
batch.reset();
for(int c=0; c < batch.cols.length; ++c) {
fields.get(c).generator.generate(batch.cols[c], rowCount);
}
batch.size = rowCount;
}
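  // A minimal usage sketch (the column names here are illustrative only,
  // not taken from the benchmark data sets):
  //   RandomGenerator gen = new RandomGenerator(42);
  //   gen.addField("id", TypeDescription.Category.LONG).addAutoIncrement(0, 1);
  //   gen.addField("code", TypeDescription.Category.STRING).addStringPattern("LLxx");
  //   VectorizedRowBatch batch = gen.getSchema().createRowBatch();
  //   gen.generate(batch, batch.getMaxSize());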
/**
* Get the schema for the table that is being generated.
   * @return the schema of the table being generated
*/
public TypeDescription getSchema() {
return schema;
}
}
| 16,416 | 30.270476 | 92 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/RecordCounters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import org.openjdk.jmh.annotations.AuxCounters;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
/**
* A class to track the number of rows that have been read.
*/
@AuxCounters(AuxCounters.Type.OPERATIONS)
@State(Scope.Thread)
public class RecordCounters {
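  // Reported by JMH as auxiliary operation counters, so results can be
  // normalized per record as well as per benchmark invocation.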
long records;
long invocations;
@Setup(Level.Iteration)
public void setup() {
records = 0;
invocations = 0;
}
public long perRecord() {
return records;
}
public void print() {
System.out.println();
System.out.println("Records: " + records);
System.out.println("Invocations: " + invocations);
}
}
| 1,569 | 28.622642 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/SalesGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.convert.BatchReader;
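/**
 * Synthesizes a sales-like table through RandomGenerator, with per-column
 * null ratios and value distributions fixed in the constructor below.
 */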
public class SalesGenerator implements BatchReader {
private final RandomGenerator generator;
private long rowsRemaining;
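  // Null probability used for columns that should almost always be null.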
private static final double MOSTLY = 0.99999;
public SalesGenerator(long rows) {
this(rows, 42);
}
public SalesGenerator(long rows, int seed) {
generator = new RandomGenerator(seed);
// column 1
generator.addField("sales_id", TypeDescription.Category.LONG)
.addAutoIncrement(1000000000, 1);
generator.addField("customer_id", TypeDescription.Category.LONG)
.addIntegerRange(1000000000, 2000000000);
generator.addField("col3", TypeDescription.Category.LONG)
.addIntegerRange(1, 10000).hasNulls(0.9993100389335173);
// column 4
generator.addField("item_category", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000).hasNulls(0.00014784879996054823);
generator.addField("item_count", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000);
generator.addField("change_ts", TypeDescription.Category.TIMESTAMP)
.addTimestampRange("2003-01-01 00:00:00", "2017-03-14 23:59:59");
// column 7
generator.addField("store_location", TypeDescription.Category.STRING)
.addStringChoice("Los Angeles", "New York", "Cupertino", "Sunnyvale",
"Boston", "Chicago", "Seattle", "Jackson",
"Palo Alto", "San Mateo", "San Jose", "Santa Clara",
"Irvine", "Torrance", "Gardena", "Hermosa", "Manhattan")
.hasNulls(0.0004928293332019384);
generator.addField("associate_id", TypeDescription.Category.STRING)
.addStringPattern("MR V").hasNulls(0.05026859198659506);
generator.addField("col9", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000000).hasNulls(MOSTLY);
// column 10
generator.addField("rebate_id", TypeDescription.Category.STRING)
.addStringPattern("xxxxxx").hasNulls(MOSTLY);
generator.addField("create_ts", TypeDescription.Category.TIMESTAMP)
.addTimestampRange("2003-01-01 00:00:00", "2017-03-14 23:59:59");
generator.addField("col13", TypeDescription.Category.LONG)
.addIntegerRange(1, 100000).hasNulls(MOSTLY);
// column 13
generator.addField("size", TypeDescription.Category.STRING)
.addStringChoice("Small", "Medium", "Large", "XL")
.hasNulls(0.9503720861465674);
generator.addField("col14", TypeDescription.Category.LONG)
.addIntegerRange(1, 100000);
generator.addField("fulfilled", TypeDescription.Category.BOOLEAN)
.addBoolean();
// column 16
generator.addField("global_id", TypeDescription.Category.STRING)
.addStringPattern("xxxxxxxxxxxxxxxx").hasNulls(0.021388793060962974);
generator.addField("col17", TypeDescription.Category.STRING)
.addStringPattern("L-xx").hasNulls(MOSTLY);
generator.addField("col18", TypeDescription.Category.STRING)
.addStringPattern("ll").hasNulls(MOSTLY);
// column 19
generator.addField("col19", TypeDescription.Category.LONG)
.addIntegerRange(1, 100000);
generator.addField("has_rebate", TypeDescription.Category.BOOLEAN)
.addBoolean();
RandomGenerator.Field list =
generator.addField("col21",
TypeDescription.fromString("array<struct<sub1:bigint,sub2:string," +
"sub3:string,sub4:bigint,sub5:bigint,sub6:string>>"))
.addList(0, 3)
.hasNulls(MOSTLY);
RandomGenerator.Field struct = list.getChildField(0).addStruct();
struct.getChildField(0).addIntegerRange(0, 10000000);
struct.getChildField(1).addStringPattern("VVVVV");
struct.getChildField(2).addStringPattern("VVVVVVVV");
struct.getChildField(3).addIntegerRange(0, 10000000);
struct.getChildField(4).addIntegerRange(0, 10000000);
struct.getChildField(5).addStringPattern("VVVVVVVV");
// column 38
generator.addField("vendor_id", TypeDescription.Category.STRING)
.addStringPattern("Lxxxxxx").hasNulls(0.1870780148834459);
generator.addField("country", TypeDescription.Category.STRING)
.addStringChoice("USA", "Germany", "Ireland", "Canada", "Mexico",
"Denmark").hasNulls(0.0004928293332019384);
// column 40
generator.addField("backend_version", TypeDescription.Category.STRING)
.addStringPattern("X.xx").hasNulls(0.0005913951998423039);
generator.addField("col41", TypeDescription.Category.LONG)
.addIntegerRange(1000000000, 100000000000L);
generator.addField("col42", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000000);
// column 43
generator.addField("col43", TypeDescription.Category.LONG)
.addIntegerRange(1000000000, 10000000000L).hasNulls(0.9763934749396284);
generator.addField("col44", TypeDescription.Category.LONG)
.addIntegerRange(1, 100000000);
generator.addField("col45", TypeDescription.Category.LONG)
.addIntegerRange(1, 100000000);
// column 46
generator.addField("col46", TypeDescription.Category.LONG)
.addIntegerRange(1, 10000000);
generator.addField("col47", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000);
generator.addField("col48", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000).hasNulls(MOSTLY);
// column 49
generator.addField("col49", TypeDescription.Category.STRING)
.addStringPattern("xxxx").hasNulls(0.0004928293332019384);
generator.addField("col50", TypeDescription.Category.STRING)
.addStringPattern("ll").hasNulls(0.9496821250800848);
generator.addField("col51", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000).hasNulls(0.9999014341333596);
// column 52
generator.addField("col52", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000).hasNulls(0.9980779656005125);
generator.addField("col53", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000000);
generator.addField("col54", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000000);
// column 55
generator.addField("col55", TypeDescription.Category.STRING)
.addStringChoice("X");
generator.addField("col56", TypeDescription.Category.TIMESTAMP)
.addTimestampRange("2003-01-01 00:00:00", "2017-03-14 23:59:59");
generator.addField("col57", TypeDescription.Category.TIMESTAMP)
.addTimestampRange("2003-01-01 00:00:00", "2017-03-14 23:59:59");
// column 58
generator.addField("md5", TypeDescription.Category.LONG)
.addRandomInt();
generator.addField("col59", TypeDescription.Category.LONG)
.addIntegerRange(1000000000, 10000000000L);
generator.addField("col69", TypeDescription.Category.TIMESTAMP)
.addTimestampRange("2003-01-01 00:00:00", "2017-03-14 23:59:59")
.hasNulls(MOSTLY);
// column 61
generator.addField("col61", TypeDescription.Category.STRING)
.addStringPattern("X.xx").hasNulls(0.11399142476960233);
generator.addField("col62", TypeDescription.Category.STRING)
.addStringPattern("X.xx").hasNulls(0.9986200778670347);
generator.addField("col63", TypeDescription.Category.TIMESTAMP)
.addTimestampRange("2003-01-01 00:00:00", "2017-03-14 23:59:59");
// column 64
generator.addField("col64", TypeDescription.Category.LONG)
.addIntegerRange(1, 1000000).hasNulls(MOSTLY);
rowsRemaining = rows;
}
public boolean nextBatch(VectorizedRowBatch batch) {
int rows = (int) Math.min(batch.getMaxSize(), rowsRemaining);
generator.generate(batch, rows);
rowsRemaining -= rows;
return rows != 0;
}
@Override
public void close() {
// PASS
}
public TypeDescription getSchema() {
return generator.getSchema();
}
public static void main(String[] args) throws Exception {
SalesGenerator sales = new SalesGenerator(10, 42);
System.out.println("Schema " + sales.getSchema());
}
}
| 8,916 | 42.077295 | 80 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/Utilities.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core;
import com.google.common.base.Preconditions;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.fs.Path;
import org.apache.orc.TypeDescription;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
public class Utilities {
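  /**
   * Loads an ORC TypeDescription from a schema file on the classpath,
   * stripping newlines and spaces so the text parses as a single string.
   */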
public static TypeDescription loadSchema(String name) throws IOException {
InputStream in = Utilities.class.getClassLoader().getResourceAsStream(name);
Preconditions.checkArgument(in != null, "Schema not found: " + name);
    byte[] buffer = new byte[1024];
int len = in.read(buffer);
StringBuilder string = new StringBuilder();
while (len > 0) {
for(int i=0; i < len; ++i) {
        // strip out newlines and spaces so the schema parses as one string
if (buffer[i] != '\n' && buffer[i] != ' ') {
string.append((char) buffer[i]);
}
}
len = in.read(buffer);
}
return TypeDescription.fromString(string.toString());
}
public static org.apache.orc.CompressionKind getCodec(CompressionKind compression) {
switch (compression) {
case NONE:
return org.apache.orc.CompressionKind.NONE;
case ZLIB:
return org.apache.orc.CompressionKind.ZLIB;
case SNAPPY:
return org.apache.orc.CompressionKind.SNAPPY;
case ZSTD:
return org.apache.orc.CompressionKind.ZSTD;
default:
throw new IllegalArgumentException("Unknown compression " + compression);
}
}
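  /**
   * Builds the path of a generated data variant under
   * {root}/generated/{data}/{format}.{compress}.
   */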
public static Path getVariant(Path root,
String data,
String format,
String compress) {
return new Path(root, "generated/" + data + "/" + format + "." + compress);
}
private static final String ROOT_PROPERTY_NAME = "bench.root.dir";
/**
* Get the benchmark data root in the child jvm.
* @return the path to the benchmark data or null if it wasn't found
*/
public static Path getBenchmarkRoot() {
String value = System.getProperty(ROOT_PROPERTY_NAME);
return value == null ? null : new Path(value);
}
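  /**
   * Translates the shared benchmark command line into JMH runner options and
   * forwards the data root to the forked JVMs through a system property.
   */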
public static Options parseOptions(String[] args,
Class cls) throws IOException {
CommandLine options = BenchmarkOptions.parseCommandLine(args);
String dataPath = new File(options.getArgs()[0]).getCanonicalPath();
OptionsBuilder builder = new OptionsBuilder();
builder.include(cls.getSimpleName());
if (options.hasOption(BenchmarkOptions.GC)) {
builder.addProfiler("hs_gc");
}
if (options.hasOption(BenchmarkOptions.STACK_PROFILE)) {
builder.addProfiler("stack");
}
builder.measurementIterations(Integer.parseInt(options.getOptionValue(
BenchmarkOptions.ITERATIONS, "5")));
builder.warmupIterations(Integer.parseInt(options.getOptionValue(
BenchmarkOptions.WARMUP_ITERATIONS, "2")));
builder.forks(Integer.parseInt(options.getOptionValue(
BenchmarkOptions.FORK, "1")));
TimeValue iterationTime = TimeValue.seconds(Long.parseLong(
options.getOptionValue(BenchmarkOptions.TIME, "10")));
builder.measurementTime(iterationTime);
builder.warmupTime(iterationTime);
String minMemory = options.getOptionValue(BenchmarkOptions.MIN_MEMORY, "256m");
String maxMemory = options.getOptionValue(BenchmarkOptions.MAX_MEMORY, "2g");
builder.jvmArgs("-server",
"-Xms"+ minMemory, "-Xmx" + maxMemory,
"-D" + ROOT_PROPERTY_NAME + "=" + dataPath);
return builder.build();
}
}
| 4,463 | 37.482759 | 86 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/BatchReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import java.io.IOException;
/**
* Generic interface for reading data.
*/
public interface BatchReader extends AutoCloseable {
boolean nextBatch(VectorizedRowBatch batch) throws IOException;
@Override
void close() throws IOException;
}
| 1,169 | 32.428571 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/BatchWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import java.io.Closeable;
import java.io.IOException;
/**
* Generic interface for writing data.
*/
public interface BatchWriter extends Closeable {
void writeBatch(VectorizedRowBatch batch) throws IOException;
@Override
void close() throws IOException;
}
| 1,189 | 32.055556 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/GenerateVariants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert;
import com.google.auto.service.AutoService;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.SalesGenerator;
import org.apache.orc.bench.core.Utilities;
import org.apache.orc.bench.core.convert.avro.AvroReader;
import org.apache.orc.bench.core.convert.avro.AvroWriter;
import org.apache.orc.bench.core.convert.csv.CsvReader;
import org.apache.orc.bench.core.convert.json.JsonReader;
import org.apache.orc.bench.core.convert.json.JsonWriter;
import org.apache.orc.bench.core.convert.orc.OrcReader;
import org.apache.orc.bench.core.convert.orc.OrcWriter;
import org.apache.orc.bench.core.convert.parquet.ParquetReader;
import org.apache.orc.bench.core.convert.parquet.ParquetWriter;
import java.io.IOException;
import java.util.Arrays;
/**
* A tool to create the different variants that we need to benchmark against.
*/
@AutoService(OrcBenchmark.class)
public class GenerateVariants implements OrcBenchmark {
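  /**
   * Creates a writer for the requested format and compression, replacing any
   * existing file at the target path.
   */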
public static BatchWriter createFileWriter(Path file,
String format,
TypeDescription schema,
Configuration conf,
CompressionKind compress
) throws IOException {
FileSystem fs = file.getFileSystem(conf);
fs.delete(file, false);
fs.mkdirs(file.getParent());
switch (format) {
case "json":
return new JsonWriter(file, schema, conf, compress);
case "orc":
return new OrcWriter(file, schema, conf, compress);
case "avro":
return new AvroWriter(file, schema, conf, compress);
case "parquet":
return new ParquetWriter(file, schema, conf, compress);
default:
throw new IllegalArgumentException("Unknown format " + format);
}
}
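  /**
   * Creates a reader for a single file in the requested format.
   */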
public static BatchReader createFileReader(Path file,
String format,
TypeDescription schema,
Configuration conf,
CompressionKind compress
) throws IOException {
switch (format) {
case "csv":
return new CsvReader(file, schema, conf, compress);
case "json":
return new JsonReader(file, schema, conf, compress);
case "orc":
return new OrcReader(file, schema, conf);
case "avro":
return new AvroReader(file, schema, conf);
case "parquet":
return new ParquetReader(file, schema, conf);
default:
throw new IllegalArgumentException("Unknown format " + format);
}
}
@Override
public String getName() {
return "generate";
}
@Override
public String getDescription() {
return "generate all of the data variants";
}
@Override
public void run(String[] args) throws Exception {
CommandLine cli = parseCommandLine(args);
String[] compressList =
cli.getOptionValue("compress", "snappy,zlib,zstd").split(",");
String[] dataList =
cli.getOptionValue("data", "taxi,sales,github").split(",");
String[] formatList =
cli.getOptionValue("format", "avro,json,orc,parquet").split(",");
long records = Long.parseLong(cli.getOptionValue("sales", "25000000"));
Configuration conf = new Configuration();
// Disable Hadoop checksums
conf.set("fs.file.impl", "org.apache.hadoop.fs.RawLocalFileSystem");
Path root = new Path(cli.getArgs()[0]);
for (final String data: dataList) {
System.out.println("Processing " + data + " " + Arrays.toString(formatList));
// Set up the reader
TypeDescription schema = Utilities.loadSchema(data + ".schema");
// Set up the writers for each combination
BatchWriter[] writers = new BatchWriter[compressList.length * formatList.length];
for(int compress=0; compress < compressList.length; ++compress) {
CompressionKind compressionKind =
CompressionKind.valueOf(compressList[compress].toUpperCase());
for(int format=0; format < formatList.length; ++format) {
if (compressionKind == CompressionKind.ZSTD && formatList[format].equals("json")) {
System.out.println("Ignore JSON format with ZSTD compression case");
continue; // JSON doesn't support ZSTD
}
Path outPath = Utilities.getVariant(root, data, formatList[format],
compressionKind.getExtension());
writers[compress * formatList.length + format] =
createFileWriter(outPath, formatList[format], schema, conf,
compressionKind);
}
}
// Copy the rows from Reader
try (BatchReader reader = createReader(root, data, schema, conf, records)) {
VectorizedRowBatch batch = schema.createRowBatch();
while (reader.nextBatch(batch)) {
for (BatchWriter writer : writers) {
if (writer != null) {
writer.writeBatch(batch);
}
}
}
}
// Close all the writers
for (BatchWriter writer : writers) {
if (writer != null) {
writer.close();
}
}
}
}
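  /**
   * Reads every file under a directory tree as one continuous stream of
   * batches, lazily opening each file with the configured format reader.
   */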
public static class RecursiveReader implements BatchReader {
private final RemoteIterator<LocatedFileStatus> filenames;
private final String format;
private final TypeDescription schema;
private final Configuration conf;
private final CompressionKind compress;
private BatchReader current = null;
public RecursiveReader(Path root,
String format,
TypeDescription schema,
Configuration conf,
CompressionKind compress) throws IOException {
FileSystem fs = root.getFileSystem(conf);
filenames = fs.listFiles(root, true);
this.format = format;
this.schema = schema;
this.conf = conf;
this.compress = compress;
}
@Override
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
while (current == null || !current.nextBatch(batch)) {
if (filenames.hasNext()) {
LocatedFileStatus next = filenames.next();
if (next.isFile()) {
current = createFileReader(next.getPath(), format, schema, conf,
compress);
}
} else {
return false;
}
}
return true;
}
@Override
public void close() throws IOException {
if (current != null) {
current.close();
}
}
}
public static BatchReader createReader(Path root,
String dataName,
TypeDescription schema,
Configuration conf,
long salesRecords) throws IOException {
switch (dataName) {
case "taxi":
return new RecursiveReader(new Path(root, "sources/" + dataName), "parquet",
schema, conf, CompressionKind.NONE);
case "sales":
return new SalesGenerator(salesRecords);
case "github":
return new RecursiveReader(new Path(root, "sources/" + dataName), "json",
schema, conf, CompressionKind.ZLIB);
default:
throw new IllegalArgumentException("Unknown data name " + dataName);
}
}
public static CommandLine parseCommandLine(String[] args) throws ParseException {
Options options = new Options()
.addOption("h", "help", false, "Provide help")
.addOption("c", "compress", true, "List of compression")
.addOption("d", "data", true, "List of data sets")
.addOption("f", "format", true, "List of formats")
.addOption("s", "sales", true, "Number of records for sales");
CommandLine result = new DefaultParser().parse(options, args);
if (result.hasOption("help") || result.getArgs().length == 0) {
new HelpFormatter().printHelp("generate <root>", options);
System.exit(1);
}
return result;
}
}
| 9,616 | 37.314741 | 93 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/ScanVariants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert;
import com.google.auto.service.AutoService;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
/**
* A tool to create the different variants that we need to benchmark against.
*/
@AutoService(OrcBenchmark.class)
public class ScanVariants implements OrcBenchmark {
static CommandLine parseCommandLine(String[] args) throws ParseException {
Options options = new Options()
.addOption("h", "help", false, "Provide help")
.addOption("c", "compress", true, "List of compression")
.addOption("d", "data", true, "List of data sets")
.addOption("f", "format", true, "List of formats");
CommandLine result = new DefaultParser().parse(options, args);
if (result.hasOption("help") || result.getArgs().length == 0) {
new HelpFormatter().printHelp("scan <root>", options);
System.exit(1);
}
return result;
}
@Override
public String getName() {
return "scan";
}
@Override
public String getDescription() {
return "scan all of the data variants";
}
@Override
public void run(String[] args) throws Exception {
CommandLine cli = parseCommandLine(args);
String[] compressList =
cli.getOptionValue("compress", "snappy,gz,zstd").split(",");
String[] dataList =
cli.getOptionValue("data", "taxi,sales,github").split(",");
String[] formatList =
cli.getOptionValue("format", "avro,json,orc,parquet").split(",");
Configuration conf = new Configuration();
Path root = new Path(cli.getArgs()[0]);
for(String data: dataList) {
TypeDescription schema = Utilities.loadSchema(data + ".schema");
VectorizedRowBatch batch = schema.createRowBatch();
for (String compress : compressList) {
CompressionKind compressKind = CompressionKind.fromExtension(compress);
for (String format : formatList) {
if (compressKind == CompressionKind.ZSTD && format.equals("json")) {
continue; // JSON doesn't support ZSTD
}
Path filename = Utilities.getVariant(root, data, format,
compress);
BatchReader reader = GenerateVariants.createFileReader(filename,
format, schema, conf, compressKind);
long rows = 0;
long batches = 0;
while (reader.nextBatch(batch)) {
batches += 1;
rows += batch.size;
}
System.out.println(filename + " rows: " + rows + " batches: "
+ batches);
reader.close();
}
}
}
}
}
| 3,877 | 36.288462 | 79 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/avro/AvroReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.avro;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.convert.BatchReader;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
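/**
 * Reads an Avro data file into ORC VectorizedRowBatches, converting each
 * Avro field with a per-column AvroConverter derived from the ORC schema.
 */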
public class AvroReader implements BatchReader {
private final DataFileReader<GenericRecord> dataFileReader;
private GenericRecord record = null;
private final AvroConverter[] converters;
public AvroReader(Path path,
TypeDescription schema,
Configuration conf) throws IOException {
FsInput file = new FsInput(path, conf);
DatumReader<GenericRecord> datumReader = new GenericDatumReader<>();
dataFileReader = new DataFileReader<>(file, datumReader);
converters = buildConverters(schema);
}
@Override
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
batch.reset();
int maxSize = batch.getMaxSize();
while (dataFileReader.hasNext() && batch.size < maxSize) {
record = dataFileReader.next(record);
int row = batch.size++;
for(int c=0; c < converters.length; ++c) {
converters[c].convert(batch.cols[c], row, record.get(c));
}
}
return batch.size != 0;
}
@Override
public void close() throws IOException {
dataFileReader.close();
}
public interface AvroConverter {
void convert(ColumnVector vector, int row, Object value);
}
public static AvroConverter[] buildConverters(TypeDescription orcType) {
List<TypeDescription> children = orcType.getChildren();
AvroConverter[] result = new AvroConverter[children.size()];
for(int c=0; c < result.length; ++c) {
result[c] = createConverter(children.get(c));
}
return result;
}
private static class BooleanConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
((LongColumnVector) cv).vector[row] =
((Boolean) value).booleanValue() ? 1 : 0;
}
}
}
private static class IntConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
((LongColumnVector) cv).vector[row] =
((Integer) value).intValue();
}
}
}
private static class LongConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
((LongColumnVector) cv).vector[row] =
((Long) value).longValue();
}
}
}
private static class FloatConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
((DoubleColumnVector) cv).vector[row] =
((Float) value).floatValue();
}
}
}
private static class DoubleConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
((DoubleColumnVector) cv).vector[row] =
((Double) value).doubleValue();
}
}
}
private static class StringConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
byte[] bytes = ((Utf8) value).getBytes();
((BytesColumnVector) cv).setRef(row, bytes, 0, bytes.length);
}
}
}
private static class BinaryConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
ByteBuffer buf = (ByteBuffer) value;
((BytesColumnVector) cv).setVal(row, buf.array(), buf.arrayOffset(),
buf.remaining());
}
}
}
private static class TimestampConverter implements AvroConverter {
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
TimestampColumnVector tc = (TimestampColumnVector) cv;
tc.time[row] = ((Long) value).longValue();
tc.nanos[row] = 0;
}
}
}
private static class DecimalConverter implements AvroConverter {
final int scale;
final double multiplier;
DecimalConverter(int scale) {
this.scale = scale;
this.multiplier = Math.pow(10.0, this.scale);
}
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
DecimalColumnVector tc = (DecimalColumnVector) cv;
tc.vector[row].set(HiveDecimal.create(Math.round((double) value * multiplier)));
}
}
}
private static class ListConverter implements AvroConverter {
final AvroConverter childConverter;
ListConverter(TypeDescription schema) {
childConverter = createConverter(schema.getChildren().get(0));
}
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
ListColumnVector tc = (ListColumnVector) cv;
List array = (List) value;
int start = tc.childCount;
int len = array.size();
tc.childCount += len;
tc.child.ensureSize(tc.childCount, true);
for (int i = 0; i < len; ++i) {
childConverter.convert(tc.child, start + i, array.get(i));
}
}
}
}
private static class StructConverter implements AvroConverter {
final AvroConverter[] childConverters;
StructConverter(TypeDescription schema) {
List<TypeDescription> children = schema.getChildren();
childConverters = new AvroConverter[children.size()];
for(int i=0; i < childConverters.length; ++i) {
childConverters[i] = createConverter(children.get(i));
}
}
public void convert(ColumnVector cv, int row, Object value) {
if (value == null) {
cv.noNulls = false;
cv.isNull[row] = true;
} else {
StructColumnVector tc = (StructColumnVector) cv;
GenericData.Record record = (GenericData.Record) value;
for(int c=0; c < tc.fields.length; ++c) {
childConverters[c].convert(tc.fields[c], row, record.get(c));
}
}
}
}
static AvroConverter createConverter(TypeDescription types) {
switch (types.getCategory()) {
case BINARY:
return new BinaryConverter();
case BOOLEAN:
return new BooleanConverter();
case BYTE:
case SHORT:
case INT:
return new IntConverter();
case LONG:
return new LongConverter();
case FLOAT:
return new FloatConverter();
case DOUBLE:
return new DoubleConverter();
case CHAR:
case VARCHAR:
case STRING:
return new StringConverter();
case TIMESTAMP:
return new TimestampConverter();
case DECIMAL:
return new DecimalConverter(types.getScale());
case LIST:
return new ListConverter(types);
case STRUCT:
return new StructConverter(types);
default:
throw new IllegalArgumentException("Unhandled type " + types);
}
}
static byte[] getBytesFromByteBuffer(ByteBuffer byteBuffer) {
byteBuffer.rewind();
byte[] result = new byte[byteBuffer.limit()];
byteBuffer.get(result);
return result;
}
}
| 9,725 | 31.528428 | 88 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/avro/AvroSchemaUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.avro;
import org.apache.avro.Schema;
import org.apache.orc.TypeDescription;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Converts an ORC TypeDescription (Hive TypeInfo) into an Avro Schema.
*/
public class AvroSchemaUtils {
private AvroSchemaUtils() {
// No instances
}
public static Schema createAvroSchema(TypeDescription typeInfo) {
Schema schema;
switch (typeInfo.getCategory()) {
case STRING:
schema = Schema.create(Schema.Type.STRING);
break;
case CHAR:
schema = getSchemaFor("{" +
"\"type\":\"string\"," +
"\"logicalType\":\"char\"," +
"\"maxLength\":" + typeInfo.getMaxLength() + "}");
break;
case VARCHAR:
schema = getSchemaFor("{" +
"\"type\":\"string\"," +
"\"logicalType\":\"varchar\"," +
"\"maxLength\":" + typeInfo.getMaxLength() + "}");
break;
case BINARY:
schema = Schema.create(Schema.Type.BYTES);
break;
case BYTE:
schema = Schema.create(Schema.Type.INT);
break;
case SHORT:
schema = Schema.create(Schema.Type.INT);
break;
case INT:
schema = Schema.create(Schema.Type.INT);
break;
case LONG:
schema = Schema.create(Schema.Type.LONG);
break;
case FLOAT:
schema = Schema.create(Schema.Type.FLOAT);
break;
case DOUBLE:
schema = Schema.create(Schema.Type.DOUBLE);
break;
case BOOLEAN:
schema = Schema.create(Schema.Type.BOOLEAN);
break;
case DECIMAL:
String precision = String.valueOf(typeInfo.getPrecision());
String scale = String.valueOf(typeInfo.getScale());
schema = getSchemaFor("{" +
"\"type\":\"bytes\"," +
"\"logicalType\":\"decimal\"," +
"\"precision\":" + precision + "," +
"\"scale\":" + scale + "}");
break;
case DATE:
schema = getSchemaFor("{" +
"\"type\":\"int\"," +
"\"logicalType\":\"date\"}");
break;
case TIMESTAMP:
schema = getSchemaFor("{" +
"\"type\":\"long\"," +
"\"logicalType\":\"timestamp-millis\"}");
break;
case LIST:
schema = createAvroArray(typeInfo);
break;
case MAP:
schema = createAvroMap(typeInfo);
break;
case STRUCT:
schema = createAvroRecord(typeInfo);
break;
case UNION:
schema = createAvroUnion(typeInfo);
break;
default:
throw new UnsupportedOperationException(typeInfo + " is not supported.");
}
return schema;
}
private static Schema createAvroUnion(TypeDescription typeInfo) {
List<Schema> childSchemas = new ArrayList<>();
for (TypeDescription childTypeInfo : typeInfo.getChildren()) {
Schema childSchema = createAvroSchema(childTypeInfo);
if (childSchema.getType() == Schema.Type.UNION) {
for (Schema grandkid: childSchema.getTypes()) {
          if (grandkid.getType() != Schema.Type.NULL) {
childSchemas.add(grandkid);
}
}
} else {
childSchemas.add(childSchema);
}
}
return wrapInUnionWithNull(Schema.createUnion(childSchemas));
}
private static Schema createAvroRecord(TypeDescription typeInfo) {
List<Schema.Field> childFields = new ArrayList<>();
List<String> fieldNames = typeInfo.getFieldNames();
List<TypeDescription> fieldTypes = typeInfo.getChildren();
for (int i = 0; i < fieldNames.size(); ++i) {
TypeDescription childTypeInfo = fieldTypes.get(i);
Schema.Field field = new Schema.Field(fieldNames.get(i),
wrapInUnionWithNull(createAvroSchema(childTypeInfo)),
childTypeInfo.toString(),
(Object) null);
childFields.add(field);
}
Schema recordSchema = Schema.createRecord("record_" + typeInfo.getId(),
typeInfo.toString(), null, false);
recordSchema.setFields(childFields);
return recordSchema;
}
private static Schema createAvroMap(TypeDescription typeInfo) {
TypeDescription keyTypeInfo = typeInfo.getChildren().get(0);
if (keyTypeInfo.getCategory() != TypeDescription.Category.STRING) {
throw new UnsupportedOperationException("Avro only supports maps with string keys "
+ typeInfo);
}
Schema valueSchema = wrapInUnionWithNull(createAvroSchema
(typeInfo.getChildren().get(1)));
return Schema.createMap(valueSchema);
}
private static Schema createAvroArray(TypeDescription typeInfo) {
Schema child = createAvroSchema(typeInfo.getChildren().get(0));
return Schema.createArray(wrapInUnionWithNull(child));
}
private static Schema wrapInUnionWithNull(Schema schema) {
Schema NULL = Schema.create(Schema.Type.NULL);
switch (schema.getType()) {
case NULL:
return schema;
case UNION:
List<Schema> kids = schema.getTypes();
List<Schema> newKids = new ArrayList<>(kids.size() + 1);
        newKids.add(NULL);
        newKids.addAll(kids);
        return Schema.createUnion(newKids);
default:
return Schema.createUnion(Arrays.asList(NULL, schema));
}
}
private static Schema getSchemaFor(String str) {
Schema.Parser parser = new Schema.Parser();
return parser.parse(str);
}
}
| 6,236 | 31.316062 | 89 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/avro/AvroWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.avro;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.convert.BatchWriter;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;
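/**
 * Writes ORC VectorizedRowBatches to an Avro data file, converting each
 * column with an AvroConverter derived from the ORC type and the generated
 * Avro schema.
 */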
public class AvroWriter implements BatchWriter {
public interface AvroConverter {
Object convert(ColumnVector vector, int row);
}
private static class BooleanConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
LongColumnVector vector = (LongColumnVector) cv;
return vector.vector[row] != 0;
} else {
return null;
}
}
}
private static class IntConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
LongColumnVector vector = (LongColumnVector) cv;
return (int) vector.vector[row];
} else {
return null;
}
}
}
private static class LongConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
LongColumnVector vector = (LongColumnVector) cv;
return vector.vector[row];
} else {
return null;
}
}
}
private static class FloatConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
DoubleColumnVector vector = (DoubleColumnVector) cv;
return (float) vector.vector[row];
} else {
return null;
}
}
}
private static class DoubleConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
DoubleColumnVector vector = (DoubleColumnVector) cv;
return vector.vector[row];
} else {
return null;
}
}
}
private static class StringConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
BytesColumnVector vector = (BytesColumnVector) cv;
return new String(vector.vector[row], vector.start[row],
vector.length[row], StandardCharsets.UTF_8);
} else {
return null;
}
}
}
private static class BinaryConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
BytesColumnVector vector = (BytesColumnVector) cv;
return ByteBuffer.wrap(vector.vector[row], vector.start[row],
vector.length[row]);
} else {
return null;
}
}
}
private static class TimestampConverter implements AvroConverter {
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
TimestampColumnVector vector = (TimestampColumnVector) cv;
return vector.time[row];
} else {
return null;
}
}
}
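  // Avro's decimal logical type stores the unscaled value as big-endian
  // two's-complement bytes; getBufferFromDecimal produces exactly that form.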
private static class DecimalConverter implements AvroConverter {
final int scale;
DecimalConverter(int scale) {
this.scale = scale;
}
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
DecimalColumnVector vector = (DecimalColumnVector) cv;
return getBufferFromDecimal(
vector.vector[row].getHiveDecimal(), scale);
} else {
return null;
}
}
}
private static class ListConverter implements AvroConverter {
final Schema avroSchema;
final AvroConverter childConverter;
ListConverter(TypeDescription schema, Schema avroSchema) {
this.avroSchema = avroSchema;
childConverter = createConverter(schema.getChildren().get(0),
removeNullable(avroSchema.getElementType()));
}
@SuppressWarnings("unchecked")
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
ListColumnVector vector = (ListColumnVector) cv;
int offset = (int) vector.offsets[row];
int length = (int) vector.lengths[row];
GenericData.Array result = new GenericData.Array(length, avroSchema);
for(int i=0; i < length; ++i) {
result.add(childConverter.convert(vector.child, offset + i));
}
return result;
} else {
return null;
}
}
}
private static class StructConverter implements AvroConverter {
final Schema avroSchema;
final AvroConverter[] childConverters;
StructConverter(TypeDescription schema, Schema avroSchema) {
this.avroSchema = avroSchema;
List<TypeDescription> childrenTypes = schema.getChildren();
childConverters = new AvroConverter[childrenTypes.size()];
List<Schema.Field> fields = avroSchema.getFields();
for(int f=0; f < childConverters.length; ++f) {
childConverters[f] = createConverter(childrenTypes.get(f),
removeNullable(fields.get(f).schema()));
}
}
public Object convert(ColumnVector cv, int row) {
if (cv.isRepeating) {
row = 0;
}
if (cv.noNulls || !cv.isNull[row]) {
StructColumnVector vector = (StructColumnVector) cv;
GenericData.Record result = new GenericData.Record(avroSchema);
for(int f=0; f < childConverters.length; ++f) {
result.put(f, childConverters[f].convert(vector.fields[f], row));
}
return result;
} else {
return null;
}
}
}
public static AvroConverter createConverter(TypeDescription types,
Schema avroSchema) {
switch (types.getCategory()) {
case BINARY:
return new BinaryConverter();
case BOOLEAN:
return new BooleanConverter();
case BYTE:
case SHORT:
case INT:
return new IntConverter();
case LONG:
return new LongConverter();
case FLOAT:
return new FloatConverter();
case DOUBLE:
return new DoubleConverter();
case CHAR:
case VARCHAR:
case STRING:
return new StringConverter();
case TIMESTAMP:
return new TimestampConverter();
case DECIMAL:
return new DecimalConverter(types.getScale());
case LIST:
return new ListConverter(types, avroSchema);
case STRUCT:
return new StructConverter(types, avroSchema);
default:
throw new IllegalArgumentException("Unhandled type " + types);
}
}
/**
* Remove the union(null, ...) wrapper around the schema.
*
   * All of the types in Hive are nullable and in Avro those are represented
   * by wrapping each type in a union with the null type; for example,
   * union(null, int) unwraps to int.
* @param avro The avro type
* @return The avro type with the nullable layer removed
*/
static Schema removeNullable(Schema avro) {
while (avro.getType() == Schema.Type.UNION) {
List<Schema> children = avro.getTypes();
if (children.size() == 2 &&
children.get(0).getType() == Schema.Type.NULL) {
avro = children.get(1);
} else {
break;
}
}
return avro;
}
private final AvroConverter[] converters;
private final DataFileWriter<GenericData.Record> writer;
private final GenericData.Record record;
public static AvroConverter[] buildConverters(TypeDescription orcType,
Schema avroSchema) {
List<TypeDescription> childTypes = orcType.getChildren();
List<Schema.Field> avroFields = avroSchema.getFields();
AvroConverter[] result = new AvroConverter[childTypes.size()];
for(int c=0; c < result.length; ++c) {
result[c] = createConverter(childTypes.get(c),
removeNullable(avroFields.get(c).schema()));
}
return result;
}
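  /**
   * Opens an Avro data file at the given path: derives the Avro schema from
   * the ORC schema, builds one converter per top-level column, and applies
   * the requested compression codec to the container file.
   */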
public AvroWriter(Path path, TypeDescription schema,
Configuration conf,
CompressionKind compression) throws IOException {
Schema avroSchema = AvroSchemaUtils.createAvroSchema(schema);
GenericDatumWriter<GenericData.Record> gdw = new GenericDatumWriter<>(avroSchema);
writer = new DataFileWriter<>(gdw);
converters = buildConverters(schema, avroSchema);
switch (compression) {
case NONE:
break;
case ZLIB:
writer.setCodec(CodecFactory.deflateCodec(-1));
break;
case SNAPPY:
writer.setCodec(CodecFactory.snappyCodec());
break;
case ZSTD:
writer.setCodec(CodecFactory.zstandardCodec(CodecFactory.DEFAULT_ZSTANDARD_LEVEL));
break;
default:
throw new IllegalArgumentException("Compression unsupported " + compression);
}
writer.create(avroSchema, path.getFileSystem(conf).create(path));
record = new GenericData.Record(avroSchema);
}
public void writeBatch(VectorizedRowBatch batch) throws IOException {
for(int r=0; r < batch.size; ++r) {
for(int f=0; f < batch.cols.length; ++f) {
record.put(f, converters[f].convert(batch.cols[f], r));
}
writer.append(record);
}
}
public void close() throws IOException {
writer.close();
}
static Buffer getBufferFromDecimal(HiveDecimal dec, int scale) {
if (dec == null) {
return null;
}
return ByteBuffer.wrap(dec.bigIntegerBytesScaled(scale));
}
}
| 11,760 | 31.046322 | 91 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/csv/CsvReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.csv;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.convert.BatchReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.Iterator;
import java.util.List;
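/**
 * Reads a (possibly compressed) CSV file with a header row into
 * VectorizedRowBatch instances, using one ColumnReader per column of the
 * given ORC schema. Empty fields are read as nulls.
 */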
public class CsvReader implements BatchReader {
private final Iterator<CSVRecord> parser;
private final ColumnReader[] readers;
interface ColumnReader {
void read(String value, ColumnVector vect, int row);
}
static class LongColumnReader implements ColumnReader {
public void read(String value, ColumnVector vect, int row) {
if ("".equals(value)) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
LongColumnVector vector = (LongColumnVector) vect;
vector.vector[row] = Long.parseLong(value);
}
}
}
static class DoubleColumnReader implements ColumnReader {
public void read(String value, ColumnVector vect, int row) {
if ("".equals(value)) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DoubleColumnVector vector = (DoubleColumnVector) vect;
vector.vector[row] = Double.parseDouble(value);
}
}
}
static class StringColumnReader implements ColumnReader {
public void read(String value, ColumnVector vect, int row) {
if ("".equals(value)) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
BytesColumnVector vector = (BytesColumnVector) vect;
byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
vector.setRef(row, bytes, 0, bytes.length);
}
}
}
static class TimestampColumnReader implements ColumnReader {
public void read(String value, ColumnVector vect, int row) {
if ("".equals(value)) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
TimestampColumnVector vector = (TimestampColumnVector) vect;
vector.set(row, Timestamp.valueOf(value));
}
}
}
static class DecimalColumnReader implements ColumnReader {
public void read(String value, ColumnVector vect, int row) {
if ("".equals(value)) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DecimalColumnVector vector = (DecimalColumnVector) vect;
vector.vector[row].set(HiveDecimal.create(value));
}
}
}
ColumnReader createReader(TypeDescription schema) {
switch (schema.getCategory()) {
case BYTE:
case SHORT:
case INT:
case LONG:
return new LongColumnReader();
case FLOAT:
case DOUBLE:
return new DoubleColumnReader();
case CHAR:
case VARCHAR:
case STRING:
return new StringColumnReader();
case DECIMAL:
return new DecimalColumnReader();
case TIMESTAMP:
return new TimestampColumnReader();
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
public CsvReader(Path path,
TypeDescription schema,
Configuration conf,
CompressionKind compress) throws IOException {
FileSystem fs = path.getFileSystem(conf);
InputStream input = compress.read(fs.open(path));
parser = new CSVParser(new InputStreamReader(input, StandardCharsets.UTF_8),
CSVFormat.RFC4180.withHeader()).iterator();
List<TypeDescription> columnTypes = schema.getChildren();
readers = new ColumnReader[columnTypes.size()];
int c = 0;
for(TypeDescription columnType: columnTypes) {
readers[c++] = createReader(columnType);
}
}
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
batch.reset();
int maxSize = batch.getMaxSize();
while (parser.hasNext() && batch.size < maxSize) {
CSVRecord record = parser.next();
int c = 0;
for(String val: record) {
readers[c].read(val, batch.cols[c], batch.size);
c += 1;
}
batch.size++;
}
return batch.size != 0;
}
public void close() {
// PASS
}
}
| 5,793 | 32.491329 | 80 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/json/JsonReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.json;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonStreamParser;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.convert.BatchReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.List;
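/**
 * Reads a (possibly compressed) stream of JSON objects into
 * VectorizedRowBatch instances, matching top-level fields by name against the
 * ORC struct schema; missing fields become nulls.
 */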
public class JsonReader implements BatchReader {
private final TypeDescription schema;
private final JsonStreamParser parser;
private final JsonConverter[] converters;
public JsonReader(Path path,
TypeDescription schema,
Configuration conf,
CompressionKind compressionKind) throws IOException {
this.schema = schema;
FileSystem fs = path.getFileSystem(conf);
InputStream input = compressionKind.read(fs.open(path));
parser = new JsonStreamParser(new InputStreamReader(input,
StandardCharsets.UTF_8));
if (schema.getCategory() != TypeDescription.Category.STRUCT) {
throw new IllegalArgumentException("Root must be struct - " + schema);
}
List<TypeDescription> fieldTypes = schema.getChildren();
converters = new JsonConverter[fieldTypes.size()];
for(int c = 0; c < converters.length; ++c) {
converters[c] = createConverter(fieldTypes.get(c));
}
}
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
batch.reset();
int maxSize = batch.getMaxSize();
List<String> fieldNames = schema.getFieldNames();
while (parser.hasNext() && batch.size < maxSize) {
JsonObject elem = parser.next().getAsJsonObject();
for(int c=0; c < converters.length; ++c) {
// look up each field to see if it is in the input, otherwise
// set it to null.
JsonElement field = elem.get(fieldNames.get(c));
if (field == null) {
batch.cols[c].noNulls = false;
batch.cols[c].isNull[batch.size] = true;
} else {
converters[c].convert(field, batch.cols[c], batch.size);
}
}
batch.size++;
}
return batch.size != 0;
}
public void close() {
// PASS
}
interface JsonConverter {
void convert(JsonElement value, ColumnVector vect, int row);
}
static class BooleanColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
LongColumnVector vector = (LongColumnVector) vect;
vector.vector[row] = value.getAsBoolean() ? 1 : 0;
}
}
}
static class LongColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
LongColumnVector vector = (LongColumnVector) vect;
vector.vector[row] = value.getAsLong();
}
}
}
static class DoubleColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DoubleColumnVector vector = (DoubleColumnVector) vect;
vector.vector[row] = value.getAsDouble();
}
}
}
static class StringColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
BytesColumnVector vector = (BytesColumnVector) vect;
byte[] bytes = value.getAsString().getBytes(StandardCharsets.UTF_8);
vector.setRef(row, bytes, 0, bytes.length);
}
}
}
static class BinaryColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
BytesColumnVector vector = (BytesColumnVector) vect;
String binStr = value.getAsString();
byte[] bytes = new byte[binStr.length()/2];
for(int i=0; i < bytes.length; ++i) {
bytes[i] = (byte) Integer.parseInt(binStr.substring(i*2, i*2+2), 16);
}
vector.setRef(row, bytes, 0, bytes.length);
}
}
}
static class TimestampColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
TimestampColumnVector vector = (TimestampColumnVector) vect;
vector.set(row, Timestamp.valueOf(value.getAsString()
.replaceAll("[TZ]", " ")));
}
}
}
static class DecimalColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DecimalColumnVector vector = (DecimalColumnVector) vect;
vector.vector[row].set(HiveDecimal.create(value.getAsString()));
}
}
}
static class StructColumnConverter implements JsonConverter {
private JsonConverter[] childrenConverters;
private List<String> fieldNames;
StructColumnConverter(TypeDescription schema) {
List<TypeDescription> kids = schema.getChildren();
childrenConverters = new JsonConverter[kids.size()];
for(int c=0; c < childrenConverters.length; ++c) {
childrenConverters[c] = createConverter(kids.get(c));
}
fieldNames = schema.getFieldNames();
}
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
StructColumnVector vector = (StructColumnVector) vect;
JsonObject obj = value.getAsJsonObject();
for(int c=0; c < childrenConverters.length; ++c) {
JsonElement elem = obj.get(fieldNames.get(c));
childrenConverters[c].convert(elem, vector.fields[c], row);
}
}
}
}
static class ListColumnConverter implements JsonConverter {
private JsonConverter childrenConverter;
ListColumnConverter(TypeDescription schema) {
childrenConverter = createConverter(schema.getChildren().get(0));
}
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
ListColumnVector vector = (ListColumnVector) vect;
JsonArray obj = value.getAsJsonArray();
vector.lengths[row] = obj.size();
vector.offsets[row] = vector.childCount;
vector.childCount += vector.lengths[row];
vector.child.ensureSize(vector.childCount, true);
for(int c=0; c < obj.size(); ++c) {
childrenConverter.convert(obj.get(c), vector.child,
(int) vector.offsets[row] + c);
}
}
}
}
static JsonConverter createConverter(TypeDescription schema) {
switch (schema.getCategory()) {
case BYTE:
case SHORT:
case INT:
case LONG:
return new LongColumnConverter();
case FLOAT:
case DOUBLE:
return new DoubleColumnConverter();
case CHAR:
case VARCHAR:
case STRING:
return new StringColumnConverter();
case DECIMAL:
return new DecimalColumnConverter();
case TIMESTAMP:
return new TimestampColumnConverter();
case BINARY:
return new BinaryColumnConverter();
case BOOLEAN:
return new BooleanColumnConverter();
case STRUCT:
return new StructColumnConverter(schema);
case LIST:
return new ListColumnConverter(schema);
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
}
| 9,980 | 34.902878 | 79 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/json/JsonWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.json;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.convert.BatchWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.List;
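/**
 * Writes each row of a VectorizedRowBatch as one JSON object per line,
 * optionally compressing the output stream.
 */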
public class JsonWriter implements BatchWriter {
private final Writer outStream;
private final com.google.gson.stream.JsonWriter writer;
private final TypeDescription schema;
public JsonWriter(Path path, TypeDescription schema,
Configuration conf,
CompressionKind compression) throws IOException {
OutputStream file = path.getFileSystem(conf).create(path, true);
outStream = new OutputStreamWriter(compression.create(file),
StandardCharsets.UTF_8);
writer = new com.google.gson.stream.JsonWriter(outStream);
writer.setLenient(true);
this.schema = schema;
}
private static void printMap(com.google.gson.stream.JsonWriter writer,
MapColumnVector vector,
TypeDescription schema,
int row) throws IOException {
writer.beginArray();
TypeDescription keyType = schema.getChildren().get(0);
TypeDescription valueType = schema.getChildren().get(1);
int offset = (int) vector.offsets[row];
for (int i = 0; i < vector.lengths[row]; ++i) {
writer.beginObject();
writer.name("_key");
printValue(writer, vector.keys, keyType, offset + i);
writer.name("_value");
printValue(writer, vector.values, valueType, offset + i);
writer.endObject();
}
writer.endArray();
}
private static void printList(com.google.gson.stream.JsonWriter writer,
ListColumnVector vector,
TypeDescription schema,
int row) throws IOException {
writer.beginArray();
int offset = (int) vector.offsets[row];
TypeDescription childType = schema.getChildren().get(0);
for (int i = 0; i < vector.lengths[row]; ++i) {
printValue(writer, vector.child, childType, offset + i);
}
writer.endArray();
}
private static void printUnion(com.google.gson.stream.JsonWriter writer,
UnionColumnVector vector,
TypeDescription schema,
int row) throws IOException {
int tag = vector.tags[row];
printValue(writer, vector.fields[tag], schema.getChildren().get(tag), row);
}
static void printStruct(com.google.gson.stream.JsonWriter writer,
StructColumnVector batch,
TypeDescription schema,
int row) throws IOException {
writer.beginObject();
List<String> fieldNames = schema.getFieldNames();
List<TypeDescription> fieldTypes = schema.getChildren();
for (int i = 0; i < fieldTypes.size(); ++i) {
writer.name(fieldNames.get(i));
printValue(writer, batch.fields[i], fieldTypes.get(i), row);
}
writer.endObject();
}
static void printBinary(com.google.gson.stream.JsonWriter writer, BytesColumnVector vector,
int row) throws IOException {
StringBuilder buffer = new StringBuilder();
int offset = vector.start[row];
for(int i=0; i < vector.length[row]; ++i) {
int value = 0xff & (int) vector.vector[row][offset + i];
buffer.append(String.format("%02x", value));
}
writer.value(buffer.toString());
}
static void printValue(com.google.gson.stream.JsonWriter writer, ColumnVector vector,
TypeDescription schema, int row) throws IOException {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
switch (schema.getCategory()) {
case BOOLEAN:
writer.value(((LongColumnVector) vector).vector[row] != 0);
break;
case BYTE:
case SHORT:
case INT:
case LONG:
writer.value(((LongColumnVector) vector).vector[row]);
break;
case FLOAT:
case DOUBLE:
writer.value(((DoubleColumnVector) vector).vector[row]);
break;
case STRING:
case CHAR:
case VARCHAR:
writer.value(((BytesColumnVector) vector).toString(row));
break;
case BINARY:
printBinary(writer, (BytesColumnVector) vector, row);
break;
case DECIMAL:
writer.value(((DecimalColumnVector) vector).vector[row].toString());
break;
case DATE:
writer.value(new DateWritable(
(int) ((LongColumnVector) vector).vector[row]).toString());
break;
case TIMESTAMP:
writer.value(((TimestampColumnVector) vector)
.asScratchTimestamp(row).toString());
break;
case LIST:
printList(writer, (ListColumnVector) vector, schema, row);
break;
case MAP:
printMap(writer, (MapColumnVector) vector, schema, row);
break;
case STRUCT:
printStruct(writer, (StructColumnVector) vector, schema, row);
break;
case UNION:
printUnion(writer, (UnionColumnVector) vector, schema, row);
break;
default:
throw new IllegalArgumentException("Unknown type " + schema);
}
} else {
writer.nullValue();
}
}
static void printRow(com.google.gson.stream.JsonWriter writer,
VectorizedRowBatch batch,
TypeDescription schema,
int row) throws IOException {
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
List<TypeDescription> fieldTypes = schema.getChildren();
List<String> fieldNames = schema.getFieldNames();
writer.beginObject();
for (int c = 0; c < batch.cols.length; ++c) {
writer.name(fieldNames.get(c));
printValue(writer, batch.cols[c], fieldTypes.get(c), row);
}
writer.endObject();
} else {
printValue(writer, batch.cols[0], schema, row);
}
}
public void writeBatch(VectorizedRowBatch batch) throws IOException {
for (int r = 0; r < batch.size; ++r) {
printRow(writer, batch, schema, r);
outStream.write("\n");
}
}
public void close() throws IOException {
writer.close();
}
}
| 8,210 | 37.190698 | 93 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/orc/OrcReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.convert.BatchReader;
import java.io.IOException;
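/**
 * Thin wrapper around the ORC RecordReader that reads batches for the
 * requested schema.
 */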
public class OrcReader implements BatchReader {
private final RecordReader reader;
public OrcReader(Path path,
TypeDescription schema,
Configuration conf
) throws IOException {
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
reader = file.rows(file.options().schema(schema));
}
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
return reader.nextBatch(batch);
}
public void close() throws IOException {
reader.close();
}
}
| 1,777 | 33.862745 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/orc/OrcWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.Utilities;
import org.apache.orc.bench.core.convert.BatchWriter;
import java.io.IOException;
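/**
 * Thin wrapper around the ORC Writer that writes batches using the requested
 * schema and compression codec.
 */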
public class OrcWriter implements BatchWriter {
private final Writer writer;
public OrcWriter(Path path,
TypeDescription schema,
Configuration conf,
CompressionKind compression
) throws IOException {
writer = OrcFile.createWriter(path,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(Utilities.getCodec(compression)));
}
public void writeBatch(VectorizedRowBatch batch) throws IOException {
writer.addRowBatch(batch);
}
public void close() throws IOException {
writer.close();
}
}
| 1,904 | 33.636364 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/parquet/ParquetReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.parquet;
import org.apache.avro.generic.GenericData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.convert.BatchReader;
import org.apache.orc.bench.core.convert.avro.AvroReader;
import org.apache.parquet.avro.AvroParquetReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import java.io.IOException;
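/**
 * Reads a Parquet file through parquet-avro and converts each Avro record
 * into a VectorizedRowBatch row using the shared AvroReader converters.
 */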
public class ParquetReader implements BatchReader {
private final org.apache.parquet.hadoop.ParquetReader<GenericData.Record>
reader;
private final AvroReader.AvroConverter[] converters;
public ParquetReader(Path path,
TypeDescription schema,
Configuration conf) throws IOException {
HadoopInputFile inputFile = HadoopInputFile.fromPath(path, conf);
reader = AvroParquetReader.<GenericData.Record>builder(inputFile)
.withCompatibility(true).build();
converters = AvroReader.buildConverters(schema);
}
@Override
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
batch.reset();
int maxSize = batch.getMaxSize();
while (batch.size < maxSize) {
GenericData.Record value = reader.read();
if (value == null) {
break;
}
int row = batch.size++;
for(int c=0; c < converters.length; ++c) {
converters[c].convert(batch.cols[c], row, value.get(c));
}
}
return batch.size != 0;
}
@Override
public void close() throws IOException {
reader.close();
}
}
| 2,464 | 34.724638 | 75 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/convert/parquet/ParquetWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.convert.parquet;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.convert.BatchWriter;
import org.apache.orc.bench.core.convert.avro.AvroSchemaUtils;
import org.apache.orc.bench.core.convert.avro.AvroWriter;
import org.apache.parquet.avro.AvroParquetWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.hadoop.util.HadoopOutputFile;
import java.io.IOException;
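/**
 * Writes VectorizedRowBatch data to Parquet through parquet-avro, reusing the
 * AvroWriter converters and mapping the benchmark compression kinds to
 * Parquet codecs.
 */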
public class ParquetWriter implements BatchWriter {
private final org.apache.parquet.hadoop.ParquetWriter<GenericData.Record>
writer;
private final AvroWriter.AvroConverter[] converters;
private final GenericData.Record record;
static CompressionCodecName getParquetCompression(CompressionKind kind) {
switch (kind) {
case NONE:
return CompressionCodecName.UNCOMPRESSED;
case ZLIB:
return CompressionCodecName.GZIP;
case SNAPPY:
return CompressionCodecName.SNAPPY;
case ZSTD:
return CompressionCodecName.ZSTD;
default:
throw new IllegalArgumentException("Unhandled compression type " + kind);
}
}
public ParquetWriter(Path path,
TypeDescription schema,
Configuration conf,
CompressionKind compression
) throws IOException {
Schema avroSchema = AvroSchemaUtils.createAvroSchema(schema);
HadoopOutputFile outputFile = HadoopOutputFile.fromPath(path, conf);
writer = AvroParquetWriter
.<GenericData.Record>builder(outputFile)
.withSchema(avroSchema)
.withConf(conf)
.withCompressionCodec(getParquetCompression(compression))
.build();
converters = AvroWriter.buildConverters(schema, avroSchema);
record = new GenericData.Record(avroSchema);
}
public void writeBatch(VectorizedRowBatch batch) throws IOException {
for(int r=0; r < batch.size; ++r) {
for(int f=0; f < batch.cols.length; ++f) {
record.put(f, converters[f].convert(batch.cols[f], r));
}
writer.write(record);
}
}
public void close() throws IOException {
writer.close();
}
}
| 3,275 | 36.227273 | 81 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/filter/FilterBench.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.filter;
import com.google.auto.service.AutoService;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.Reader;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.apache.orc.impl.filter.FilterFactory;
import org.apache.orc.impl.filter.RowFilterFactory;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import static org.apache.orc.bench.core.BenchmarkOptions.FORK;
import static org.apache.orc.bench.core.BenchmarkOptions.GC;
import static org.apache.orc.bench.core.BenchmarkOptions.HELP;
import static org.apache.orc.bench.core.BenchmarkOptions.ITERATIONS;
import static org.apache.orc.bench.core.BenchmarkOptions.MAX_MEMORY;
import static org.apache.orc.bench.core.BenchmarkOptions.MIN_MEMORY;
import static org.apache.orc.bench.core.BenchmarkOptions.TIME;
import static org.apache.orc.bench.core.BenchmarkOptions.WARMUP_ITERATIONS;
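/**
 * JMH benchmark comparing row-level and vectorized filter evaluation over
 * simple and complex search arguments.
 */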
@AutoService(OrcBenchmark.class)
public class FilterBench implements OrcBenchmark {
@Override
public String getName() {
return "filter";
}
@Override
public String getDescription() {
return "Perform filter bench";
}
@Override
public void run(String[] args) throws Exception {
new Runner(parseOptions(args)).run();
}
public static CommandLine parseCommandLine(String[] args, boolean needsArgs) {
org.apache.commons.cli.Options options = new org.apache.commons.cli.Options()
.addOption("h", HELP, false, "Provide help")
.addOption("i", ITERATIONS, true, "Number of iterations")
.addOption("I", WARMUP_ITERATIONS, true, "Number of warmup iterations")
.addOption("f", FORK, true, "How many forks to use")
.addOption("t", TIME, true, "How long each iteration is in seconds")
.addOption("m", MIN_MEMORY, true, "The minimum size of each JVM")
.addOption("M", MAX_MEMORY, true, "The maximum size of each JVM")
.addOption("g", GC, false, "Should GC be profiled");
CommandLine result;
try {
result = new DefaultParser().parse(options, args, true);
} catch (ParseException pe) {
System.err.println("Argument exception - " + pe.getMessage());
result = null;
}
if (result == null || result.hasOption(HELP) || (needsArgs && result.getArgs().length == 0)) {
new HelpFormatter().printHelp("java -jar <jar> <command> <options> <sub_cmd>\n"
+ "sub_cmd:\nsimple\ncomplex\n",
options);
System.err.println();
System.exit(1);
}
return result;
}
public static OptionsBuilder optionsBuilder(CommandLine options) {
OptionsBuilder builder = new OptionsBuilder();
if (options.hasOption(GC)) {
builder.addProfiler("hs_gc");
}
if (options.hasOption(ITERATIONS)) {
builder.measurementIterations(Integer.parseInt(options.getOptionValue(ITERATIONS)));
}
if (options.hasOption(WARMUP_ITERATIONS)) {
builder.warmupIterations(Integer.parseInt(options.getOptionValue(
WARMUP_ITERATIONS)));
}
if (options.hasOption(FORK)) {
builder.forks(Integer.parseInt(options.getOptionValue(
FORK)));
}
if (options.hasOption(TIME)) {
TimeValue iterationTime = TimeValue.seconds(Long.parseLong(
options.getOptionValue(TIME)));
builder.measurementTime(iterationTime);
builder.warmupTime(iterationTime);
}
String minMemory = options.getOptionValue(MIN_MEMORY, "256m");
String maxMemory = options.getOptionValue(MAX_MEMORY, "2g");
builder.jvmArgs("-server",
"-Xms" + minMemory, "-Xmx" + maxMemory);
return builder;
}
public static Options parseOptions(String[] args) {
CommandLine options = parseCommandLine(args, true);
OptionsBuilder builder = optionsBuilder(options);
String cmd = options.getArgs()[0];
switch (cmd) {
case "simple":
builder.include(SimpleFilter.class.getSimpleName());
break;
case "complex":
builder.include(ComplexFilter.class.getSimpleName());
break;
default:
throw new UnsupportedOperationException(String.format("Command %s is not supported", cmd));
}
return builder.build();
}
private static Consumer<OrcFilterContext> createFilter(SearchArgument sArg,
String fType,
boolean normalize,
Configuration conf)
throws FilterFactory.UnSupportedSArgException {
switch (fType) {
case "row":
return RowFilterFactory.create(sArg,
FilterBenchUtil.schema,
OrcFile.Version.CURRENT,
normalize);
case "vector":
Reader.Options options = new Reader.Options(conf)
.searchArgument(sArg, new String[0])
.allowSARGToFilter(true)
.useSelected(true);
return FilterFactory.createBatchFilter(options,
FilterBenchUtil.schema,
false,
OrcFile.Version.CURRENT,
normalize,
null,
null);
default:
throw new IllegalArgumentException();
}
}
@OutputTimeUnit(value = TimeUnit.MICROSECONDS)
@Warmup(iterations = 20, time = 1)
@BenchmarkMode(value = Mode.AverageTime)
@Fork(value = 1)
@State(value = Scope.Benchmark)
@Measurement(iterations = 20, time = 1)
public static class SimpleFilter {
private OrcFilterContext fc;
private int[] expSel;
@Param( {"4", "8", "16", "32", "256"})
private int fInSize;
@Param( {"row", "vector"})
private String fType;
private Consumer<OrcFilterContext> f;
@Setup
public void setup() throws FilterFactory.UnSupportedSArgException {
Random rnd = new Random(1024);
VectorizedRowBatch b = FilterBenchUtil.createBatch(rnd);
Configuration conf = new Configuration();
fc = new OrcFilterContextImpl(FilterBenchUtil.schema, false).setBatch(b);
Map.Entry<SearchArgument, int[]> r = FilterBenchUtil.createSArg(rnd, b, fInSize);
SearchArgument sArg = r.getKey();
expSel = r.getValue();
f = createFilter(sArg, fType, false, conf);
}
@Benchmark
public OrcFilterContext filter() {
// Reset the selection
FilterBenchUtil.unFilterBatch(fc);
f.accept(fc);
return fc;
}
@TearDown
public void tearDown() {
FilterBenchUtil.validate(fc, expSel);
}
}
@OutputTimeUnit(value = TimeUnit.MICROSECONDS)
@Warmup(iterations = 20, time = 1)
@BenchmarkMode(value = Mode.AverageTime)
@Fork(value = 1)
@State(value = Scope.Benchmark)
@Measurement(iterations = 20, time = 1)
public static class ComplexFilter {
private OrcFilterContext fc;
private int[] expSel;
private final int inSize = 32;
@Param( {"2", "4", "8"})
private int fSize;
@Param( {"true", "false"})
private boolean normalize;
@Param( {"row", "vector"})
private String fType;
private Consumer<OrcFilterContext> f;
private final Configuration conf = new Configuration();
@Setup
public void setup() throws FilterFactory.UnSupportedSArgException {
VectorizedRowBatch b = FilterBenchUtil.createBatch(new Random(1024));
fc = new OrcFilterContextImpl(FilterBenchUtil.schema, false).setBatch(b);
Map.Entry<SearchArgument, int[]> r = FilterBenchUtil.createComplexSArg(new Random(1024),
b,
inSize,
fSize);
SearchArgument sArg = r.getKey();
expSel = r.getValue();
f = createFilter(sArg, fType, normalize, conf);
}
@Benchmark
public OrcFilterContext filter() {
// Reset the selection
FilterBenchUtil.unFilterBatch(fc);
f.accept(fc);
return fc;
}
@TearDown
public void tearDown() {
FilterBenchUtil.validate(fc, expSel);
}
}
}
| 10,453 | 35.939929 | 99 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/filter/FilterBenchUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.filter;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.TypeDescription;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
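/**
 * Helpers for the filter benchmarks: builds a two-column batch of random
 * longs, generates search arguments with a known matching selection, and
 * validates the selection produced by a filter.
 */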
class FilterBenchUtil {
static final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createLong());
static VectorizedRowBatch createBatch(Random rnd) {
VectorizedRowBatch b = schema.createRowBatch(1024);
LongColumnVector f1Vector = (LongColumnVector) b.cols[0];
LongColumnVector f2Vector = (LongColumnVector) b.cols[1];
for (int i = 0; i < b.getMaxSize(); i++) {
f1Vector.vector[b.size] = rnd.nextInt();
f2Vector.vector[b.size] = rnd.nextInt();
b.size++;
}
return b;
}
static Map.Entry<SearchArgument, int[]> createSArg(Random rnd,
VectorizedRowBatch b,
int inSize) {
LongColumnVector f1Vector = (LongColumnVector) b.cols[0];
LongColumnVector f2Vector = (LongColumnVector) b.cols[1];
Object[] f1Values = new Object[inSize];
Object[] f2Values = new Object[inSize];
Set<Integer> sel = new HashSet<>();
for (int i = 0; i < f1Values.length; i++) {
int selIdx = rnd.nextInt(b.getMaxSize());
f1Values[i] = f1Vector.vector[selIdx];
sel.add(selIdx);
selIdx = rnd.nextInt(b.getMaxSize());
f2Values[i] = f2Vector.vector[selIdx];
sel.add(selIdx);
}
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.in("f1", PredicateLeaf.Type.LONG, f1Values)
.in("f2", PredicateLeaf.Type.LONG, f2Values)
.end()
.build();
int[] s = sel.stream()
.mapToInt(Integer::intValue)
.toArray();
Arrays.sort(s);
return new AbstractMap.SimpleImmutableEntry<>(sArg, s);
}
static Map.Entry<SearchArgument, int[]> createComplexSArg(Random rnd,
VectorizedRowBatch b,
int inSize,
int orSize) {
LongColumnVector f1Vector = (LongColumnVector) b.cols[0];
LongColumnVector f2Vector = (LongColumnVector) b.cols[1];
Object[] f1Values = new Object[inSize];
Object[] f2Values = new Object[inSize];
Set<Integer> sel = new HashSet<>();
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
builder.startOr();
builder.in("f2", PredicateLeaf.Type.LONG, f2Vector.vector[0], f2Vector.vector[1]);
sel.add(0);
sel.add(1);
int selIdx;
for (int i = 0; i < orSize; i++) {
builder.startAnd();
for (int j = 0; j < inSize; j++) {
selIdx = rnd.nextInt(b.getMaxSize());
f1Values[j] = f1Vector.vector[selIdx];
f2Values[j] = f2Vector.vector[selIdx];
sel.add(selIdx);
}
builder
.in("f1", PredicateLeaf.Type.LONG, f1Values)
.in("f2", PredicateLeaf.Type.LONG, f2Values);
builder.end();
}
builder.end();
int[] s = sel.stream()
.mapToInt(Integer::intValue)
.toArray();
Arrays.sort(s);
return new AbstractMap.SimpleImmutableEntry<>(builder.build(), s);
}
static void unFilterBatch(OrcFilterContext fc) {
fc.setSelectedInUse(false);
fc.setSelectedSize(1024);
}
static void validate(OrcFilterContext fc, int[] expSel) {
if (!fc.isSelectedInUse()) {
throw new IllegalArgumentException("Validation failed: selected is not set");
}
if (expSel.length != fc.getSelectedSize()) {
throw new IllegalArgumentException(String.format(
"Validation failed: length %s is not equal to expected length %s",
fc.getSelectedSize(), expSel.length));
}
if (!Arrays.equals(expSel, Arrays.copyOf(fc.getSelected(), expSel.length))) {
throw new IllegalArgumentException("Validation failed: array values are not the same");
}
}
}
| 5,265 | 35.825175 | 93 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/impl/ChunkReadBench.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.impl;
import com.google.auto.service.AutoService;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.filter.FilterBench;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
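/**
 * JMH benchmark measuring how the chunked-read settings (minimum seek size
 * and extra byte tolerance) affect reading a wide ORC file, optionally
 * projecting only alternate columns.
 */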
@OutputTimeUnit(value = TimeUnit.SECONDS)
@Warmup(iterations = 20, time = 1)
@BenchmarkMode(value = Mode.AverageTime)
@Fork(value = 1)
@State(value = Scope.Benchmark)
@Measurement(iterations = 20, time = 1)
@AutoService(OrcBenchmark.class)
public class ChunkReadBench implements OrcBenchmark {
@Override
public String getName() {
return "chunk_read";
}
@Override
public String getDescription() {
return "Perform chunk read bench";
}
@Override
public void run(String[] args) throws Exception {
CommandLine options = FilterBench.parseCommandLine(args, false);
OptionsBuilder builder = FilterBench.optionsBuilder(options);
builder.include(getClass().getSimpleName());
new Runner(builder.build()).run();
}
private final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private final Path filePath = new Path(workDir, "perf_chunk_read_file.orc");
private final Configuration conf = new Configuration();
@Param( {"128"})
private int colCount;
@Param( {"65536"})
private int rowCount;
@Param( {"true", "false"})
private boolean alternate;
@Param( {"0", "4194304"})
private int minSeekSize;
@Param( {"0.0", "10.0"})
private double extraByteTolerance;
private long readRows = 0;
@Setup
public void setup() throws IOException {
if (minSeekSize == 0 && extraByteTolerance > 0) {
throw new IllegalArgumentException("Ignore extraByteTolerance variations with seekSize is"
+ " 0");
}
FileSystem fs = FileSystem.get(conf);
if (!fs.exists(filePath)) {
ChunkReadUtil.createORCFile(colCount, rowCount, filePath);
}
ChunkReadUtil.setConf(conf, minSeekSize, extraByteTolerance);
}
@Benchmark
public long read() throws IOException {
readRows = ChunkReadUtil.readORCFile(filePath, conf, alternate);
return readRows;
}
@TearDown
public void tearDown() {
if (readRows != rowCount) {
throw new IllegalArgumentException(String.format(
"readRows %d is not equal to expected rows %d", readRows, rowCount));
}
}
}
| 4,150 | 33.02459 | 96 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/bench/core/impl/ChunkReadUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
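/**
 * Helpers for the chunk-read benchmark: creates a wide ORC file of long,
 * decimal, and string columns and reads it back, optionally including only
 * alternate columns.
 */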
public class ChunkReadUtil {
private static final int SCALE = 6;
static void setConf(Configuration conf, int minSeekSize, double extraByteTolerance) {
OrcConf.ORC_MIN_DISK_SEEK_SIZE.setInt(conf, minSeekSize);
OrcConf.ORC_MIN_DISK_SEEK_SIZE_TOLERANCE.setDouble(conf, extraByteTolerance);
}
static long readORCFile(Path file, Configuration conf, boolean alternate)
throws IOException {
Reader r = OrcFile.createReader(file, OrcFile.readerOptions(conf));
long rowCount = 0;
VectorizedRowBatch batch = r.getSchema().createRowBatch();
Reader.Options o = r.options();
if (alternate) {
o.include(includeAlternate(r.getSchema()));
}
RecordReader rr = r.rows(o);
while (rr.nextBatch(batch)) {
rowCount += batch.size;
}
return rowCount;
}
private static boolean[] includeAlternate(TypeDescription schema) {
boolean[] includes = new boolean[schema.getMaximumId() + 1];
for (int i = 1; i < includes.length; i += 2) {
includes[i] = true;
}
includes[0] = true;
return includes;
}
static long createORCFile(int colCount, int rowCount, Path file) throws IOException {
TypeDescription schema = createSchema(colCount);
return writeFile(schema, rowCount, file);
}
private static long writeFile(TypeDescription schema, int rowCount, Path path)
throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
try (Writer writer = OrcFile.createWriter(path,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.overwrite(true)
.rowIndexStride(8192)
.setSchema(schema)
.overwrite(true))) {
Random rnd = new Random(1024);
VectorizedRowBatch b = schema.createRowBatch();
for (int rowIdx = 0; rowIdx < rowCount; rowIdx++) {
((LongColumnVector) b.cols[0]).vector[b.size] = rowIdx;
long v = rnd.nextLong();
for (int colIdx = 1; colIdx < schema.getChildren().size() - 1; colIdx++) {
switch (schema.getChildren().get(colIdx).getCategory()) {
case LONG:
((LongColumnVector) b.cols[colIdx]).vector[b.size] = v;
break;
case DECIMAL:
HiveDecimalWritable d = new HiveDecimalWritable();
d.setFromLongAndScale(v, SCALE);
((DecimalColumnVector) b.cols[colIdx]).vector[b.size] = d;
break;
case STRING:
((BytesColumnVector) b.cols[colIdx]).setVal(b.size,
String.valueOf(v)
.getBytes(StandardCharsets.UTF_8));
break;
default:
throw new IllegalArgumentException();
}
}
b.size += 1;
if (b.size == b.getMaxSize()) {
writer.addRowBatch(b);
b.reset();
}
}
if (b.size > 0) {
writer.addRowBatch(b);
b.reset();
}
}
return fs.getFileStatus(path).getLen();
}
private static TypeDescription createSchema(int colCount) {
TypeDescription schema = TypeDescription.createStruct()
.addField("id", TypeDescription.createLong());
for (int i = 1; i <= colCount; i++) {
TypeDescription fieldType;
switch (i % 3) {
case 0:
fieldType = TypeDescription.createString();
break;
case 1:
fieldType = TypeDescription.createDecimal().withPrecision(20).withScale(SCALE);
break;
default:
fieldType = TypeDescription.createLong();
break;
}
schema.addField("f_" + i, fieldType);
}
return schema;
}
}
| 5,548 | 36.241611 | 95 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/impl/filter/RowFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.OrcFilterContext;
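/**
 * Row-at-a-time filter evaluated once per row index; the leaf, AND, and OR
 * nodes mirror the vectorized filter tree used elsewhere in the benchmark.
 */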
interface RowFilter {
boolean accept(OrcFilterContext batch, int rowIdx);
class LeafFilter extends org.apache.orc.impl.filter.LeafFilter implements RowFilter {
final org.apache.orc.impl.filter.LeafFilter filter;
LeafFilter(org.apache.orc.impl.filter.LeafFilter filter) {
super(filter.getColName(), false);
this.filter = filter;
}
@Override
public boolean accept(OrcFilterContext batch, int rowIdx) {
ColumnVector[] branch = batch.findColumnVector(filter.getColName());
ColumnVector v = branch[branch.length - 1];
boolean noNulls = OrcFilterContext.noNulls(branch);
int idx = rowIdx;
if (v.isRepeating) {
idx = 0;
}
if (noNulls || !OrcFilterContext.isNull(branch, idx)) {
return allow(v, idx);
} else {
return false;
}
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
      return filter.allow(v, rowIdx);
}
}
class OrFilter implements RowFilter {
final RowFilter[] filters;
OrFilter(RowFilter[] filters) {
this.filters = filters;
}
@Override
public boolean accept(OrcFilterContext batch, int rowIdx) {
boolean result = true;
for (RowFilter filter : filters) {
result = filter.accept(batch, rowIdx);
if (result) {
break;
}
}
return result;
}
}
class AndFilter implements RowFilter {
final RowFilter[] filters;
AndFilter(RowFilter[] filters) {
this.filters = filters;
}
@Override
public boolean accept(OrcFilterContext batch, int rowIdx) {
boolean result = true;
for (RowFilter filter : filters) {
result = filter.accept(batch, rowIdx);
if (!result) {
break;
}
}
return result;
}
}
}
| 2,768 | 27.546392 | 87 | java |
null | orc-main/java/bench/core/src/java/org/apache/orc/impl/filter/RowFilterFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.filter.leaf.LeafFilterFactory;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
public class RowFilterFactory {
public static Consumer<OrcFilterContext> create(SearchArgument sArg,
TypeDescription readSchema,
OrcFile.Version version,
boolean normalize)
throws FilterFactory.UnSupportedSArgException {
Set<String> colIds = new HashSet<>();
ExpressionTree expr = normalize ? sArg.getExpression() : sArg.getCompactExpression();
RowFilter filter = create(expr,
colIds,
sArg.getLeaves(),
readSchema,
version);
return new RowBatchFilter(filter, colIds.toArray(new String[0]));
}
static RowFilter create(ExpressionTree expr,
Set<String> colIds,
List<PredicateLeaf> leaves,
TypeDescription readSchema,
OrcFile.Version version)
throws FilterFactory.UnSupportedSArgException {
RowFilter result;
switch (expr.getOperator()) {
case OR:
RowFilter[] orFilters = new RowFilter[expr.getChildren().size()];
for (int i = 0; i < expr.getChildren().size(); i++) {
orFilters[i] = create(expr.getChildren().get(i), colIds, leaves, readSchema, version);
}
result = new RowFilter.OrFilter(orFilters);
break;
case AND:
RowFilter[] andFilters = new RowFilter[expr.getChildren().size()];
for (int i = 0; i < expr.getChildren().size(); i++) {
andFilters[i] = create(expr.getChildren().get(i), colIds, leaves, readSchema, version);
}
result = new RowFilter.AndFilter(andFilters);
break;
case LEAF:
result = createLeafFilter(leaves.get(expr.getLeaf()), colIds, readSchema, version, false);
break;
default:
throw new FilterFactory.UnSupportedSArgException(String.format(
"SArg Expression: %s is not supported",
expr));
}
return result;
}
private static RowFilter createLeafFilter(PredicateLeaf leaf,
Set<String> colIds,
TypeDescription readSchema,
OrcFile.Version version,
boolean negated)
throws FilterFactory.UnSupportedSArgException {
colIds.add(leaf.getColumnName());
LeafFilter f = (LeafFilter) LeafFilterFactory.createLeafVectorFilter(leaf,
colIds,
readSchema,
false,
version,
negated);
return new RowFilter.LeafFilter(f);
}
static class RowBatchFilter implements Consumer<OrcFilterContext> {
private final RowFilter filter;
private final String[] colNames;
private RowBatchFilter(RowFilter filter, String[] colNames) {
this.filter = filter;
this.colNames = colNames;
}
@Override
public void accept(OrcFilterContext batch) {
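      // Compact the selection vector so it references only the rows accepted by the row filter.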
int size = 0;
int[] selected = batch.getSelected();
for (int i = 0; i < batch.getSelectedSize(); i++) {
if (filter.accept(batch, i)) {
selected[size] = i;
size += 1;
}
}
batch.setSelectedInUse(true);
batch.setSelected(selected);
batch.setSelectedSize(size);
}
public String[] getColNames() {
return colNames;
}
}
}
orc-main/java/bench/core/src/test/org/apache/orc/bench/core/filter/TestFilter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.filter;
import org.apache.orc.impl.filter.RowFilterFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.Reader;
import org.apache.orc.impl.filter.FilterFactory;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.Random;
import java.util.function.Consumer;
import java.util.stream.Stream;
import static org.junit.jupiter.params.provider.Arguments.arguments;
public class TestFilter {
private static final Logger LOG = LoggerFactory.getLogger(TestFilter.class);
private static final long seed = 1024;
protected final Configuration conf = new Configuration();
protected final Random rnd = new Random(seed);
protected final VectorizedRowBatch b = FilterBenchUtil.createBatch(rnd);
protected final OrcFilterContextImpl fc = (OrcFilterContextImpl)
new OrcFilterContextImpl(FilterBenchUtil.schema, false).setBatch(b);
public static Stream<Arguments> filters() {
return Stream.of(
arguments("simple", "row", false),
arguments("simple", "vector", false),
arguments("complex", "row", true),
arguments("complex", "vector", true),
arguments("complex", "row", false),
arguments("complex", "vector", false)
);
}
@BeforeEach
public void setup() {
FilterBenchUtil.unFilterBatch(fc);
}
@ParameterizedTest(name = "#{index} - {0}+{1}")
@MethodSource("org.apache.orc.bench.core.filter.TestFilter#filters")
public void testFilter(String complexity, String filterType, boolean normalize)
throws FilterFactory.UnSupportedSArgException {
new Filter(complexity, filterType, normalize).execute();
}
private class Filter {
protected final SearchArgument sArg;
protected final int[] expSel;
protected final Consumer<OrcFilterContext> filter;
private Filter(String complexity, String filterType, boolean normalize)
throws FilterFactory.UnSupportedSArgException {
Map.Entry<SearchArgument, int[]> ft;
switch (complexity) {
case "simple":
ft = FilterBenchUtil.createSArg(new Random(seed), b, 5);
break;
case "complex":
ft = FilterBenchUtil.createComplexSArg(new Random(seed), b, 10, 8);
break;
default:
throw new IllegalArgumentException();
}
sArg = ft.getKey();
LOG.info("SearchArgument has {} expressions", sArg.getExpression().getChildren().size());
expSel = ft.getValue();
switch (filterType) {
case "row":
filter = RowFilterFactory.create(sArg,
FilterBenchUtil.schema,
OrcFile.Version.CURRENT,
normalize);
break;
case "vector":
Reader.Options options = new Reader.Options(conf)
.searchArgument(sArg, new String[0])
.allowSARGToFilter(true);
filter = FilterFactory.createBatchFilter(options,
FilterBenchUtil.schema,
false,
OrcFile.Version.CURRENT,
normalize,
null,
null);
break;
default:
throw new IllegalArgumentException();
}
}
private void execute() {
filter.accept(fc.setBatch(b));
FilterBenchUtil.validate(fc, expSel);
}
}
}
orc-main/java/bench/core/src/test/org/apache/orc/bench/core/impl/ChunkReadUtilTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.core.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
class ChunkReadUtilTest {
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static final Path filePath = new Path(workDir, "chunk_read_file.orc");
private static long fileLength;
private static final int ROW_COUNT = 524288;
private static final int COL_COUNT = 16;
@BeforeAll
public static void setup() throws IOException {
fileLength = ChunkReadUtil.createORCFile(COL_COUNT, ROW_COUNT, filePath);
}
private static void readStart() {
FileSystem.clearStatistics();
}
private static FileSystem.Statistics readEnd() {
return FileSystem.getAllStatistics().get(0);
}
@Test
public void testReadAll() throws IOException {
Configuration conf = new Configuration();
readStart();
assertEquals(ROW_COUNT, ChunkReadUtil.readORCFile(filePath, conf, false));
assertTrue((readEnd().getBytesRead() / (double) fileLength) > 1);
}
@Test
public void testReadAlternate() throws IOException {
Configuration conf = new Configuration();
readStart();
assertEquals(ROW_COUNT, ChunkReadUtil.readORCFile(filePath, conf, true));
assertTrue((readEnd().getBytesRead() / (double) fileLength) < .5);
}
@Test
public void testReadAlternateWMinSeekSize() throws IOException {
Configuration conf = new Configuration();
ChunkReadUtil.setConf(conf, 4 * 1024 * 1024, 10);
readStart();
assertEquals(ROW_COUNT, ChunkReadUtil.readORCFile(filePath, conf, true));
double readFraction = readEnd().getBytesRead() / (double) fileLength;
assertTrue(readFraction > 1 && readFraction < 1.01);
}
@Test
public void testReadAlternateWMinSeekSizeDrop() throws IOException {
Configuration conf = new Configuration();
ChunkReadUtil.setConf(conf, 4 * 1024 * 1024, 0);
readStart();
assertEquals(ROW_COUNT, ChunkReadUtil.readORCFile(filePath, conf, true));
double readFraction = readEnd().getBytesRead() / (double) fileLength;
assertTrue(readFraction > 1 && readFraction < 1.01);
}
}
orc-main/java/bench/core/src/test/org/apache/orc/impl/filter/ATestFilter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.DateUtils;
import org.apache.orc.impl.OrcFilterContextImpl;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ATestFilter {
protected final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString())
.addField("f3p", TypeDescription.createDate())
.addField("f3h", TypeDescription.createDate());
protected final OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
protected final VectorizedRowBatch batch = schema.createRowBatch();
protected void setBatch(Long[] f1Values, String[] f2Values) {
setBatch(f1Values, f2Values, new String[0]);
}
protected void setBatch(Long[] f1Values, String[] f2Values, String[] f3Values) {
final LongColumnVector f1Vector = (LongColumnVector) batch.cols[0];
final BytesColumnVector f2Vector = (BytesColumnVector) batch.cols[1];
final DateColumnVector f3p = (DateColumnVector) batch.cols[2];
final DateColumnVector f3h = (DateColumnVector) batch.cols[3];
batch.reset();
f1Vector.noNulls = true;
    for (int i = 0; i < f1Values.length; i++) {
if (f1Values[i] == null) {
f1Vector.noNulls = false;
f1Vector.isNull[i] = true;
} else {
f1Vector.isNull[i] = false;
f1Vector.vector[i] = f1Values[i];
}
}
for (int i = 0; i < f2Values.length; i++) {
if (f2Values[i] == null) {
f2Vector.noNulls = false;
f2Vector.isNull[i] = true;
} else {
f2Vector.isNull[i] = false;
byte[] bytes = f2Values[i].getBytes(StandardCharsets.UTF_8);
f2Vector.vector[i] = bytes;
f2Vector.start[i] = 0;
f2Vector.length[i] = bytes.length;
}
}
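    // f3p is parsed with the proleptic-calendar flag set, f3h with it unset (hybrid
    // calendar), mirroring the two date columns declared in the schema above.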
for (int i = 0; i < f3Values.length; i++) {
if (f3Values[i] == null) {
f3p.noNulls = false;
f3p.isNull[i] = true;
f3h.noNulls = false;
f3h.isNull[i] = true;
} else {
f3p.isNull[i] = false;
f3p.vector[i] = DateUtils.parseDate(f3Values[i], true);
f3h.isNull[i] = false;
f3h.vector[i] = DateUtils.parseDate(f3Values[i], false);
}
}
batch.size = f1Values.length;
fc.setBatch(batch);
}
protected void validateSelected(int... v) {
assertTrue(fc.isSelectedInUse());
assertEquals(v.length, fc.getSelectedSize());
assertArrayEquals(v, Arrays.copyOf(fc.getSelected(), v.length));
}
protected void validateNoneSelected() {
assertTrue(fc.isSelectedInUse());
assertEquals(0, fc.getSelectedSize());
}
}
orc-main/java/bench/core/src/test/org/apache/orc/impl/filter/TestRowFilter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.junit.jupiter.api.Test;
import java.util.HashSet;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestRowFilter extends ATestFilter {
private final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString());
final OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
private final VectorizedRowBatch batch = schema.createRowBatch();
@Test
public void testINLongConversion() throws FilterFactory.UnSupportedSArgException {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 1L, 2L, 3L)
.build();
Set<String> colIds = new HashSet<>();
RowFilter filter = RowFilterFactory.create(sarg.getExpression(),
colIds,
sarg.getLeaves(),
schema,
OrcFile.Version.CURRENT);
assertNotNull(filter);
assertTrue(filter instanceof RowFilter.LeafFilter);
assertEquals(1, colIds.size());
assertTrue(colIds.contains("f1"));
setBatch(new Long[] {1L, 0L, 2L, 4L, 3L},
new String[] {});
fc.setBatch(batch);
for (int i = 0; i < batch.size; i++) {
if (i % 2 == 0) {
assertTrue(filter.accept(fc, i));
} else {
assertFalse(filter.accept(fc, i));
}
}
}
@Test
public void testINStringConversion() throws FilterFactory.UnSupportedSArgException {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("f2", PredicateLeaf.Type.STRING, "a", "b")
.build();
Set<String> colIds = new HashSet<>();
RowFilter filter = RowFilterFactory.create(sarg.getExpression(),
colIds,
sarg.getLeaves(),
schema,
OrcFile.Version.CURRENT);
assertNotNull(filter);
assertTrue(filter instanceof RowFilter.LeafFilter);
assertEquals(1, colIds.size());
assertTrue(colIds.contains("f2"));
setBatch(new Long[] {1L, 0L, 2L, 4L, 3L},
new String[] {"a", "z", "b", "y", "a"});
fc.setBatch(batch);
for (int i = 0; i < batch.size; i++) {
if (i % 2 == 0) {
assertTrue(filter.accept(fc, i));
} else {
assertFalse(filter.accept(fc, i));
}
}
}
@Test
public void testORConversion() throws FilterFactory.UnSupportedSArgException {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startOr()
.in("f1", PredicateLeaf.Type.LONG, 1L, 2L, 3L)
.in("f2", PredicateLeaf.Type.STRING, "a", "b", "c")
.end()
.build();
Set<String> colIds = new HashSet<>();
RowFilter filter = RowFilterFactory.create(sarg.getExpression(),
colIds,
sarg.getLeaves(),
schema,
OrcFile.Version.CURRENT);
assertNotNull(filter);
assertTrue(filter instanceof RowFilter.OrFilter);
assertEquals(2, ((RowFilter.OrFilter) filter).filters.length);
assertEquals(2, colIds.size());
assertTrue(colIds.contains("f1"));
assertTrue(colIds.contains("f2"));
// Setup the data such that the OR condition should select every row
setBatch(new Long[] {1L, 0L, 2L, 4L, 3L},
new String[] {"z", "a", "y", "b", "x"});
fc.setBatch(batch);
for (int i = 0; i < batch.size; i++) {
assertTrue(filter.accept(fc, i));
}
}
@Test
public void testANDConversion() throws FilterFactory.UnSupportedSArgException {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.in("f1", PredicateLeaf.Type.LONG, 1L, 2L, 3L)
.in("f2", PredicateLeaf.Type.STRING, "a", "b", "c")
.end()
.build();
Set<String> colIds = new HashSet<>();
RowFilter filter = RowFilterFactory.create(sarg.getExpression(),
colIds,
sarg.getLeaves(),
schema,
OrcFile.Version.CURRENT);
assertNotNull(filter);
assertTrue(filter instanceof RowFilter.AndFilter);
assertEquals(2, ((RowFilter.AndFilter) filter).filters.length);
assertEquals(2, colIds.size());
assertTrue(colIds.contains("f1"));
assertTrue(colIds.contains("f2"));
// Setup the data such that the AND condition should not select any row
setBatch(new Long[] {1L, 0L, 2L, 4L, 3L},
new String[] {"z", "a", "y", "b", "x"});
fc.setBatch(batch);
for (int i = 0; i < batch.size; i++) {
assertFalse(filter.accept(fc, i));
}
}
}
orc-main/java/bench/hive/src/java/org/apache/hadoop/hive/ql/io/orc/OrcBenchmarkUtilities.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.io.orc;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.Writable;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.TypeDescription;
import java.util.List;
/**
 * Hive utilities that need access to non-public methods from Hive.
*/
public class OrcBenchmarkUtilities {
public static StructObjectInspector createObjectInspector(TypeDescription schema) {
List<OrcProto.Type> types = OrcUtils.getOrcTypes(schema);
return (StructObjectInspector) OrcStruct.createObjectInspector(0, types);
}
public static Writable nextObject(VectorizedRowBatch batch,
TypeDescription schema,
int rowId,
Writable obj) {
OrcStruct result = (OrcStruct) obj;
if (result == null) {
result = new OrcStruct(batch.cols.length);
}
List<TypeDescription> childrenTypes = schema.getChildren();
for(int c=0; c < batch.cols.length; ++c) {
result.setFieldValue(c, RecordReaderImpl.nextValue(batch.cols[c], rowId,
childrenTypes.get(c), result.getFieldValue(c)));
}
return result;
}
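  // Illustrative sketch (not part of the original utility): one way these helpers
  // could be combined to materialize each row of a batch as a reusable OrcStruct.
  // The rows, schema and batch variables are assumed to be set up by the caller.
  //
  //   Writable row = null;
  //   while (rows.nextBatch(batch)) {
  //     for (int r = 0; r < batch.size; ++r) {
  //       row = OrcBenchmarkUtilities.nextObject(batch, schema, r, row);
  //       // consume the row, e.g. via the inspector from createObjectInspector(schema)
  //     }
  //   }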
}
orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/ColumnProjectionBenchmark.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive;
import com.google.auto.service.AutoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrackingLocalFileSystem;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.parquet.read.DataWritableReadSupport;
import org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.IOCounters;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
import org.apache.parquet.hadoop.ParquetInputFormat;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import java.net.URI;
import java.util.List;
import java.util.concurrent.TimeUnit;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Thread)
@AutoService(OrcBenchmark.class)
public class ColumnProjectionBenchmark implements OrcBenchmark {
private static final Path root = Utilities.getBenchmarkRoot();
@Param({ "github", "sales", "taxi"})
public String dataset;
@Param({"snappy", "gz", "zstd"})
public String compression;
@Override
public String getName() {
return "read-some";
}
@Override
public String getDescription() {
return "Benchmark column projection";
}
@Override
public void run(String[] args) throws Exception {
new Runner(Utilities.parseOptions(args, getClass())).run();
}
@Benchmark
public void orc(IOCounters counters) throws Exception{
Configuration conf = new Configuration();
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
FileSystem.Statistics statistics = fs.getLocalStatistics();
statistics.reset();
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf).filesystem(fs);
Path path = Utilities.getVariant(root, dataset, "orc", compression);
Reader reader = OrcFile.createReader(path, options);
TypeDescription schema = reader.getSchema();
boolean[] include = new boolean[schema.getMaximumId() + 1];
// select first two columns
List<TypeDescription> children = schema.getChildren();
for(int c= children.get(0).getId(); c <= children.get(1).getMaximumId(); ++c) {
include[c] = true;
}
RecordReader rows = reader.rows(new Reader.Options()
.include(include));
VectorizedRowBatch batch = schema.createRowBatch();
while (rows.nextBatch(batch)) {
counters.addRecords(batch.size);
}
rows.close();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
@Benchmark
public void parquet(IOCounters counters) throws Exception {
JobConf conf = new JobConf();
conf.set("fs.track.impl", TrackingLocalFileSystem.class.getName());
conf.set("fs.defaultFS", "track:///");
if ("taxi".equals(dataset)) {
conf.set("columns", "vendor_id,pickup_time");
conf.set("columns.types", "int,timestamp");
} else if ("sales".equals(dataset)) {
conf.set("columns", "sales_id,customer_id");
conf.set("columns.types", "bigint,bigint");
} else if ("github".equals(dataset)) {
conf.set("columns", "actor,created_at");
conf.set("columns.types", "struct<avatar_url:string,gravatar_id:string," +
"id:int,login:string,url:string>,timestamp");
} else {
throw new IllegalArgumentException("Unknown data set " + dataset);
}
Path path = Utilities.getVariant(root, dataset, "parquet", compression);
FileSystem.Statistics statistics = FileSystem.getStatistics("track:///",
TrackingLocalFileSystem.class);
statistics.reset();
ParquetInputFormat<ArrayWritable> inputFormat =
new ParquetInputFormat<>(DataWritableReadSupport.class);
NullWritable nada = NullWritable.get();
FileSplit split = new FileSplit(path, 0, Long.MAX_VALUE, new String[]{});
org.apache.hadoop.mapred.RecordReader<NullWritable,ArrayWritable> recordReader =
new ParquetRecordReaderWrapper(inputFormat, split, conf, Reporter.NULL);
ArrayWritable value = recordReader.createValue();
while (recordReader.next(nada, value)) {
counters.addRecords(1);
}
recordReader.close();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
}
orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/DecimalBench.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive;
import com.google.auto.service.AutoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.bench.core.NullFileSystem;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
import org.apache.orc.bench.core.convert.BatchReader;
import org.apache.orc.bench.core.convert.GenerateVariants;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@AutoService(OrcBenchmark.class)
public class DecimalBench implements OrcBenchmark {
private static final Path root = Utilities.getBenchmarkRoot();
@Override
public String getName() {
return "decimal";
}
@Override
public String getDescription() {
return "Benchmark new decimal64 read and write";
}
@Override
public void run(String[] args) throws Exception {
new Runner(Utilities.parseOptions(args, getClass())).run();
}
/**
* Abstract out whether we are writing short or long decimals
*/
public interface Loader {
/**
* Load the data from the values array into the ColumnVector.
* @param vector the output
* @param values the input
* @param offset the first input value
* @param length the number of values to copy
*/
void loadData(ColumnVector vector, long[] values, int offset, int length);
}
static class Decimal64Loader implements Loader {
final int scale;
final int precision;
Decimal64Loader(int precision, int scale) {
this.precision = precision;
this.scale = scale;
}
@Override
public void loadData(ColumnVector vector, long[] values, int offset, int length) {
Decimal64ColumnVector v = (Decimal64ColumnVector) vector;
v.ensureSize(length, false);
v.noNulls = true;
for(int p=0; p < length; ++p) {
v.vector[p] = values[p + offset];
}
v.precision = (short) precision;
v.scale = (short) scale;
}
}
static class DecimalLoader implements Loader {
final int scale;
final int precision;
DecimalLoader(int precision, int scale) {
this.precision = precision;
this.scale = scale;
}
@Override
public void loadData(ColumnVector vector, long[] values, int offset, int length) {
DecimalColumnVector v = (DecimalColumnVector) vector;
v.noNulls = true;
for(int p=0; p < length; ++p) {
v.vector[p].setFromLongAndScale(values[offset + p], scale);
}
v.precision = (short) precision;
v.scale = (short) scale;
}
}
@State(Scope.Thread)
public static class OutputState {
// try both DecimalColumnVector and Decimal64ColumnVector
@Param({"ORIGINAL", "USE_DECIMAL64"})
public TypeDescription.RowBatchVersion version;
long[] total_amount = new long[1024 * 1024];
Configuration conf = new Configuration();
FileSystem fs = new NullFileSystem();
TypeDescription schema;
VectorizedRowBatch batch;
Loader loader;
int precision;
@Setup
public void setup() throws IOException {
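      // Precision 19 exceeds the Decimal64 limit of 18 digits, so ORIGINAL exercises
      // HiveDecimalWritable-backed vectors; precision 8 lets USE_DECIMAL64 stay on longs.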
if (version == TypeDescription.RowBatchVersion.ORIGINAL) {
precision = 19;
loader = new DecimalLoader(precision, 2);
} else {
precision = 8;
loader = new Decimal64Loader(precision, 2);
}
schema = TypeDescription.createDecimal()
.withScale(2)
.withPrecision(precision);
readRawData(total_amount, root, "total_amount", conf);
batch = schema.createRowBatchV2();
}
}
@Benchmark
public void write(OutputState state) throws Exception {
Writer writer = OrcFile.createWriter(new Path("null"),
OrcFile.writerOptions(state.conf)
.fileSystem(state.fs)
.setSchema(state.schema)
.compress(CompressionKind.NONE));
int r = 0;
int batchSize = state.batch.getMaxSize();
while (r < state.total_amount.length) {
state.batch.size = batchSize;
state.loader.loadData(state.batch.cols[0], state.total_amount, r, batchSize);
writer.addRowBatch(state.batch);
r += batchSize;
}
writer.close();
}
static void readRawData(long[] data,
Path root,
String column,
Configuration conf) throws IOException {
TypeDescription schema = Utilities.loadSchema("taxi.schema");
int row = 0;
int batchPosn = 0;
BatchReader reader =
new GenerateVariants.RecursiveReader(new Path(root, "sources/taxi"), "parquet",
schema, conf, org.apache.orc.bench.core.CompressionKind.NONE);
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 0;
TypeDescription columnSchema = schema.findSubtype(column);
DecimalColumnVector cv = (DecimalColumnVector) batch.cols[columnSchema.getId() - 1];
int scale = columnSchema.getScale();
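    // serialize64 yields each decimal as a scaled long, the representation the
    // write benchmark later feeds back into the column vectors via the loaders.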
while (row < data.length) {
if (batchPosn >= batch.size) {
// Read the next batch and ignore eof. If the file is shorter
// than we need, just reuse the current batch over again.
reader.nextBatch(batch);
batchPosn = 0;
}
data[row++] = cv.vector[batchPosn++].serialize64(scale);
}
}
@State(Scope.Thread)
public static class InputState {
// try both DecimalColumnVector and Decimal64ColumnVector
@Param({"ORIGINAL", "USE_DECIMAL64"})
public TypeDescription.RowBatchVersion version;
Configuration conf = new Configuration();
FileSystem fs;
TypeDescription schema;
VectorizedRowBatch batch;
Path path;
boolean[] include;
Reader reader;
Reader.Options readerOptions;
@Setup
public void setup() throws IOException {
fs = FileSystem.getLocal(conf).getRaw();
path = new Path(root, "generated/taxi/orc.zstd");
schema = Utilities.loadSchema("taxi.schema");
batch = schema.createRowBatch(version, 1024);
// only include the columns with decimal values
include = new boolean[schema.getMaximumId() + 1];
for(TypeDescription child: schema.getChildren()) {
if (child.getCategory() == TypeDescription.Category.DECIMAL) {
include[child.getId()] = true;
}
}
reader = OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs));
// just read the decimal columns from the first stripe
readerOptions = reader.options().include(include).range(0, 1000);
}
}
@Benchmark
public void read(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows = state.reader.rows(state.readerOptions);
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
}
orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/FullReadBenchmark.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive;
import com.google.auto.service.AutoService;
import com.google.gson.JsonStreamParser;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrackingLocalFileSystem;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.parquet.read.DataWritableReadSupport;
import org.apache.hadoop.hive.ql.io.parquet.read.ParquetRecordReaderWrapper;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.CompressionKind;
import org.apache.orc.bench.core.IOCounters;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
import org.apache.parquet.hadoop.ParquetInputFormat;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.TimeUnit;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Thread)
@AutoService(OrcBenchmark.class)
public class FullReadBenchmark implements OrcBenchmark {
private static final Path root = Utilities.getBenchmarkRoot();
@Param({"taxi", "sales", "github"})
public String dataset;
@Param({"gz", "snappy", "zstd"})
public String compression;
@Override
public String getName() {
return "read-all";
}
@Override
public String getDescription() {
return "read all columns and rows";
}
@Override
public void run(String[] args) throws Exception {
new Runner(Utilities.parseOptions(args, getClass())).run();
}
@Benchmark
public void orc(IOCounters counters) throws Exception{
Configuration conf = new Configuration();
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
FileSystem.Statistics statistics = fs.getLocalStatistics();
statistics.reset();
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf).filesystem(fs);
Path path = Utilities.getVariant(root, dataset, "orc", compression);
Reader reader = OrcFile.createReader(path, options);
TypeDescription schema = reader.getSchema();
RecordReader rows = reader.rows();
VectorizedRowBatch batch = schema.createRowBatch();
while (rows.nextBatch(batch)) {
counters.addRecords(batch.size);
}
rows.close();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
@Benchmark
public void avro(IOCounters counters) throws Exception {
Configuration conf = new Configuration();
conf.set("fs.track.impl", TrackingLocalFileSystem.class.getName());
conf.set("fs.defaultFS", "track:///");
Path path = Utilities.getVariant(root, dataset, "avro", compression);
FileSystem.Statistics statistics = FileSystem.getStatistics("track:///",
TrackingLocalFileSystem.class);
statistics.reset();
FsInput file = new FsInput(path, conf);
DatumReader<GenericRecord> datumReader = new GenericDatumReader<>();
DataFileReader<GenericRecord> dataFileReader =
new DataFileReader<>(file, datumReader);
GenericRecord record = null;
while (dataFileReader.hasNext()) {
record = dataFileReader.next(record);
counters.addRecords(1);
}
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
@Benchmark
public void parquet(IOCounters counters) throws Exception {
JobConf conf = new JobConf();
conf.set("fs.track.impl", TrackingLocalFileSystem.class.getName());
conf.set("fs.defaultFS", "track:///");
Path path = Utilities.getVariant(root, dataset, "parquet", compression);
FileSystem.Statistics statistics = FileSystem.getStatistics("track:///",
TrackingLocalFileSystem.class);
statistics.reset();
ParquetInputFormat<ArrayWritable> inputFormat =
new ParquetInputFormat<>(DataWritableReadSupport.class);
NullWritable nada = NullWritable.get();
FileSplit split = new FileSplit(path, 0, Long.MAX_VALUE, new String[]{});
org.apache.hadoop.mapred.RecordReader<NullWritable,ArrayWritable> recordReader =
new ParquetRecordReaderWrapper(inputFormat, split, conf, Reporter.NULL);
ArrayWritable value = recordReader.createValue();
while (recordReader.next(nada, value)) {
counters.addRecords(1);
}
recordReader.close();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
@Benchmark
public void json(IOCounters counters) throws Exception {
Configuration conf = new Configuration();
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
FileSystem.Statistics statistics = fs.getLocalStatistics();
statistics.reset();
Path path = Utilities.getVariant(root, dataset, "json", compression);
CompressionKind compress = CompressionKind.fromExtension(compression);
InputStream input = compress.read(fs.open(path));
JsonStreamParser parser =
new JsonStreamParser(new InputStreamReader(input,
StandardCharsets.UTF_8));
while (parser.hasNext()) {
parser.next();
counters.addRecords(1);
}
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
}
orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/ORCWriterBenchMark.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive;
import com.google.auto.service.AutoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrackingLocalFileSystem;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.bench.core.IOCounters;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Thread)
@AutoService(OrcBenchmark.class)
public class ORCWriterBenchMark implements OrcBenchmark {
private static final Path root = Utilities.getBenchmarkRoot();
private List<VectorizedRowBatch> batches = new ArrayList<>();
private Path dumpDir() {
return new Path(root, "dumpDir");
}
@Override
public String getName() {
return "write";
}
@Override
public String getDescription() {
return "Benchmark ORC Writer with different DICT options";
}
@Setup(Level.Trial)
public void prepareData() {
TypeDescription schema = TypeDescription.fromString("struct<str:string>");
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector strVector = (BytesColumnVector) batch.cols[0];
Random rand = new Random();
for (int i = 0; i < 32 * 1024; i++) {
if (batch.size == batch.getMaxSize()) {
batches.add(batch);
batch = schema.createRowBatch();
strVector = (BytesColumnVector) batch.cols[0];
}
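      // Draw values uniformly from [10, distinctCount], giving roughly
      // distinctCount - 9 distinct keys for the dictionary to absorb.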
int randomNum = rand.nextInt(distinctCount - 10 + 1) + 10;
byte[] value = String.format("row %06d", randomNum).getBytes(StandardCharsets.UTF_8);
strVector.setRef(batch.size, value, 0, value.length);
++batch.size;
}
batches.add(batch);
}
@Param({"RBTREE", "HASH", "NONE"})
public String dictImpl;
/**
   * Controls the upper bound of the randomly generated values, which in turn bounds the
   * number of distinct keys that appear in the dictionary.
*/
@Param({"10000", "2500", "500"})
public int distinctCount;
@TearDown(Level.Invocation)
public void tearDownBenchmark()
throws Exception {
// Cleaning up the dump files.
Configuration conf = new Configuration();
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
fs.delete(dumpDir(), true);
}
@Override
public void run(String[] args)
throws Exception {
new Runner(Utilities.parseOptions(args, getClass())).run();
}
@Benchmark
public void dictBench(IOCounters counters)
throws Exception {
Configuration conf = new Configuration();
if (dictImpl.equalsIgnoreCase("NONE")) {
// turn off the dictionaries
OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.setDouble(conf, 0);
} else {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
}
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
FileSystem.Statistics statistics = fs.getLocalStatistics();
statistics.reset();
Path testFilePath = new Path(dumpDir(), "dictBench");
TypeDescription schema = TypeDescription.fromString("struct<str:string>");
// Note that the total data volume will be around 100 * 1024 * 1024
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).fileSystem(fs).setSchema(schema).compress(CompressionKind.NONE)
.stripeSize(128 * 1024));
for (VectorizedRowBatch batch : batches) {
writer.addRowBatch(batch);
counters.addRecords(batch.size);
}
writer.close();
counters.addBytes(statistics.getWriteOps(), statistics.getBytesWritten());
counters.addInvocation();
}
}
orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/RowFilterProjectionBenchmark.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive;
import com.google.auto.service.AutoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrackingLocalFileSystem;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.IOCounters;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.runner.Runner;
import java.net.URI;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Thread)
@AutoService(OrcBenchmark.class)
public class RowFilterProjectionBenchmark implements OrcBenchmark {
private static final Path root = Utilities.getBenchmarkRoot();
@Param({"taxi"})
public String dataset;
@Param({"zstd"})
public String compression;
@Param({"0.01", "0.1", "0.2", "0.4", "0.6", "0.8", "1."})
public String filter_percentage;
@Param({"all", "2", "4", "8", "16"})
public String projected_columns;
@Override
public String getName() {
return "row-filter";
}
@Override
public String getDescription() {
return "Benchmark column projection with row-level filtering";
}
@Override
public void run(String[] args) throws Exception {
new Runner(Utilities.parseOptions(args, getClass())).run();
}
static Set<Integer> filterValues = null;
public static void generateRandomSet(double percentage) throws IllegalArgumentException {
if (percentage > 1.0) {
throw new IllegalArgumentException("Filter percentage must be < 1.0 but was "+ percentage);
}
filterValues = new HashSet<>();
    Random randomGenerator = new Random();
    while (filterValues.size() < (1024 * percentage)) {
      filterValues.add(randomGenerator.nextInt(1024));
    }
}
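  // Row-filter callback handed to Reader.Options.setRowFilter: it keeps only the rows
  // whose position within the batch is present in filterValues.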
public static void customIntRowFilter(OrcFilterContext batch) {
int newSize = 0;
for (int row = 0; row < batch.getSelectedSize(); ++row) {
// Select ONLY specific keys
if (filterValues.contains(row)) {
batch.getSelected()[newSize++] = row;
}
}
batch.setSelectedInUse(true);
batch.setSelectedSize(newSize);
}
@Benchmark
public void orcRowFilter(IOCounters counters) throws Exception {
Configuration conf = new Configuration();
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
FileSystem.Statistics statistics = fs.getLocalStatistics();
statistics.reset();
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf).filesystem(fs);
Path path = Utilities.getVariant(root, dataset, "orc", compression);
Reader reader = OrcFile.createReader(path, options);
TypeDescription schema = reader.getSchema();
// select an ID column to apply filter on
String filter_column;
if ("taxi".equals(dataset)) {
filter_column = "vendor_id";
} else if ("sales".equals(dataset)) {
filter_column = "sales_id";
} else if ("github".equals(dataset)) {
filter_column = "id";
} else {
throw new IllegalArgumentException("Unknown data set " + dataset);
}
boolean[] include = new boolean[schema.getMaximumId() + 1];
int columns_len = schema.getMaximumId();
if (projected_columns.compareTo("all") != 0) {
columns_len = Integer.parseInt(projected_columns);
}
// select the remaining columns to project
List<TypeDescription> children = schema.getChildren();
boolean foundFilterCol = false;
for (int c = children.get(0).getId(); c < schema.getMaximumId() + 1; ++c) {
if (c < schema.getFieldNames().size() &&
schema.getFieldNames().get(c-1).compareTo(filter_column) == 0) {
foundFilterCol = true;
include[c] = true;
}
else {
if (columns_len > 0) {
include[c] = true;
columns_len--;
}
}
if (foundFilterCol && (columns_len == 0)) break;
}
generateRandomSet(Double.parseDouble(filter_percentage));
RecordReader rows =
reader.rows(reader.options()
.include(include)
.setRowFilter(new String[]{filter_column},
RowFilterProjectionBenchmark::customIntRowFilter));
VectorizedRowBatch batch = schema.createRowBatch();
while (rows.nextBatch(batch)) {
counters.addRecords(batch.size);
}
rows.close();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
@Benchmark
public void orcNoFilter(IOCounters counters) throws Exception {
Configuration conf = new Configuration();
TrackingLocalFileSystem fs = new TrackingLocalFileSystem();
fs.initialize(new URI("file:///"), conf);
FileSystem.Statistics statistics = fs.getLocalStatistics();
statistics.reset();
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf).filesystem(fs);
Path path = Utilities.getVariant(root, dataset, "orc", compression);
Reader reader = OrcFile.createReader(path, options);
TypeDescription schema = reader.getSchema();
// select an ID column to apply filter on
String filter_column;
if ("taxi".equals(dataset)) {
filter_column = "vendor_id";
} else if ("sales".equals(dataset)) {
filter_column = "sales_id";
} else if ("github".equals(dataset)) {
filter_column = "id";
} else {
throw new IllegalArgumentException("Unknown data set " + dataset);
}
boolean[] include = new boolean[schema.getMaximumId() + 1];
int columns_len = schema.getMaximumId();
if (projected_columns.compareTo("all") != 0) {
columns_len = Integer.parseInt(projected_columns);
}
// select the remaining columns to project
List<TypeDescription> children = schema.getChildren();
boolean foundFilterCol = false;
for (int c = children.get(0).getId(); c < schema.getMaximumId() + 1; ++c) {
if (c < schema.getFieldNames().size() &&
schema.getFieldNames().get(c-1).compareTo(filter_column) == 0) {
foundFilterCol = true;
include[c] = true;
}
else {
if (columns_len > 0) {
include[c] = true;
columns_len--;
}
}
if (foundFilterCol && (columns_len == 0)) break;
}
RecordReader rows = reader.rows(reader.options().include(include));
VectorizedRowBatch batch = schema.createRowBatch();
while (rows.nextBatch(batch)) {
counters.addRecords(batch.size);
}
rows.close();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
counters.addInvocation();
}
}
orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/rowfilter/BooleanRowFilterBenchmark.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive.rowfilter;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@Fork(1)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class BooleanRowFilterBenchmark extends org.openjdk.jmh.Main {
@State(Scope.Thread)
public static class InputState extends RowFilterInputState {
@Param({"ORIGINAL"})
public TypeDescription.RowBatchVersion version;
@Param({"BOOLEAN"})
public TypeDescription.Category benchType;
@Param({"0.01", "0.1", "0.2", "0.4", "0.6", "0.8", "1."})
public String filterPerc;
@Param({"2"})
public int filterColsNum;
String dataRelativePath = "data/generated/sales/orc.zstd";
String schemaName = "sales.schema";
String filterColumn = "sales_id";
}
@Benchmark
public void readOrcRowFilter(Blackhole blackhole, InputState state) throws Exception {
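    // setRowFilter reads the listed filter column(s) first and runs the
    // callback once per batch to shrink the selected vector, so the other
    // included columns only need to be decoded for the surviving rows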
RecordReader rows =
state.reader.rows(state.readerOptions
.setRowFilter(new String[]{state.filterColumn}, state::customIntRowFilter));
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
@Benchmark
public void readOrcNoFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows = state.reader.rows(state.readerOptions);
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
/*
* Run this test:
* java -cp hive/target/orc-benchmarks-hive-*-uber.jar org.apache.orc.bench.hive.rowfilter.BooleanRowFilterBenchmark
*/
public static void main(String[] args) throws RunnerException {
new Runner(new OptionsBuilder()
.include(BooleanRowFilterBenchmark.class.getSimpleName())
.forks(1)
.build()).run();
}
}
| 3,361 | 32.287129 | 119 | java |
null | orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/rowfilter/DecimalRowFilterBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive.rowfilter;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@Fork(1)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class DecimalRowFilterBenchmark extends org.openjdk.jmh.Main {
@State(Scope.Thread)
public static class InputState extends RowFilterInputState {
// try both DecimalColumnVector and Decimal64
@Param({"ORIGINAL", "USE_DECIMAL64"})
public TypeDescription.RowBatchVersion version;
@Param({"DECIMAL"})
public TypeDescription.Category benchType;
@Param({"0.01", "0.1", "0.2", "0.4", "0.6", "0.8", "1."})
public String filterPerc;
@Param({"2"})
public int filterColsNum;
String dataRelativePath = "data/generated/taxi/orc.zstd";
String schemaName = "taxi.schema";
String filterColumn = "vendor_id";
}
@Benchmark
public void readOrcRowFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows =
state.reader.rows(state.readerOptions
.setRowFilter(new String[]{state.filterColumn}, state::customIntRowFilter));
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
@Benchmark
public void readOrcNoFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows = state.reader.rows(state.readerOptions);
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
/*
* Run this test:
* java -cp hive/target/orc-benchmarks-hive-*-uber.jar org.apache.orc.bench.hive.rowfilter.DecimalRowFilterBenchmark
*/
public static void main(String[] args) throws RunnerException {
new Runner(new OptionsBuilder()
.include(DecimalRowFilterBenchmark.class.getSimpleName())
.forks(1)
.build()).run();
}
}
| 3,427 | 32.607843 | 119 | java |
null | orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/rowfilter/DoubleRowFilterBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive.rowfilter;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@Fork(1)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class DoubleRowFilterBenchmark extends org.openjdk.jmh.Main {
@State(Scope.Thread)
public static class InputState extends RowFilterInputState {
    // only the original row batch representation is relevant for double columns
@Param({"ORIGINAL"})
public TypeDescription.RowBatchVersion version;
@Param({"DOUBLE"})
public TypeDescription.Category benchType;
@Param({"0.01", "0.1", "0.2", "0.4", "0.6", "0.8", "1."})
public String filterPerc;
@Param({"2"})
public int filterColsNum;
String dataRelativePath = "data/generated/taxi/orc.zstd";
String schemaName = "taxi.schema";
String filterColumn = "vendor_id";
}
@Benchmark
public void readOrcRowFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows =
state.reader.rows(state.readerOptions
.setRowFilter(new String[]{state.filterColumn}, state::customIntRowFilter));
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
@Benchmark
public void readOrcNoFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows = state.reader.rows(state.readerOptions);
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
/*
* Run this test:
* java -cp hive/target/orc-benchmarks-hive-*-uber.jar org.apache.orc.bench.hive.rowfilter.DoubleRowFilterBenchmark
*/
public static void main(String[] args) throws RunnerException {
new Runner(new OptionsBuilder()
.include(DoubleRowFilterBenchmark.class.getSimpleName())
.forks(1)
.build()).run();
}
}
| 3,406 | 32.401961 | 118 | java |
null | orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/rowfilter/RowFilterInputState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive.rowfilter;
import org.apache.commons.lang.reflect.FieldUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.Utilities;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import java.io.IOException;
import java.util.Random;
@State(Scope.Thread)
public abstract class RowFilterInputState {
private static final Path root = new Path(System.getProperty("user.dir"));
Configuration conf = new Configuration();
FileSystem fs;
TypeDescription schema;
VectorizedRowBatch batch;
Path path;
boolean[] include;
Reader reader;
Reader.Options readerOptions;
boolean[] filterValues = null;
@Setup
public void setup() throws IOException, IllegalAccessException {
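    // the JMH @Param fields (version, benchType, filterPerc, ...) are declared
    // on the benchmark-specific subclasses, so this shared base class pulls
    // them out reflectively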
TypeDescription.RowBatchVersion version =
(TypeDescription.RowBatchVersion) FieldUtils.readField(this, "version", true);
TypeDescription.Category benchType =
(TypeDescription.Category) FieldUtils.readField(this, "benchType", true);
String filterPerc = (String) FieldUtils.readField(this, "filterPerc", true);
int filterColsNum = (int) FieldUtils.readField(this, "filterColsNum", true);
String dataRelativePath = (String) FieldUtils.readField(this, "dataRelativePath", true);
String schemaName = (String) FieldUtils.readField(this, "schemaName", true);
String filterColumn = (String) FieldUtils.readField(this, "filterColumn", true);
fs = FileSystem.getLocal(conf).getRaw();
path = new Path(root, dataRelativePath);
schema = Utilities.loadSchema(schemaName);
batch = schema.createRowBatch(version, 1024);
include = new boolean[schema.getMaximumId() + 1];
for (TypeDescription child : schema.getChildren()) {
if (schema.getFieldNames().get(child.getId() - 1).compareTo(filterColumn) == 0) {
System.out.println(
"Apply Filter on column: " + schema.getFieldNames().get(child.getId() - 1));
include[child.getId()] = true;
} else if (child.getCategory() == benchType) {
System.out.println("Skip column(s): " + schema.getFieldNames().get(child.getId() - 1));
include[child.getId()] = true;
if (--filterColsNum == 0) break;
}
}
if (filterColsNum != 0) {
System.err.println("Dataset does not contain type: " + benchType);
System.exit(-1);
}
generateRandomSet(Double.parseDouble(filterPerc));
reader = OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs));
    // read only the columns flagged in include[] above (filter column plus benchType columns)
readerOptions = reader.options().include(include);
}
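  /**
   * Randomly mark roughly {@code percentage * 1024} positions of a 1024-row
   * batch as rows that the filter should keep.
   */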
public void generateRandomSet(double percentage) throws IllegalArgumentException {
if (percentage > 1.0) {
throw new IllegalArgumentException("Filter percentage must be < 1.0 but was " + percentage);
}
filterValues = new boolean[1024];
int count = 0;
while (count < (1024 * percentage)) {
Random randomGenerator = new Random();
int randVal = randomGenerator.nextInt(1024);
if (!filterValues[randVal]) {
filterValues[randVal] = true;
count++;
}
}
}
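  /**
   * Row filter callback: keeps only the pre-selected positions by rewriting
   * the batch's selected vector in place.
   */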
public void customIntRowFilter(OrcFilterContext batch) {
int newSize = 0;
for (int row = 0; row < batch.getSelectedSize(); ++row) {
if (filterValues[row]) {
batch.getSelected()[newSize++] = row;
}
}
batch.setSelectedInUse(true);
batch.setSelectedSize(newSize);
}
}
| 4,577 | 37.470588 | 98 | java |
null | orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/rowfilter/StringRowFilterBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive.rowfilter;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@Fork(1)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class StringRowFilterBenchmark extends org.openjdk.jmh.Main {
@State(Scope.Thread)
public static class InputState extends RowFilterInputState {
@Param({"ORIGINAL"})
public TypeDescription.RowBatchVersion version;
@Param({"STRING"})
public TypeDescription.Category benchType;
@Param({"0.01", "0.1", "0.2", "0.4", "0.6", "0.8", "1."})
public String filterPerc;
@Param({"2"})
public int filterColsNum;
String dataRelativePath = "data/generated/sales/orc.zstd";
String schemaName = "sales.schema";
String filterColumn = "sales_id";
}
@Benchmark
public void readOrcRowFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows =
state.reader.rows(state.readerOptions
.setRowFilter(new String[]{state.filterColumn}, state::customIntRowFilter));
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
@Benchmark
public void readOrcNoFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows = state.reader.rows(state.readerOptions);
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
/*
* Run this test:
* java -cp hive/target/orc-benchmarks-hive-*-uber.jar org.apache.orc.bench.hive.rowfilter.StringRowFilterBenchmark
*/
public static void main(String[] args) throws RunnerException {
new Runner(new OptionsBuilder()
.include(StringRowFilterBenchmark.class.getSimpleName())
.forks(1)
.build()).run();
}
}
| 3,357 | 32.247525 | 118 | java |
null | orc-main/java/bench/hive/src/java/org/apache/orc/bench/hive/rowfilter/TimestampRowFilterBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.hive.rowfilter;
import org.apache.hadoop.fs.Path;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@Fork(1)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public class TimestampRowFilterBenchmark extends org.openjdk.jmh.Main {
private static final Path root = new Path(System.getProperty("user.dir"));
@State(Scope.Thread)
public static class InputState extends RowFilterInputState {
    // only the original row batch representation is relevant for timestamp columns
@Param({"ORIGINAL"})
public TypeDescription.RowBatchVersion version;
@Param({"TIMESTAMP"})
public TypeDescription.Category benchType;
@Param({"0.01", "0.1", "0.2", "0.4", "0.6", "0.8", "1."})
public String filterPerc;
@Param({"2"})
public int filterColsNum;
String dataRelativePath = "data/generated/taxi/orc.zstd";
String schemaName = "taxi.schema";
String filterColumn = "vendor_id";
}
@Benchmark
public void readOrcRowFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows =
state.reader.rows(state.readerOptions
.setRowFilter(new String[]{state.filterColumn}, state::customIntRowFilter));
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
@Benchmark
public void readOrcNoFilter(Blackhole blackhole, InputState state) throws Exception {
RecordReader rows = state.reader.rows(state.readerOptions);
while (rows.nextBatch(state.batch)) {
blackhole.consume(state.batch);
}
rows.close();
}
/*
* Run this test:
* java -cp hive/target/orc-benchmarks-hive-*-uber.jar \
* org.apache.orc.bench.hive.rowfilter.TimestampRowFilterBenchmark
*/
public static void main(String[] args) throws RunnerException {
new Runner(new OptionsBuilder()
.include(TimestampRowFilterBenchmark.class.getSimpleName())
.forks(1)
.build()).run();
}
}
| 3,539 | 32.714286 | 88 | java |
null | orc-main/java/bench/spark/src/java/org/apache/orc/bench/spark/SparkBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.spark;
import com.google.auto.service.AutoService;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrackingLocalFileSystem;
import org.apache.orc.TypeDescription;
import org.apache.orc.bench.core.IOCounters;
import org.apache.orc.bench.core.OrcBenchmark;
import org.apache.orc.bench.core.Utilities;
import org.apache.orc.bench.core.convert.GenerateVariants;
import org.apache.spark.paths.SparkPath;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.avro.AvroFileFormat;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.execution.datasources.FileFormat;
import org.apache.spark.sql.execution.datasources.PartitionedFile;
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat;
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat;
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat;
import org.apache.spark.sql.sources.And$;
import org.apache.spark.sql.sources.Filter;
import org.apache.spark.sql.sources.GreaterThanOrEqual$;
import org.apache.spark.sql.sources.LessThan$;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.vectorized.ColumnarBatch;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import scala.Function1;
import scala.Tuple2;
import scala.collection.Iterator;
import scala.collection.JavaConverters;
import scala.collection.Seq;
import scala.collection.immutable.Map;
import scala.collection.immutable.Map$;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@AutoService(OrcBenchmark.class)
public class SparkBenchmark implements OrcBenchmark {
private static final Path root = Utilities.getBenchmarkRoot();
@Override
public String getName() {
return "spark";
}
@Override
public String getDescription() {
return "Run Spark benchmarks";
}
@Override
public void run(String[] args) throws Exception {
CommandLine cmds = GenerateVariants.parseCommandLine(args);
new Runner(new OptionsBuilder()
.parent(Utilities.parseOptions(args, this.getClass()))
.param("compression", cmds.getOptionValue("compress", "gz,snappy,zstd").split(","))
.param("dataset", cmds.getOptionValue("data", "taxi,sales,github").split(","))
.param("format", cmds.getOptionValue("format", "orc,parquet,json").split(","))
.build()
).run();
}
@State(Scope.Thread)
public static class InputSource {
SparkSession session;
TrackingLocalFileSystem fs;
Configuration conf;
Path path;
StructType schema;
StructType empty = new StructType();
FileFormat formatObject;
@Param({"taxi", "sales", "github"})
String dataset;
@Param({"gz", "snappy", "zstd"})
String compression;
@Param({"orc", "parquet", "json"})
String format;
@Setup(Level.Trial)
public void setup() {
session = SparkSession.builder().appName("benchmark")
.config("spark.master", "local[4]")
.config("spark.sql.orc.filterPushdown", true)
.config("spark.sql.orc.impl", "native")
.getOrCreate();
conf = session.sparkContext().hadoopConfiguration();
conf.set("avro.mapred.ignore.inputs.without.extension","false");
conf.set("fs.track.impl", TrackingLocalFileSystem.class.getName());
path = new Path("track://",
Utilities.getVariant(root, dataset, format, compression));
try {
fs = (TrackingLocalFileSystem) path.getFileSystem(conf);
} catch (IOException e) {
throw new IllegalArgumentException("Can't get filesystem", e);
}
try {
TypeDescription orcSchema = Utilities.loadSchema(dataset + ".schema");
schema = (StructType) SparkSchema.convertToSparkType(orcSchema);
} catch (IOException e) {
throw new IllegalArgumentException("Can't read schema " + dataset, e);
}
switch (format) {
case "avro":
formatObject = new AvroFileFormat();
break;
case "orc":
formatObject = new OrcFileFormat();
break;
case "parquet":
formatObject = new ParquetFileFormat();
break;
case "json":
formatObject = new JsonFileFormat();
break;
default:
throw new IllegalArgumentException("Unknown format " + format);
}
}
}
static void processReader(Iterator<InternalRow> reader,
FileSystem.Statistics statistics,
IOCounters counters,
Blackhole blackhole) {
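    // vectorized readers hand back ColumnarBatch instances, row-based readers
    // hand back individual InternalRows; count records accordingly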
while (reader.hasNext()) {
Object row = reader.next();
if (row instanceof ColumnarBatch) {
counters.addRecords(((ColumnarBatch) row).numRows());
} else {
counters.addRecords(1);
}
blackhole.consume(row);
}
counters.addInvocation();
counters.addBytes(statistics.getReadOps(), statistics.getBytesRead());
}
@Benchmark
public void fullRead(InputSource source,
IOCounters counters,
Blackhole blackhole) {
FileSystem.Statistics statistics = source.fs.getLocalStatistics();
statistics.reset();
List<Filter> filters = new ArrayList<>();
List<Tuple2<String,String>> options = new ArrayList<>();
switch (source.format) {
case "json":
options.add(new Tuple2<>("timestampFormat", "yyyy-MM-dd HH:mm:ss.SSS"));
break;
default:
break;
}
Seq<Tuple2<String,String>> optionsScala = JavaConverters
.asScalaBufferConverter(options).asScala().toSeq();
@SuppressWarnings("unchecked")
Map<String,String> scalaMap = (Map<String, String>)Map$.MODULE$.apply(optionsScala);
Function1<PartitionedFile,Iterator<InternalRow>> factory =
source.formatObject.buildReaderWithPartitionValues(source.session,
source.schema, source.empty, source.schema,
JavaConverters.collectionAsScalaIterableConverter(filters).asScala().toSeq(),
scalaMap, source.conf);
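    // a single split covering the whole file: empty partition values,
    // offset 0, unbounded length, and no preferred hosts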
PartitionedFile file = new PartitionedFile(InternalRow.empty(),
SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L);
processReader(factory.apply(file), statistics, counters, blackhole);
}
@Benchmark
public void partialRead(InputSource source,
IOCounters counters,
Blackhole blackhole) {
FileSystem.Statistics statistics = source.fs.getLocalStatistics();
statistics.reset();
List<Filter> filters = new ArrayList<>();
List<Tuple2<String,String>> options = new ArrayList<>();
switch (source.format) {
case "json":
case "avro":
throw new IllegalArgumentException(source.format + " can't handle projection");
default:
break;
}
TypeDescription readSchema = null;
switch (source.dataset) {
case "taxi":
readSchema = TypeDescription.fromString("struct<vendor_id:int," +
"pickup_time:timestamp>");
break;
case "sales":
readSchema = TypeDescription.fromString("struct<sales_id:bigint," +
"customer_id:bigint>");
break;
case "github":
readSchema = TypeDescription.fromString("struct<actor:struct<" +
"avatar_url:string,gravatar_id:string,id:int,login:string,url:string>," +
"created_at:timestamp>");
break;
default:
throw new IllegalArgumentException("Unknown data set " + source.dataset);
}
Seq<Tuple2<String,String>> optionsScala =
JavaConverters.asScalaBufferConverter(options).asScala().toSeq();
@SuppressWarnings("unchecked")
Map<String,String> scalaMap = (Map<String, String>)Map$.MODULE$.apply(optionsScala);
Function1<PartitionedFile,Iterator<InternalRow>> factory =
source.formatObject.buildReaderWithPartitionValues(source.session,
source.schema, source.empty,
(StructType) SparkSchema.convertToSparkType(readSchema),
JavaConverters.collectionAsScalaIterableConverter(filters).asScala().toSeq(),
scalaMap, source.conf);
PartitionedFile file = new PartitionedFile(InternalRow.empty(),
SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L);
processReader(factory.apply(file), statistics, counters, blackhole);
}
@Benchmark
public void pushDown(InputSource source,
IOCounters counters,
Blackhole blackhole) {
FileSystem.Statistics statistics = source.fs.getLocalStatistics();
statistics.reset();
List<Filter> filters = new ArrayList<>();
switch (source.dataset) {
case "taxi":
filters.add(And$.MODULE$.apply(
GreaterThanOrEqual$.MODULE$.apply("pickup_time",
Timestamp.valueOf("2015-11-01 00:00:00.0")),
LessThan$.MODULE$.apply("pickup_time",
Timestamp.valueOf("2015-11-01 00:01:00.0"))));
break;
case "sales":
filters.add(And$.MODULE$.apply(
GreaterThanOrEqual$.MODULE$.apply("sales_id", 1000000000L),
LessThan$.MODULE$.apply("sales_id", 1000001000L)));
break;
case "github":
filters.add(And$.MODULE$.apply(
GreaterThanOrEqual$.MODULE$.apply("created_at",
Timestamp.valueOf("2015-11-01 00:00:00.0")),
LessThan$.MODULE$.apply("created_at",
Timestamp.valueOf("2015-11-01 00:01:00.0"))));
break;
default:
throw new IllegalArgumentException("Unknown data set " + source.dataset);
}
List<Tuple2<String,String>> options = new ArrayList<>();
switch (source.format) {
case "json":
case "avro":
throw new IllegalArgumentException(source.format + " can't handle pushdown");
default:
break;
}
Seq<Tuple2<String,String>> optionsScala =
JavaConverters.asScalaBufferConverter(options).asScala().toSeq();
@SuppressWarnings("unchecked")
Map<String,String> scalaMap = (Map<String, String>)Map$.MODULE$.apply(optionsScala);
Function1<PartitionedFile,Iterator<InternalRow>> factory =
source.formatObject.buildReaderWithPartitionValues(source.session,
source.schema, source.empty, source.schema,
JavaConverters.collectionAsScalaIterableConverter(filters).asScala().toSeq(),
scalaMap, source.conf);
PartitionedFile file = new PartitionedFile(InternalRow.empty(),
SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L);
processReader(factory.apply(file), statistics, counters, blackhole);
}
}
| 12,244 | 38.627832 | 91 | java |
null | orc-main/java/bench/spark/src/java/org/apache/orc/bench/spark/SparkSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.bench.spark;
import org.apache.orc.TypeDescription;
import org.apache.spark.sql.types.ArrayType$;
import org.apache.spark.sql.types.BinaryType$;
import org.apache.spark.sql.types.BooleanType$;
import org.apache.spark.sql.types.ByteType$;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DateType$;
import org.apache.spark.sql.types.DecimalType$;
import org.apache.spark.sql.types.DoubleType$;
import org.apache.spark.sql.types.FloatType$;
import org.apache.spark.sql.types.IntegerType$;
import org.apache.spark.sql.types.LongType$;
import org.apache.spark.sql.types.MapType$;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.ShortType$;
import org.apache.spark.sql.types.StringType$;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType$;
import org.apache.spark.sql.types.TimestampType$;
import java.util.ArrayList;
import java.util.List;
public class SparkSchema {
public static DataType convertToSparkType(TypeDescription schema) {
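    // recursive, one-to-one mapping from ORC categories to Spark types;
    // every nested field, element, and value is marked as nullable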
switch (schema.getCategory()) {
case BOOLEAN:
return BooleanType$.MODULE$;
case BYTE:
return ByteType$.MODULE$;
case SHORT:
return ShortType$.MODULE$;
case INT:
return IntegerType$.MODULE$;
case LONG:
return LongType$.MODULE$;
case FLOAT:
return FloatType$.MODULE$;
case DOUBLE:
return DoubleType$.MODULE$;
case BINARY:
return BinaryType$.MODULE$;
case STRING:
case CHAR:
case VARCHAR:
return StringType$.MODULE$;
case DATE:
return DateType$.MODULE$;
case TIMESTAMP:
return TimestampType$.MODULE$;
case DECIMAL:
return DecimalType$.MODULE$.apply(schema.getPrecision(), schema.getScale());
case LIST:
return ArrayType$.MODULE$.apply(
convertToSparkType(schema.getChildren().get(0)), true);
case MAP:
return MapType$.MODULE$.apply(
convertToSparkType(schema.getChildren().get(0)),
convertToSparkType(schema.getChildren().get(1)), true);
case STRUCT: {
int size = schema.getChildren().size();
List<StructField> sparkFields = new ArrayList<>(size);
for(int c=0; c < size; ++c) {
sparkFields.add(StructField.apply(schema.getFieldNames().get(c),
convertToSparkType(schema.getChildren().get(c)), true,
Metadata.empty()));
}
return StructType$.MODULE$.apply(sparkFields);
}
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
}
| 3,471 | 35.166667 | 84 | java |
null | orc-main/java/core/src/java/org/apache/orc/BinaryColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Statistics for binary columns.
*/
public interface BinaryColumnStatistics extends ColumnStatistics {
long getSum();
}
| 959 | 35.923077 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/BooleanColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Statistics for boolean columns.
*/
public interface BooleanColumnStatistics extends ColumnStatistics {
long getFalseCount();
long getTrueCount();
}
| 992 | 34.464286 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/CollectionColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
 * Statistics for all collection types, such as Map and List.
*/
public interface CollectionColumnStatistics extends ColumnStatistics {
/**
* Get minimum number of children in the collection.
* @return the minimum children count
*/
long getMinimumChildren();
/**
* Get maximum number of children in the collection.
* @return the maximum children count
*/
long getMaximumChildren();
/**
* Get the total number of children in the collection.
* @return the total number of children
*/
long getTotalChildren();
}
| 1,383 | 31.952381 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/ColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Statistics that are available for all types of columns.
*/
public interface ColumnStatistics {
/**
* Get the number of values in this column. It will differ from the number
* of rows because of NULL values and repeated values.
* @return the number of values
*/
long getNumberOfValues();
/**
* Returns true if there are nulls in the scope of column statistics.
* @return true if null present else false
*/
boolean hasNull();
/**
* Get the number of bytes for this column.
* @return the number of bytes
*/
long getBytesOnDisk();
}
| 1,412 | 31.860465 | 76 | java |
null | orc-main/java/core/src/java/org/apache/orc/CompressionCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* The API for compression codecs for ORC.
* Closeable.close() returns this codec to the OrcCodecPool.
*/
public interface CompressionCodec extends Closeable {
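  // Illustrative usage sketch (not part of the interface; "codec" and "in"
  // are assumed to be supplied by the caller, e.g. via OrcCodecPool):
  //
  //   ByteBuffer out = ByteBuffer.allocate(in.remaining());
  //   ByteBuffer overflow = ByteBuffer.allocate(in.remaining());
  //   boolean smaller = codec.compress(in, out, overflow,
  //       codec.getDefaultOptions().copy().setSpeed(SpeedModifier.FASTEST));
  //   codec.close(); // returns the codec to the pool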
enum SpeedModifier {
/* speed/compression tradeoffs */
FASTEST,
FAST,
DEFAULT
}
enum DataKind {
TEXT,
BINARY
}
interface Options {
/**
* Make a copy before making changes.
* @return a new copy
*/
Options copy();
/**
* Set the speed for the compression.
* @param newValue how aggressively to compress
* @return this
*/
Options setSpeed(SpeedModifier newValue);
/**
* Set the kind of data for the compression.
* @param newValue what kind of data this is
* @return this
*/
Options setData(DataKind newValue);
}
/**
* Get the default options for this codec.
* @return the default options object
*/
Options getDefaultOptions();
/**
* Compress the in buffer to the out buffer.
* @param in the bytes to compress
* @param out the compressed bytes
* @param overflow put any additional bytes here
* @param options the options to control compression
* @return true if the output is smaller than input
* @throws IOException
*/
boolean compress(ByteBuffer in, ByteBuffer out, ByteBuffer overflow,
Options options) throws IOException;
/**
* Decompress the in buffer to the out buffer.
* @param in the bytes to decompress
* @param out the decompressed bytes
* @throws IOException
*/
void decompress(ByteBuffer in, ByteBuffer out) throws IOException;
/** Resets the codec, preparing it for reuse. */
void reset();
/** Closes the codec, releasing the resources. */
void destroy();
/**
* Get the compression kind.
*/
CompressionKind getKind();
/**
* Return the codec to the pool.
*/
@Override
void close();
}
| 2,807 | 25.242991 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/CompressionKind.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* An enumeration that lists the generic compression algorithms that
* can be applied to ORC files.
*/
public enum CompressionKind {
NONE, ZLIB, SNAPPY, LZO, LZ4, ZSTD
}
| 1,010 | 35.107143 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/DataMask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.impl.MaskDescriptionImpl;
import java.util.ServiceLoader;
/**
* The API for masking data during column encryption for ORC.
* <p>
* They apply to an individual column (via ColumnVector) instead of a
 * VectorizedRowBatch.
*
*/
public interface DataMask {
/**
   * The standard DataMasks can be created using this shortcut.
   *
   * For example, passing DataMask.Standard.NULLIFY.getDescription() to
   * DataMask.Factory.build builds a nullify DataMask.
*/
enum Standard {
NULLIFY("nullify"),
REDACT("redact"),
SHA256("sha256");
Standard(String name) {
this.name = name;
}
private final String name;
/**
* Get the name of the predefined data mask.
* @return the standard name
*/
public String getName() {
return name;
}
/**
* Build a DataMaskDescription given the name and a set of parameters.
* @param params the parameters
* @return a MaskDescription with the given parameters
*/
public DataMaskDescription getDescription(String... params) {
return new MaskDescriptionImpl(name, params);
}
}
/**
* Mask the given range of values
* @param original the original input data
* @param masked the masked output data
* @param start the first data element to mask
* @param length the number of data elements to mask
*/
void maskData(ColumnVector original, ColumnVector masked,
int start, int length);
/**
* An interface to provide override data masks for sub-columns.
*/
interface MaskOverrides {
/**
* Should the current mask be overridden on a sub-column?
* @param type the subfield
* @return the new mask description or null to continue using the same one
*/
DataMaskDescription hasOverride(TypeDescription type);
}
/**
* Providers can provide one or more kinds of data masks.
* Because they are discovered using a service loader, they may be added
* by third party jars.
*/
interface Provider {
/**
* Build a mask with the given parameters.
* @param description the description of the data mask
* @param schema the type of the field
* @param overrides a function to override this mask on a sub-column
* @return the new data mask or null if this name is unknown
*/
DataMask build(DataMaskDescription description,
TypeDescription schema,
MaskOverrides overrides);
}
/**
   * To create a DataMask, users should go through this API.
*
* It supports extension via additional DataMask.Provider implementations
* that are accessed through Java's ServiceLoader API.
*/
class Factory {
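    // Illustrative sketch (assumes "schema", the ColumnVectors "original" and
    // "masked", and the row count "numRows" are provided by the caller):
    //
    //   DataMask mask = DataMask.Factory.build(
    //       DataMask.Standard.NULLIFY.getDescription(),
    //       schema, type -> null);
    //   mask.maskData(original, masked, 0, numRows);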
/**
* Build a new DataMask instance.
* @param mask the description of the data mask
* @param schema the type of the field
* @param overrides sub-columns where the mask is overridden
* @return a new DataMask
* @throws IllegalArgumentException if no such kind of data mask was found
*
* @see org.apache.orc.impl.mask.MaskProvider for the standard provider
*/
public static DataMask build(DataMaskDescription mask,
TypeDescription schema,
MaskOverrides overrides) {
for(Provider provider: ServiceLoader.load(Provider.class)) {
DataMask result = provider.build(mask, schema, overrides);
if (result != null) {
return result;
}
}
throw new IllegalArgumentException("Can't find data mask - " + mask);
}
}
}
| 4,450 | 30.567376 | 78 | java |
null | orc-main/java/core/src/java/org/apache/orc/DataMaskDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Information about the DataMask used to mask the unencrypted data.
*/
public interface DataMaskDescription {
/**
* The name of the mask.
* @return the name
*/
String getName();
/**
* The parameters for the mask
* @return the array of parameters
*/
String[] getParameters();
/**
* Get the list of columns that use this mask.
* @return the list of columns
*/
TypeDescription[] getColumns();
}
| 1,272 | 27.931818 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/DataReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.orc.impl.BufferChunkList;
import org.apache.orc.impl.InStream;
import java.io.IOException;
import java.nio.ByteBuffer;
/** An abstract data reader that IO formats can use to read bytes from underlying storage. */
public interface DataReader extends AutoCloseable, Cloneable {
/** Opens the DataReader, making it ready to use. */
void open() throws IOException;
OrcProto.StripeFooter readStripeFooter(StripeInformation stripe) throws IOException;
/**
* Reads the data from the file.
*
   * Note that for cases such as zero-copy reads, the caller must release the
   * disk ranges produced once done with them. Call isTrackingDiskRanges to
   * find out if this is needed.
*
* @param range List of disk ranges to read. Ranges with data will be ignored.
* @param doForceDirect Whether the data should be read into direct buffers.
* @return The list range with buffers filled in
*/
BufferChunkList readFileData(BufferChunkList range,
boolean doForceDirect) throws IOException;
/**
* Whether the user should release buffers created by readFileData. See readFileData javadoc.
*/
boolean isTrackingDiskRanges();
/**
* Releases buffers created by readFileData. See readFileData javadoc.
* @param toRelease The buffer to release.
*/
void releaseBuffer(ByteBuffer toRelease);
/**
* Clone the entire state of the DataReader with the assumption that the
* clone will be closed at a different time. Thus, any file handles in the
* implementation need to be cloned.
* @return a new instance
*/
DataReader clone();
@Override
void close() throws IOException;
/**
* Returns the compression options used by this DataReader.
* The codec if present is owned by the DataReader and should not be returned
* to the OrcCodecPool.
* @return the compression options
*/
InStream.StreamOptions getCompressionOptions();
}
| 2,786 | 34.278481 | 95 | java |
null | orc-main/java/core/src/java/org/apache/orc/DateColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.time.chrono.ChronoLocalDate;
import java.util.Date;
/**
* Statistics for DATE columns.
*/
public interface DateColumnStatistics extends ColumnStatistics {
/**
* Get the minimum value for the column.
* @return minimum value as a LocalDate
*/
ChronoLocalDate getMinimumLocalDate();
/**
* Get the minimum value for the column.
* @return minimum value as days since epoch (1 Jan 1970)
*/
long getMinimumDayOfEpoch();
/**
* Get the maximum value for the column.
* @return maximum value as a LocalDate
*/
ChronoLocalDate getMaximumLocalDate();
/**
* Get the maximum value for the column.
* @return maximum value as days since epoch (1 Jan 1970)
*/
long getMaximumDayOfEpoch();
/**
* Get the minimum value for the column.
* @return minimum value
* @deprecated Use #getMinimumLocalDate instead
*/
Date getMinimum();
/**
* Get the maximum value for the column.
* @return maximum value
* @deprecated Use #getMaximumLocalDate instead
*/
Date getMaximum();
}
| 1,883 | 27.545455 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/DecimalColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.common.type.HiveDecimal;
/**
* Statistics for decimal columns.
*/
public interface DecimalColumnStatistics extends ColumnStatistics {
/**
* Get the minimum value for the column.
* @return the minimum value
*/
HiveDecimal getMinimum();
/**
* Get the maximum value for the column.
* @return the maximum value
*/
HiveDecimal getMaximum();
/**
* Get the sum of the values of the column.
* @return the sum
*/
HiveDecimal getSum();
}
| 1,335 | 28.043478 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/DoubleColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Statistics for float and double columns.
*/
public interface DoubleColumnStatistics extends ColumnStatistics {
/**
* Get the smallest value in the column. Only defined if getNumberOfValues
* is non-zero.
* @return the minimum
*/
double getMinimum();
/**
* Get the largest value in the column. Only defined if getNumberOfValues
* is non-zero.
* @return the maximum
*/
double getMaximum();
/**
* Get the sum of the values in the column.
* @return the sum
*/
double getSum();
}
| 1,362 | 29.288889 | 76 | java |
null | orc-main/java/core/src/java/org/apache/orc/EncryptionKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Information about a key used for column encryption in an ORC file.
*/
public interface EncryptionKey extends Comparable<EncryptionKey> {
/**
* The name of the key.
* @return the name
*/
String getKeyName();
/**
* The version of the key.
* @return the version, which for most KeyProviders start at 0.
*/
int getKeyVersion();
/**
* The encryption algorithm for this key.
* @return the encryption algorithm
*/
EncryptionAlgorithm getAlgorithm();
/**
* The columns that are encrypted with this key.
* @return the list of columns
*/
EncryptionVariant[] getEncryptionRoots();
/**
* Is the key available to this user?
* @return true if the key is available
*/
boolean isAvailable();
}
| 1,588 | 27.375 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/EncryptionVariant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.io.IOException;
import java.security.Key;
/**
* Information about a column encryption variant.
* <p>
* Column encryption is done by encoding multiple variants of the same column.
* Each encrypted column ends up in two variants:
* <ul>
* <li>Encrypted original</li>
* <li>Unencrypted masked</li>
* </ul>
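 * <p>
 * Illustrative sketch ({@code variant} is assumed to come from the reader's
 * encryption metadata; exception handling is omitted):
 * <pre>{@code
 * Key footerKey = variant.getFileFooterKey();   // null if the key is unavailable
 * if (footerKey != null) {
 *   Key stripeKey = variant.getStripeKey(0);
 *   TypeDescription encryptedRoot = variant.getRoot();
 * }
 * }</pre>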
*/
public interface EncryptionVariant extends Comparable<EncryptionVariant> {
/**
* Get the key description for this column. This description is global to the
* file and is passed to the KeyProvider along with various encrypted local
* keys for the stripes or file footer so that it can decrypt them.
* @return the encryption key description
*/
EncryptionKey getKeyDescription();
/**
* Get the root column for this variant.
* @return the root column type
*/
TypeDescription getRoot();
/**
* Get the encryption variant id within the file.
*/
int getVariantId();
/**
* Get the local key for the footer.
* @return the local decrypted key or null if it isn't available
*/
Key getFileFooterKey() throws IOException;
/**
* Get the local key for a stripe's data or footer.
* @param stripe the stripe within the file (0 to N-1)
* @return the local decrypted key or null if it isn't available
*/
Key getStripeKey(long stripe) throws IOException;
}
| 2,176 | 31.014706 | 79 | java |
null | orc-main/java/core/src/java/org/apache/orc/FileFormatException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.io.IOException;
/**
* Thrown when an invalid file format is encountered.
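 * <p>
 * Illustrative sketch ({@code path} and {@code conf} are assumed to exist):
 * <pre>{@code
 * try {
 *   Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
 * } catch (FileFormatException e) {
 *   // the file is not an ORC file or its tail is corrupt
 * }
 * }</pre>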
*/
public class FileFormatException extends IOException {
public FileFormatException(String errMsg) {
super(errMsg);
}
}
| 1,055 | 33.064516 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/FileMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.util.List;
/**
 * Cached file metadata. Right now it caches everything; we don't actually have to store
 * all the protobuf structs and could keep only what we need, but that would require that
 * ORC stop depending on them. Luckily, they shouldn't be very big.
* @deprecated Use {@link org.apache.orc.impl.OrcTail} instead
*/
public interface FileMetadata {
boolean isOriginalFormat();
List<StripeInformation> getStripes();
CompressionKind getCompressionKind();
int getCompressionBufferSize();
int getRowIndexStride();
int getColumnCount();
int getFlattenedColumnCount();
Object getFileKey();
List<Integer> getVersionList();
int getMetadataSize();
int getWriterImplementation();
int getWriterVersionNum();
List<OrcProto.Type> getTypes();
List<OrcProto.StripeStatistics> getStripeStats();
long getContentLength();
long getNumberOfRows();
List<OrcProto.ColumnStatistics> getFileStats();
}
| 1,791 | 27 | 91 | java |
null | orc-main/java/core/src/java/org/apache/orc/InMemoryKeystore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.orc.impl.HadoopShims;
import org.apache.orc.impl.KeyProvider;
import org.apache.orc.impl.LocalKey;
import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.Key;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
/**
* This is an in-memory implementation of {@link KeyProvider}.
* <p>
* The primary use of this class is for when the user doesn't have a
* Hadoop KMS running and wishes to use encryption. It is also useful for
* testing.
* <p>
* The local keys for this class are encrypted/decrypted using the cipher
* in CBC/NoPadding mode and a constant IV. Since the key is random, the
* constant IV is not a problem.
* <p>
* This class is not thread safe.
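 * <p>
 * Illustrative sketch (the key name and material are made up; imports and
 * IOException handling are omitted):
 * <pre>{@code
 * InMemoryKeystore keystore = new InMemoryKeystore()
 *     .addKey("pii", EncryptionAlgorithm.AES_CTR_128,
 *             "bad password".getBytes(StandardCharsets.UTF_8));
 * HadoopShims.KeyMetadata current = keystore.getCurrentKeyVersion("pii");
 * }</pre>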
*/
public class InMemoryKeystore implements KeyProvider {
/**
   * Does this platform support AES 256?
*/
public static final boolean SUPPORTS_AES_256;
static {
try {
SUPPORTS_AES_256 = Cipher.getMaxAllowedKeyLength("AES") >= 256;
} catch (final NoSuchAlgorithmException e) {
throw new IllegalArgumentException("Unknown algorithm", e);
}
}
private final Random random;
/**
* A map that stores the 'keyName@version'
* and 'metadata + material' mapping.
*/
private final TreeMap<String, KeyVersion> keys = new TreeMap<>();
/**
* A map from the keyName (without version) to the currentVersion.
*/
private final Map<String, Integer> currentVersion = new HashMap<>();
/**
* Create a new InMemoryKeystore.
*/
public InMemoryKeystore() {
this(new SecureRandom());
}
/**
* Create an InMemoryKeystore with the given random generator.
* Except for testing, this must be a SecureRandom.
*/
public InMemoryKeystore(Random random) {
this.random = random;
}
/**
* Build a version string from a basename and version number. Converts
* "/aaa/bbb" and 3 to "/aaa/bbb@3".
*
* @param name the basename of the key
* @param version the version of the key
* @return the versionName of the key.
*/
private static String buildVersionName(final String name,
final int version) {
return name + "@" + version;
}
/**
* Get the list of key names from the key provider.
*
* @return a list of key names
*/
@Override
public List<String> getKeyNames() {
return new ArrayList<>(currentVersion.keySet());
}
/**
* Get the current metadata for a given key. This is used when encrypting
* new data.
*
* @param keyName the name of a key
* @return metadata for the current version of the key
*/
@Override
public HadoopShims.KeyMetadata getCurrentKeyVersion(final String keyName) {
String versionName = buildVersionName(keyName, currentVersion.get(keyName));
KeyVersion keyVersion = keys.get(versionName);
if (keyVersion == null) {
throw new IllegalArgumentException("Unknown key " + keyName);
}
    return keyVersion;
}
/**
* Create a local key for the given key version.
*
* @param key the master key version
* @return the local key's material
*/
@Override
public LocalKey createLocalKey(final HadoopShims.KeyMetadata key) {
final String keyVersion = buildVersionName(key.getKeyName(), key.getVersion());
final KeyVersion secret = keys.get(keyVersion);
if (secret == null) {
throw new IllegalArgumentException("Unknown key " + key);
}
final EncryptionAlgorithm algorithm = secret.getAlgorithm();
byte[] encryptedKey = new byte[algorithm.keyLength()];
random.nextBytes(encryptedKey);
byte[] iv = Arrays.copyOf(encryptedKey, algorithm.getIvLength());
Cipher localCipher = algorithm.createCipher();
try {
localCipher.init(Cipher.DECRYPT_MODE,
new SecretKeySpec(secret.getMaterial(),
algorithm.getAlgorithm()), new IvParameterSpec(iv));
} catch (final InvalidKeyException e) {
throw new IllegalStateException(
"ORC bad encryption key for " + keyVersion, e);
} catch (final InvalidAlgorithmParameterException e) {
throw new IllegalStateException(
"ORC bad encryption parameter for " + keyVersion, e);
}
try {
byte[] decryptedKey = localCipher.doFinal(encryptedKey);
return new LocalKey(algorithm, decryptedKey, encryptedKey);
} catch (final IllegalBlockSizeException e) {
throw new IllegalStateException(
"ORC bad block size for " + keyVersion, e);
} catch (final BadPaddingException e) {
throw new IllegalStateException(
"ORC bad padding for " + keyVersion, e);
}
}
/**
   * Decrypt a local key for the given key version and initialization vector.
* Given a probabilistically unique iv, it will generate a unique key
* with the master key at the specified version. This allows the encryption
* to use this local key for the encryption and decryption without ever
* having access to the master key.
* <p>
* This uses KeyProviderCryptoExtension.decryptEncryptedKey with a fixed key
* of the appropriate length.
*
* @param key the master key version
* @param encryptedKey the unique initialization vector
* @return the local key's material
*/
@Override
public Key decryptLocalKey(HadoopShims.KeyMetadata key,
byte[] encryptedKey) {
final String keyVersion = buildVersionName(key.getKeyName(), key.getVersion());
final KeyVersion secret = keys.get(keyVersion);
if (secret == null) {
return null;
}
final EncryptionAlgorithm algorithm = secret.getAlgorithm();
byte[] iv = Arrays.copyOf(encryptedKey, algorithm.getIvLength());
Cipher localCipher = algorithm.createCipher();
try {
localCipher.init(Cipher.DECRYPT_MODE,
new SecretKeySpec(secret.getMaterial(),
algorithm.getAlgorithm()), new IvParameterSpec(iv));
} catch (final InvalidKeyException e) {
throw new IllegalStateException(
"ORC bad encryption key for " + keyVersion, e);
} catch (final InvalidAlgorithmParameterException e) {
throw new IllegalStateException(
"ORC bad encryption parameter for " + keyVersion, e);
}
try {
byte[] decryptedKey = localCipher.doFinal(encryptedKey);
return new SecretKeySpec(decryptedKey, algorithm.getAlgorithm());
} catch (final IllegalBlockSizeException e) {
throw new IllegalStateException(
"ORC bad block size for " + keyVersion, e);
} catch (final BadPaddingException e) {
throw new IllegalStateException(
"ORC bad padding for " + keyVersion, e);
}
}
@Override
public HadoopShims.KeyProviderKind getKind() {
return HadoopShims.KeyProviderKind.HADOOP;
}
/**
* Function that takes care of adding a new key.<br>
* A new key can be added only if:
* <ul>
   * <li>This is a new key and no prior key version exists.</li>
* <li>If the key exists (has versions), then the new version to be added should be greater than
* the version that already exists.</li>
* </ul>
*
* @param keyName Name of the key to be added
* @param algorithm Algorithm used
* @param masterKey Master key
* @return this
*/
public InMemoryKeystore addKey(String keyName, EncryptionAlgorithm algorithm,
byte[] masterKey) throws IOException {
return addKey(keyName, 0, algorithm, masterKey);
}
/**
* Function that takes care of adding a new key.<br>
* A new key can be added only if:
* <ul>
   * <li>This is a new key and no prior key version exists.</li>
* <li>If the key exists (has versions), then the new version to be added should be greater than
* the version that already exists.</li>
* </ul>
*
* @param keyName Name of the key to be added
* @param version Key Version
* @param algorithm Algorithm used
* @param masterKey Master key
* @return this
*/
public InMemoryKeystore addKey(String keyName, int version,
EncryptionAlgorithm algorithm,
byte[] masterKey) throws IOException {
    /* Test whether the platform supports the algorithm */
if (!SUPPORTS_AES_256 && (algorithm != EncryptionAlgorithm.AES_CTR_128)) {
algorithm = EncryptionAlgorithm.AES_CTR_128;
}
final byte[] buffer = new byte[algorithm.keyLength()];
if (algorithm.keyLength() > masterKey.length) {
System.arraycopy(masterKey, 0, buffer, 0, masterKey.length);
/* fill with zeros */
      Arrays.fill(buffer, masterKey.length, buffer.length, (byte) 0);
} else {
System.arraycopy(masterKey, 0, buffer, 0, algorithm.keyLength());
}
final KeyVersion key = new KeyVersion(keyName, version, algorithm,
buffer);
/* Check whether the key is already present and has a smaller version */
Integer currentKeyVersion = currentVersion.get(keyName);
if (currentKeyVersion != null && currentKeyVersion >= version) {
throw new IOException(String
.format("Key %s with equal or higher version %d already exists",
keyName, version));
}
keys.put(buildVersionName(keyName, version), key);
currentVersion.put(keyName, version);
return this;
}
/**
* This class contains the meta-data and the material for the key.
*/
static class KeyVersion extends HadoopShims.KeyMetadata {
private final byte[] material;
KeyVersion(final String keyName, final int version,
final EncryptionAlgorithm algorithm, final byte[] material) {
super(keyName, version, algorithm);
this.material = material;
}
/**
* Get the material for the key
*
* @return the material
*/
private byte[] getMaterial() {
return material;
}
}
}
| 11,056 | 32.10479 | 98 | java |
null | orc-main/java/core/src/java/org/apache/orc/IntegerColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Statistics for all of the integer columns, such as byte, short, int, and
* long.
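 * <p>
 * Illustrative sketch (the column index is hypothetical and {@code reader} is
 * assumed to come from {@code OrcFile.createReader}):
 * <pre>{@code
 * IntegerColumnStatistics stats =
 *     (IntegerColumnStatistics) reader.getStatistics()[1];
 * if (stats.isSumDefined()) {    // the sum is dropped if it overflowed
 *   System.out.println("sum = " + stats.getSum());
 * }
 * }</pre>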
*/
public interface IntegerColumnStatistics extends ColumnStatistics {
/**
* Get the smallest value in the column. Only defined if getNumberOfValues
* is non-zero.
* @return the minimum
*/
long getMinimum();
/**
* Get the largest value in the column. Only defined if getNumberOfValues
* is non-zero.
* @return the maximum
*/
long getMaximum();
/**
* Is the sum defined? If the sum overflowed the counter this will be false.
* @return is the sum available
*/
boolean isSumDefined();
/**
* Get the sum of the column. Only valid if isSumDefined returns true.
* @return the sum of the column
*/
long getSum();
}
| 1,590 | 30.196078 | 78 | java |
null | orc-main/java/core/src/java/org/apache/orc/MemoryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
* A memory manager that keeps a global context of how many ORC
* writers there are and manages the memory between them. For use cases with
* dynamic partitions, it is easy to end up with many writers in the same task.
* By managing the size of each allocation, we try to cut down the size of each
* allocation and keep the task from running out of memory.
* <p>
* This class is not thread safe, but is re-entrant - ensure creation and all
* invocations are triggered from the same thread.
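 * <p>
 * Illustrative sketch of plugging in a custom manager (it assumes the usual
 * {@code OrcFile.writerOptions} factory and the default
 * {@code MemoryManagerImpl} implementation):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * OrcFile.WriterOptions opts = OrcFile.writerOptions(conf)
 *     .memory(new MemoryManagerImpl(conf));
 * }</pre>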
*/
public interface MemoryManager {
interface Callback {
/**
* The scale factor for the stripe size has changed and thus the
* writer should adjust their desired size appropriately.
* @param newScale the current scale factor for memory allocations
* @return true if the writer was over the limit
*/
boolean checkMemory(double newScale) throws IOException;
}
/**
* Add a new writer's memory allocation to the pool. We use the path
* as a unique key to ensure that we don't get duplicates.
* @param path the file that is being written
* @param requestedAllocation the requested buffer size
*/
void addWriter(Path path, long requestedAllocation,
Callback callback) throws IOException;
/**
* Remove the given writer from the pool.
* @param path the file that has been closed
*/
void removeWriter(Path path) throws IOException;
/**
* Give the memory manager an opportunity for doing a memory check.
* @param rows number of rows added
* @throws IOException
* @deprecated Use {@link MemoryManager#checkMemory} instead
*/
void addedRow(int rows) throws IOException;
/**
* As part of adding rows, the writer calls this method to determine
* if the scale factor has changed. If it has changed, the Callback will be
* called.
* @param previousAllocation the previous allocation
* @param writer the callback to call back into if we need to
* @return the current allocation
*/
default long checkMemory(long previousAllocation,
Callback writer) throws IOException {
addedRow(1024);
return previousAllocation;
}
}
| 3,069 | 35.547619 | 79 | java |
null | orc-main/java/core/src/java/org/apache/orc/OrcConf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* Define the configuration properties that Orc understands.
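 * <p>
 * Illustrative sketch of reading and writing a property (the chosen values are
 * arbitrary):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * OrcConf.COMPRESS.setString(conf, "ZLIB");
 * long stripeSize = OrcConf.STRIPE_SIZE.getLong(conf); // falls back to the default
 * boolean padding = OrcConf.BLOCK_PADDING.getBoolean(conf);
 * }</pre>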
*/
public enum OrcConf {
STRIPE_SIZE("orc.stripe.size", "hive.exec.orc.default.stripe.size",
64L * 1024 * 1024,
"Define the default ORC stripe size, in bytes."),
STRIPE_ROW_COUNT("orc.stripe.row.count", "orc.stripe.row.count",
Integer.MAX_VALUE, "This value limit the row count in one stripe. \n" +
"The number of stripe rows can be controlled at \n" +
"(0, \"orc.stripe.row.count\" + max(batchSize, \"orc.rows.between.memory.checks\"))"),
BLOCK_SIZE("orc.block.size", "hive.exec.orc.default.block.size",
256L * 1024 * 1024,
"Define the default file system block size for ORC files."),
ENABLE_INDEXES("orc.create.index", "orc.create.index", true,
"Should the ORC writer create indexes as part of the file."),
ROW_INDEX_STRIDE("orc.row.index.stride",
"hive.exec.orc.default.row.index.stride", 10000,
"Define the default ORC index stride in number of rows. (Stride is the\n"+
" number of rows an index entry represents.)"),
BUFFER_SIZE("orc.compress.size", "hive.exec.orc.default.buffer.size",
256 * 1024, "Define the default ORC buffer size, in bytes."),
BASE_DELTA_RATIO("orc.base.delta.ratio", "hive.exec.orc.base.delta.ratio", 8,
"The ratio of base writer and delta writer in terms of STRIPE_SIZE and BUFFER_SIZE."),
BLOCK_PADDING("orc.block.padding", "hive.exec.orc.default.block.padding",
true,
"Define whether stripes should be padded to the HDFS block boundaries."),
COMPRESS("orc.compress", "hive.exec.orc.default.compress", "ZLIB",
"Define the default compression codec for ORC file"),
WRITE_FORMAT("orc.write.format", "hive.exec.orc.write.format", "0.12",
"Define the version of the file to write. Possible values are 0.11 and\n"+
" 0.12. If this parameter is not defined, ORC will use the run\n" +
" length encoding (RLE) introduced in Hive 0.12."),
ENFORCE_COMPRESSION_BUFFER_SIZE("orc.buffer.size.enforce",
"hive.exec.orc.buffer.size.enforce", false,
"Defines whether to enforce ORC compression buffer size."),
ENCODING_STRATEGY("orc.encoding.strategy", "hive.exec.orc.encoding.strategy",
"SPEED",
"Define the encoding strategy to use while writing data. Changing this\n"+
"will only affect the light weight encoding for integers. This\n" +
"flag will not change the compression level of higher level\n" +
"compression codec (like ZLIB)."),
COMPRESSION_STRATEGY("orc.compression.strategy",
"hive.exec.orc.compression.strategy", "SPEED",
"Define the compression strategy to use while writing data.\n" +
"This changes the compression level of higher level compression\n" +
"codec (like ZLIB)."),
BLOCK_PADDING_TOLERANCE("orc.block.padding.tolerance",
"hive.exec.orc.block.padding.tolerance", 0.05,
"Define the tolerance for block padding as a decimal fraction of\n" +
"stripe size (for example, the default value 0.05 is 5% of the\n" +
"stripe size). For the defaults of 64Mb ORC stripe and 256Mb HDFS\n" +
"blocks, the default block padding tolerance of 5% will\n" +
"reserve a maximum of 3.2Mb for padding within the 256Mb block.\n" +
"In that case, if the available size within the block is more than\n"+
"3.2Mb, a new smaller stripe will be inserted to fit within that\n" +
"space. This will make sure that no stripe written will block\n" +
" boundaries and cause remote reads within a node local task."),
BLOOM_FILTER_FPP("orc.bloom.filter.fpp", "orc.default.bloom.fpp", 0.01,
"Define the default false positive probability for bloom filters."),
USE_ZEROCOPY("orc.use.zerocopy", "hive.exec.orc.zerocopy", false,
"Use zerocopy reads with ORC. (This requires Hadoop 2.3 or later.)"),
SKIP_CORRUPT_DATA("orc.skip.corrupt.data", "hive.exec.orc.skip.corrupt.data",
false,
"If ORC reader encounters corrupt data, this value will be used to\n" +
"determine whether to skip the corrupt data or throw exception.\n" +
"The default behavior is to throw exception."),
TOLERATE_MISSING_SCHEMA("orc.tolerate.missing.schema",
"hive.exec.orc.tolerate.missing.schema",
true,
"Writers earlier than HIVE-4243 may have inaccurate schema metadata.\n"
+ "This setting will enable best effort schema evolution rather\n"
+ "than rejecting mismatched schemas"),
MEMORY_POOL("orc.memory.pool", "hive.exec.orc.memory.pool", 0.5,
"Maximum fraction of heap that can be used by ORC file writers"),
DICTIONARY_KEY_SIZE_THRESHOLD("orc.dictionary.key.threshold",
"hive.exec.orc.dictionary.key.size.threshold",
0.8,
"If the number of distinct keys in a dictionary is greater than this\n" +
"fraction of the total number of non-null rows, turn off \n" +
"dictionary encoding. Use 1 to always use dictionary encoding."),
ROW_INDEX_STRIDE_DICTIONARY_CHECK("orc.dictionary.early.check",
"hive.orc.row.index.stride.dictionary.check",
true,
"If enabled dictionary check will happen after first row index stride\n" +
"(default 10000 rows) else dictionary check will happen before\n" +
"writing first stripe. In both cases, the decision to use\n" +
"dictionary or not will be retained thereafter."),
DICTIONARY_IMPL("orc.dictionary.implementation", "orc.dictionary.implementation",
"rbtree",
"the implementation for the dictionary used for string-type column encoding.\n" +
"The choices are:\n"
+ " rbtree - use red-black tree as the implementation for the dictionary.\n"
+ " hash - use hash table as the implementation for the dictionary."),
BLOOM_FILTER_COLUMNS("orc.bloom.filter.columns", "orc.bloom.filter.columns",
"", "List of columns to create bloom filters for when writing."),
BLOOM_FILTER_WRITE_VERSION("orc.bloom.filter.write.version",
"orc.bloom.filter.write.version", OrcFile.BloomFilterVersion.UTF8.toString(),
"Which version of the bloom filters should we write.\n" +
"The choices are:\n" +
" original - writes two versions of the bloom filters for use by\n" +
" both old and new readers.\n" +
" utf8 - writes just the new bloom filters."),
IGNORE_NON_UTF8_BLOOM_FILTERS("orc.bloom.filter.ignore.non-utf8",
"orc.bloom.filter.ignore.non-utf8", false,
"Should the reader ignore the obsolete non-UTF8 bloom filters."),
MAX_FILE_LENGTH("orc.max.file.length", "orc.max.file.length", Long.MAX_VALUE,
"The maximum size of the file to read for finding the file tail. This\n" +
"is primarily used for streaming ingest to read intermediate\n" +
"footers while the file is still open"),
MAPRED_INPUT_SCHEMA("orc.mapred.input.schema", null, null,
"The schema that the user desires to read. The values are\n" +
"interpreted using TypeDescription.fromString."),
MAPRED_SHUFFLE_KEY_SCHEMA("orc.mapred.map.output.key.schema", null, null,
"The schema of the MapReduce shuffle key. The values are\n" +
"interpreted using TypeDescription.fromString."),
MAPRED_SHUFFLE_VALUE_SCHEMA("orc.mapred.map.output.value.schema", null, null,
"The schema of the MapReduce shuffle value. The values are\n" +
"interpreted using TypeDescription.fromString."),
MAPRED_OUTPUT_SCHEMA("orc.mapred.output.schema", null, null,
"The schema that the user desires to write. The values are\n" +
"interpreted using TypeDescription.fromString."),
INCLUDE_COLUMNS("orc.include.columns", "hive.io.file.readcolumn.ids", null,
"The list of comma separated column ids that should be read with 0\n" +
"being the first column, 1 being the next, and so on. ."),
KRYO_SARG("orc.kryo.sarg", "orc.kryo.sarg", null,
"The kryo and base64 encoded SearchArgument for predicate pushdown."),
KRYO_SARG_BUFFER("orc.kryo.sarg.buffer", null, 8192,
"The kryo buffer size for SearchArgument for predicate pushdown."),
SARG_COLUMNS("orc.sarg.column.names", "orc.sarg.column.names", null,
"The list of column names for the SearchArgument."),
FORCE_POSITIONAL_EVOLUTION("orc.force.positional.evolution",
"orc.force.positional.evolution", false,
"Require schema evolution to match the top level columns using position\n" +
"rather than column names. This provides backwards compatibility with\n" +
"Hive 2.1."),
FORCE_POSITIONAL_EVOLUTION_LEVEL("orc.force.positional.evolution.level",
"orc.force.positional.evolution.level", 1,
"Require schema evolution to match the the defined no. of level columns using position\n" +
"rather than column names. This provides backwards compatibility with Hive 2.1."),
ROWS_BETWEEN_CHECKS("orc.rows.between.memory.checks", "orc.rows.between.memory.checks", 5000,
"How often should MemoryManager check the memory sizes? Measured in rows\n" +
"added to all of the writers. Valid range is [1,10000] and is primarily meant for" +
"testing. Setting this too low may negatively affect performance."
+ " Use orc.stripe.row.count instead if the value larger than orc.stripe.row.count."),
OVERWRITE_OUTPUT_FILE("orc.overwrite.output.file", "orc.overwrite.output.file", false,
"A boolean flag to enable overwriting of the output file if it already exists.\n"),
IS_SCHEMA_EVOLUTION_CASE_SENSITIVE("orc.schema.evolution.case.sensitive",
"orc.schema.evolution.case.sensitive", true,
"A boolean flag to determine if the comparision of field names " +
"in schema evolution is case sensitive .\n"),
ALLOW_SARG_TO_FILTER("orc.sarg.to.filter", "orc.sarg.to.filter", false,
"A boolean flag to determine if a SArg is allowed to become a filter"),
READER_USE_SELECTED("orc.filter.use.selected", "orc.filter.use.selected", false,
"A boolean flag to determine if the selected vector is supported by\n"
+ "the reading application. If false, the output of the ORC reader "
+ "must have the filter\n"
+ "reapplied to avoid using unset values in the unselected rows.\n"
+ "If unsure please leave this as false."),
ALLOW_PLUGIN_FILTER("orc.filter.plugin",
"orc.filter.plugin",
false,
"Enables the use of plugin filters during read. The plugin filters "
+ "are discovered against the service "
+ "org.apache.orc.filter.PluginFilterService, if multiple filters are "
+ "determined, they are combined using AND. The order of application is "
+ "non-deterministic and the filter functionality should not depend on the "
+ "order of application."),
PLUGIN_FILTER_ALLOWLIST("orc.filter.plugin.allowlist",
"orc.filter.plugin.allowlist",
"*",
"A list of comma-separated class names. If specified it restricts "
+ "the PluginFilters to just these classes as discovered by the "
+ "PluginFilterService. The default of * allows all discovered classes "
+ "and an empty string would not allow any plugins to be applied."),
WRITE_VARIABLE_LENGTH_BLOCKS("orc.write.variable.length.blocks", null, false,
"A boolean flag as to whether the ORC writer should write variable length\n"
+ "HDFS blocks."),
DIRECT_ENCODING_COLUMNS("orc.column.encoding.direct", "orc.column.encoding.direct", "",
"Comma-separated list of columns for which dictionary encoding is to be skipped."),
  // some JVMs don't allow array creation of size Integer.MAX_VALUE, so the chunk size is slightly less than max int
ORC_MAX_DISK_RANGE_CHUNK_LIMIT("orc.max.disk.range.chunk.limit",
"hive.exec.orc.max.disk.range.chunk.limit",
Integer.MAX_VALUE - 1024, "When reading stripes >2GB, specify max limit for the chunk size."),
ORC_MIN_DISK_SEEK_SIZE("orc.min.disk.seek.size",
"orc.min.disk.seek.size",
0,
"When determining contiguous reads, gaps within this size are "
+ "read contiguously and not seeked. Default value of zero disables this "
+ "optimization"),
ORC_MIN_DISK_SEEK_SIZE_TOLERANCE("orc.min.disk.seek.size.tolerance",
"orc.min.disk.seek.size.tolerance", 0.00,
"Define the tolerance for for extra bytes read as a result of "
+ "orc.min.disk.seek.size. If the "
+ "(bytesRead - bytesNeeded) / bytesNeeded is greater than this "
+ "threshold then extra work is performed to drop the extra bytes from "
+ "memory after the read."),
ENCRYPTION("orc.encrypt", "orc.encrypt", null, "The list of keys and columns to encrypt with"),
DATA_MASK("orc.mask", "orc.mask", null, "The masks to apply to the encrypted columns"),
KEY_PROVIDER("orc.key.provider", "orc.key.provider", "hadoop",
"The kind of KeyProvider to use for encryption."),
PROLEPTIC_GREGORIAN("orc.proleptic.gregorian", "orc.proleptic.gregorian", false,
"Should we read and write dates & times using the proleptic Gregorian calendar\n" +
"instead of the hybrid Julian Gregorian? Hive before 3.1 and Spark before 3.0\n" +
"used hybrid."),
PROLEPTIC_GREGORIAN_DEFAULT("orc.proleptic.gregorian.default",
"orc.proleptic.gregorian.default", false,
"This value controls whether pre-ORC 27 files are using the hybrid or proleptic\n" +
"calendar. Only Hive 3.1 and the C++ library wrote using the proleptic, so hybrid\n" +
"is the default."),
ROW_BATCH_SIZE("orc.row.batch.size", "orc.row.batch.size", 1024,
"The number of rows to include in a orc vectorized reader batch. " +
"The value should be carefully chosen to minimize overhead and avoid OOMs in reading data."),
ROW_BATCH_CHILD_LIMIT("orc.row.child.limit", "orc.row.child.limit",
1024 * 32, "The maximum number of child elements to buffer before "+
"the ORC row writer writes the batch to the file."
)
;
private final String attribute;
private final String hiveConfName;
private final Object defaultValue;
private final String description;
OrcConf(String attribute,
String hiveConfName,
Object defaultValue,
String description) {
this.attribute = attribute;
this.hiveConfName = hiveConfName;
this.defaultValue = defaultValue;
this.description = description;
}
public String getAttribute() {
return attribute;
}
public String getHiveConfName() {
return hiveConfName;
}
public Object getDefaultValue() {
return defaultValue;
}
public String getDescription() {
return description;
}
private String lookupValue(Properties tbl, Configuration conf) {
String result = null;
if (tbl != null) {
result = tbl.getProperty(attribute);
}
if (result == null && conf != null) {
result = conf.get(attribute);
if (result == null && hiveConfName != null) {
result = conf.get(hiveConfName);
}
}
return result;
}
public int getInt(Properties tbl, Configuration conf) {
String value = lookupValue(tbl, conf);
if (value != null) {
return Integer.parseInt(value);
}
return ((Number) defaultValue).intValue();
}
public int getInt(Configuration conf) {
return getInt(null, conf);
}
/**
* @deprecated Use {@link #getInt(Configuration)} instead. This method was
* incorrectly added and shouldn't be used anymore.
*/
@Deprecated
public void getInt(Configuration conf, int value) {
// noop
}
public void setInt(Configuration conf, int value) {
conf.setInt(attribute, value);
}
public long getLong(Properties tbl, Configuration conf) {
String value = lookupValue(tbl, conf);
if (value != null) {
return Long.parseLong(value);
}
return ((Number) defaultValue).longValue();
}
public long getLong(Configuration conf) {
return getLong(null, conf);
}
public void setLong(Configuration conf, long value) {
conf.setLong(attribute, value);
}
public String getString(Properties tbl, Configuration conf) {
String value = lookupValue(tbl, conf);
return value == null ? (String) defaultValue : value;
}
public String getString(Configuration conf) {
return getString(null, conf);
}
public List<String> getStringAsList(Configuration conf) {
String value = getString(null, conf);
List<String> confList = new ArrayList<>();
if (StringUtils.isEmpty(value)) {
return confList;
}
for (String str: value.split(",")) {
String trimStr = StringUtils.trim(str);
if (StringUtils.isNotEmpty(trimStr)) {
confList.add(trimStr);
}
}
return confList;
}
public void setString(Configuration conf, String value) {
conf.set(attribute, value);
}
public boolean getBoolean(Properties tbl, Configuration conf) {
String value = lookupValue(tbl, conf);
if (value != null) {
return Boolean.parseBoolean(value);
}
return (Boolean) defaultValue;
}
public boolean getBoolean(Configuration conf) {
return getBoolean(null, conf);
}
public void setBoolean(Configuration conf, boolean value) {
conf.setBoolean(attribute, value);
}
public double getDouble(Properties tbl, Configuration conf) {
String value = lookupValue(tbl, conf);
if (value != null) {
return Double.parseDouble(value);
}
return ((Number) defaultValue).doubleValue();
}
public double getDouble(Configuration conf) {
return getDouble(null, conf);
}
public void setDouble(Configuration conf, double value) {
conf.setDouble(attribute, value);
}
}
| 19,250 | 47.736709 | 113 | java |
null | orc-main/java/core/src/java/org/apache/orc/OrcFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.impl.HadoopShims;
import org.apache.orc.impl.HadoopShimsFactory;
import org.apache.orc.impl.KeyProvider;
import org.apache.orc.impl.MemoryManagerImpl;
import org.apache.orc.impl.OrcTail;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.impl.WriterImpl;
import org.apache.orc.impl.WriterInternal;
import org.apache.orc.impl.writer.WriterImplV2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* Contains factory methods to read or write ORC files.
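 * <p>
 * Illustrative read sketch (the path is hypothetical; imports and exception
 * handling are omitted):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * Reader reader = OrcFile.createReader(new Path("example.orc"),
 *     OrcFile.readerOptions(conf));
 * TypeDescription schema = reader.getSchema();
 * }</pre>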
*/
public class OrcFile {
private static final Logger LOG = LoggerFactory.getLogger(OrcFile.class);
public static final String MAGIC = "ORC";
/**
* Create a version number for the ORC file format, so that we can add
* non-forward compatible changes in the future. To make it easier for users
* to understand the version numbers, we use the Hive release number that
* first wrote that version of ORC files.
*
* Thus, if you add new encodings or other non-forward compatible changes
* to ORC files, which prevent the old reader from reading the new format,
   * you should change these variables to reflect the next Hive release number.
* Non-forward compatible changes should never be added in patch releases.
*
* Do not make any changes that break backwards compatibility, which would
* prevent the new reader from reading ORC files generated by any released
* version of Hive.
*/
public enum Version {
V_0_11("0.11", 0, 11),
V_0_12("0.12", 0, 12),
/**
* Do not use this format except for testing. It will not be compatible
* with other versions of the software. While we iterate on the ORC 2.0
* format, we will make incompatible format changes under this version
* without providing any forward or backward compatibility.
*
* When 2.0 is released, this version identifier will be completely removed.
*/
UNSTABLE_PRE_2_0("UNSTABLE-PRE-2.0", 1, 9999),
/**
* The generic identifier for all unknown versions.
*/
FUTURE("future", Integer.MAX_VALUE, Integer.MAX_VALUE);
public static final Version CURRENT = V_0_12;
private final String name;
private final int major;
private final int minor;
Version(String name, int major, int minor) {
this.name = name;
this.major = major;
this.minor = minor;
}
public static Version byName(String name) {
for(Version version: values()) {
if (version.name.equals(name)) {
return version;
}
}
throw new IllegalArgumentException("Unknown ORC version " + name);
}
/**
* Get the human readable name for the version.
*/
public String getName() {
return name;
}
/**
* Get the major version number.
*/
public int getMajor() {
return major;
}
/**
* Get the minor version number.
*/
public int getMinor() {
return minor;
}
}
public enum WriterImplementation {
ORC_JAVA(0), // ORC Java writer
ORC_CPP(1), // ORC C++ writer
PRESTO(2), // Presto writer
SCRITCHLEY_GO(3), // Go writer from https://github.com/scritchley/orc
TRINO(4), // Trino writer
UNKNOWN(Integer.MAX_VALUE);
private final int id;
WriterImplementation(int id) {
this.id = id;
}
public int getId() {
return id;
}
public static WriterImplementation from(int id) {
WriterImplementation[] values = values();
if (id >= 0 && id < values.length - 1) {
return values[id];
}
return UNKNOWN;
}
}
/**
* Records the version of the writer in terms of which bugs have been fixed.
* When you fix bugs in the writer (or make substantial changes) that don't
* change the file format, add a new version here instead of Version.
*
   * The ids are assigned sequentially from 6 per WriterImplementation so that
* readers that predate ORC-202 treat the other writers correctly.
*/
public enum WriterVersion {
// Java ORC Writer
ORIGINAL(WriterImplementation.ORC_JAVA, 0),
    HIVE_8732(WriterImplementation.ORC_JAVA, 1), // fixed stripe/file maximum statistics and
                                                 // string statistics to use utf8 for min/max
    HIVE_4243(WriterImplementation.ORC_JAVA, 2), // use real column names from Hive tables
HIVE_12055(WriterImplementation.ORC_JAVA, 3), // vectorized writer
HIVE_13083(WriterImplementation.ORC_JAVA, 4), // decimals write present stream correctly
ORC_101(WriterImplementation.ORC_JAVA, 5), // bloom filters use utf8
ORC_135(WriterImplementation.ORC_JAVA, 6), // timestamp stats use utc
ORC_517(WriterImplementation.ORC_JAVA, 7), // decimal64 min/max are fixed
ORC_203(WriterImplementation.ORC_JAVA, 8), // trim long strings & record they were trimmed
ORC_14(WriterImplementation.ORC_JAVA, 9), // column encryption added
// C++ ORC Writer
ORC_CPP_ORIGINAL(WriterImplementation.ORC_CPP, 6),
// Presto Writer
PRESTO_ORIGINAL(WriterImplementation.PRESTO, 6),
// Scritchley Go Writer
SCRITCHLEY_GO_ORIGINAL(WriterImplementation.SCRITCHLEY_GO, 6),
// Trino Writer
TRINO_ORIGINAL(WriterImplementation.TRINO, 6),
// Don't use any magic numbers here except for the below:
FUTURE(WriterImplementation.UNKNOWN, Integer.MAX_VALUE); // a version from a future writer
private final int id;
private final WriterImplementation writer;
public WriterImplementation getWriterImplementation() {
return writer;
}
public int getId() {
return id;
}
WriterVersion(WriterImplementation writer, int id) {
this.writer = writer;
this.id = id;
}
private static final WriterVersion[][] values =
new WriterVersion[WriterImplementation.values().length][];
static {
for(WriterVersion v: WriterVersion.values()) {
WriterImplementation writer = v.writer;
if (writer != WriterImplementation.UNKNOWN) {
if (values[writer.id] == null) {
values[writer.id] = new WriterVersion[WriterVersion.values().length];
}
if (values[writer.id][v.id] != null) {
throw new IllegalArgumentException("Duplicate WriterVersion id " + v);
}
values[writer.id][v.id] = v;
}
}
}
/**
* Convert the integer from OrcProto.PostScript.writerVersion
* to the enumeration with unknown versions being mapped to FUTURE.
* @param writer the writer implementation
* @param val the serialized writer version
* @return the corresponding enumeration value
*/
public static WriterVersion from(WriterImplementation writer, int val) {
if (writer == WriterImplementation.UNKNOWN) {
return FUTURE;
}
if (writer != WriterImplementation.ORC_JAVA && val < 6) {
throw new IllegalArgumentException("ORC File with illegal version " +
val + " for writer " + writer);
}
WriterVersion[] versions = values[writer.id];
if (val < 0 || versions.length <= val) {
return FUTURE;
}
WriterVersion result = versions[val];
return result == null ? FUTURE : result;
}
/**
* Does this file include the given fix or come from a different writer?
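     * <p>
     * Illustrative sketch ({@code reader} is assumed to come from
     * {@code OrcFile.createReader}):
     * <pre>{@code
     * boolean utf8Blooms =
     *     reader.getWriterVersion().includes(WriterVersion.ORC_101);
     * }</pre>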
* @param fix the required fix
* @return true if the required fix is present
*/
public boolean includes(WriterVersion fix) {
return writer != fix.writer || id >= fix.id;
}
}
/**
* The WriterVersion for this version of the software.
*/
public static final WriterVersion CURRENT_WRITER = WriterVersion.ORC_14;
public enum EncodingStrategy {
SPEED, COMPRESSION
}
public enum CompressionStrategy {
SPEED, COMPRESSION
}
// unused
protected OrcFile() {}
public static class ReaderOptions {
private final Configuration conf;
private FileSystem filesystem;
private long maxLength = Long.MAX_VALUE;
private OrcTail orcTail;
private KeyProvider keyProvider;
// TODO: We can generalize FileMetadata interface. Make OrcTail implement FileMetadata interface
// and remove this class altogether. Both footer caching and llap caching just needs OrcTail.
// For now keeping this around to avoid complex surgery
private FileMetadata fileMetadata;
private boolean useUTCTimestamp;
private boolean useProlepticGregorian;
public ReaderOptions(Configuration conf) {
this.conf = conf;
this.useProlepticGregorian = OrcConf.PROLEPTIC_GREGORIAN.getBoolean(conf);
}
public ReaderOptions filesystem(FileSystem fs) {
this.filesystem = fs;
return this;
}
public ReaderOptions maxLength(long val) {
maxLength = val;
return this;
}
public ReaderOptions orcTail(OrcTail tail) {
this.orcTail = tail;
return this;
}
/**
* Set the KeyProvider to override the default for getting keys.
     * @param provider the KeyProvider to use for reading this file
     * @return this
*/
public ReaderOptions setKeyProvider(KeyProvider provider) {
this.keyProvider = provider;
return this;
}
/**
* Should the reader convert dates and times to the proleptic Gregorian
* calendar?
* @param newValue should it use the proleptic Gregorian calendar?
* @return this
*/
public ReaderOptions convertToProlepticGregorian(boolean newValue) {
this.useProlepticGregorian = newValue;
return this;
}
public Configuration getConfiguration() {
return conf;
}
public FileSystem getFilesystem() {
return filesystem;
}
public long getMaxLength() {
return maxLength;
}
public OrcTail getOrcTail() {
return orcTail;
}
public KeyProvider getKeyProvider() {
return keyProvider;
}
/**
* @deprecated Use {@link #orcTail(OrcTail)} instead.
*/
public ReaderOptions fileMetadata(final FileMetadata metadata) {
fileMetadata = metadata;
return this;
}
public FileMetadata getFileMetadata() {
return fileMetadata;
}
public ReaderOptions useUTCTimestamp(boolean value) {
useUTCTimestamp = value;
return this;
}
public boolean getUseUTCTimestamp() {
return useUTCTimestamp;
}
public boolean getConvertToProlepticGregorian() {
return useProlepticGregorian;
}
}
public static ReaderOptions readerOptions(Configuration conf) {
return new ReaderOptions(conf);
}
public static Reader createReader(Path path,
ReaderOptions options) throws IOException {
return new ReaderImpl(path, options);
}
public interface WriterContext {
Writer getWriter();
}
public interface WriterCallback {
void preStripeWrite(WriterContext context) throws IOException;
void preFooterWrite(WriterContext context) throws IOException;
}
public enum BloomFilterVersion {
// Include both the BLOOM_FILTER and BLOOM_FILTER_UTF8 streams to support
// both old and new readers.
ORIGINAL("original"),
// Only include the BLOOM_FILTER_UTF8 streams that consistently use UTF8.
// See ORC-101
UTF8("utf8");
private final String id;
BloomFilterVersion(String id) {
this.id = id;
}
@Override
public String toString() {
return id;
}
public static BloomFilterVersion fromString(String s) {
for (BloomFilterVersion version: values()) {
if (version.id.equals(s)) {
return version;
}
}
throw new IllegalArgumentException("Unknown BloomFilterVersion " + s);
}
}
/**
* Options for creating ORC file writers.
*/
public static class WriterOptions implements Cloneable {
private final Configuration configuration;
private FileSystem fileSystemValue = null;
private TypeDescription schema = null;
private long stripeSizeValue;
private long stripeRowCountValue;
private long blockSizeValue;
private boolean buildIndex;
private int rowIndexStrideValue;
private int bufferSizeValue;
private boolean enforceBufferSize = false;
private boolean blockPaddingValue;
private CompressionKind compressValue;
private MemoryManager memoryManagerValue;
private Version versionValue;
private WriterCallback callback;
private EncodingStrategy encodingStrategy;
private CompressionStrategy compressionStrategy;
private double paddingTolerance;
private String bloomFilterColumns;
private double bloomFilterFpp;
private BloomFilterVersion bloomFilterVersion;
private PhysicalWriter physicalWriter;
private WriterVersion writerVersion = CURRENT_WRITER;
private boolean useUTCTimestamp;
private boolean overwrite;
private boolean writeVariableLengthBlocks;
private HadoopShims shims;
private String directEncodingColumns;
private String encryption;
private String masks;
private KeyProvider provider;
private boolean useProlepticGregorian;
private Map<String, HadoopShims.KeyMetadata> keyOverrides = new HashMap<>();
protected WriterOptions(Properties tableProperties, Configuration conf) {
configuration = conf;
memoryManagerValue = getStaticMemoryManager(conf);
overwrite = OrcConf.OVERWRITE_OUTPUT_FILE.getBoolean(tableProperties, conf);
stripeSizeValue = OrcConf.STRIPE_SIZE.getLong(tableProperties, conf);
stripeRowCountValue = OrcConf.STRIPE_ROW_COUNT.getLong(tableProperties, conf);
blockSizeValue = OrcConf.BLOCK_SIZE.getLong(tableProperties, conf);
buildIndex = OrcConf.ENABLE_INDEXES.getBoolean(tableProperties, conf);
rowIndexStrideValue =
(int) OrcConf.ROW_INDEX_STRIDE.getLong(tableProperties, conf);
bufferSizeValue = (int) OrcConf.BUFFER_SIZE.getLong(tableProperties,
conf);
blockPaddingValue =
OrcConf.BLOCK_PADDING.getBoolean(tableProperties, conf);
compressValue =
CompressionKind.valueOf(OrcConf.COMPRESS.getString(tableProperties,
conf).toUpperCase());
enforceBufferSize = OrcConf.ENFORCE_COMPRESSION_BUFFER_SIZE.getBoolean(tableProperties, conf);
String versionName = OrcConf.WRITE_FORMAT.getString(tableProperties,
conf);
versionValue = Version.byName(versionName);
String enString = OrcConf.ENCODING_STRATEGY.getString(tableProperties,
conf);
encodingStrategy = EncodingStrategy.valueOf(enString);
String compString =
OrcConf.COMPRESSION_STRATEGY.getString(tableProperties, conf);
compressionStrategy = CompressionStrategy.valueOf(compString);
paddingTolerance =
OrcConf.BLOCK_PADDING_TOLERANCE.getDouble(tableProperties, conf);
bloomFilterColumns = OrcConf.BLOOM_FILTER_COLUMNS.getString(tableProperties,
conf);
bloomFilterFpp = OrcConf.BLOOM_FILTER_FPP.getDouble(tableProperties,
conf);
bloomFilterVersion =
BloomFilterVersion.fromString(
OrcConf.BLOOM_FILTER_WRITE_VERSION.getString(tableProperties,
conf));
shims = HadoopShimsFactory.get();
writeVariableLengthBlocks =
OrcConf.WRITE_VARIABLE_LENGTH_BLOCKS.getBoolean(tableProperties,conf);
directEncodingColumns = OrcConf.DIRECT_ENCODING_COLUMNS.getString(
tableProperties, conf);
useProlepticGregorian = OrcConf.PROLEPTIC_GREGORIAN.getBoolean(conf);
}
/**
* @return a SHALLOW clone
*/
@Override
public WriterOptions clone() {
try {
return (WriterOptions) super.clone();
} catch (CloneNotSupportedException ex) {
throw new AssertionError("Expected super.clone() to work");
}
}
/**
* Provide the filesystem for the path, if the client has it available.
* If it is not provided, it will be found from the path.
*/
public WriterOptions fileSystem(FileSystem value) {
fileSystemValue = value;
return this;
}
/**
* If the output file already exists, should it be overwritten?
* If it is not provided, write operation will fail if the file already exists.
*/
public WriterOptions overwrite(boolean value) {
overwrite = value;
return this;
}
/**
* Set the stripe size for the file. The writer stores the contents of the
* stripe in memory until this memory limit is reached and the stripe
* is flushed to the HDFS file and the next stripe started.
*/
public WriterOptions stripeSize(long value) {
stripeSizeValue = value;
return this;
}
/**
* Set the file system block size for the file. For optimal performance,
* set the block size to be multiple factors of stripe size.
*/
public WriterOptions blockSize(long value) {
blockSizeValue = value;
return this;
}
/**
* Set the distance between entries in the row index. The minimum value is
* 1000 to prevent the index from overwhelming the data. If the stride is
* set to 0, no indexes will be included in the file.
*/
public WriterOptions rowIndexStride(int value) {
rowIndexStrideValue = value;
if (rowIndexStrideValue <= 0) {
buildIndex = false;
}
return this;
}
/**
     * Sets whether to build the index. The default value is true. If the value is
* set to false, rowIndexStrideValue will be set to zero.
*/
public WriterOptions buildIndex(boolean value) {
buildIndex = value;
if (!buildIndex) {
rowIndexStrideValue = 0;
}
return this;
}
/**
* The size of the memory buffers used for compressing and storing the
* stripe in memory. NOTE: ORC writer may choose to use smaller buffer
* size based on stripe size and number of columns for efficient stripe
* writing and memory utilization. To enforce writer to use the requested
* buffer size use enforceBufferSize().
*/
public WriterOptions bufferSize(int value) {
bufferSizeValue = value;
return this;
}
/**
* Enforce writer to use requested buffer size instead of estimating
* buffer size based on stripe size and number of columns.
* See bufferSize() method for more info.
* Default: false
*/
public WriterOptions enforceBufferSize() {
enforceBufferSize = true;
return this;
}
/**
* Sets whether the HDFS blocks are padded to prevent stripes from
* straddling blocks. Padding improves locality and thus the speed of
* reading, but costs space.
*/
public WriterOptions blockPadding(boolean value) {
blockPaddingValue = value;
return this;
}
/**
* Sets the encoding strategy that is used to encode the data.
*/
public WriterOptions encodingStrategy(EncodingStrategy strategy) {
encodingStrategy = strategy;
return this;
}
/**
* Sets the tolerance for block padding as a percentage of stripe size.
*/
public WriterOptions paddingTolerance(double value) {
paddingTolerance = value;
return this;
}
/**
     * Comma-separated list of column names for which a bloom filter is to be created.
*/
public WriterOptions bloomFilterColumns(String columns) {
bloomFilterColumns = columns;
return this;
}
/**
* Specify the false positive probability for bloom filter.
*
* @param fpp - false positive probability
* @return this
*/
public WriterOptions bloomFilterFpp(double fpp) {
bloomFilterFpp = fpp;
return this;
}
/**
* Sets the generic compression that is used to compress the data.
*/
public WriterOptions compress(CompressionKind value) {
compressValue = value;
return this;
}
/**
* Set the schema for the file. This is a required parameter.
*
* @param schema the schema for the file.
* @return this
*/
public WriterOptions setSchema(TypeDescription schema) {
this.schema = schema;
return this;
}
/**
* Sets the version of the file that will be written.
*/
public WriterOptions version(Version value) {
versionValue = value;
return this;
}
/**
* Add a listener for when the stripe and file are about to be closed.
*
* @param callback the object to be called when the stripe is closed
* @return this
*/
public WriterOptions callback(WriterCallback callback) {
this.callback = callback;
return this;
}
/**
* Set the version of the bloom filters to write.
*/
public WriterOptions bloomFilterVersion(BloomFilterVersion version) {
this.bloomFilterVersion = version;
return this;
}
/**
* Change the physical writer of the ORC file.
* <p>
* SHOULD ONLY BE USED BY LLAP.
*
* @param writer the writer to control the layout and persistence
* @return this
*/
public WriterOptions physicalWriter(PhysicalWriter writer) {
this.physicalWriter = writer;
return this;
}
/**
* A public option to set the memory manager.
*/
public WriterOptions memory(MemoryManager value) {
memoryManagerValue = value;
return this;
}
/**
* Should the ORC file writer use HDFS variable length blocks, if they
* are available?
* @param value the new value
* @return this
*/
public WriterOptions writeVariableLengthBlocks(boolean value) {
writeVariableLengthBlocks = value;
return this;
}
/**
* Set the HadoopShims to use.
* This is only for testing.
* @param value the new value
* @return this
*/
public WriterOptions setShims(HadoopShims value) {
this.shims = value;
return this;
}
/**
* Manually set the writer version.
* This is an internal API.
*
* @param version the version to write
* @return this
*/
protected WriterOptions writerVersion(WriterVersion version) {
if (version == WriterVersion.FUTURE) {
throw new IllegalArgumentException("Can't write a future version.");
}
this.writerVersion = version;
return this;
}
/**
     * Manually set the time zone for the writer to UTC.
* If not defined, system time zone is assumed.
*/
public WriterOptions useUTCTimestamp(boolean value) {
useUTCTimestamp = value;
return this;
}
/**
* Set the comma-separated list of columns that should be direct encoded.
* @param value the value to set
* @return this
*/
public WriterOptions directEncodingColumns(String value) {
directEncodingColumns = value;
return this;
}
/**
* Encrypt a set of columns with a key.
*
* Format of the string is a key-list.
* <ul>
* <li>key-list = key (';' key-list)?</li>
* <li>key = key-name ':' field-list</li>
* <li>field-list = field-name ( ',' field-list )?</li>
* <li>field-name = number | field-part ('.' field-name)?</li>
* <li>field-part = quoted string | simple name</li>
* </ul>
*
* @param value a key-list of which columns to encrypt
* @return this
*/
public WriterOptions encrypt(String value) {
encryption = value;
return this;
}
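    // Illustrative sketch of a key-list: the key names "pii" and "credit" and
    // the column names are hypothetical. Key "pii" encrypts columns ssn and
    // contact.email; key "credit" encrypts column card_number.
    //
    //   OrcFile.writerOptions(conf)
    //       .setSchema(schema)
    //       .encrypt("pii:ssn,contact.email;credit:card_number");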
/**
* Set the masks for the unencrypted data.
*
* Format of the string is a mask-list.
* <ul>
* <li>mask-list = mask (';' mask-list)?</li>
* <li>mask = mask-name (',' parameter)* ':' field-list</li>
* <li>field-list = field-name ( ',' field-list )?</li>
* <li>field-name = number | field-part ('.' field-name)?</li>
* <li>field-part = quoted string | simple name</li>
* </ul>
*
* @param value a list of the masks and column names
* @return this
*/
public WriterOptions masks(String value) {
masks = value;
return this;
}
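    // Illustrative sketch of a mask-list (hypothetical column names): nullify
    // the unencrypted copy of ssn and replace card_number with its SHA-256 hash.
    //
    //   OrcFile.writerOptions(conf)
    //       .masks("nullify:ssn;sha256:card_number");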
/**
* For users that need to override the current version of a key, this
* method allows them to define the version and algorithm for a given key.
*
* This will mostly be used for ORC file merging where the writer has to
* use the same version of the key that the original files used.
*
* @param keyName the key name
* @param version the version of the key to use
* @param algorithm the algorithm for the given key version
* @return this
*/
public WriterOptions setKeyVersion(String keyName, int version,
EncryptionAlgorithm algorithm) {
HadoopShims.KeyMetadata meta = new HadoopShims.KeyMetadata(keyName,
version, algorithm);
keyOverrides.put(keyName, meta);
return this;
}
/**
* Set the key provider for column encryption.
* @param provider the object that holds the master secrets
* @return this
*/
public WriterOptions setKeyProvider(KeyProvider provider) {
this.provider = provider;
return this;
}
/**
* Should the writer use the proleptic Gregorian calendar for
* times and dates.
* @param newValue true if we should use the proleptic calendar
* @return this
*/
public WriterOptions setProlepticGregorian(boolean newValue) {
this.useProlepticGregorian = newValue;
return this;
}
public KeyProvider getKeyProvider() {
return provider;
}
public boolean getBlockPadding() {
return blockPaddingValue;
}
public long getBlockSize() {
return blockSizeValue;
}
public String getBloomFilterColumns() {
return bloomFilterColumns;
}
public boolean getOverwrite() {
return overwrite;
}
public FileSystem getFileSystem() {
return fileSystemValue;
}
public Configuration getConfiguration() {
return configuration;
}
public TypeDescription getSchema() {
return schema;
}
public long getStripeSize() {
return stripeSizeValue;
}
public long getStripeRowCountValue() {
return stripeRowCountValue;
}
public CompressionKind getCompress() {
return compressValue;
}
public WriterCallback getCallback() {
return callback;
}
public Version getVersion() {
return versionValue;
}
public MemoryManager getMemoryManager() {
return memoryManagerValue;
}
public int getBufferSize() {
return bufferSizeValue;
}
public boolean isEnforceBufferSize() {
return enforceBufferSize;
}
public int getRowIndexStride() {
return rowIndexStrideValue;
}
public boolean isBuildIndex() {
return buildIndex;
}
public CompressionStrategy getCompressionStrategy() {
return compressionStrategy;
}
public EncodingStrategy getEncodingStrategy() {
return encodingStrategy;
}
public double getPaddingTolerance() {
return paddingTolerance;
}
public double getBloomFilterFpp() {
return bloomFilterFpp;
}
public BloomFilterVersion getBloomFilterVersion() {
return bloomFilterVersion;
}
public PhysicalWriter getPhysicalWriter() {
return physicalWriter;
}
public WriterVersion getWriterVersion() {
return writerVersion;
}
public boolean getWriteVariableLengthBlocks() {
return writeVariableLengthBlocks;
}
public HadoopShims getHadoopShims() {
return shims;
}
public boolean getUseUTCTimestamp() {
return useUTCTimestamp;
}
public String getDirectEncodingColumns() {
return directEncodingColumns;
}
public String getEncryption() {
return encryption;
}
public String getMasks() {
return masks;
}
public Map<String, HadoopShims.KeyMetadata> getKeyOverrides() {
return keyOverrides;
}
public boolean getProlepticGregorian() {
return useProlepticGregorian;
}
}
/**
* Create a set of writer options based on a configuration.
* @param conf the configuration to use for values
* @return A WriterOptions object that can be modified
*/
public static WriterOptions writerOptions(Configuration conf) {
return new WriterOptions(null, conf);
}
/**
* Create a set of write options based on a set of table properties and
* configuration.
* @param tableProperties the properties of the table
* @param conf the configuration of the query
* @return a WriterOptions object that can be modified
*/
public static WriterOptions writerOptions(Properties tableProperties,
Configuration conf) {
return new WriterOptions(tableProperties, conf);
}
private static MemoryManager memoryManager = null;
private static synchronized MemoryManager getStaticMemoryManager(Configuration conf) {
if (memoryManager == null) {
memoryManager = new MemoryManagerImpl(conf);
}
return memoryManager;
}
/**
* Create an ORC file writer. This is the public interface for creating
* writers going forward and new options will only be added to this method.
* @param path filename to write to
* @param opts the options
* @return a new ORC file writer
* @throws IOException
*/
public static Writer createWriter(Path path,
WriterOptions opts
) throws IOException {
FileSystem fs = opts.getFileSystem() == null ?
path.getFileSystem(opts.getConfiguration()) : opts.getFileSystem();
switch (opts.getVersion()) {
case V_0_11:
case V_0_12:
return new WriterImpl(fs, path, opts);
case UNSTABLE_PRE_2_0:
return new WriterImplV2(fs, path, opts);
default:
throw new IllegalArgumentException("Unknown version " +
opts.getVersion());
}
}
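  // Illustrative sketch: write a small file with a hypothetical two-column
  // schema (path and schema string are examples only).
  //
  //   TypeDescription schema = TypeDescription.fromString("struct<x:int,y:string>");
  //   Writer writer = OrcFile.createWriter(new Path("/tmp/example.orc"),
  //       OrcFile.writerOptions(conf).setSchema(schema));
  //   VectorizedRowBatch batch = schema.createRowBatch();
  //   // ... fill batch and call writer.addRowBatch(batch) in a loop ...
  //   writer.close();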
/**
* Do we understand the version in the reader?
* @param path the path of the file
* @param reader the ORC file reader
* @return is the version understood by this writer?
*/
static boolean understandFormat(Path path, Reader reader) {
if (reader.getFileVersion() == Version.FUTURE) {
LOG.info("Can't merge {} because it has a future version.", path);
return false;
}
if (reader.getWriterVersion() == WriterVersion.FUTURE) {
LOG.info("Can't merge {} because it has a future writerVersion.", path);
return false;
}
return true;
}
private static boolean sameKeys(EncryptionKey[] first,
EncryptionKey[] next) {
if (first.length != next.length) {
return false;
}
for(int k = 0; k < first.length; ++k) {
if (!first[k].getKeyName().equals(next[k].getKeyName()) ||
first[k].getKeyVersion() != next[k].getKeyVersion() ||
first[k].getAlgorithm() != next[k].getAlgorithm()) {
return false;
}
}
return true;
}
private static boolean sameMasks(DataMaskDescription[] first,
DataMaskDescription[] next) {
if (first.length != next.length) {
return false;
}
for(int k = 0; k < first.length; ++k) {
if (!first[k].getName().equals(next[k].getName())) {
return false;
}
String[] firstParam = first[k].getParameters();
String[] nextParam = next[k].getParameters();
if (firstParam.length != nextParam.length) {
return false;
}
for(int p=0; p < firstParam.length; ++p) {
if (!firstParam[p].equals(nextParam[p])) {
return false;
}
}
TypeDescription[] firstRoots = first[k].getColumns();
TypeDescription[] nextRoots = next[k].getColumns();
if (firstRoots.length != nextRoots.length) {
return false;
}
for(int r=0; r < firstRoots.length; ++r) {
if (firstRoots[r].getId() != nextRoots[r].getId()) {
return false;
}
}
}
return true;
}
private static boolean sameVariants(EncryptionVariant[] first,
EncryptionVariant[] next) {
if (first.length != next.length) {
return false;
}
for(int k = 0; k < first.length; ++k) {
      // Treat two missing key descriptions as equal and only compare key names
      // when both are present, to avoid a null dereference.
      if ((first[k].getKeyDescription() == null) !=
              (next[k].getKeyDescription() == null) ||
          (first[k].getKeyDescription() != null &&
              !first[k].getKeyDescription().getKeyName().equals(
                  next[k].getKeyDescription().getKeyName())) ||
          first[k].getRoot().getId() !=
              next[k].getRoot().getId()) {
return false;
}
}
return true;
}
/**
* Is the new reader compatible with the file that is being written?
* @param firstReader the first reader that others must match
* @param userMetadata the user metadata
* @param path the new path name for warning messages
* @param reader the new reader
* @return is the reader compatible with the previous ones?
*/
static boolean readerIsCompatible(Reader firstReader,
Map<String, ByteBuffer> userMetadata,
Path path,
Reader reader) {
// now we have to check compatibility
TypeDescription schema = firstReader.getSchema();
if (!reader.getSchema().equals(schema)) {
LOG.info("Can't merge {} because of different schemas {} vs {}",
path, reader.getSchema(), schema);
return false;
}
CompressionKind compression = firstReader.getCompressionKind();
if (reader.getCompressionKind() != compression) {
LOG.info("Can't merge {} because of different compression {} vs {}",
path, reader.getCompressionKind(), compression);
return false;
}
OrcFile.Version fileVersion = firstReader.getFileVersion();
if (reader.getFileVersion() != fileVersion) {
LOG.info("Can't merge {} because of different file versions {} vs {}",
path, reader.getFileVersion(), fileVersion);
return false;
}
OrcFile.WriterVersion writerVersion = firstReader.getWriterVersion();
if (reader.getWriterVersion() != writerVersion) {
LOG.info("Can't merge {} because of different writer versions {} vs {}",
          path, reader.getWriterVersion(), writerVersion);
return false;
}
int rowIndexStride = firstReader.getRowIndexStride();
if (reader.getRowIndexStride() != rowIndexStride) {
LOG.info("Can't merge {} because of different row index strides {} vs {}",
path, reader.getRowIndexStride(), rowIndexStride);
return false;
}
for(String key: reader.getMetadataKeys()) {
ByteBuffer currentValue = userMetadata.get(key);
if (currentValue != null) {
ByteBuffer newValue = reader.getMetadataValue(key);
if (!newValue.equals(currentValue)) {
LOG.info("Can't merge {} because of different user metadata {}", path,
key);
return false;
}
}
}
if (!sameKeys(firstReader.getColumnEncryptionKeys(),
reader.getColumnEncryptionKeys())) {
LOG.info("Can't merge {} because it has different encryption keys", path);
return false;
}
if (!sameMasks(firstReader.getDataMasks(), reader.getDataMasks())) {
LOG.info("Can't merge {} because it has different encryption masks", path);
return false;
}
if (!sameVariants(firstReader.getEncryptionVariants(),
reader.getEncryptionVariants())) {
LOG.info("Can't merge {} because it has different encryption variants", path);
return false;
}
if (firstReader.writerUsedProlepticGregorian() !=
reader.writerUsedProlepticGregorian()) {
LOG.info("Can't merge {} because it uses a different calendar", path);
return false;
}
return true;
}
static void mergeMetadata(Map<String,ByteBuffer> metadata,
Reader reader) {
for(String key: reader.getMetadataKeys()) {
metadata.put(key, reader.getMetadataValue(key));
}
}
/**
* Merges multiple ORC files that all have the same schema to produce
* a single ORC file.
* The merge will reject files that aren't compatible with the merged file
* so the output list may be shorter than the input list.
* The stripes are copied as serialized byte buffers.
* The user metadata are merged and files that disagree on the value
* associated with a key will be rejected.
*
* @param outputPath the output file
   * @param options the options to write with, although the options related
* to the input files' encodings are overridden
* @param inputFiles the list of files to merge
* @return the list of files that were successfully merged
* @throws IOException
*/
public static List<Path> mergeFiles(Path outputPath,
WriterOptions options,
List<Path> inputFiles) throws IOException {
Writer output = null;
final Configuration conf = options.getConfiguration();
KeyProvider keyProvider = options.getKeyProvider();
try {
byte[] buffer = new byte[0];
Reader firstFile = null;
List<Path> result = new ArrayList<>(inputFiles.size());
Map<String, ByteBuffer> userMetadata = new HashMap<>();
int bufferSize = 0;
for (Path input : inputFiles) {
FileSystem fs = input.getFileSystem(conf);
Reader reader = createReader(input,
readerOptions(options.getConfiguration())
.filesystem(fs)
.setKeyProvider(keyProvider));
if (!understandFormat(input, reader)) {
continue;
} else if (firstFile == null) {
// if this is the first file that we are including, grab the values
firstFile = reader;
bufferSize = reader.getCompressionSize();
CompressionKind compression = reader.getCompressionKind();
options.bufferSize(bufferSize)
.version(reader.getFileVersion())
.writerVersion(reader.getWriterVersion())
.compress(compression)
.rowIndexStride(reader.getRowIndexStride())
.setSchema(reader.getSchema());
if (compression != CompressionKind.NONE) {
options.enforceBufferSize().bufferSize(bufferSize);
}
mergeMetadata(userMetadata, reader);
// ensure that the merged file uses the same key versions
for(EncryptionKey key: reader.getColumnEncryptionKeys()) {
options.setKeyVersion(key.getKeyName(), key.getKeyVersion(),
key.getAlgorithm());
}
output = createWriter(outputPath, options);
} else if (!readerIsCompatible(firstFile, userMetadata, input, reader)) {
continue;
} else {
mergeMetadata(userMetadata, reader);
if (bufferSize < reader.getCompressionSize()) {
bufferSize = reader.getCompressionSize();
((WriterInternal) output).increaseCompressionSize(bufferSize);
}
}
EncryptionVariant[] variants = reader.getEncryptionVariants();
List<StripeStatistics>[] completeList = new List[variants.length + 1];
for(int v=0; v < variants.length; ++v) {
completeList[v] = reader.getVariantStripeStatistics(variants[v]);
}
completeList[completeList.length - 1] = reader.getVariantStripeStatistics(null);
StripeStatistics[] stripeStats = new StripeStatistics[completeList.length];
try (FSDataInputStream inputStream = ((ReaderImpl) reader).takeFile()) {
result.add(input);
for (StripeInformation stripe : reader.getStripes()) {
int length = (int) stripe.getLength();
if (buffer.length < length) {
buffer = new byte[length];
}
long offset = stripe.getOffset();
inputStream.readFully(offset, buffer, 0, length);
int stripeId = (int) stripe.getStripeId();
for(int v=0; v < completeList.length; ++v) {
stripeStats[v] = completeList[v].get(stripeId);
}
output.appendStripe(buffer, 0, length, stripe, stripeStats);
}
}
}
if (output != null) {
for (Map.Entry<String, ByteBuffer> entry : userMetadata.entrySet()) {
output.addUserMetadata(entry.getKey(), entry.getValue());
}
output.close();
}
return result;
} catch (Throwable t) {
if (output != null) {
try {
output.close();
} catch (Throwable ignore) {
// PASS
}
try {
FileSystem fs = options.getFileSystem() == null ?
outputPath.getFileSystem(conf) : options.getFileSystem();
fs.delete(outputPath, false);
} catch (Throwable ignore) {
// PASS
}
}
throw new IOException("Problem merging files into " + outputPath, t);
}
}
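  // Illustrative sketch (hypothetical paths): merge two inputs into one output;
  // only the inputs that were actually merged are returned.
  //
  //   List<Path> merged = OrcFile.mergeFiles(new Path("/tmp/out.orc"),
  //       OrcFile.writerOptions(conf),
  //       Arrays.asList(new Path("/tmp/in1.orc"), new Path("/tmp/in2.orc")));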
}
| 42,484 | 30.919609 | 100 | java |
null | orc-main/java/core/src/java/org/apache/orc/OrcFilterContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.filter.MutableFilterContext;
/**
* This defines the input for any filter operation. This is an extension of
 * {@link VectorizedRowBatch} with schema.
* <p>
* This offers a convenience method of finding the column vector from a given column name
* that the filters can invoke to get access to the column vector.
*/
public interface OrcFilterContext extends MutableFilterContext {
/**
* Retrieves the column vector that matches the specified name. Allows support for nested struct
* references e.g. order.date where date is a field in a struct called order.
*
* @param name The column name whose vector should be retrieved
* @return The column vectors from the root to the column name. The array levels match the name
* levels with Array[0] referring to the top level, followed by the subsequent levels. For
* example of order.date Array[0] refers to order and Array[1] refers to date
* @throws IllegalArgumentException if the field is not found or if the nested field is not part
* of a struct
*/
ColumnVector[] findColumnVector(String name);
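  // Illustrative sketch: for a hypothetical schema struct<order:struct<date:date>>,
  //
  //   ColumnVector[] branch = filterContext.findColumnVector("order.date");
  //   // branch[0] is the "order" struct vector, branch[1] is the "date" vector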
/**
* Utility method for determining if the leaf vector of the branch can be treated as having
* noNulls.
* This method navigates from the top to the leaf and checks if we have nulls anywhere in the
* branch as compared to checking just the leaf vector.
*
* @param vectorBranch The input vector branch from the root to the leaf
* @return true if the entire branch satisfies noNull else false
*/
static boolean noNulls(ColumnVector[] vectorBranch) {
for (ColumnVector v : vectorBranch) {
if (!v.noNulls) {
return false;
}
}
return true;
}
/**
* Utility method for determining if a particular row element in the vector branch is null.
* This method navigates from the top to the leaf and checks if we have nulls anywhere in the
* branch as compared to checking just the leaf vector.
*
* @param vectorBranch The input vector branch from the root to the leaf
* @param idx The row index being tested
* @return true if the entire branch is not null for the idx otherwise false
* @throws IllegalArgumentException If a multivalued vector such as List or Map is encountered in
* the branch.
*/
static boolean isNull(ColumnVector[] vectorBranch, int idx) throws IllegalArgumentException {
for (ColumnVector v : vectorBranch) {
if (v instanceof ListColumnVector || v instanceof MapColumnVector) {
throw new IllegalArgumentException(String.format(
"Found vector: %s in branch. List and Map vectors are not supported in isNull "
+ "determination", v));
}
// v.noNulls = false does not mean that we have at least one null value
if (!v.noNulls && v.isNull[v.isRepeating ? 0 : idx]) {
return true;
}
}
return false;
}
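  // Illustrative sketch: a filter callback would typically guard leaf access
  // with these helpers ("ctx" and "row" are hypothetical locals).
  //
  //   ColumnVector[] branch = ctx.findColumnVector("order.date");
  //   if (OrcFilterContext.noNulls(branch) || !OrcFilterContext.isNull(branch, row)) {
  //     // safe to read the leaf value for this row
  //   }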
}
| 4,061 | 43.152174 | 99 | java |
null | orc-main/java/core/src/java/org/apache/orc/OrcUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.orc.impl.ParserUtils;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.impl.SchemaEvolution;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import static org.apache.hadoop.util.StringUtils.COMMA_STR;
public class OrcUtils {
/**
* Returns selected columns as a boolean array with true value set for specified column names.
* The result will contain number of elements equal to flattened number of columns.
* For example:
* selectedColumns - a,b,c
* allColumns - a,b,c,d
   * If column c is a complex type, say list<string>, and the other types are
   * primitives, then the result will
   * be [false, true, true, true, true, false].
   * Index 0 is the root element of the struct, which is set to false by default; indexes 1 and 2
   * correspond to columns a and b. Indexes 3 and 4 correspond to column c, which is list<string>,
   * and index 5 corresponds to column d. After flattening, list<string> occupies 2 columns.
* <p>
* Column names that aren't found are ignored.
* @param selectedColumns - comma separated list of selected column names
* @param schema - object schema
* @return - boolean array with true value set for the specified column names
*/
public static boolean[] includeColumns(String selectedColumns,
TypeDescription schema) {
int numFlattenedCols = schema.getMaximumId();
boolean[] results = new boolean[numFlattenedCols + 1];
if ("*".equals(selectedColumns)) {
Arrays.fill(results, true);
return results;
}
TypeDescription baseSchema = SchemaEvolution.checkAcidSchema(schema) ?
SchemaEvolution.getBaseRow(schema) : schema;
if (selectedColumns != null &&
baseSchema.getCategory() == TypeDescription.Category.STRUCT) {
for (String columnName : selectedColumns.split(COMMA_STR)) {
TypeDescription column = findColumn(baseSchema, columnName.trim());
if (column != null) {
for (int i = column.getId(); i <= column.getMaximumId(); ++i) {
results[i] = true;
}
}
}
}
return results;
}
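  // Illustrative sketch (hypothetical schema): select columns a and c.
  //
  //   TypeDescription schema =
  //       TypeDescription.fromString("struct<a:int,b:string,c:array<string>,d:int>");
  //   boolean[] include = OrcUtils.includeColumns("a,c", schema);
  //   // include == [false, true, false, true, true, false]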
private static TypeDescription findColumn(TypeDescription schema, String column) {
TypeDescription result = schema;
String[] columnMatcher = column.split("\\.");
int index = 0;
while (index < columnMatcher.length &&
result.getCategory() == TypeDescription.Category.STRUCT) {
String columnName = columnMatcher[index];
int prevIndex = index;
List<TypeDescription> fields = result.getChildren();
List<String> fieldNames = result.getFieldNames();
for (int i = 0; i < fields.size(); i++) {
if (columnName.equalsIgnoreCase(fieldNames.get(i))) {
result = fields.get(i);
index++;
break;
}
}
if (prevIndex == index) {
return null;
}
}
return result;
}
public static List<OrcProto.Type> getOrcTypes(TypeDescription typeDescr) {
List<OrcProto.Type> result = new ArrayList<>();
appendOrcTypes(result, typeDescr);
return result;
}
private static void appendOrcTypes(List<OrcProto.Type> result, TypeDescription typeDescr) {
OrcProto.Type.Builder type = OrcProto.Type.newBuilder();
List<TypeDescription> children = typeDescr.getChildren();
// save the attributes
for(String key: typeDescr.getAttributeNames()) {
type.addAttributes(
OrcProto.StringPair.newBuilder()
.setKey(key).setValue(typeDescr.getAttributeValue(key))
.build());
}
switch (typeDescr.getCategory()) {
case BOOLEAN:
type.setKind(OrcProto.Type.Kind.BOOLEAN);
break;
case BYTE:
type.setKind(OrcProto.Type.Kind.BYTE);
break;
case SHORT:
type.setKind(OrcProto.Type.Kind.SHORT);
break;
case INT:
type.setKind(OrcProto.Type.Kind.INT);
break;
case LONG:
type.setKind(OrcProto.Type.Kind.LONG);
break;
case FLOAT:
type.setKind(OrcProto.Type.Kind.FLOAT);
break;
case DOUBLE:
type.setKind(OrcProto.Type.Kind.DOUBLE);
break;
case STRING:
type.setKind(OrcProto.Type.Kind.STRING);
break;
case CHAR:
type.setKind(OrcProto.Type.Kind.CHAR);
type.setMaximumLength(typeDescr.getMaxLength());
break;
case VARCHAR:
type.setKind(OrcProto.Type.Kind.VARCHAR);
type.setMaximumLength(typeDescr.getMaxLength());
break;
case BINARY:
type.setKind(OrcProto.Type.Kind.BINARY);
break;
case TIMESTAMP:
type.setKind(OrcProto.Type.Kind.TIMESTAMP);
break;
case TIMESTAMP_INSTANT:
type.setKind(OrcProto.Type.Kind.TIMESTAMP_INSTANT);
break;
case DATE:
type.setKind(OrcProto.Type.Kind.DATE);
break;
case DECIMAL:
type.setKind(OrcProto.Type.Kind.DECIMAL);
type.setPrecision(typeDescr.getPrecision());
type.setScale(typeDescr.getScale());
break;
case LIST:
type.setKind(OrcProto.Type.Kind.LIST);
type.addSubtypes(children.get(0).getId());
break;
case MAP:
type.setKind(OrcProto.Type.Kind.MAP);
for(TypeDescription t: children) {
type.addSubtypes(t.getId());
}
break;
case STRUCT:
type.setKind(OrcProto.Type.Kind.STRUCT);
for(TypeDescription t: children) {
type.addSubtypes(t.getId());
}
for(String field: typeDescr.getFieldNames()) {
type.addFieldNames(field);
}
break;
case UNION:
type.setKind(OrcProto.Type.Kind.UNION);
for(TypeDescription t: children) {
type.addSubtypes(t.getId());
}
break;
default:
throw new IllegalArgumentException("Unknown category: " +
typeDescr.getCategory());
}
result.add(type.build());
if (children != null) {
for(TypeDescription child: children) {
appendOrcTypes(result, child);
}
}
}
/**
   * Checks whether the list of protobuf types from the file is valid.
* @param types the list of types from the protobuf
* @param root the top of the tree to check
* @return the next available id
* @throws java.io.IOException if the tree is invalid
*/
public static int isValidTypeTree(List<OrcProto.Type> types,
int root) throws IOException {
if (root < 0 || root >= types.size()) {
throw new IOException("Illegal type id " + root +
". The valid range is 0 to " + (types.size() - 1));
}
OrcProto.Type rootType = types.get(root);
int current = root+1;
List<Integer> children = rootType.getSubtypesList();
if (!rootType.hasKind()) {
throw new IOException("Type " + root + " has an unknown kind.");
}
// ensure that we have the right number of children
switch(rootType.getKind()) {
case LIST:
if (children == null || children.size() != 1) {
throw new IOException("Wrong number of type children in list " + root);
}
break;
case MAP:
if (children == null || children.size() != 2) {
throw new IOException("Wrong number of type children in map " + root);
}
break;
case UNION:
case STRUCT:
break;
default:
if (children != null && children.size() != 0) {
throw new IOException("Type children under primitive type " + root);
}
}
// ensure the children are also correct
if (children != null) {
for(int child: children) {
if (child != current) {
throw new IOException("Unexpected child type id " + child + " when " +
current + " was expected.");
}
current = isValidTypeTree(types, current);
}
}
return current;
}
/**
* Translate the given rootColumn from the list of types to a TypeDescription.
* @param types all of the types
* @param rootColumn translate this type
* @return a new TypeDescription that matches the given rootColumn
*/
public static
TypeDescription convertTypeFromProtobuf(List<OrcProto.Type> types,
int rootColumn)
throws FileFormatException {
OrcProto.Type type = types.get(rootColumn);
TypeDescription result;
switch (type.getKind()) {
case BOOLEAN:
result = TypeDescription.createBoolean();
break;
case BYTE:
result = TypeDescription.createByte();
break;
case SHORT:
result = TypeDescription.createShort();
break;
case INT:
result = TypeDescription.createInt();
break;
case LONG:
result = TypeDescription.createLong();
break;
case FLOAT:
result = TypeDescription.createFloat();
break;
case DOUBLE:
result = TypeDescription.createDouble();
break;
case STRING:
result = TypeDescription.createString();
break;
case CHAR:
case VARCHAR:
result = type.getKind() == OrcProto.Type.Kind.CHAR ?
TypeDescription.createChar() : TypeDescription.createVarchar();
if (type.hasMaximumLength()) {
result.withMaxLength(type.getMaximumLength());
}
break;
case BINARY:
result = TypeDescription.createBinary();
break;
case TIMESTAMP:
result = TypeDescription.createTimestamp();
break;
case TIMESTAMP_INSTANT:
result = TypeDescription.createTimestampInstant();
break;
case DATE:
result = TypeDescription.createDate();
break;
case DECIMAL:
result = TypeDescription.createDecimal();
if (type.hasScale()) {
result.withScale(type.getScale());
}
if (type.hasPrecision()) {
result.withPrecision(type.getPrecision());
}
break;
case LIST:
if (type.getSubtypesCount() != 1) {
throw new FileFormatException("LIST type should contain exactly " +
"one subtype but has " + type.getSubtypesCount());
}
result = TypeDescription.createList(
convertTypeFromProtobuf(types, type.getSubtypes(0)));
break;
case MAP:
if (type.getSubtypesCount() != 2) {
throw new FileFormatException("MAP type should contain exactly " +
"two subtypes but has " + type.getSubtypesCount());
}
result = TypeDescription.createMap(
convertTypeFromProtobuf(types, type.getSubtypes(0)),
convertTypeFromProtobuf(types, type.getSubtypes(1)));
break;
case STRUCT:
result = TypeDescription.createStruct();
for(int f=0; f < type.getSubtypesCount(); ++f) {
String name = type.getFieldNames(f);
name = name.startsWith("`") ? name : "`" + name + "`";
String fieldName = ParserUtils.parseName(new ParserUtils.StringPosition(name));
result.addField(fieldName, convertTypeFromProtobuf(types, type.getSubtypes(f)));
}
break;
case UNION:
if (type.getSubtypesCount() == 0) {
throw new FileFormatException("UNION type should contain at least" +
" one subtype but has none");
}
result = TypeDescription.createUnion();
for(int f=0; f < type.getSubtypesCount(); ++f) {
result.addUnionChild(
convertTypeFromProtobuf(types, type.getSubtypes(f)));
}
break;
default:
throw new IllegalArgumentException("Unknown ORC type " + type.getKind());
}
for(int i = 0; i < type.getAttributesCount(); ++i) {
OrcProto.StringPair pair = type.getAttributes(i);
result.setAttribute(pair.getKey(), pair.getValue());
}
return result;
}
public static List<StripeInformation> convertProtoStripesToStripes(
List<OrcProto.StripeInformation> stripes) {
List<StripeInformation> result = new ArrayList<>(stripes.size());
long previousStripeId = 0;
byte[][] previousKeys = null;
long stripeId = 0;
for (OrcProto.StripeInformation stripeProto: stripes) {
ReaderImpl.StripeInformationImpl stripe =
new ReaderImpl.StripeInformationImpl(stripeProto, stripeId++,
previousStripeId, previousKeys);
result.add(stripe);
previousStripeId = stripe.getEncryptionStripeId();
previousKeys = stripe.getEncryptedLocalKeys();
}
return result;
}
/**
* Get the user-facing version string for the software that wrote the file.
* @param writer the code for the writer from OrcProto.Footer
* @param version the orcVersion from OrcProto.Footer
* @return the version string
*/
public static String getSoftwareVersion(int writer,
String version) {
String base;
switch (writer) {
case 0:
base = "ORC Java";
break;
case 1:
base = "ORC C++";
break;
case 2:
base = "Presto";
break;
case 3:
base = "Scritchley Go";
break;
case 4:
base = "Trino";
break;
default:
base = String.format("Unknown(%d)", writer);
break;
}
if (version == null) {
return base;
} else {
return base + " " + version;
}
}
/**
* Get the software version from Maven.
* @return The version of the software.
*/
public static String getOrcVersion() {
Class<OrcFile> cls = OrcFile.class;
// try to load from maven properties first
try (InputStream is = cls.getResourceAsStream(
"/META-INF/maven/org.apache.orc/orc-core/pom.properties")) {
if (is != null) {
Properties p = new Properties();
p.load(is);
String version = p.getProperty("version", null);
if (version != null) {
return version;
}
}
} catch (IOException e) {
// ignore
}
// fallback to using Java API
Package aPackage = cls.getPackage();
if (aPackage != null) {
String version = aPackage.getImplementationVersion();
if (version != null) {
return version;
}
}
return "unknown";
}
}
| 15,362 | 32.397826 | 101 | java |
null | orc-main/java/core/src/java/org/apache/orc/PhysicalWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.orc.impl.StreamName;
import org.apache.orc.impl.writer.StreamOptions;
import org.apache.orc.impl.writer.WriterEncryptionVariant;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* This interface separates the physical layout of ORC files from the higher
* level details.
* <p>
* This API is limited to being used by LLAP.
*/
public interface PhysicalWriter {
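  // A rough sketch of the call sequence an implementation can expect from the
  // writer (one stripe shown; the exact ordering is driven by the caller):
  //   writeHeader();
  //   createDataStream(name); OutputReceiver.output(buffer);       // per stream
  //   writeIndex(...); writeBloomFilter(...); finalizeStripe(...); // per stripe
  //   writeFileMetadata(...); writeFileFooter(...); writePostScript(...);
  //   close();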
/**
* The target of an output stream.
*/
interface OutputReceiver {
/**
* Output the given buffer to the final destination
*
* @param buffer the buffer to output
*/
void output(ByteBuffer buffer) throws IOException;
/**
* Suppress this stream from being written to the stripe.
*/
void suppress();
}
/**
* Writes the header of the file, which consists of the magic "ORC" bytes.
*/
void writeHeader() throws IOException;
/**
* Create an OutputReceiver for the given name.
* @param name the name of the stream
*/
OutputReceiver createDataStream(StreamName name) throws IOException;
/**
   * Write a row index for the given stream name.
   * @param name the name of the stream
   * @param index the row index to write
*/
void writeIndex(StreamName name,
OrcProto.RowIndex.Builder index) throws IOException;
/**
* Write a bloom filter index in the given stream name.
* @param name the name of the stream
* @param bloom the bloom filter to write
*/
void writeBloomFilter(StreamName name,
OrcProto.BloomFilterIndex.Builder bloom) throws IOException;
/**
* Flushes the data in all the streams, spills them to disk, write out stripe
* footer.
* @param footer Stripe footer to be updated with relevant data and written out.
* @param dirEntry File metadata entry for the stripe, to be updated with
* relevant data.
*/
void finalizeStripe(OrcProto.StripeFooter.Builder footer,
OrcProto.StripeInformation.Builder dirEntry) throws IOException;
/**
* Write a stripe or file statistics to the file.
* @param name the name of the stream
* @param statistics the statistics to write
*/
void writeStatistics(StreamName name,
OrcProto.ColumnStatistics.Builder statistics
) throws IOException;
/**
* Writes out the file metadata.
* @param builder Metadata builder to finalize and write.
*/
void writeFileMetadata(OrcProto.Metadata.Builder builder) throws IOException;
/**
* Writes out the file footer.
* @param builder Footer builder to finalize and write.
*/
void writeFileFooter(OrcProto.Footer.Builder builder) throws IOException;
/**
* Writes out the postscript (including the size byte if needed).
* @param builder Postscript builder to finalize and write.
*/
long writePostScript(OrcProto.PostScript.Builder builder) throws IOException;
/**
* Closes the writer.
*/
void close() throws IOException;
/**
* Flushes the writer so that readers can see the preceding postscripts.
*/
void flush() throws IOException;
/**
* Appends raw stripe data (e.g. for file merger).
* @param stripe Stripe data buffer.
* @param dirEntry File metadata entry for the stripe, to be updated with
* relevant data.
*/
void appendRawStripe(ByteBuffer stripe,
OrcProto.StripeInformation.Builder dirEntry
) throws IOException;
/**
* Get the number of bytes for a file in a given column.
* @param column column from which to get file size
* @param variant the encryption variant to check
* @return number of bytes for the given column
*/
long getFileBytes(int column, WriterEncryptionVariant variant);
/**
* Get the unencrypted stream options for this file. This class needs the
* stream options to write the indexes and footers.
*
* Additionally, the LLAP CacheWriter wants to disable the generic compression.
*/
StreamOptions getStreamOptions();
}
| 4,893 | 30.986928 | 86 | java |
null | orc-main/java/core/src/java/org/apache/orc/Reader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
/**
* The interface for reading ORC files.
* <p>
* One Reader can support multiple concurrent RecordReader.
* @since 1.1.0
*/
public interface Reader extends Closeable {
/**
* Get the number of rows in the file.
* @return the number of rows
* @since 1.1.0
*/
long getNumberOfRows();
/**
* Get the deserialized data size of the file
* @return raw data size
* @since 1.1.0
*/
long getRawDataSize();
/**
* Get the deserialized data size of the specified columns
* @param colNames the list of column names
* @return raw data size of columns
* @since 1.1.0
*/
long getRawDataSizeOfColumns(List<String> colNames);
/**
* Get the deserialized data size of the specified columns ids
* @param colIds - internal column id (check orcfiledump for column ids)
* @return raw data size of columns
* @since 1.1.0
*/
long getRawDataSizeFromColIndices(List<Integer> colIds);
/**
* Get the user metadata keys.
* @return the set of metadata keys
* @since 1.1.0
*/
List<String> getMetadataKeys();
/**
* Get a user metadata value.
* @param key a key given by the user
* @return the bytes associated with the given key
* @since 1.1.0
*/
ByteBuffer getMetadataValue(String key);
/**
* Did the user set the given metadata value.
* @param key the key to check
* @return true if the metadata value was set
* @since 1.1.0
*/
boolean hasMetadataValue(String key);
/**
* Get the compression kind.
* @return the kind of compression in the file
* @since 1.1.0
*/
CompressionKind getCompressionKind();
/**
* Get the buffer size for the compression.
* @return number of bytes to buffer for the compression codec.
* @since 1.1.0
*/
int getCompressionSize();
/**
   * Get the number of rows per entry in the row index.
   * @return the number of rows per entry in the row index, or 0 if there
* is no row index.
* @since 1.1.0
*/
int getRowIndexStride();
/**
* Get the list of stripes.
* @return the information about the stripes in order
* @since 1.1.0
*/
List<StripeInformation> getStripes();
/**
* Get the length of the file.
* @return the number of bytes in the file
* @since 1.1.0
*/
long getContentLength();
/**
* Get the statistics about the columns in the file.
* @return the information about the column
* @since 1.1.0
*/
ColumnStatistics[] getStatistics();
/**
* Get the type of rows in this ORC file.
* @since 1.1.0
*/
TypeDescription getSchema();
/**
* Get the list of types contained in the file. The root type is the first
* type in the list.
* @return the list of flattened types
* @deprecated use getSchema instead
* @since 1.1.0
*/
List<OrcProto.Type> getTypes();
/**
* Get the file format version.
* @since 1.1.0
*/
OrcFile.Version getFileVersion();
/**
* Get the version of the writer of this file.
* @since 1.1.0
*/
OrcFile.WriterVersion getWriterVersion();
/**
* Get the implementation and version of the software that wrote the file.
* It defaults to "ORC Java" for old files. For current files, we include the
* version also.
* @since 1.5.13
* @return returns the writer implementation and hopefully the version of the
* software
*/
String getSoftwareVersion();
/**
* Get the file tail (footer + postscript)
*
* @return - file tail
* @since 1.1.0
*/
OrcProto.FileTail getFileTail();
/**
* Get the list of encryption keys for column encryption.
* @return the set of encryption keys
* @since 1.6.0
*/
EncryptionKey[] getColumnEncryptionKeys();
/**
* Get the data masks for the unencrypted variant of the data.
* @return the lists of data masks
* @since 1.6.0
*/
DataMaskDescription[] getDataMasks();
/**
* Get the list of encryption variants for the data.
* @since 1.6.0
*/
EncryptionVariant[] getEncryptionVariants();
/**
* Get the stripe statistics for a given variant. The StripeStatistics will
* have 1 entry for each column in the variant. This enables the user to
* get the stripe statistics for each variant regardless of which keys are
* available.
* @param variant the encryption variant or null for unencrypted
   * @return a list of stripe statistics (one per stripe)
* @throws IOException if the required key is not available
* @since 1.6.0
*/
List<StripeStatistics> getVariantStripeStatistics(EncryptionVariant variant
) throws IOException;
/**
* Options for creating a RecordReader.
* @since 1.1.0
*/
class Options implements Cloneable {
private boolean[] include;
private long offset = 0;
private long length = Long.MAX_VALUE;
private int positionalEvolutionLevel;
private SearchArgument sarg = null;
private String[] columnNames = null;
private Boolean useZeroCopy = null;
private Boolean skipCorruptRecords = null;
private TypeDescription schema = null;
private String[] preFilterColumns = null;
Consumer<OrcFilterContext> skipRowCallback = null;
private DataReader dataReader = null;
private Boolean tolerateMissingSchema = null;
private boolean forcePositionalEvolution;
private boolean isSchemaEvolutionCaseAware =
(boolean) OrcConf.IS_SCHEMA_EVOLUTION_CASE_SENSITIVE.getDefaultValue();
private boolean includeAcidColumns = true;
private boolean allowSARGToFilter = false;
private boolean useSelected = false;
private boolean allowPluginFilters = false;
private List<String> pluginAllowListFilters = null;
private int minSeekSize = (int) OrcConf.ORC_MIN_DISK_SEEK_SIZE.getDefaultValue();
private double minSeekSizeTolerance = (double) OrcConf.ORC_MIN_DISK_SEEK_SIZE_TOLERANCE
.getDefaultValue();
private int rowBatchSize = (int) OrcConf.ROW_BATCH_SIZE.getDefaultValue();
/**
* @since 1.1.0
*/
public Options() {
// PASS
}
/**
* @since 1.1.0
*/
public Options(Configuration conf) {
useZeroCopy = OrcConf.USE_ZEROCOPY.getBoolean(conf);
skipCorruptRecords = OrcConf.SKIP_CORRUPT_DATA.getBoolean(conf);
tolerateMissingSchema = OrcConf.TOLERATE_MISSING_SCHEMA.getBoolean(conf);
forcePositionalEvolution = OrcConf.FORCE_POSITIONAL_EVOLUTION.getBoolean(conf);
positionalEvolutionLevel = OrcConf.FORCE_POSITIONAL_EVOLUTION_LEVEL.getInt(conf);
isSchemaEvolutionCaseAware =
OrcConf.IS_SCHEMA_EVOLUTION_CASE_SENSITIVE.getBoolean(conf);
allowSARGToFilter = OrcConf.ALLOW_SARG_TO_FILTER.getBoolean(conf);
useSelected = OrcConf.READER_USE_SELECTED.getBoolean(conf);
allowPluginFilters = OrcConf.ALLOW_PLUGIN_FILTER.getBoolean(conf);
pluginAllowListFilters = OrcConf.PLUGIN_FILTER_ALLOWLIST.getStringAsList(conf);
minSeekSize = OrcConf.ORC_MIN_DISK_SEEK_SIZE.getInt(conf);
minSeekSizeTolerance = OrcConf.ORC_MIN_DISK_SEEK_SIZE_TOLERANCE.getDouble(conf);
rowBatchSize = OrcConf.ROW_BATCH_SIZE.getInt(conf);
}
/**
* Set the list of columns to read.
* @param include a list of columns to read
* @return this
* @since 1.1.0
*/
public Options include(boolean[] include) {
this.include = include;
return this;
}
/**
* Set the range of bytes to read
* @param offset the starting byte offset
* @param length the number of bytes to read
* @return this
* @since 1.1.0
*/
public Options range(long offset, long length) {
this.offset = offset;
this.length = length;
return this;
}
/**
* Set the schema on read type description.
* @since 1.1.0
*/
public Options schema(TypeDescription schema) {
this.schema = schema;
return this;
}
/**
* Set a row level filter.
* This is an advanced feature that allows the caller to specify
* a list of columns that are read first and then a filter that
* is called to determine which rows if any should be read.
*
     * Users should expect the batches that come from the reader
* to use the selected array set by their filter.
*
* Use cases for this are predicates that SearchArgs can't represent,
* such as relationships between columns (eg. columnA == columnB).
* @param filterColumnNames a comma separated list of the column names that
* are read before the filter is applied. Only top
* level columns in the reader's schema can be used
* here and they must not be duplicated.
* @param filterCallback a function callback to perform filtering during the call to
* RecordReader.nextBatch. This function should not reference
* any static fields nor modify the passed in ColumnVectors but
* should set the filter output using the selected array.
*
* @return this
* @since 1.7.0
*/
public Options setRowFilter(
String[] filterColumnNames, Consumer<OrcFilterContext> filterCallback) {
this.preFilterColumns = filterColumnNames;
this.skipRowCallback = filterCallback;
return this;
}
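    // Illustrative sketch (hypothetical column names "a" and "b"): keep only
    // rows that survive a cross-column check that a SearchArgument cannot
    // express. The callback body is indicative only; it should update the
    // batch's selected array through the OrcFilterContext.
    //
    //   Reader.Options opts = reader.options()
    //       .setRowFilter(new String[]{"a", "b"}, ctx -> {
    //         ColumnVector[] a = ctx.findColumnVector("a");
    //         ColumnVector[] b = ctx.findColumnVector("b");
    //         // ... compare a and b row by row and mark the surviving rows ...
    //       });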
/**
* Set search argument for predicate push down.
* @param sarg the search argument
     * @param columnNames the column names for the search argument
* @return this
* @since 1.1.0
*/
public Options searchArgument(SearchArgument sarg, String[] columnNames) {
this.sarg = sarg;
this.columnNames = columnNames;
return this;
}
/**
* Set allowSARGToFilter.
     * @param allowSARGToFilter whether the SearchArgument should also be used to filter rows
* @return this
* @since 1.7.0
*/
public Options allowSARGToFilter(boolean allowSARGToFilter) {
this.allowSARGToFilter = allowSARGToFilter;
return this;
}
/**
* Get allowSARGToFilter value.
* @return allowSARGToFilter
* @since 1.7.0
*/
public boolean isAllowSARGToFilter() {
return allowSARGToFilter;
}
/**
* Set whether to use zero copy from HDFS.
* @param value the new zero copy flag
* @return this
* @since 1.1.0
*/
public Options useZeroCopy(boolean value) {
this.useZeroCopy = value;
return this;
}
/**
* Set dataReader.
* @param value the new dataReader.
* @return this
* @since 1.1.0
*/
public Options dataReader(DataReader value) {
this.dataReader = value;
return this;
}
/**
* Set whether to skip corrupt records.
* @param value the new skip corrupt records flag
* @return this
* @since 1.1.0
*/
public Options skipCorruptRecords(boolean value) {
this.skipCorruptRecords = value;
return this;
}
/**
* Set whether to make a best effort to tolerate schema evolution for files
     * which do not have an embedded schema because they were written with a
* pre-HIVE-4243 writer.
* @param value the new tolerance flag
* @return this
* @since 1.2.0
*/
public Options tolerateMissingSchema(boolean value) {
this.tolerateMissingSchema = value;
return this;
}
/**
* Set whether to force schema evolution to be positional instead of
* based on the column names.
* @param value force positional evolution
* @return this
* @since 1.3.0
*/
public Options forcePositionalEvolution(boolean value) {
this.forcePositionalEvolution = value;
return this;
}
/**
* Set number of levels to force schema evolution to be positional instead of
* based on the column names.
* @param value number of levels of positional schema evolution
* @return this
* @since 1.5.11
*/
public Options positionalEvolutionLevel(int value) {
this.positionalEvolutionLevel = value;
return this;
}
/**
* Set boolean flag to determine if the comparison of field names in schema
* evolution is case sensitive
* @param value the flag for schema evolution is case sensitive or not.
* @return this
* @since 1.5.0
*/
public Options isSchemaEvolutionCaseAware(boolean value) {
this.isSchemaEvolutionCaseAware = value;
return this;
}
/**
* {@code true} if acid metadata columns should be decoded otherwise they will
* be set to {@code null}.
* @since 1.5.3
*/
public Options includeAcidColumns(boolean includeAcidColumns) {
this.includeAcidColumns = includeAcidColumns;
return this;
}
/**
* @since 1.1.0
*/
public boolean[] getInclude() {
return include;
}
/**
* @since 1.1.0
*/
public long getOffset() {
return offset;
}
/**
* @since 1.1.0
*/
public long getLength() {
return length;
}
/**
* @since 1.1.0
*/
public TypeDescription getSchema() {
return schema;
}
/**
* @since 1.1.0
*/
public SearchArgument getSearchArgument() {
return sarg;
}
/**
* @since 1.7.0
*/
public Consumer<OrcFilterContext> getFilterCallback() {
return skipRowCallback;
}
/**
* @since 1.7.0
*/
public String[] getPreFilterColumnNames(){
return preFilterColumns;
}
/**
* @since 1.1.0
*/
public String[] getColumnNames() {
return columnNames;
}
/**
* @since 1.1.0
*/
public long getMaxOffset() {
long result = offset + length;
if (result < 0) {
result = Long.MAX_VALUE;
}
return result;
}
/**
* @since 1.1.0
*/
public Boolean getUseZeroCopy() {
return useZeroCopy;
}
/**
* @since 1.1.0
*/
public Boolean getSkipCorruptRecords() {
return skipCorruptRecords;
}
/**
* @since 1.1.0
*/
public DataReader getDataReader() {
return dataReader;
}
/**
* @since 1.3.0
*/
public boolean getForcePositionalEvolution() {
return forcePositionalEvolution;
}
/**
* @since 1.5.11
*/
public int getPositionalEvolutionLevel() {
return positionalEvolutionLevel;
}
/**
* @since 1.5.0
*/
public boolean getIsSchemaEvolutionCaseAware() {
return isSchemaEvolutionCaseAware;
}
/**
* @since 1.5.3
*/
public boolean getIncludeAcidColumns() {
return includeAcidColumns;
}
/**
* @since 1.1.0
*/
@Override
public Options clone() {
try {
Options result = (Options) super.clone();
if (dataReader != null) {
result.dataReader = dataReader.clone();
}
return result;
} catch (CloneNotSupportedException e) {
throw new UnsupportedOperationException("uncloneable", e);
}
}
/**
* @since 1.1.0
*/
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("{include: ");
if (include == null) {
buffer.append("null");
} else {
buffer.append("[");
for(int i=0; i < include.length; ++i) {
if (i != 0) {
buffer.append(", ");
}
buffer.append(include[i]);
}
buffer.append("]");
}
buffer.append(", offset: ");
buffer.append(offset);
buffer.append(", length: ");
buffer.append(length);
if (sarg != null) {
buffer.append(", sarg: ");
buffer.append(sarg);
}
if (schema != null) {
buffer.append(", schema: ");
schema.printToBuffer(buffer);
}
buffer.append(", includeAcidColumns: ").append(includeAcidColumns);
buffer.append(", allowSARGToFilter: ").append(allowSARGToFilter);
buffer.append(", useSelected: ").append(useSelected);
buffer.append("}");
return buffer.toString();
}
/**
* @since 1.2.0
*/
public boolean getTolerateMissingSchema() {
return tolerateMissingSchema != null ? tolerateMissingSchema :
(Boolean) OrcConf.TOLERATE_MISSING_SCHEMA.getDefaultValue();
}
/**
* @since 1.7.0
*/
public boolean useSelected() {
return useSelected;
}
/**
* @since 1.7.0
*/
public Options useSelected(boolean newValue) {
this.useSelected = newValue;
return this;
}
public boolean allowPluginFilters() {
return allowPluginFilters;
}
public Options allowPluginFilters(boolean allowPluginFilters) {
this.allowPluginFilters = allowPluginFilters;
return this;
}
public List<String> pluginAllowListFilters() {
return pluginAllowListFilters;
}
public Options pluginAllowListFilters(String... allowLists) {
this.pluginAllowListFilters = Arrays.asList(allowLists);
return this;
}
/**
* @since 1.8.0
*/
public int minSeekSize() {
return minSeekSize;
}
/**
* @since 1.8.0
*/
public Options minSeekSize(int minSeekSize) {
this.minSeekSize = minSeekSize;
return this;
}
/**
* @since 1.8.0
*/
public double minSeekSizeTolerance() {
return minSeekSizeTolerance;
}
/**
* @since 1.8.0
*/
public Options minSeekSizeTolerance(double value) {
this.minSeekSizeTolerance = value;
return this;
}
/**
* @since 1.9.0
*/
public int getRowBatchSize() {
return rowBatchSize;
}
/**
* @since 1.9.0
*/
public Options rowBatchSize(int value) {
this.rowBatchSize = value;
return this;
}
}
/**
* Create a default options object that can be customized for creating
* a RecordReader.
* @return a new default Options object
* @since 1.2.0
*/
Options options();
/**
* Create a RecordReader that reads everything with the default options.
* @return a new RecordReader
* @since 1.1.0
*/
RecordReader rows() throws IOException;
/**
* Create a RecordReader that uses the options given.
* This method can't be named rows, because many callers used rows(null)
* before the rows() method was introduced.
* @param options the options to read with
* @return a new RecordReader
* @since 1.1.0
*/
RecordReader rows(Options options) throws IOException;
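  // Illustrative sketch (hypothetical column name "x"): read a projection of
  // the file by building the include array from its schema.
  //
  //   boolean[] include = OrcUtils.includeColumns("x", reader.getSchema());
  //   try (RecordReader rows = reader.rows(reader.options().include(include))) {
  //     // ... iterate with nextBatch ...
  //   }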
/**
* @return List of integers representing version of the file, in order from major to minor.
* @since 1.1.0
*/
List<Integer> getVersionList();
/**
* @return Gets the size of metadata, in bytes.
* @since 1.1.0
*/
int getMetadataSize();
/**
* @return Stripe statistics, in original protobuf form.
* @deprecated Use {@link #getStripeStatistics()} instead.
* @since 1.1.0
*/
List<OrcProto.StripeStatistics> getOrcProtoStripeStatistics();
/**
* Get the stripe statistics for all of the columns.
* @return a list of the statistics for each stripe in the file
* @since 1.2.0
*/
List<StripeStatistics> getStripeStatistics() throws IOException;
/**
* Get the stripe statistics from the file.
* @param include null for all columns or an array where the required columns
* are selected
* @return a list of the statistics for each stripe in the file
* @since 1.6.0
*/
List<StripeStatistics> getStripeStatistics(boolean[] include) throws IOException;
/**
* @return File statistics, in original protobuf form.
* @deprecated Use {@link #getStatistics()} instead.
* @since 1.1.0
*/
List<OrcProto.ColumnStatistics> getOrcProtoFileStatistics();
/**
* @return Serialized file metadata read from disk for the purposes of caching, etc.
* @since 1.1.0
*/
ByteBuffer getSerializedFileFooter();
/**
* Was the file written using the proleptic Gregorian calendar.
* @since 1.5.9
*/
boolean writerUsedProlepticGregorian();
/**
* Should the returned values use the proleptic Gregorian calendar?
* @since 1.5.9
*/
boolean getConvertToProlepticGregorian();
}
| 21,327 | 25.593516 | 93 | java |
null | orc-main/java/core/src/java/org/apache/orc/RecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import java.io.Closeable;
import java.io.IOException;
/**
* A row-by-row iterator for ORC files.
* @since 1.1.0
*/
public interface RecordReader extends Closeable {
/**
* Read the next row batch. The size of the batch to read cannot be
   * controlled by the callers. Callers need to look at
* VectorizedRowBatch.size of the returned object to know the batch
* size read.
* @param batch a row batch object to read into
* @return were more rows available to read?
* @throws java.io.IOException
* @since 1.1.0
*/
boolean nextBatch(VectorizedRowBatch batch) throws IOException;
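  // Illustrative read loop (the batch comes from the file schema of the
  // enclosing Reader, see Reader#getSchema and Reader#rows):
  //
  //   VectorizedRowBatch batch = reader.getSchema().createRowBatch();
  //   while (rows.nextBatch(batch)) {
  //     for (int r = 0; r < batch.size; ++r) {
  //       // ... process row r of the batch ...
  //     }
  //   }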
/**
* Get the row number of the row that will be returned by the following
* call to next().
* @return the row number from 0 to the number of rows in the file
* @throws java.io.IOException
* @since 1.1.0
*/
long getRowNumber() throws IOException;
/**
* Get the progress of the reader through the rows.
* @return a fraction between 0.0 and 1.0 of rows read
* @throws java.io.IOException
* @since 1.1.0
*/
float getProgress() throws IOException;
/**
* Release the resources associated with the given reader.
* @throws java.io.IOException
* @since 1.1.0
*/
@Override
void close() throws IOException;
/**
* Seek to a particular row number.
* @since 1.1.0
*/
void seekToRow(long rowCount) throws IOException;
}
| 2,285 | 30.315068 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/StringColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Statistics for string columns.
*/
public interface StringColumnStatistics extends ColumnStatistics {
/**
* Get the minimum string.
* @return the minimum
*/
String getMinimum();
/**
* Get the maximum string.
* @return the maximum
*/
String getMaximum();
/**
* Get the lower bound of the values in this column.
* The value may be truncated to at most
* MAX_BYTES_RECORDED.
* @return lower bound
*/
String getLowerBound();
/**
* Get the upper bound of the values in this column.
* The value may be truncated to at most
* MAX_BYTES_RECORDED.
* @return upper bound
*/
String getUpperBound();
/**
   * Get the total length of all strings.
* @return the sum (total length)
*/
long getSum();
}
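// --- Illustrative sketch (not part of the Apache ORC sources). ---
// Summarizes the string statistics above. getMinimum()/getMaximum() may be null
// when only truncated bounds were recorded, in which case the (possibly
// truncated) getLowerBound()/getUpperBound() values are used instead.
class StringColumnStatisticsExample {
  static String describe(StringColumnStatistics stats) {
    String min = stats.getMinimum() != null ? stats.getMinimum() : stats.getLowerBound();
    String max = stats.getMaximum() != null ? stats.getMaximum() : stats.getUpperBound();
    return "range [" + min + ", " + max + "], total length " + stats.getSum();
  }
}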
orc-main/java/core/src/java/org/apache/orc/StripeInformation.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
/**
* Information about the stripes in an ORC file that is provided by the Reader.
*/
public interface StripeInformation {
/**
* Get the byte offset of the start of the stripe.
* @return the bytes from the start of the file
*/
long getOffset();
/**
* Get the total length of the stripe in bytes.
* @return the number of bytes in the stripe
*/
long getLength();
/**
* Get the length of the stripe's indexes.
* @return the number of bytes in the index
*/
long getIndexLength();
/**
* Get the length of the stripe's data.
* @return the number of bytes in the stripe
*/
long getDataLength();
/**
* Get the length of the stripe's tail section, which contains its index.
* @return the number of bytes in the tail
*/
long getFooterLength();
/**
* Get the number of rows in the stripe.
* @return a count of the number of rows
*/
long getNumberOfRows();
/**
* Get the index of this stripe in the current file.
* @return 0 to number_of_stripes - 1
*/
long getStripeId();
/**
* Does this stripe have an explicit encryption stripe id set?
* @return true if this stripe was the first stripe of a merge
*/
boolean hasEncryptionStripeId();
/**
* Get the original stripe id that was used when the stripe was originally
   * written. This is only different from getStripeId in merged files.
* @return the original stripe id + 1
*/
long getEncryptionStripeId();
/**
* Get the encrypted keys starting from this stripe until overridden by
* a new set in a following stripe. The top level array is one for each
* encryption variant. Each element is an encrypted key.
* @return the array of encrypted keys
*/
byte[][] getEncryptedLocalKeys();
}
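// --- Illustrative sketch (not part of the Apache ORC sources). ---
// Aggregates the size accessors defined above over a list of stripes, for example
// the list returned by a reader or writer. The class and method names are
// assumptions for this example.
class StripeInformationExample {
  static long totalStripeBytes(java.util.List<StripeInformation> stripes) {
    long total = 0;
    for (StripeInformation stripe : stripes) {
      // The index, data, and footer sections make up a stripe's on-disk length.
      total += stripe.getIndexLength() + stripe.getDataLength() + stripe.getFooterLength();
    }
    return total;
  }
}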
orc-main/java/core/src/java/org/apache/orc/StripeStatistics.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.orc.impl.ColumnStatisticsImpl;
import java.util.List;
/**
* The statistics for a stripe.
*/
public class StripeStatistics {
protected final List<OrcProto.ColumnStatistics> cs;
protected final TypeDescription schema;
private final boolean writerUsedProlepticGregorian;
private final boolean convertToProlepticGregorian;
public StripeStatistics(List<OrcProto.ColumnStatistics> list) {
this(null, list, false, false);
}
public StripeStatistics(TypeDescription schema,
List<OrcProto.ColumnStatistics> list,
boolean writerUsedProlepticGregorian,
boolean convertToProlepticGregorian) {
this.schema = schema;
this.cs = list;
this.writerUsedProlepticGregorian = writerUsedProlepticGregorian;
this.convertToProlepticGregorian = convertToProlepticGregorian;
}
private int getBase() {
return schema == null ? 0 : schema.getId();
}
/**
* Return list of column statistics
*
* @return column stats
*/
public ColumnStatistics[] getColumnStatistics() {
ColumnStatistics[] result = new ColumnStatistics[cs.size()];
int base = getBase();
for (int c = 0; c < result.length; ++c) {
TypeDescription column = schema == null ? null : schema.findSubtype(base + c);
result[c] = ColumnStatisticsImpl.deserialize(column, cs.get(c),
writerUsedProlepticGregorian, convertToProlepticGregorian);
}
return result;
}
public OrcProto.ColumnStatistics getColumn(int column) {
return cs.get(column);
}
}
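// --- Illustrative sketch (not part of the Apache ORC sources). ---
// Walks the deserialized per-column statistics of one stripe using the class
// above; summing the value counts is done purely to demonstrate iteration.
class StripeStatisticsExample {
  static long totalValueCount(StripeStatistics stripe) {
    long values = 0;
    for (ColumnStatistics column : stripe.getColumnStatistics()) {
      values += column.getNumberOfValues();
    }
    return values;
  }
}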
orc-main/java/core/src/java/org/apache/orc/TimestampColumnStatistics.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.sql.Timestamp;
/**
* Statistics for Timestamp columns.
*/
public interface TimestampColumnStatistics extends ColumnStatistics {
/**
* Get the minimum value for the column.
* @return minimum value
*/
Timestamp getMinimum();
/**
* Get the maximum value for the column.
* @return maximum value
*/
Timestamp getMaximum();
/**
* Get the minimum value for the column in UTC.
* @return minimum value in UTC
*/
Timestamp getMinimumUTC();
/**
* Get the maximum value for the column in UTC.
* @return maximum value in UTC
*/
Timestamp getMaximumUTC();
}
orc-main/java/core/src/java/org/apache/orc/TypeDescription.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.ParserUtils;
import org.apache.orc.impl.TypeUtils;
import org.jetbrains.annotations.NotNull;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
/**
* This is the description of the types in an ORC file.
*/
public class TypeDescription
implements Comparable<TypeDescription>, Serializable, Cloneable {
private static final int MAX_PRECISION = 38;
private static final int MAX_SCALE = 38;
private static final int DEFAULT_PRECISION = 38;
private static final int DEFAULT_SCALE = 10;
public static final int MAX_DECIMAL64_PRECISION = 18;
public static final long MAX_DECIMAL64 = 999_999_999_999_999_999L;
public static final long MIN_DECIMAL64 = -MAX_DECIMAL64;
private static final int DEFAULT_LENGTH = 256;
static final Pattern UNQUOTED_NAMES = Pattern.compile("^[a-zA-Z0-9_]+$");
// type attributes
public static final String ENCRYPT_ATTRIBUTE = "encrypt";
public static final String MASK_ATTRIBUTE = "mask";
@Override
public int compareTo(TypeDescription other) {
if (this == other) {
return 0;
} else if (other == null) {
return -1;
} else {
int result = category.compareTo(other.category);
if (result == 0) {
switch (category) {
case CHAR:
case VARCHAR:
return maxLength - other.maxLength;
case DECIMAL:
if (precision != other.precision) {
return precision - other.precision;
}
return scale - other.scale;
case UNION:
case LIST:
case MAP:
if (children.size() != other.children.size()) {
return children.size() - other.children.size();
}
for(int c=0; result == 0 && c < children.size(); ++c) {
result = children.get(c).compareTo(other.children.get(c));
}
break;
case STRUCT:
if (children.size() != other.children.size()) {
return children.size() - other.children.size();
}
for(int c=0; result == 0 && c < children.size(); ++c) {
result = fieldNames.get(c).compareTo(other.fieldNames.get(c));
if (result == 0) {
result = children.get(c).compareTo(other.children.get(c));
}
}
break;
default:
// PASS
}
}
return result;
}
}
public enum Category {
BOOLEAN("boolean", true),
BYTE("tinyint", true),
SHORT("smallint", true),
INT("int", true),
LONG("bigint", true),
FLOAT("float", true),
DOUBLE("double", true),
STRING("string", true),
DATE("date", true),
TIMESTAMP("timestamp", true),
BINARY("binary", true),
DECIMAL("decimal", true),
VARCHAR("varchar", true),
CHAR("char", true),
LIST("array", false),
MAP("map", false),
STRUCT("struct", false),
UNION("uniontype", false),
TIMESTAMP_INSTANT("timestamp with local time zone", true);
Category(String name, boolean isPrimitive) {
this.name = name;
this.isPrimitive = isPrimitive;
}
final boolean isPrimitive;
final String name;
public boolean isPrimitive() {
return isPrimitive;
}
public String getName() {
return name;
}
}
public static TypeDescription createBoolean() {
return new TypeDescription(Category.BOOLEAN);
}
public static TypeDescription createByte() {
return new TypeDescription(Category.BYTE);
}
public static TypeDescription createShort() {
return new TypeDescription(Category.SHORT);
}
public static TypeDescription createInt() {
return new TypeDescription(Category.INT);
}
public static TypeDescription createLong() {
return new TypeDescription(Category.LONG);
}
public static TypeDescription createFloat() {
return new TypeDescription(Category.FLOAT);
}
public static TypeDescription createDouble() {
return new TypeDescription(Category.DOUBLE);
}
public static TypeDescription createString() {
return new TypeDescription(Category.STRING);
}
public static TypeDescription createDate() {
return new TypeDescription(Category.DATE);
}
public static TypeDescription createTimestamp() {
return new TypeDescription(Category.TIMESTAMP);
}
public static TypeDescription createTimestampInstant() {
return new TypeDescription(Category.TIMESTAMP_INSTANT);
}
public static TypeDescription createBinary() {
return new TypeDescription(Category.BINARY);
}
public static TypeDescription createDecimal() {
return new TypeDescription(Category.DECIMAL);
}
/**
* Parse TypeDescription from the Hive type names. This is the inverse
* of TypeDescription.toString()
* @param typeName the name of the type
* @return a new TypeDescription or null if typeName was null
* @throws IllegalArgumentException if the string is badly formed
*/
public static TypeDescription fromString(String typeName) {
if (typeName == null) {
return null;
}
ParserUtils.StringPosition source = new ParserUtils.StringPosition(typeName);
TypeDescription result = ParserUtils.parseType(source);
if (source.hasCharactersLeft()) {
throw new IllegalArgumentException("Extra characters at " + source);
}
return result;
}
/**
* For decimal types, set the precision.
* @param precision the new precision
* @return this
*/
public TypeDescription withPrecision(int precision) {
if (category != Category.DECIMAL) {
throw new IllegalArgumentException("precision is only allowed on decimal"+
" and not " + category.name);
} else if (precision < 1 || precision > MAX_PRECISION || scale > precision){
throw new IllegalArgumentException("precision " + precision +
" is out of range 1 .. " + scale);
}
this.precision = precision;
return this;
}
/**
* For decimal types, set the scale.
* @param scale the new scale
* @return this
*/
public TypeDescription withScale(int scale) {
if (category != Category.DECIMAL) {
throw new IllegalArgumentException("scale is only allowed on decimal"+
" and not " + category.name);
} else if (scale < 0 || scale > MAX_SCALE || scale > precision) {
throw new IllegalArgumentException("scale is out of range at " + scale);
}
this.scale = scale;
return this;
}
/**
* Set an attribute on this type.
* @param key the attribute name
* @param value the attribute value or null to clear the value
* @return this for method chaining
*/
public TypeDescription setAttribute(@NotNull String key,
String value) {
if (value == null) {
attributes.remove(key);
} else {
attributes.put(key, value);
}
return this;
}
/**
* Remove attribute on this type, if it is set.
* @param key the attribute name
* @return this for method chaining
*/
public TypeDescription removeAttribute(@NotNull String key) {
attributes.remove(key);
return this;
}
public static TypeDescription createVarchar() {
return new TypeDescription(Category.VARCHAR);
}
public static TypeDescription createChar() {
return new TypeDescription(Category.CHAR);
}
/**
* Set the maximum length for char and varchar types.
* @param maxLength the maximum value
* @return this
*/
public TypeDescription withMaxLength(int maxLength) {
if (category != Category.VARCHAR && category != Category.CHAR) {
throw new IllegalArgumentException("maxLength is only allowed on char" +
" and varchar and not " + category.name);
}
this.maxLength = maxLength;
return this;
}
public static TypeDescription createList(TypeDescription childType) {
TypeDescription result = new TypeDescription(Category.LIST);
result.children.add(childType);
childType.parent = result;
return result;
}
public static TypeDescription createMap(TypeDescription keyType,
TypeDescription valueType) {
TypeDescription result = new TypeDescription(Category.MAP);
result.children.add(keyType);
result.children.add(valueType);
keyType.parent = result;
valueType.parent = result;
return result;
}
public static TypeDescription createUnion() {
return new TypeDescription(Category.UNION);
}
public static TypeDescription createStruct() {
return new TypeDescription(Category.STRUCT);
}
/**
* Add a child to a union type.
* @param child a new child type to add
* @return the union type.
*/
public TypeDescription addUnionChild(TypeDescription child) {
if (category != Category.UNION) {
throw new IllegalArgumentException("Can only add types to union type" +
" and not " + category);
}
addChild(child);
return this;
}
/**
* Add a field to a struct type as it is built.
* @param field the field name
* @param fieldType the type of the field
* @return the struct type
*/
public TypeDescription addField(String field, TypeDescription fieldType) {
if (category != Category.STRUCT) {
throw new IllegalArgumentException("Can only add fields to struct type" +
" and not " + category);
}
fieldNames.add(field);
addChild(fieldType);
return this;
}
/**
* Get the id for this type.
   * The first call will cause all of the ids in the tree to be assigned, so
* it should not be called before the type is completely built.
* @return the sequential id
*/
public int getId() {
// if the id hasn't been assigned, assign all of the ids from the root
if (id == -1) {
TypeDescription root = this;
while (root.parent != null) {
root = root.parent;
}
root.assignIds(0);
}
return id;
}
@Override
public TypeDescription clone() {
TypeDescription result = new TypeDescription(category);
result.maxLength = maxLength;
result.precision = precision;
result.scale = scale;
if (fieldNames != null) {
result.fieldNames.addAll(fieldNames);
}
if (children != null) {
for(TypeDescription child: children) {
TypeDescription clone = child.clone();
clone.parent = result;
result.children.add(clone);
}
}
for (Map.Entry<String,String> pair: attributes.entrySet()) {
result.attributes.put(pair.getKey(), pair.getValue());
}
return result;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + category.hashCode();
if (children != null) {
result = prime * result + children.hashCode();
}
result = prime * result + maxLength;
result = prime * result + precision;
result = prime * result + scale;
return result;
}
@Override
public boolean equals(Object other) {
return equals(other, true);
}
/**
* Determines whether the two object are equal.
* This function can either compare or ignore the type attributes as
* desired.
* @param other the reference object with which to compare.
* @param checkAttributes should the type attributes be considered?
* @return {@code true} if this object is the same as the other
* argument; {@code false} otherwise.
*/
public boolean equals(Object other, boolean checkAttributes) {
if (other == null || !(other instanceof TypeDescription)) {
return false;
}
if (other == this) {
return true;
}
TypeDescription castOther = (TypeDescription) other;
if (category != castOther.category ||
maxLength != castOther.maxLength ||
scale != castOther.scale ||
precision != castOther.precision) {
return false;
}
if (checkAttributes) {
// make sure the attributes are the same
List<String> attributeNames = getAttributeNames();
if (castOther.getAttributeNames().size() != attributeNames.size()) {
return false;
}
for (String attribute : attributeNames) {
if (!getAttributeValue(attribute).equals(castOther.getAttributeValue(attribute))) {
return false;
}
}
}
// check the children
if (children != null) {
if (children.size() != castOther.children.size()) {
return false;
}
for (int i = 0; i < children.size(); ++i) {
if (!children.get(i).equals(castOther.children.get(i), checkAttributes)) {
return false;
}
}
}
if (category == Category.STRUCT) {
for(int i=0; i < fieldNames.size(); ++i) {
if (!fieldNames.get(i).equals(castOther.fieldNames.get(i))) {
return false;
}
}
}
return true;
}
/**
* Get the maximum id assigned to this type or its children.
   * The first call will cause all of the ids in the tree to be assigned, so
* it should not be called before the type is completely built.
* @return the maximum id assigned under this type
*/
public int getMaximumId() {
// if the id hasn't been assigned, assign all of the ids from the root
if (maxId == -1) {
TypeDescription root = this;
while (root.parent != null) {
root = root.parent;
}
root.assignIds(0);
}
return maxId;
}
/**
* Specify the version of the VectorizedRowBatch that the user desires.
*/
public enum RowBatchVersion {
ORIGINAL,
USE_DECIMAL64;
}
public VectorizedRowBatch createRowBatch(RowBatchVersion version, int size) {
VectorizedRowBatch result;
if (category == Category.STRUCT) {
result = new VectorizedRowBatch(children.size(), size);
for(int i=0; i < result.cols.length; ++i) {
result.cols[i] = TypeUtils.createColumn(children.get(i), version, size);
}
} else {
result = new VectorizedRowBatch(1, size);
result.cols[0] = TypeUtils.createColumn(this, version, size);
}
result.reset();
return result;
}
/**
* Create a VectorizedRowBatch that uses Decimal64ColumnVector for
* short (p ≤ 18) decimals.
* @return a new VectorizedRowBatch
*/
public VectorizedRowBatch createRowBatchV2() {
return createRowBatch(RowBatchVersion.USE_DECIMAL64,
VectorizedRowBatch.DEFAULT_SIZE);
}
/**
* Create a VectorizedRowBatch with the original ColumnVector types
* @param maxSize the maximum size of the batch
* @return a new VectorizedRowBatch
*/
public VectorizedRowBatch createRowBatch(int maxSize) {
return createRowBatch(RowBatchVersion.ORIGINAL, maxSize);
}
/**
* Create a VectorizedRowBatch with the original ColumnVector types
* @return a new VectorizedRowBatch
*/
public VectorizedRowBatch createRowBatch() {
return createRowBatch(RowBatchVersion.ORIGINAL,
VectorizedRowBatch.DEFAULT_SIZE);
}
/**
* Get the kind of this type.
* @return get the category for this type.
*/
public Category getCategory() {
return category;
}
/**
* Get the maximum length of the type. Only used for char and varchar types.
* @return the maximum length of the string type
*/
public int getMaxLength() {
return maxLength;
}
/**
* Get the precision of the decimal type.
* @return the number of digits for the precision.
*/
public int getPrecision() {
return precision;
}
/**
* Get the scale of the decimal type.
* @return the number of digits for the scale.
*/
public int getScale() {
return scale;
}
/**
* For struct types, get the list of field names.
* @return the list of field names.
*/
public List<String> getFieldNames() {
return Collections.unmodifiableList(fieldNames);
}
/**
* Get the list of attribute names defined on this type.
* @return a list of sorted attribute names
*/
public List<String> getAttributeNames() {
List<String> result = new ArrayList<>(attributes.keySet());
Collections.sort(result);
return result;
}
/**
* Get the value of a given attribute.
* @param attributeName the name of the attribute
* @return the value of the attribute or null if it isn't set
*/
public String getAttributeValue(String attributeName) {
return attributes.get(attributeName);
}
/**
* Get the parent of the current type
* @return null if root else parent
*/
public TypeDescription getParent() {
return parent;
}
/**
* Get the subtypes of this type.
* @return the list of children types
*/
public List<TypeDescription> getChildren() {
return children == null ? null : Collections.unmodifiableList(children);
}
/**
* Assign ids to all of the nodes under this one.
* @param startId the lowest id to assign
* @return the next available id
*/
private int assignIds(int startId) {
id = startId++;
if (children != null) {
for (TypeDescription child : children) {
startId = child.assignIds(startId);
}
}
maxId = startId - 1;
return startId;
}
/**
* Add a child to a type.
* @param child the child to add
*/
public void addChild(TypeDescription child) {
switch (category) {
case LIST:
if (children.size() >= 1) {
throw new IllegalArgumentException("Can't add more children to list");
}
case MAP:
if (children.size() >= 2) {
throw new IllegalArgumentException("Can't add more children to map");
}
case UNION:
case STRUCT:
children.add(child);
child.parent = this;
break;
default:
throw new IllegalArgumentException("Can't add children to " + category);
}
}
public TypeDescription(Category category) {
this.category = category;
if (category.isPrimitive) {
children = null;
} else {
children = new ArrayList<>();
}
if (category == Category.STRUCT) {
fieldNames = new ArrayList<>();
} else {
fieldNames = null;
}
}
private int id = -1;
private int maxId = -1;
private TypeDescription parent;
private final Category category;
private final List<TypeDescription> children;
private final List<String> fieldNames;
private final Map<String,String> attributes = new HashMap<>();
private int maxLength = DEFAULT_LENGTH;
private int precision = DEFAULT_PRECISION;
private int scale = DEFAULT_SCALE;
static void printFieldName(StringBuilder buffer, String name) {
if (UNQUOTED_NAMES.matcher(name).matches()) {
buffer.append(name);
} else {
buffer.append('`');
buffer.append(name.replace("`", "``"));
buffer.append('`');
}
}
public void printToBuffer(StringBuilder buffer) {
buffer.append(category.name);
switch (category) {
case DECIMAL:
buffer.append('(');
buffer.append(precision);
buffer.append(',');
buffer.append(scale);
buffer.append(')');
break;
case CHAR:
case VARCHAR:
buffer.append('(');
buffer.append(maxLength);
buffer.append(')');
break;
case LIST:
case MAP:
case UNION:
buffer.append('<');
for(int i=0; i < children.size(); ++i) {
if (i != 0) {
buffer.append(',');
}
children.get(i).printToBuffer(buffer);
}
buffer.append('>');
break;
case STRUCT:
buffer.append('<');
for(int i=0; i < children.size(); ++i) {
if (i != 0) {
buffer.append(',');
}
printFieldName(buffer, fieldNames.get(i));
buffer.append(':');
children.get(i).printToBuffer(buffer);
}
buffer.append('>');
break;
default:
break;
}
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
printToBuffer(buffer);
return buffer.toString();
}
private void printJsonToBuffer(String prefix, StringBuilder buffer,
int indent) {
for(int i=0; i < indent; ++i) {
buffer.append(' ');
}
buffer.append(prefix);
buffer.append("{\"category\": \"");
buffer.append(category.name);
buffer.append("\", \"id\": ");
buffer.append(getId());
buffer.append(", \"max\": ");
buffer.append(maxId);
switch (category) {
case DECIMAL:
buffer.append(", \"precision\": ");
buffer.append(precision);
buffer.append(", \"scale\": ");
buffer.append(scale);
break;
case CHAR:
case VARCHAR:
buffer.append(", \"length\": ");
buffer.append(maxLength);
break;
case LIST:
case MAP:
case UNION:
buffer.append(", \"children\": [");
for(int i=0; i < children.size(); ++i) {
buffer.append('\n');
children.get(i).printJsonToBuffer("", buffer, indent + 2);
if (i != children.size() - 1) {
buffer.append(',');
}
}
buffer.append("]");
break;
case STRUCT:
buffer.append(", \"fields\": [");
for(int i=0; i < children.size(); ++i) {
buffer.append('\n');
buffer.append('{');
children.get(i).printJsonToBuffer("\"" + fieldNames.get(i) + "\": ",
buffer, indent + 2);
buffer.append('}');
if (i != children.size() - 1) {
buffer.append(',');
}
}
buffer.append(']');
break;
default:
break;
}
buffer.append('}');
}
public String toJson() {
StringBuilder buffer = new StringBuilder();
printJsonToBuffer("", buffer, 0);
return buffer.toString();
}
/**
* Locate a subtype by its id.
* @param goal the column id to look for
* @return the subtype
*/
public TypeDescription findSubtype(int goal) {
ParserUtils.TypeFinder result = new ParserUtils.TypeFinder(this);
ParserUtils.findSubtype(this, goal, result);
return result.current;
}
/**
* Find a subtype of this schema by name.
* If the name is a simple integer, it will be used as a column number.
* Otherwise, this routine will recursively search for the name.
* <ul>
* <li>Struct fields are selected by name.</li>
* <li>List children are selected by "_elem".</li>
* <li>Map children are selected by "_key" or "_value".</li>
* <li>Union children are selected by number starting at 0.</li>
* </ul>
* Names are separated by '.'.
* @param columnName the name to search for
* @return the subtype
*/
public TypeDescription findSubtype(String columnName) {
return findSubtype(columnName, true);
}
public TypeDescription findSubtype(String columnName,
boolean isSchemaEvolutionCaseAware) {
ParserUtils.StringPosition source = new ParserUtils.StringPosition(columnName);
TypeDescription result = ParserUtils.findSubtype(this, source,
isSchemaEvolutionCaseAware);
if (source.hasCharactersLeft()) {
throw new IllegalArgumentException("Remaining text in parsing field name "
+ source);
}
return result;
}
/**
* Find a list of subtypes from a string, including the empty list.
*
* Each column name is separated by ','.
* @param columnNameList the list of column names
* @return the list of subtypes that correspond to the column names
*/
public List<TypeDescription> findSubtypes(String columnNameList) {
ParserUtils.StringPosition source = new ParserUtils.StringPosition(columnNameList);
List<TypeDescription> result = ParserUtils.findSubtypeList(this, source);
if (source.hasCharactersLeft()) {
throw new IllegalArgumentException("Remaining text in parsing field name "
+ source);
}
return result;
}
/**
* Annotate a schema with the encryption keys and masks.
* @param encryption the encryption keys and the fields
* @param masks the encryption masks and the fields
*/
public void annotateEncryption(String encryption, String masks) {
ParserUtils.StringPosition source = new ParserUtils.StringPosition(encryption);
ParserUtils.parseKeys(source, this);
if (source.hasCharactersLeft()) {
throw new IllegalArgumentException("Remaining text in parsing encryption keys "
+ source);
}
source = new ParserUtils.StringPosition(masks);
ParserUtils.parseMasks(source, this);
if (source.hasCharactersLeft()) {
throw new IllegalArgumentException("Remaining text in parsing encryption masks "
+ source);
}
}
/**
* Find the index of a given child object using == comparison.
* @param child The child type
* @return the index 0 to N-1 of the children.
*/
private int getChildIndex(TypeDescription child) {
for(int i=children.size() - 1; i >= 0; --i) {
if (children.get(i) == child) {
return i;
}
}
throw new IllegalArgumentException("Child not found");
}
/**
* For a complex type, get the partial name for this child. For structures,
* it returns the corresponding field name. For lists and maps, it uses the
* special names "_elem", "_key", and "_value". Unions use the integer index.
* @param child The desired child, which must be the same object (==)
* @return The name of the field for the given child.
*/
private String getPartialName(TypeDescription child) {
switch (category) {
case LIST:
return "_elem";
case MAP:
return getChildIndex(child) == 0 ? "_key" : "_value";
case STRUCT:
return fieldNames.get(getChildIndex(child));
case UNION:
return Integer.toString(getChildIndex(child));
default:
throw new IllegalArgumentException(
"Can't get the field name of a primitive type");
}
}
/**
   * Get the full field name for the given type. For
   * "struct<a:array<struct<b:int,c:int>>>", calling this on the type of
   * field c would return "a._elem.c".
* @return A string that is the inverse of findSubtype
*/
public String getFullFieldName() {
List<String> parts = new ArrayList<>(getId());
TypeDescription current = this;
TypeDescription parent = current.getParent();
// Handle the root as a special case so that it isn't an empty string.
if (parent == null) {
return Integer.toString(current.getId());
}
while (parent != null) {
parts.add(parent.getPartialName(current));
current = parent;
parent = current.getParent();
}
// Put the string together backwards
StringBuilder buffer = new StringBuilder();
for (int part=parts.size() - 1; part >= 0; --part) {
buffer.append(parts.get(part));
if (part != 0) {
buffer.append('.');
}
}
return buffer.toString();
}
}
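// --- Illustrative usage sketch (not part of the Apache ORC sources). ---
// Ties together several of the operations documented above: building a schema,
// parsing the same schema back from its string form, locating a subtype by name,
// and creating a row batch for it. The schema itself is an arbitrary example.
class TypeDescriptionUsageExample {
  static void demo() {
    // Build struct<name:string,age:int,scores:array<double>> programmatically.
    TypeDescription schema = TypeDescription.createStruct()
        .addField("name", TypeDescription.createString())
        .addField("age", TypeDescription.createInt())
        .addField("scores", TypeDescription.createList(TypeDescription.createDouble()));
    // fromString() is the inverse of toString().
    TypeDescription parsed = TypeDescription.fromString(schema.toString());
    assert schema.equals(parsed);
    // Struct fields are selected by name and list children by "_elem".
    TypeDescription scoreElement = schema.findSubtype("scores._elem");
    System.out.println("scores._elem has id " + scoreElement.getId()
        + " and full name " + scoreElement.getFullFieldName());
    // The batch's top-level columns follow the struct's children.
    VectorizedRowBatch batch = schema.createRowBatch(1024);
    System.out.println("batch has " + batch.cols.length + " top-level columns");
  }
}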
orc-main/java/core/src/java/org/apache/orc/TypeDescriptionPrettyPrint.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import java.io.PrintStream;
import java.util.List;
/**
* A pretty printer for TypeDescription.
*/
public class TypeDescriptionPrettyPrint {
static void pad(PrintStream output, int offset) {
for(int i=0; i < offset; ++i) {
output.print(' ');
}
}
static void printFieldName(PrintStream output, String fieldName){
if (TypeDescription.UNQUOTED_NAMES.matcher(fieldName).matches()) {
output.print(fieldName);
} else {
output.print('`');
output.print(fieldName.replaceAll("`", "``"));
output.print('`');
}
}
static void printStruct(PrintStream output,
int offset,
TypeDescription type) {
output.print("<");
List<TypeDescription> children = type.getChildren();
List<String> fields = type.getFieldNames();
for(int c = 0; c < children.size(); ++c) {
if (c == 0) {
output.println();
} else {
output.println(",");
}
pad(output, offset + 2);
printFieldName(output, fields.get(c));
output.print(':');
printType(output, offset + 2, children.get(c));
}
output.print('>');
}
static void printComplex(PrintStream output,
int offset,
TypeDescription type) {
output.print("<");
List<TypeDescription> children = type.getChildren();
for(int c = 0; c < children.size(); ++c) {
if (c != 0) {
output.print(",");
}
printType(output, offset + 2, children.get(c));
}
output.print('>');
}
static void printType(PrintStream output,
int offset,
TypeDescription type) {
output.print(type.getCategory().getName());
switch (type.getCategory()) {
case BOOLEAN:
case BINARY:
case BYTE:
case DATE:
case DOUBLE:
case FLOAT:
case INT:
case LONG:
case SHORT:
case STRING:
case TIMESTAMP:
case TIMESTAMP_INSTANT:
break;
case DECIMAL:
output.print('(');
output.print(type.getPrecision());
output.print(',');
output.print(type.getScale());
output.print(')');
break;
case CHAR:
case VARCHAR:
output.print('(');
output.print(type.getMaxLength());
output.print(')');
break;
case STRUCT:
printStruct(output, offset, type);
break;
case LIST:
case MAP:
case UNION:
printComplex(output, offset, type);
break;
default:
throw new IllegalArgumentException("Unhandled type " + type);
}
}
public static void print(PrintStream output,
TypeDescription schema) {
printType(output, 0, schema);
}
}
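// --- Illustrative sketch (not part of the Apache ORC sources). ---
// Pretty-prints an example schema to standard output using the printer above.
class TypeDescriptionPrettyPrintExample {
  public static void main(String[] args) {
    TypeDescription schema =
        TypeDescription.fromString("struct<name:string,tags:map<string,int>>");
    TypeDescriptionPrettyPrint.print(System.out, schema);
    System.out.println();
  }
}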
orc-main/java/core/src/java/org/apache/orc/UnknownFormatException.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.protobuf.TextFormat;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
 * @deprecated This will be removed in future releases.
*/
@Deprecated
public class UnknownFormatException extends IOException {
private final Path path;
private final String versionString;
private final OrcProto.PostScript postscript;
public UnknownFormatException(Path path, String versionString,
OrcProto.PostScript postscript) {
super(path + " was written by a future ORC version " +
versionString + ". This file is not readable by this version of ORC.\n"+
"Postscript: " + TextFormat.shortDebugString(postscript));
this.path = path;
this.versionString = versionString;
this.postscript = postscript;
}
public Path getPath() {
return path;
}
public String getVersionString() {
return versionString;
}
public OrcProto.PostScript getPostscript() {
return postscript;
}
}
orc-main/java/core/src/java/org/apache/orc/Writer.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
/**
* The interface for writing ORC files.
* @since 1.1.0
*/
public interface Writer extends Closeable {
/**
* Get the schema for this writer
* @return the file schema
* @since 1.1.0
*/
TypeDescription getSchema();
/**
* Add arbitrary meta-data to the ORC file. This may be called at any point
* until the Writer is closed. If the same key is passed a second time, the
* second value will replace the first.
* @param key a key to label the data with.
* @param value the contents of the metadata.
* @since 1.1.0
*/
void addUserMetadata(String key, ByteBuffer value);
/**
* Add a row batch to the ORC file.
* @param batch the rows to add
* @since 1.1.0
*/
void addRowBatch(VectorizedRowBatch batch) throws IOException;
/**
* Flush all of the buffers and close the file. No methods on this writer
* should be called afterwards.
* @throws IOException
* @since 1.1.0
*/
@Override
void close() throws IOException;
/**
   * Return the deserialized data size. The raw data size will be computed when
   * writing the file footer. Hence the raw data size value will be available only
   * after closing the writer.
*
* @return raw data size
* @since 1.1.0
*/
long getRawDataSize();
/**
   * Return the number of rows in the file. The row count gets updated when flushing
   * the stripes. To get an accurate row count, this method should be called after
   * closing the writer.
*
* @return row count
* @since 1.1.0
*/
long getNumberOfRows();
/**
* Write an intermediate footer on the file such that if the file is
* truncated to the returned offset, it would be a valid ORC file.
* @return the offset that would be a valid end location for an ORC file
* @since 1.1.0
*/
long writeIntermediateFooter() throws IOException;
/**
   * Fast stripe append to an ORC file. This interface is used for fast ORC file
   * merge with other ORC files. When merging, the file to be merged should pass
   * the stripe in binary form along with the stripe information and stripe
   * statistics. After appending the last stripe of a file, use
   * appendUserMetadata() to append any user metadata.
*
* This form only supports files with no column encryption. Use {@link
* #appendStripe(byte[], int, int, StripeInformation, StripeStatistics[])}
* for files with encryption.
*
* @param stripe - stripe as byte array
* @param offset - offset within byte array
* @param length - length of stripe within byte array
* @param stripeInfo - stripe information
* @param stripeStatistics - unencrypted stripe statistics
* @since 1.1.0
*/
void appendStripe(byte[] stripe, int offset, int length,
StripeInformation stripeInfo,
OrcProto.StripeStatistics stripeStatistics) throws IOException;
/**
   * Fast stripe append to an ORC file. This interface is used for fast ORC file
   * merge with other ORC files. When merging, the file to be merged should pass
   * the stripe in binary form along with the stripe information and stripe
   * statistics. After appending the last stripe of a file, use
   * {@link #addUserMetadata(String, ByteBuffer)} to append any user metadata.
* @param stripe - stripe as byte array
* @param offset - offset within byte array
* @param length - length of stripe within byte array
* @param stripeInfo - stripe information
* @param stripeStatistics - stripe statistics with the last one being
* for the unencrypted data and the others being for
* each encryption variant.
* @since 1.6.0
*/
void appendStripe(byte[] stripe, int offset, int length,
StripeInformation stripeInfo,
StripeStatistics[] stripeStatistics) throws IOException;
/**
* Update the current user metadata with a list of new values.
* @param userMetadata - user metadata
* @deprecated use {@link #addUserMetadata(String, ByteBuffer)} instead
* @since 1.1.0
*/
void appendUserMetadata(List<OrcProto.UserMetadataItem> userMetadata);
/**
* Get the statistics about the columns in the file. The output of this is
* based on the time at which it is called. It shall use all of the currently
* written data to provide the statistics.
*
   * Please note there are costs involved with invoking this method, so it should
   * be used judiciously.
*
* @return the information about the column
* @since 1.1.0
*/
ColumnStatistics[] getStatistics() throws IOException;
/**
* Get the stripe information about the file. The output of this is based on the time at which it
* is called. It shall return stripes that have been completed.
*
* After the writer is closed this shall give the complete stripe information.
*
* @return stripe information
* @throws IOException
* @since 1.6.8
*/
List<StripeInformation> getStripes() throws IOException;
/**
* Estimate the memory currently used by the writer to buffer the stripe.
* `This method help write engine to control the refresh policy of the ORC.`
* @return the number of bytes
*/
long estimateMemory();
}
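// --- Illustrative usage sketch (not part of the Apache ORC sources). ---
// A minimal write loop against the Writer interface above. The writer is assumed
// to have been created elsewhere (typically via OrcFile.createWriter) with a
// schema of struct<x:bigint>; only the methods declared in this interface plus
// the vectorized batch API are used.
class WriterLoopExample {
  static void writeLongColumn(Writer writer, long[] values) throws IOException {
    TypeDescription schema = writer.getSchema();
    VectorizedRowBatch batch = schema.createRowBatch();
    org.apache.hadoop.hive.ql.exec.vector.LongColumnVector col =
        (org.apache.hadoop.hive.ql.exec.vector.LongColumnVector) batch.cols[0];
    for (long value : values) {
      col.vector[batch.size++] = value;
      if (batch.size == batch.getMaxSize()) {
        writer.addRowBatch(batch);
        batch.reset();
      }
    }
    if (batch.size > 0) {
      writer.addRowBatch(batch);
    }
    writer.close();
  }
}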
orc-main/java/core/src/java/org/apache/orc/filter/BatchFilter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.filter;
import org.apache.orc.OrcFilterContext;
import java.util.function.Consumer;
/**
* Defines a batch filter that can operate on a
* {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} and filter rows by using the
* selected vector to determine the eligible rows.
*/
public interface BatchFilter extends Consumer<OrcFilterContext> {
/**
* Identifies the filter column names. These columns will be read before the filter is applied.
*
* @return Names of the filter columns
*/
String[] getColumnNames();
}
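// --- Illustrative sketch (not part of the Apache ORC sources). ---
// A skeleton implementation of the BatchFilter interface above. The column name
// is a placeholder; a real filter would shrink the selected vector in the
// OrcFilterContext to the rows it wants to keep.
class ExampleBatchFilter implements BatchFilter {
  @Override
  public void accept(OrcFilterContext context) {
    // Inspect the filter column(s) through the context and narrow the selected
    // rows accordingly. Left as a no-op here, which keeps every selected row.
  }
  @Override
  public String[] getColumnNames() {
    // The reader ensures these columns are read before the filter is applied.
    return new String[] {"some_filter_column"};
  }
}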
orc-main/java/core/src/java/org/apache/orc/filter/PluginFilterService.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.filter;
import org.apache.hadoop.conf.Configuration;
/**
 * Service to determine the plugin filters to be used during a read. The plugin filters
 * determined are combined using AND.
 * The filter is expected to be deterministic (for reattempts) and agnostic of the
 * application order, which is non-deterministic.
*/
public interface PluginFilterService {
/**
   * Determine the filter for a given read path. The determination is based on the path and the
   * read configuration; this should be considered carefully when using it in queries that might
   * refer to the same table/files under multiple aliases.
*
* @param filePath The fully qualified file path that is being read
* @param config The read configuration is supplied as input. This should not be changed.
* @return The plugin filter determined for the given filePath
*/
BatchFilter getFilter(String filePath, Configuration config);
}
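// --- Illustrative sketch (not part of the Apache ORC sources). ---
// A skeleton PluginFilterService that hands back the same trivial filter for
// every path. A real implementation would typically be registered for discovery
// (e.g. through java.util.ServiceLoader) and decide per path and configuration;
// all names here are assumptions for the example.
class ExamplePluginFilterService implements PluginFilterService {
  @Override
  public BatchFilter getFilter(String filePath, Configuration config) {
    return new BatchFilter() {
      @Override
      public void accept(org.apache.orc.OrcFilterContext context) {
        // No-op: keeps every selected row.
      }
      @Override
      public String[] getColumnNames() {
        return new String[0];
      }
    };
  }
}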
orc-main/java/core/src/java/org/apache/orc/impl/AcidStats.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
/**
* Statistics about the ACID operations in an ORC file
*/
public class AcidStats {
public long inserts;
public long updates;
public long deletes;
public AcidStats() {
inserts = 0;
updates = 0;
deletes = 0;
}
public AcidStats(String serialized) {
String[] parts = serialized.split(",");
inserts = Long.parseLong(parts[0]);
updates = Long.parseLong(parts[1]);
deletes = Long.parseLong(parts[2]);
}
public String serialize() {
StringBuilder builder = new StringBuilder();
builder.append(inserts);
builder.append(",");
builder.append(updates);
builder.append(",");
builder.append(deletes);
return builder.toString();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append(" inserts: ").append(inserts);
builder.append(" updates: ").append(updates);
builder.append(" deletes: ").append(deletes);
return builder.toString();
}
}
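// --- Illustrative sketch (not part of the Apache ORC sources). ---
// Round-trips the comma-separated form produced by serialize() above.
class AcidStatsExample {
  static AcidStats roundTrip() {
    AcidStats stats = new AcidStats();
    stats.inserts = 100;
    stats.updates = 10;
    stats.deletes = 1;
    // serialize() emits "inserts,updates,deletes"; the String constructor parses it back.
    return new AcidStats(stats.serialize());
  }
}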
orc-main/java/core/src/java/org/apache/orc/impl/AircompressorCodec.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import io.airlift.compress.Compressor;
import io.airlift.compress.Decompressor;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import java.io.IOException;
import java.nio.ByteBuffer;
public class AircompressorCodec implements CompressionCodec {
private final CompressionKind kind;
private final Compressor compressor;
private final Decompressor decompressor;
AircompressorCodec(CompressionKind kind, Compressor compressor,
Decompressor decompressor) {
this.kind = kind;
this.compressor = compressor;
this.decompressor = decompressor;
}
// Thread local buffer
private static final ThreadLocal<byte[]> threadBuffer =
new ThreadLocal<byte[]>() {
@Override
protected byte[] initialValue() {
return null;
}
};
protected static byte[] getBuffer(int size) {
byte[] result = threadBuffer.get();
if (result == null || result.length < size || result.length > size * 2) {
result = new byte[size];
threadBuffer.set(result);
}
return result;
}
@Override
public boolean compress(ByteBuffer in, ByteBuffer out,
ByteBuffer overflow,
Options options) {
int inBytes = in.remaining();
// I should work on a patch for Snappy to support an overflow buffer
// to prevent the extra buffer copy.
byte[] compressed = getBuffer(compressor.maxCompressedLength(inBytes));
int outBytes =
compressor.compress(in.array(), in.arrayOffset() + in.position(), inBytes,
compressed, 0, compressed.length);
if (outBytes < inBytes) {
int remaining = out.remaining();
if (remaining >= outBytes) {
System.arraycopy(compressed, 0, out.array(), out.arrayOffset() +
out.position(), outBytes);
out.position(out.position() + outBytes);
} else {
System.arraycopy(compressed, 0, out.array(), out.arrayOffset() +
out.position(), remaining);
out.position(out.limit());
System.arraycopy(compressed, remaining, overflow.array(),
overflow.arrayOffset(), outBytes - remaining);
overflow.position(outBytes - remaining);
}
return true;
} else {
return false;
}
}
@Override
public void decompress(ByteBuffer in, ByteBuffer out) throws IOException {
int inOffset = in.position();
int uncompressLen =
decompressor.decompress(in.array(), in.arrayOffset() + inOffset,
in.limit() - inOffset, out.array(), out.arrayOffset() + out.position(),
out.remaining());
out.position(uncompressLen + out.position());
out.flip();
}
private static final Options NULL_OPTION = new Options() {
@Override
public Options copy() {
return this;
}
@Override
public Options setSpeed(SpeedModifier newValue) {
return this;
}
@Override
public Options setData(DataKind newValue) {
return this;
}
@Override
public boolean equals(Object other) {
return other != null && getClass() == other.getClass();
}
@Override
public int hashCode() {
return 0;
}
};
@Override
public Options getDefaultOptions() {
return NULL_OPTION;
}
@Override
public void reset() {
// Nothing to do.
}
@Override
public void destroy() {
// Nothing to do.
}
@Override
public CompressionKind getKind() {
return kind;
}
@Override
public void close() {
OrcCodecPool.returnCodec(kind, this);
}
}
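// --- Illustrative sketch (not part of the Apache ORC sources). ---
// Drives the compress() contract shown above through the CompressionCodec
// interface. The codec is assumed to be obtained elsewhere (for example from
// OrcCodecPool); sizing both buffers at the input length means the overflow
// buffer is only touched if the primary output buffer fills up.
class AircompressorCodecExample {
  static boolean compressesSmaller(CompressionCodec codec, byte[] data) throws IOException {
    ByteBuffer in = ByteBuffer.wrap(data);
    ByteBuffer out = ByteBuffer.allocate(data.length);
    ByteBuffer overflow = ByteBuffer.allocate(data.length);
    // compress() returns false when the compressed form would not be smaller,
    // in which case the caller should keep the original bytes.
    return codec.compress(in, out, overflow, codec.getDefaultOptions());
  }
}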
orc-main/java/core/src/java/org/apache/orc/impl/BitFieldReader.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.io.filter.FilterContext;
import java.io.EOFException;
import java.io.IOException;
public final class BitFieldReader {
private final RunLengthByteReader input;
private int current;
private byte currentIdx = 8;
public BitFieldReader(InStream input) {
this.input = new RunLengthByteReader(input);
}
private void readByte() throws IOException {
if (input.hasNext()) {
current = 0xff & input.next();
currentIdx = 0;
} else {
throw new EOFException("Read past end of bit field from " + this);
}
}
public int next() throws IOException {
if (currentIdx > 7) {
readByte();
}
currentIdx++;
// Highest bit is the first val
return ((current >>> (8 - currentIdx)) & 1);
}
public void nextVector(LongColumnVector previous,
FilterContext filterContext,
long previousLen) throws IOException {
previous.isRepeating = false;
int previousIdx = 0;
if (previous.noNulls) {
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
skip(idx - previousIdx);
}
previous.vector[idx] = next();
previousIdx = idx + 1;
}
skip(previousLen - previousIdx);
} else {
for (int i = 0; i != filterContext.getSelectedSize(); i++) {
int idx = filterContext.getSelected()[i];
if (idx - previousIdx > 0) {
skip(TreeReaderFactory.TreeReader.countNonNullRowsInRange(
previous.isNull, previousIdx, idx));
}
if (!previous.isNull[idx]) {
previous.vector[idx] = next();
} else {
previous.vector[idx] = 1;
}
previousIdx = idx + 1;
}
skip(TreeReaderFactory.TreeReader.countNonNullRowsInRange(
previous.isNull, previousIdx, (int)previousLen));
}
}
public void nextVector(LongColumnVector previous,
long previousLen) throws IOException {
previous.isRepeating = true;
for (int i = 0; i < previousLen; i++) {
if (previous.noNulls || !previous.isNull[i]) {
previous.vector[i] = next();
} else {
// The default value of null for int types in vectorized
// processing is 1, so set that if the value is null
previous.vector[i] = 1;
}
// The default value for nulls in Vectorization for int types is 1
// and given that non null value can also be 1, we need to check for isNull also
// when determining the isRepeating flag.
if (previous.isRepeating && i > 0 && ((previous.vector[0] != previous.vector[i]) ||
(previous.isNull[0] != previous.isNull[i]))) {
previous.isRepeating = false;
}
}
}
public void seek(PositionProvider index) throws IOException {
input.seek(index);
int consumed = (int) index.getNext();
if (consumed > 8) {
throw new IllegalArgumentException("Seek past end of byte at " +
consumed + " in " + input);
} else if (consumed != 0) {
readByte();
currentIdx = (byte) consumed;
} else {
currentIdx = 8;
}
}
public void skip(long totalBits) throws IOException {
final int availableBits = 8 - currentIdx;
if (totalBits <= availableBits) {
currentIdx += totalBits;
} else {
final long bitsToSkip = (totalBits - availableBits);
input.skip(bitsToSkip / 8);
// Edge case: when skipping the last bits of a bitField there is nothing more to read!
if (input.hasNext()) {
current = input.next();
currentIdx = (byte) (bitsToSkip % 8);
}
}
}
@Override
public String toString() {
return "bit reader current: " + current
+ " current bit index: " + currentIdx
+ " from " + input;
}
}
orc-main/java/core/src/java/org/apache/orc/impl/BitFieldWriter.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.io.IOException;
import java.util.function.Consumer;
public class BitFieldWriter {
private RunLengthByteWriter output;
private final int bitSize;
private byte current = 0;
private int bitsLeft = 8;
public BitFieldWriter(PositionedOutputStream output,
int bitSize) throws IOException {
this.output = new RunLengthByteWriter(output);
this.bitSize = bitSize;
}
private void writeByte() throws IOException {
output.write(current);
current = 0;
bitsLeft = 8;
}
public void flush() throws IOException {
if (bitsLeft != 8) {
writeByte();
}
output.flush();
}
public void write(int value) throws IOException {
int bitsToWrite = bitSize;
while (bitsToWrite > bitsLeft) {
// add the bits to the bottom of the current word
current |= value >>> (bitsToWrite - bitsLeft);
// subtract out the bits we just added
bitsToWrite -= bitsLeft;
// zero out the bits above bitsToWrite
value &= (1 << bitsToWrite) - 1;
writeByte();
}
bitsLeft -= bitsToWrite;
current |= value << bitsLeft;
if (bitsLeft == 0) {
writeByte();
}
}
public void getPosition(PositionRecorder recorder) throws IOException {
output.getPosition(recorder);
recorder.addPosition(8 - bitsLeft);
}
public long estimateMemory() {
return output.estimateMemory();
}
public void changeIv(Consumer<byte[]> modifier) {
output.changeIv(modifier);
}
}