File: orc-main/java/core/src/java/org/apache/orc/impl/writer/FloatTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.SerializationUtils;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class FloatTreeWriter extends TreeWriterBase {
private final PositionedOutputStream stream;
private final SerializationUtils utils;
public FloatTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.stream = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
this.utils = new SerializationUtils();
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
DoubleColumnVector vec = (DoubleColumnVector) vector;
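// a repeating vector stores a single logical value in slot 0 that applies to
// every row in the range: update the statistics once, then emit the value
// `length` times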
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
float value = (float) vec.vector[0];
indexStatistics.updateDouble(value);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addDouble(value);
}
bloomFilterUtf8.addDouble(value);
}
for (int i = 0; i < length; ++i) {
utils.writeFloat(stream, value);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
float value = (float) vec.vector[i + offset];
utils.writeFloat(stream, value);
indexStatistics.updateDouble(value);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addDouble(value);
}
bloomFilterUtf8.addDouble(value);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
stream.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + stream.getBufferSize();
}
@Override
public long getRawDataSize() {
long num = fileStatistics.getNumberOfValues();
return num * JavaDataModel.get().primitive1();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
stream.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
stream.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/IntegerTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class IntegerTreeWriter extends TreeWriterBase {
private final IntegerWriter writer;
private boolean isDirectV2 = true;
private final boolean isLong;
public IntegerTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
OutStream out = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
this.isDirectV2 = isNewWriteFormat(context);
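// integer columns carry signed values, so the DATA stream uses a signed
// run-length integer writer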
this.writer = createIntegerWriter(out, true, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
this.isLong = schema.getCategory() == TypeDescription.Category.LONG;
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
return result;
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
LongColumnVector vec = (LongColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
long value = vec.vector[0];
indexStatistics.updateInteger(value, length);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(value);
}
bloomFilterUtf8.addLong(value);
}
for (int i = 0; i < length; ++i) {
writer.write(value);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
long value = vec.vector[i + offset];
writer.write(value);
indexStatistics.updateInteger(value, 1);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(value);
}
bloomFilterUtf8.addLong(value);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
writer.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + writer.estimateMemory();
}
@Override
public long getRawDataSize() {
JavaDataModel jdm = JavaDataModel.get();
long num = fileStatistics.getNumberOfValues();
return num * (isLong ? jdm.primitive2() : jdm.primitive1());
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
writer.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
writer.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/ListTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public class ListTreeWriter extends TreeWriterBase {
private final IntegerWriter lengths;
private final boolean isDirectV2;
private final TreeWriter childWriter;
ListTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.isDirectV2 = isNewWriteFormat(context);
childWriter = Factory.create(schema.getChildren().get(0), encryption, context);
lengths = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.LENGTH, encryption)),
false, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
return result;
}
@Override
public void createRowIndexEntry() throws IOException {
super.createRowIndexEntry();
childWriter.createRowIndexEntry();
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
ListColumnVector vec = (ListColumnVector) vector;
/* update aggregate statistics */
indexStatistics.updateCollectionLength(vec.lengths.length);
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
int childOffset = (int) vec.offsets[0];
int childLength = (int) vec.lengths[0];
for (int i = 0; i < length; ++i) {
lengths.write(childLength);
childWriter.writeBatch(vec.child, childOffset, childLength);
}
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(childLength);
}
bloomFilterUtf8.addLong(childLength);
}
}
} else {
// write the elements in runs
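// consecutive child ranges are coalesced: a run keeps growing while the next
// list's offset is contiguous with it, and is flushed to the child writer
// whenever a gap (or the end of the batch) is reached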
int currentOffset = 0;
int currentLength = 0;
for (int i = 0; i < length; ++i) {
if (!vec.isNull[i + offset]) {
int nextLength = (int) vec.lengths[offset + i];
int nextOffset = (int) vec.offsets[offset + i];
lengths.write(nextLength);
if (currentLength == 0) {
currentOffset = nextOffset;
currentLength = nextLength;
} else if (currentOffset + currentLength != nextOffset) {
childWriter.writeBatch(vec.child, currentOffset,
currentLength);
currentOffset = nextOffset;
currentLength = nextLength;
} else {
currentLength += nextLength;
}
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(nextLength);
}
bloomFilterUtf8.addLong(nextLength);
}
}
}
if (currentLength != 0) {
childWriter.writeBatch(vec.child, currentOffset,
currentLength);
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
childWriter.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
lengths.getPosition(recorder);
}
@Override
public void addStripeStatistics(StripeStatistics[] stats
) throws IOException {
super.addStripeStatistics(stats);
childWriter.addStripeStatistics(stats);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + lengths.estimateMemory() +
childWriter.estimateMemory();
}
@Override
public long getRawDataSize() {
return childWriter.getRawDataSize();
}
@Override
public void writeFileStatistics() throws IOException {
super.writeFileStatistics();
childWriter.writeFileStatistics();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
lengths.flush();
childWriter.flushStreams();
}
@Override
public void getCurrentStatistics(ColumnStatistics[] output) {
super.getCurrentStatistics(output);
childWriter.getCurrentStatistics(output);
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
lengths.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
childWriter.prepareStripe(stripeId);
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/MapTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
import java.util.List;
public class MapTreeWriter extends TreeWriterBase {
private final IntegerWriter lengths;
private final boolean isDirectV2;
private final TreeWriter keyWriter;
private final TreeWriter valueWriter;
MapTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
this.isDirectV2 = isNewWriteFormat(context);
List<TypeDescription> children = schema.getChildren();
keyWriter = Factory.create(children.get(0), encryption, context);
valueWriter = Factory.create(children.get(1), encryption, context);
lengths = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.LENGTH, encryption)),
false, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
return result;
}
@Override
public void createRowIndexEntry() throws IOException {
super.createRowIndexEntry();
keyWriter.createRowIndexEntry();
valueWriter.createRowIndexEntry();
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
MapColumnVector vec = (MapColumnVector) vector;
/* update aggregate statistics */
indexStatistics.updateCollectionLength(vec.lengths.length);
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
int childOffset = (int) vec.offsets[0];
int childLength = (int) vec.lengths[0];
for (int i = 0; i < length; ++i) {
lengths.write(childLength);
keyWriter.writeBatch(vec.keys, childOffset, childLength);
valueWriter.writeBatch(vec.values, childOffset, childLength);
}
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(childLength);
}
bloomFilterUtf8.addLong(childLength);
}
}
} else {
// write the elements in runs
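// same run-coalescing as ListTreeWriter, but each flushed run is written to
// both the key writer and the value writer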
int currentOffset = 0;
int currentLength = 0;
for (int i = 0; i < length; ++i) {
if (!vec.isNull[i + offset]) {
int nextLength = (int) vec.lengths[offset + i];
int nextOffset = (int) vec.offsets[offset + i];
lengths.write(nextLength);
if (currentLength == 0) {
currentOffset = nextOffset;
currentLength = nextLength;
} else if (currentOffset + currentLength != nextOffset) {
keyWriter.writeBatch(vec.keys, currentOffset,
currentLength);
valueWriter.writeBatch(vec.values, currentOffset,
currentLength);
currentOffset = nextOffset;
currentLength = nextLength;
} else {
currentLength += nextLength;
}
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(nextLength);
}
bloomFilterUtf8.addLong(nextLength);
}
}
}
if (currentLength != 0) {
keyWriter.writeBatch(vec.keys, currentOffset,
currentLength);
valueWriter.writeBatch(vec.values, currentOffset,
currentLength);
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
keyWriter.writeStripe(requiredIndexEntries);
valueWriter.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
lengths.getPosition(recorder);
}
@Override
public void addStripeStatistics(StripeStatistics[] stats
) throws IOException {
super.addStripeStatistics(stats);
keyWriter.addStripeStatistics(stats);
valueWriter.addStripeStatistics(stats);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + lengths.estimateMemory() +
keyWriter.estimateMemory() + valueWriter.estimateMemory();
}
@Override
public long getRawDataSize() {
return keyWriter.getRawDataSize() + valueWriter.getRawDataSize();
}
@Override
public void writeFileStatistics() throws IOException {
super.writeFileStatistics();
keyWriter.writeFileStatistics();
valueWriter.writeFileStatistics();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
lengths.flush();
keyWriter.flushStreams();
valueWriter.flushStreams();
}
@Override
public void getCurrentStatistics(ColumnStatistics[] output) {
super.getCurrentStatistics(output);
keyWriter.getCurrentStatistics(output);
valueWriter.getCurrentStatistics(output);
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
lengths.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
keyWriter.prepareStripe(stripeId);
valueWriter.prepareStripe(stripeId);
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/StreamOptions.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.orc.CompressionCodec;
import org.apache.orc.EncryptionAlgorithm;
import java.security.Key;
import java.util.Arrays;
import java.util.function.Consumer;
/**
* The compression and encryption options for writing a stream.
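*
* <p>A minimal usage sketch built only from the methods below (variable names
* are illustrative):
* <pre>
*   StreamOptions opts = new StreamOptions(bufferSize)
*       .withCodec(codec, codecOptions)
*       .withEncryption(algorithm, key);
* </pre>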
*/
public class StreamOptions {
private CompressionCodec codec;
private CompressionCodec.Options options;
private int bufferSize;
private EncryptionAlgorithm algorithm;
private Key key;
private byte[] iv;
public StreamOptions(StreamOptions other) {
this.codec = other.codec;
if (other.options != null) {
this.options = other.options.copy();
}
this.bufferSize = other.bufferSize;
this.algorithm = other.algorithm;
this.key = other.key;
if (other.iv != null) {
this.iv = Arrays.copyOf(other.iv, other.iv.length);
}
}
/**
* An option object with the given buffer size set.
* @param bufferSize the size of the buffers.
*/
public StreamOptions(int bufferSize) {
this.bufferSize = bufferSize;
}
public StreamOptions bufferSize(int bufferSize) {
this.bufferSize = bufferSize;
return this;
}
/**
* Compress using the given codec.
* @param codec the codec to compress with
* @return this
*/
public StreamOptions withCodec(CompressionCodec codec,
CompressionCodec.Options options) {
this.codec = codec;
this.options = options;
return this;
}
public StreamOptions withEncryption(EncryptionAlgorithm algorithm,
Key key) {
this.algorithm = algorithm;
this.key = key;
return this;
}
/**
* Modify the IV.
* @param modifier the function to modify the IV
* @return returns this
*/
public StreamOptions modifyIv(Consumer<byte[]> modifier) {
modifier.accept(getIv());
return this;
}
public CompressionCodec getCodec() {
return codec;
}
public CompressionCodec.Options getCodecOptions() {
return options;
}
public byte[] getIv() {
if (iv == null) {
iv = new byte[algorithm.getIvLength()];
}
return iv;
}
public int getBufferSize() {
return bufferSize;
}
public boolean isEncrypted() {
return key != null;
}
public Key getKey() {
return key;
}
public EncryptionAlgorithm getAlgorithm() {
return algorithm;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Compress: ");
if (codec == null) {
builder.append("none");
} else {
builder.append(codec.getKind());
}
builder.append(" buffer: ");
builder.append(bufferSize);
if (isEncrypted()) {
builder.append(" encryption: ");
builder.append(algorithm.getAlgorithm());
builder.append("/");
builder.append(algorithm.keyLength());
}
return builder.toString();
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/StringBaseTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcProto;
import org.apache.orc.StringColumnStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.Dictionary;
import org.apache.orc.impl.DynamicIntArray;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.StreamName;
import org.apache.orc.impl.StringHashTableDictionary;
import org.apache.orc.impl.StringRedBlackTree;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import static org.apache.orc.OrcConf.DICTIONARY_IMPL;
import static org.apache.orc.impl.Dictionary.INITIAL_DICTIONARY_SIZE;
public abstract class StringBaseTreeWriter extends TreeWriterBase {
// Stream for dictionary's key
private final OutStream stringOutput;
protected final IntegerWriter lengthOutput;
// Stream for dictionary-encoded value
private final IntegerWriter rowOutput;
protected final DynamicIntArray rows = new DynamicIntArray();
protected final PositionedOutputStream directStreamOutput;
private final List<OrcProto.RowIndexEntry> savedRowIndex =
new ArrayList<>();
private final boolean buildIndex;
private final List<Long> rowIndexValueCount = new ArrayList<>();
// If the number of keys in a dictionary is greater than this fraction of
// the total number of non-null rows, turn off dictionary encoding.
private final double dictionaryKeySizeThreshold;
protected Dictionary dictionary;
protected boolean useDictionaryEncoding = true;
private boolean isDirectV2 = true;
private boolean doneDictionaryCheck;
private final boolean strideDictionaryCheck;
private static Dictionary createDict(Configuration conf) {
String dictImpl = conf.get(DICTIONARY_IMPL.getAttribute(),
DICTIONARY_IMPL.getDefaultValue().toString()).toUpperCase();
switch (Dictionary.IMPL.valueOf(dictImpl)) {
case RBTREE:
return new StringRedBlackTree(INITIAL_DICTIONARY_SIZE);
case HASH:
return new StringHashTableDictionary(INITIAL_DICTIONARY_SIZE);
default:
throw new UnsupportedOperationException("Unknown implementation:" + dictImpl);
}
}
StringBaseTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
Configuration conf = context.getConfiguration();
this.dictionary = createDict(conf);
this.isDirectV2 = isNewWriteFormat(context);
directStreamOutput = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption));
stringOutput = context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DICTIONARY_DATA, encryption));
lengthOutput = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.LENGTH, encryption)),
false, isDirectV2, context);
rowOutput = createIntegerWriter(directStreamOutput, false, isDirectV2,
context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
rowIndexValueCount.add(0L);
buildIndex = context.buildIndex();
dictionaryKeySizeThreshold = context.getDictionaryKeySizeThreshold(id);
strideDictionaryCheck =
OrcConf.ROW_INDEX_STRIDE_DICTIONARY_CHECK.getBoolean(conf);
if (dictionaryKeySizeThreshold <= 0.0) {
useDictionaryEncoding = false;
doneDictionaryCheck = true;
recordDirectStreamPosition();
} else {
doneDictionaryCheck = false;
}
}
private void checkDictionaryEncoding() {
if (!doneDictionaryCheck) {
// Set the flag indicating whether or not to use dictionary encoding
// based on whether or not the fraction of distinct keys over number of
// non-null rows is less than the configured threshold
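// (e.g. with a threshold of 0.8, a stride of 1,000 non-null rows and 900
// distinct keys has ratio 0.9 and falls back to direct encoding)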
float ratio = rows.size() > 0 ? (float) (dictionary.size()) / rows.size() : 0.0f;
useDictionaryEncoding = !isDirectV2 || ratio <= dictionaryKeySizeThreshold;
doneDictionaryCheck = true;
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
// if the stripe has fewer rows than dictionaryCheckAfterRows, the dictionary
// check will not have happened yet, so do it again here.
checkDictionaryEncoding();
if (!useDictionaryEncoding) {
stringOutput.suppress();
}
// we need to build the rowindex before calling super, since it
// writes it out.
super.writeStripe(requiredIndexEntries);
// reset all of the fields to be ready for the next stripe.
dictionary.clear();
savedRowIndex.clear();
rowIndexValueCount.clear();
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
rowIndexValueCount.add(0L);
if (!useDictionaryEncoding) {
// record the start positions of the first index stride of the next stripe,
// i.e. the beginning of the direct streams when dictionary encoding is disabled
recordDirectStreamPosition();
}
}
private void flushDictionary() throws IOException {
final int[] dumpOrder = new int[dictionary.size()];
if (useDictionaryEncoding) {
// Write the dictionary by traversing the dictionary writing out
// the bytes and lengths; and creating the map from the original order
// to the final sorted order.
dictionary.visit(new Dictionary.Visitor() {
private int currentId = 0;
@Override
public void visit(Dictionary.VisitorContext context
) throws IOException {
context.writeBytes(stringOutput);
lengthOutput.write(context.getLength());
dumpOrder[context.getOriginalPosition()] = currentId++;
}
});
} else {
// for direct encoding, we don't want the dictionary data stream
stringOutput.suppress();
}
int length = rows.size();
int rowIndexEntry = 0;
OrcProto.RowIndex.Builder rowIndex = getRowIndex();
// write the values translated into the dump order.
for (int i = 0; i <= length; ++i) {
// now that we are writing out the row values, we can finalize the
// row index
if (buildIndex) {
while (i == rowIndexValueCount.get(rowIndexEntry) &&
rowIndexEntry < savedRowIndex.size()) {
OrcProto.RowIndexEntry.Builder base =
savedRowIndex.get(rowIndexEntry++).toBuilder();
if (useDictionaryEncoding) {
rowOutput.getPosition(new RowIndexPositionRecorder(base));
} else {
PositionRecorder posn = new RowIndexPositionRecorder(base);
directStreamOutput.getPosition(posn);
lengthOutput.getPosition(posn);
}
rowIndex.addEntry(base.build());
}
}
if (i != length) {
if (useDictionaryEncoding) {
rowOutput.write(dumpOrder[rows.get(i)]);
} else {
final int writeLen = dictionary.writeTo(directStreamOutput, rows.get(i));
lengthOutput.write(writeLen);
}
}
}
rows.clear();
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
if (useDictionaryEncoding) {
result.setDictionarySize(dictionary.size());
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DICTIONARY_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DICTIONARY);
}
} else {
if (isDirectV2) {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2);
} else {
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
}
}
return result;
}
/**
* This method doesn't call the super method, because unlike most of the
* other TreeWriters, this one can't record the position in the streams
* until the stripe is being flushed. Therefore it saves all of the entries
* and augments them with the final information as the stripe is written.
*/
@Override
public void createRowIndexEntry() throws IOException {
getStripeStatistics().merge(indexStatistics);
OrcProto.RowIndexEntry.Builder rowIndexEntry = getRowIndexEntry();
rowIndexEntry.setStatistics(indexStatistics.serialize());
indexStatistics.reset();
OrcProto.RowIndexEntry base = rowIndexEntry.build();
savedRowIndex.add(base);
rowIndexEntry.clear();
addBloomFilterEntry();
recordPosition(rowIndexPosition);
rowIndexValueCount.add((long) rows.size());
if (strideDictionaryCheck) {
checkDictionaryEncoding();
}
if (!useDictionaryEncoding) {
if (rows.size() > 0) {
flushDictionary();
// just record the start positions of next index stride
recordDirectStreamPosition();
} else {
// record the start positions of next index stride
recordDirectStreamPosition();
getRowIndex().addEntry(base);
}
}
}
private void recordDirectStreamPosition() throws IOException {
if (rowIndexPosition != null) {
directStreamOutput.getPosition(rowIndexPosition);
lengthOutput.getPosition(rowIndexPosition);
}
}
@Override
public long estimateMemory() {
long parent = super.estimateMemory();
if (useDictionaryEncoding) {
return parent + dictionary.getSizeInBytes() + rows.getSizeInBytes();
} else {
return parent + lengthOutput.estimateMemory() +
directStreamOutput.getBufferSize();
}
}
@Override
public long getRawDataSize() {
// ORC strings are converted to java Strings, so use JavaDataModel to
// compute the overall size of the strings
StringColumnStatistics scs = (StringColumnStatistics) fileStatistics;
long numVals = fileStatistics.getNumberOfValues();
if (numVals == 0) {
return 0;
} else {
int avgSize = (int) (scs.getSum() / numVals);
return numVals * JavaDataModel.get().lengthForStringOfLength(avgSize);
}
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
// if the stripe has fewer rows than dictionaryCheckAfterRows, the dictionary
// check will not have happened yet, so do it again here.
checkDictionaryEncoding();
if (useDictionaryEncoding) {
flushDictionary();
stringOutput.flush();
lengthOutput.flush();
rowOutput.flush();
} else {
// flush out any leftover entries from the dictionary
if (rows.size() > 0) {
flushDictionary();
}
// suppress the stream for every stripe if dictionary is disabled
stringOutput.suppress();
directStreamOutput.flush();
lengthOutput.flush();
}
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
Consumer<byte[]> updater = CryptoUtils.modifyIvForStripe(stripeId);
stringOutput.changeIv(updater);
lengthOutput.changeIv(updater);
rowOutput.changeIv(updater);
directStreamOutput.changeIv(updater);
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/StringTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
public class StringTreeWriter extends StringBaseTreeWriter {
StringTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
BytesColumnVector vec = (BytesColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
if (useDictionaryEncoding) {
int id = dictionary.add(vec.vector[0], vec.start[0], vec.length[0]);
for (int i = 0; i < length; ++i) {
rows.add(id);
}
} else {
for (int i = 0; i < length; ++i) {
directStreamOutput.write(vec.vector[0], vec.start[0],
vec.length[0]);
lengthOutput.write(vec.length[0]);
}
}
indexStatistics.updateString(vec.vector[0], vec.start[0],
vec.length[0], length);
if (createBloomFilter) {
if (bloomFilter != null) {
// decode the UTF-8 bytes into a Java String for the original bloom filter
bloomFilter.addString(new String(vec.vector[0], vec.start[0],
vec.length[0], StandardCharsets.UTF_8));
}
bloomFilterUtf8.addBytes(vec.vector[0], vec.start[0], vec.length[0]);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
if (useDictionaryEncoding) {
rows.add(dictionary.add(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i]));
} else {
directStreamOutput.write(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i]);
lengthOutput.write(vec.length[offset + i]);
}
indexStatistics.updateString(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i], 1);
if (createBloomFilter) {
if (bloomFilter != null) {
// decode the UTF-8 bytes into a Java String for the original bloom filter
bloomFilter.addString(new String(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i],
StandardCharsets.UTF_8));
}
bloomFilterUtf8.addBytes(vec.vector[offset + i],
vec.start[offset + i], vec.length[offset + i]);
}
}
}
}
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/StructTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.util.List;
public class StructTreeWriter extends TreeWriterBase {
final TreeWriter[] childrenWriters;
public StructTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
List<TypeDescription> children = schema.getChildren();
childrenWriters = new TreeWriter[children.size()];
for (int i = 0; i < childrenWriters.length; ++i) {
childrenWriters[i] = Factory.create(children.get(i), encryption, context);
}
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void writeRootBatch(VectorizedRowBatch batch, int offset,
int length) throws IOException {
// update the statistics for the root column
indexStatistics.increment(length);
// I'm assuming that the root column isn't nullable so that I don't need
// to update isPresent.
for (int i = 0; i < childrenWriters.length; ++i) {
childrenWriters[i].writeBatch(batch.cols[i], offset, length);
}
}
private static void writeFields(StructColumnVector vector,
TreeWriter[] childrenWriters,
int offset, int length) throws IOException {
for (int field = 0; field < childrenWriters.length; ++field) {
childrenWriters[field].writeBatch(vector.fields[field], offset, length);
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
StructColumnVector vec = (StructColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
writeFields(vec, childrenWriters, offset, length);
}
} else if (vector.noNulls) {
writeFields(vec, childrenWriters, offset, length);
} else {
// write the records in runs
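// group consecutive non-null rows into a single writeFields call; a run is
// flushed when a null row (or the end of the range) is reached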
int currentRun = 0;
boolean started = false;
for (int i = 0; i < length; ++i) {
if (!vec.isNull[i + offset]) {
if (!started) {
started = true;
currentRun = i;
}
} else if (started) {
started = false;
writeFields(vec, childrenWriters, offset + currentRun,
i - currentRun);
}
}
if (started) {
writeFields(vec, childrenWriters, offset + currentRun,
length - currentRun);
}
}
}
@Override
public void createRowIndexEntry() throws IOException {
super.createRowIndexEntry();
for (TreeWriter child : childrenWriters) {
child.createRowIndexEntry();
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
for (TreeWriter child : childrenWriters) {
child.writeStripe(requiredIndexEntries);
}
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void addStripeStatistics(StripeStatistics[] stats
) throws IOException {
super.addStripeStatistics(stats);
for (TreeWriter child : childrenWriters) {
child.addStripeStatistics(stats);
}
}
@Override
public long estimateMemory() {
long result = 0;
for (TreeWriter writer : childrenWriters) {
result += writer.estimateMemory();
}
return super.estimateMemory() + result;
}
@Override
public long getRawDataSize() {
long result = 0;
for (TreeWriter writer : childrenWriters) {
result += writer.getRawDataSize();
}
return result;
}
@Override
public void writeFileStatistics() throws IOException {
super.writeFileStatistics();
for (TreeWriter child : childrenWriters) {
child.writeFileStatistics();
}
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
for (TreeWriter child : childrenWriters) {
child.flushStreams();
}
}
@Override
public void getCurrentStatistics(ColumnStatistics[] output) {
super.getCurrentStatistics(output);
for (TreeWriter child: childrenWriters) {
child.getCurrentStatistics(output);
}
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
for (TreeWriter child: childrenWriters) {
child.prepareStripe(stripeId);
}
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/TimestampTreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.util.JavaDataModel;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.SerializationUtils;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import java.util.function.Consumer;
public class TimestampTreeWriter extends TreeWriterBase {
public static final int MILLIS_PER_SECOND = 1000;
public static final String BASE_TIMESTAMP_STRING = "2015-01-01 00:00:00";
private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
private final IntegerWriter seconds;
private final IntegerWriter nanos;
private final boolean isDirectV2;
private final boolean alwaysUTC;
private final TimeZone localTimezone;
private final long epoch;
private final boolean useProleptic;
public TimestampTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context,
boolean instantType) throws IOException {
super(schema, encryption, context);
this.isDirectV2 = isNewWriteFormat(context);
this.seconds = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption)),
true, isDirectV2, context);
this.nanos = createIntegerWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.SECONDARY, encryption)),
false, isDirectV2, context);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
this.alwaysUTC = instantType || context.getUseUTCTimestamp();
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
try {
if (this.alwaysUTC) {
dateFormat.setTimeZone(UTC);
localTimezone = null;
epoch = dateFormat.parse(TimestampTreeWriter.BASE_TIMESTAMP_STRING).getTime() /
TimestampTreeWriter.MILLIS_PER_SECOND;
} else {
localTimezone = TimeZone.getDefault();
dateFormat.setTimeZone(localTimezone);
epoch = dateFormat.parse(TimestampTreeWriter.BASE_TIMESTAMP_STRING).getTime() /
TimestampTreeWriter.MILLIS_PER_SECOND;
}
} catch (ParseException e) {
throw new IOException("Unable to create base timestamp tree writer", e);
}
useProleptic = context.getProlepticGregorian();
}
@Override
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder result = super.getEncoding();
result.setKind(isDirectV2 ? OrcProto.ColumnEncoding.Kind.DIRECT_V2
: OrcProto.ColumnEncoding.Kind.DIRECT);
return result;
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
TimestampColumnVector vec = (TimestampColumnVector) vector;
vec.changeCalendar(useProleptic, true);
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
// ignore the bottom three digits from the vec.time field
final long secs = vec.time[0] / MILLIS_PER_SECOND;
final int newNanos = vec.nanos[0];
// set the millis based on the top three digits of the nanos
long millis = secs * MILLIS_PER_SECOND + newNanos / 1_000_000;
if (millis < 0 && newNanos > 999_999) {
millis -= MILLIS_PER_SECOND;
}
long utc = vec.isUTC() || alwaysUTC ?
millis : SerializationUtils.convertToUtc(localTimezone, millis);
indexStatistics.updateTimestamp(utc, newNanos % 1_000_000);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(millis);
}
bloomFilterUtf8.addLong(utc);
}
final long nano = formatNanos(vec.nanos[0]);
for (int i = 0; i < length; ++i) {
seconds.write(secs - epoch);
nanos.write(nano);
}
}
} else {
for (int i = 0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
// ignore the bottom three digits from the vec.time field
final long secs = vec.time[i + offset] / MILLIS_PER_SECOND;
final int newNanos = vec.nanos[i + offset];
// set the millis based on the top three digits of the nanos
long millis = secs * MILLIS_PER_SECOND + newNanos / 1_000_000;
if (millis < 0 && newNanos > 999_999) {
millis -= MILLIS_PER_SECOND;
}
long utc = vec.isUTC() || alwaysUTC ?
millis : SerializationUtils.convertToUtc(localTimezone, millis);
seconds.write(secs - epoch);
nanos.write(formatNanos(newNanos));
indexStatistics.updateTimestamp(utc, newNanos % 1_000_000);
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(millis);
}
bloomFilterUtf8.addLong(utc);
}
}
}
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
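/**
 * Encode the nanosecond value for the SECONDARY stream. If the value has at
 * least two trailing decimal zeros, they are removed and the low three bits
 * of the result record (number of zeros removed - 1); otherwise the value is
 * simply shifted left by three bits. For example, 1000 becomes 1 << 3 | 2.
 */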
static long formatNanos(int nanos) {
if (nanos == 0) {
return 0;
} else if (nanos % 100 != 0) {
return ((long) nanos) << 3;
} else {
nanos /= 100;
int trailingZeros = 1;
while (nanos % 10 == 0 && trailingZeros < 7) {
nanos /= 10;
trailingZeros += 1;
}
return ((long) nanos) << 3 | trailingZeros;
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
seconds.getPosition(recorder);
nanos.getPosition(recorder);
}
@Override
public long estimateMemory() {
return super.estimateMemory() + seconds.estimateMemory() +
nanos.estimateMemory();
}
@Override
public long getRawDataSize() {
return fileStatistics.getNumberOfValues() *
JavaDataModel.get().lengthOfTimestamp();
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
seconds.flush();
nanos.flush();
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
Consumer<byte[]> updater = CryptoUtils.modifyIvForStripe(stripeId);
seconds.changeIv(updater);
nanos.changeIv(updater);
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/TreeWriter.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.OrcFile;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import java.io.IOException;
/**
* The writers for the specific writers of each type. This provides
* the generic API that they must all implement.
*/
public interface TreeWriter {
/**
* Estimate the memory currently used to buffer the stripe.
* @return the number of bytes
*/
long estimateMemory();
/**
* Estimate the memory used if the file was read into Hive's Writable
* types. This is used as an estimate for the query optimizer.
* @return the number of bytes
*/
long getRawDataSize();
/**
* Set up for the next stripe.
* @param stripeId the next stripe id
*/
void prepareStripe(int stripeId);
/**
* Write a VectorizedRowBatch to the file. This is called by the WriterImplV2
* at the top level.
* @param batch the list of all of the columns
* @param offset the first row from the batch to write
* @param length the number of rows to write
*/
void writeRootBatch(VectorizedRowBatch batch, int offset,
int length) throws IOException;
/**
* Write a ColumnVector to the file. This is called recursively by
* writeRootBatch.
* @param vector the data to write
* @param offset the first value offset to write.
* @param length the number of values to write
*/
void writeBatch(ColumnVector vector, int offset,
int length) throws IOException;
/**
* Create a row index entry at the current point in the stripe.
*/
void createRowIndexEntry() throws IOException;
/**
* Flush the TreeWriter stream
* @throws IOException
*/
void flushStreams() throws IOException;
/**
* Write the stripe out to the file.
* @param requiredIndexEntries the number of index entries that are
* required. This is used to check that the
* row index is well formed.
*/
void writeStripe(int requiredIndexEntries) throws IOException;
/**
* During a stripe append, we need to handle the stripe statistics.
* @param stripeStatistics the statistics for the new stripe across the
* encryption variants
*/
void addStripeStatistics(StripeStatistics[] stripeStatistics
) throws IOException;
/**
* Write the FileStatistics for each column in each encryption variant.
*/
void writeFileStatistics() throws IOException;
/**
* Get the current file statistics for each column. If a column is encrypted,
* the encrypted variant statistics are used.
* @param output an array that is filled in with the results
*/
void getCurrentStatistics(ColumnStatistics[] output);
class Factory {
/**
* Create a new tree writer for the given types and insert encryption if
* required.
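*
* <p>A minimal sketch of the top-level call (variable names are illustrative;
* passing {@code null} for encryption lets the factory look up the column's
* encryption variant itself):
* <pre>
*   TreeWriter root = TreeWriter.Factory.create(schema, null, writerContext);
* </pre>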
* @param schema the type to build a writer for
* @param encryption the encryption status
* @param streamFactory the writer context
* @return a new tree writer
*/
public static TreeWriter create(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext streamFactory) throws IOException {
if (encryption == null) {
// If we are the root of an encryption variant, create a special writer.
encryption = streamFactory.getEncryption(schema.getId());
if (encryption != null) {
return new EncryptionTreeWriter(schema, encryption, streamFactory);
}
}
return createSubtree(schema, encryption, streamFactory);
}
/**
* Create a subtree without inserting encryption nodes
* @param schema the schema to create
* @param encryption the encryption variant
* @param streamFactory the writer context
* @return a new tree writer
*/
static TreeWriter createSubtree(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext streamFactory) throws IOException {
OrcFile.Version version = streamFactory.getVersion();
switch (schema.getCategory()) {
case BOOLEAN:
return new BooleanTreeWriter(schema, encryption, streamFactory);
case BYTE:
return new ByteTreeWriter(schema, encryption, streamFactory);
case SHORT:
case INT:
case LONG:
return new IntegerTreeWriter(schema, encryption, streamFactory);
case FLOAT:
return new FloatTreeWriter(schema, encryption, streamFactory);
case DOUBLE:
return new DoubleTreeWriter(schema, encryption, streamFactory);
case STRING:
return new StringTreeWriter(schema, encryption, streamFactory);
case CHAR:
return new CharTreeWriter(schema, encryption, streamFactory);
case VARCHAR:
return new VarcharTreeWriter(schema, encryption, streamFactory);
case BINARY:
return new BinaryTreeWriter(schema, encryption, streamFactory);
case TIMESTAMP:
return new TimestampTreeWriter(schema, encryption, streamFactory, false);
case TIMESTAMP_INSTANT:
return new TimestampTreeWriter(schema, encryption, streamFactory, true);
case DATE:
return new DateTreeWriter(schema, encryption, streamFactory);
case DECIMAL:
if (version == OrcFile.Version.UNSTABLE_PRE_2_0 &&
schema.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
return new Decimal64TreeWriter(schema, encryption, streamFactory);
}
return new DecimalTreeWriter(schema, encryption, streamFactory);
case STRUCT:
return new StructTreeWriter(schema, encryption, streamFactory);
case MAP:
return new MapTreeWriter(schema, encryption, streamFactory);
case LIST:
return new ListTreeWriter(schema, encryption, streamFactory);
case UNION:
return new UnionTreeWriter(schema, encryption, streamFactory);
default:
throw new IllegalArgumentException("Bad category: " +
schema.getCategory());
}
}
}
}

File: orc-main/java/core/src/java/org/apache/orc/impl/writer/TreeWriterBase.java

/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.BitFieldWriter;
import org.apache.orc.impl.ColumnStatisticsImpl;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.IntegerWriter;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.PositionedOutputStream;
import org.apache.orc.impl.RunLengthIntegerWriter;
import org.apache.orc.impl.RunLengthIntegerWriterV2;
import org.apache.orc.impl.StreamName;
import org.apache.orc.util.BloomFilter;
import org.apache.orc.util.BloomFilterIO;
import org.apache.orc.util.BloomFilterUtf8;
import java.io.IOException;
import java.util.List;
/**
* The parent class of all of the writers for each column. Each column
* is written by an instance of this class. The compound types (struct,
* list, map, and union) have children tree writers that write the children
* types.
*/
public abstract class TreeWriterBase implements TreeWriter {
protected final int id;
protected final BitFieldWriter isPresent;
protected final TypeDescription schema;
protected final WriterEncryptionVariant encryption;
private final boolean isCompressed;
protected final ColumnStatisticsImpl indexStatistics;
protected final ColumnStatisticsImpl stripeColStatistics;
protected final ColumnStatisticsImpl fileStatistics;
protected final RowIndexPositionRecorder rowIndexPosition;
private final OrcProto.RowIndex.Builder rowIndex;
private final OrcProto.RowIndexEntry.Builder rowIndexEntry;
protected final BloomFilter bloomFilter;
protected final BloomFilterUtf8 bloomFilterUtf8;
protected final boolean createBloomFilter;
private final OrcProto.BloomFilterIndex.Builder bloomFilterIndex;
private final OrcProto.BloomFilterIndex.Builder bloomFilterIndexUtf8;
protected final OrcProto.BloomFilter.Builder bloomFilterEntry;
private boolean foundNulls;
private OutStream isPresentOutStream;
protected final WriterContext context;
/**
* Create a tree writer.
* @param schema the row schema
* @param encryption the encryption variant or null if it is unencrypted
* @param context limited access to the Writer's data.
*/
TreeWriterBase(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
this.schema = schema;
this.encryption = encryption;
this.context = context;
this.isCompressed = context.isCompressed();
this.id = schema.getId();
isPresentOutStream = context.createStream(new StreamName(id,
OrcProto.Stream.Kind.PRESENT, encryption));
isPresent = new BitFieldWriter(isPresentOutStream, 1);
this.foundNulls = false;
createBloomFilter = context.getBloomFilterColumns()[id];
boolean proleptic = context.getProlepticGregorian();
indexStatistics = ColumnStatisticsImpl.create(schema, proleptic);
stripeColStatistics = ColumnStatisticsImpl.create(schema, proleptic);
fileStatistics = ColumnStatisticsImpl.create(schema, proleptic);
if (context.buildIndex()) {
rowIndex = OrcProto.RowIndex.newBuilder();
rowIndexEntry = OrcProto.RowIndexEntry.newBuilder();
rowIndexPosition = new RowIndexPositionRecorder(rowIndexEntry);
} else {
rowIndex = null;
rowIndexEntry = null;
rowIndexPosition = null;
}
if (createBloomFilter) {
bloomFilterEntry = OrcProto.BloomFilter.newBuilder();
if (context.getBloomFilterVersion() == OrcFile.BloomFilterVersion.ORIGINAL) {
bloomFilter = new BloomFilter(context.getRowIndexStride(),
context.getBloomFilterFPP());
bloomFilterIndex = OrcProto.BloomFilterIndex.newBuilder();
} else {
bloomFilter = null;
bloomFilterIndex = null;
}
bloomFilterUtf8 = new BloomFilterUtf8(context.getRowIndexStride(),
context.getBloomFilterFPP());
bloomFilterIndexUtf8 = OrcProto.BloomFilterIndex.newBuilder();
} else {
bloomFilterEntry = null;
bloomFilterIndex = null;
bloomFilterIndexUtf8 = null;
bloomFilter = null;
bloomFilterUtf8 = null;
}
}
protected OrcProto.RowIndex.Builder getRowIndex() {
return rowIndex;
}
protected ColumnStatisticsImpl getStripeStatistics() {
return stripeColStatistics;
}
protected OrcProto.RowIndexEntry.Builder getRowIndexEntry() {
return rowIndexEntry;
}
IntegerWriter createIntegerWriter(PositionedOutputStream output,
boolean signed, boolean isDirectV2,
WriterContext writer) {
if (isDirectV2) {
boolean alignedBitpacking =
writer.getEncodingStrategy().equals(OrcFile.EncodingStrategy.SPEED);
return new RunLengthIntegerWriterV2(output, signed, alignedBitpacking);
} else {
return new RunLengthIntegerWriter(output, signed);
}
}
boolean isNewWriteFormat(WriterContext writer) {
return writer.getVersion() != OrcFile.Version.V_0_11;
}
/**
* Handle the top level object write.
*
* This default method is used for all types except structs, which are the
* typical case. VectorizedRowBatch assumes the top level object is a
* struct, so we use the first column for all other types.
* @param batch the batch to write from
* @param offset the row to start on
* @param length the number of rows to write
*/
@Override
public void writeRootBatch(VectorizedRowBatch batch, int offset,
int length) throws IOException {
writeBatch(batch.cols[0], offset, length);
}
/**
* Write the values from the given vector from offset for length elements.
* @param vector the vector to write from
* @param offset the first value from the vector to write
* @param length the number of values from the vector to write
*/
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
if (vector.noNulls) {
indexStatistics.increment(length);
if (isPresent != null) {
for (int i = 0; i < length; ++i) {
isPresent.write(1);
}
}
} else {
if (vector.isRepeating) {
boolean isNull = vector.isNull[0];
if (isPresent != null) {
for (int i = 0; i < length; ++i) {
isPresent.write(isNull ? 0 : 1);
}
}
if (isNull) {
foundNulls = true;
indexStatistics.setNull();
} else {
indexStatistics.increment(length);
}
} else {
// count the number of non-null values
int nonNullCount = 0;
for(int i = 0; i < length; ++i) {
boolean isNull = vector.isNull[i + offset];
if (!isNull) {
nonNullCount += 1;
}
if (isPresent != null) {
isPresent.write(isNull ? 0 : 1);
}
}
indexStatistics.increment(nonNullCount);
if (nonNullCount != length) {
foundNulls = true;
indexStatistics.setNull();
}
}
}
}
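  // Worked example for writeBatch above: with length = 4 and isNull = {false, true, false,
  // false}, the loop emits the PRESENT bits 1,0,1,1, adds 3 non-null values to
  // indexStatistics, and sets foundNulls so the PRESENT stream is kept at stripe flush time.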
private void removeIsPresentPositions() {
for(int i=0; i < rowIndex.getEntryCount(); ++i) {
OrcProto.RowIndexEntry.Builder entry = rowIndex.getEntryBuilder(i);
List<Long> positions = entry.getPositionsList();
// bit streams use 3 positions if uncompressed, 4 if compressed
positions = positions.subList(isCompressed ? 4 : 3, positions.size());
entry.clearPositions();
entry.addAllPositions(positions);
}
}
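  // Because the suppressed PRESENT stream is never written, readers must not see its seek
  // positions either; the leading 3 (uncompressed) or 4 (compressed) values in each index
  // entry belong to that bit stream, so removeIsPresentPositions trims that prefix off.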
@Override
public void prepareStripe(int stripeId) {
if (isPresent != null) {
isPresent.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
}
}
@Override
public void flushStreams() throws IOException {
if (isPresent != null) {
isPresent.flush();
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
// if no nulls are found in a stream, then suppress the stream
if (isPresent != null && !foundNulls) {
isPresentOutStream.suppress();
// since isPresent bitstream is suppressed, update the index to
// remove the positions of the isPresent stream
if (rowIndex != null) {
removeIsPresentPositions();
}
}
/* Update byte count */
final long byteCount = context.getPhysicalWriter().getFileBytes(id, encryption);
stripeColStatistics.updateByteCount(byteCount);
// merge stripe-level column statistics to file statistics and write it to
// stripe statistics
fileStatistics.merge(stripeColStatistics);
context.writeStatistics(
new StreamName(id, OrcProto.Stream.Kind.STRIPE_STATISTICS, encryption),
stripeColStatistics.serialize());
stripeColStatistics.reset();
// reset the flag for next stripe
foundNulls = false;
context.setEncoding(id, encryption, getEncoding().build());
if (rowIndex != null) {
if (rowIndex.getEntryCount() != requiredIndexEntries) {
throw new IllegalArgumentException("Column has wrong number of " +
"index entries found: " + rowIndex.getEntryCount() + " expected: " +
requiredIndexEntries);
}
context.writeIndex(new StreamName(id, OrcProto.Stream.Kind.ROW_INDEX, encryption), rowIndex);
rowIndex.clear();
rowIndexEntry.clear();
}
// write the bloom filter to out stream
if (bloomFilterIndex != null) {
context.writeBloomFilter(new StreamName(id,
OrcProto.Stream.Kind.BLOOM_FILTER), bloomFilterIndex);
bloomFilterIndex.clear();
}
// write the bloom filter to out stream
if (bloomFilterIndexUtf8 != null) {
context.writeBloomFilter(new StreamName(id,
OrcProto.Stream.Kind.BLOOM_FILTER_UTF8), bloomFilterIndexUtf8);
bloomFilterIndexUtf8.clear();
}
}
/**
* Get the encoding for this column.
* @return the information about the encoding of this column
*/
OrcProto.ColumnEncoding.Builder getEncoding() {
OrcProto.ColumnEncoding.Builder builder =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
if (createBloomFilter) {
builder.setBloomEncoding(BloomFilterIO.Encoding.CURRENT.getId());
}
return builder;
}
/**
* Create a row index entry with the previous location and the current
* index statistics. Also merges the index statistics into the file
* statistics before they are cleared. Finally, it records the start of the
* next index and ensures all of the children columns also create an entry.
*/
@Override
public void createRowIndexEntry() throws IOException {
stripeColStatistics.merge(indexStatistics);
rowIndexEntry.setStatistics(indexStatistics.serialize());
indexStatistics.reset();
rowIndex.addEntry(rowIndexEntry);
rowIndexEntry.clear();
addBloomFilterEntry();
recordPosition(rowIndexPosition);
}
void addBloomFilterEntry() {
if (createBloomFilter) {
if (bloomFilter != null) {
BloomFilterIO.serialize(bloomFilterEntry, bloomFilter);
bloomFilterIndex.addBloomFilter(bloomFilterEntry.build());
bloomFilter.reset();
}
if (bloomFilterUtf8 != null) {
BloomFilterIO.serialize(bloomFilterEntry, bloomFilterUtf8);
bloomFilterIndexUtf8.addBloomFilter(bloomFilterEntry.build());
bloomFilterUtf8.reset();
}
}
}
@Override
public void addStripeStatistics(StripeStatistics[] stats
) throws IOException {
// pick out the correct statistics for this writer
int variantId;
int relativeColumn;
if (encryption == null) {
variantId = stats.length - 1;
relativeColumn = id;
} else {
variantId = encryption.getVariantId();
relativeColumn = id - encryption.getRoot().getId();
}
OrcProto.ColumnStatistics colStats = stats[variantId].getColumn(relativeColumn);
// update the file statistics
fileStatistics.merge(ColumnStatisticsImpl.deserialize(schema, colStats));
// write them out to the file
context.writeStatistics(
new StreamName(id, OrcProto.Stream.Kind.STRIPE_STATISTICS, encryption),
colStats.toBuilder());
}
/**
* Record the current position in each of this column's streams.
* @param recorder where should the locations be recorded
*/
void recordPosition(PositionRecorder recorder) throws IOException {
if (isPresent != null) {
isPresent.getPosition(recorder);
}
}
/**
* Estimate how much memory the writer is consuming excluding the streams.
* @return the number of bytes.
*/
@Override
public long estimateMemory() {
long result = 0;
if (isPresent != null) {
result = isPresentOutStream.getBufferSize();
}
return result;
}
@Override
public void writeFileStatistics() throws IOException {
context.writeStatistics(new StreamName(id,
OrcProto.Stream.Kind.FILE_STATISTICS, encryption),
fileStatistics.serialize());
}
static class RowIndexPositionRecorder implements PositionRecorder {
private final OrcProto.RowIndexEntry.Builder builder;
RowIndexPositionRecorder(OrcProto.RowIndexEntry.Builder builder) {
this.builder = builder;
}
@Override
public void addPosition(long position) {
builder.addPositions(position);
}
}
@Override
public void getCurrentStatistics(ColumnStatistics[] output) {
output[id] = fileStatistics;
}
}
| 14,647 | 34.127098 | 99 | java |
null | orc-main/java/core/src/java/org/apache/orc/impl/writer/UnionTreeWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.PositionRecorder;
import org.apache.orc.impl.RunLengthByteWriter;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
import java.util.List;
public class UnionTreeWriter extends TreeWriterBase {
private final RunLengthByteWriter tags;
private final TreeWriter[] childrenWriters;
UnionTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
List<TypeDescription> children = schema.getChildren();
childrenWriters = new TreeWriterBase[children.size()];
for (int i = 0; i < childrenWriters.length; ++i) {
childrenWriters[i] = Factory.create(children.get(i), encryption, context);
}
tags =
new RunLengthByteWriter(context.createStream(
new StreamName(id, OrcProto.Stream.Kind.DATA, encryption)));
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
UnionColumnVector vec = (UnionColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
byte tag = (byte) vec.tags[0];
for (int i = 0; i < length; ++i) {
tags.write(tag);
}
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(tag);
}
bloomFilterUtf8.addLong(tag);
}
childrenWriters[tag].writeBatch(vec.fields[tag], offset, length);
}
} else {
// write the records in runs of the same tag
int[] currentStart = new int[vec.fields.length];
int[] currentLength = new int[vec.fields.length];
for (int i = 0; i < length; ++i) {
// only need to deal with the non-nulls, since the nulls were dealt
// with in the super method.
if (vec.noNulls || !vec.isNull[i + offset]) {
byte tag = (byte) vec.tags[offset + i];
tags.write(tag);
if (currentLength[tag] == 0) {
// start a new sequence
currentStart[tag] = i + offset;
currentLength[tag] = 1;
} else if (currentStart[tag] + currentLength[tag] == i + offset) {
// ok, we are extending the current run for that tag.
currentLength[tag] += 1;
} else {
// otherwise, we need to close off the old run and start a new one
childrenWriters[tag].writeBatch(vec.fields[tag],
currentStart[tag], currentLength[tag]);
currentStart[tag] = i + offset;
currentLength[tag] = 1;
}
if (createBloomFilter) {
if (bloomFilter != null) {
bloomFilter.addLong(tag);
}
bloomFilterUtf8.addLong(tag);
}
}
}
// write out any left over sequences
for (int tag = 0; tag < currentStart.length; ++tag) {
if (currentLength[tag] != 0) {
childrenWriters[tag].writeBatch(vec.fields[tag], currentStart[tag],
currentLength[tag]);
}
}
}
}
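  // Worked example for the non-repeating branch above: with tags {0, 0, 1, 0}, rows 0-1 go
  // to childrenWriters[0] as a single run, row 2 goes to childrenWriters[1], and row 3 is
  // flushed to childrenWriters[0] as a second run by the "left over sequences" loop, so each
  // child always receives contiguous slices of its own field vector.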
@Override
public void createRowIndexEntry() throws IOException {
super.createRowIndexEntry();
for (TreeWriter child : childrenWriters) {
child.createRowIndexEntry();
}
}
@Override
public void writeStripe(int requiredIndexEntries) throws IOException {
super.writeStripe(requiredIndexEntries);
for (TreeWriter child : childrenWriters) {
child.writeStripe(requiredIndexEntries);
}
if (rowIndexPosition != null) {
recordPosition(rowIndexPosition);
}
}
@Override
void recordPosition(PositionRecorder recorder) throws IOException {
super.recordPosition(recorder);
tags.getPosition(recorder);
}
@Override
public void addStripeStatistics(StripeStatistics[] stats
) throws IOException {
super.addStripeStatistics(stats);
for (TreeWriter child : childrenWriters) {
child.addStripeStatistics(stats);
}
}
@Override
public long estimateMemory() {
long children = 0;
for (TreeWriter writer : childrenWriters) {
children += writer.estimateMemory();
}
return children + super.estimateMemory() + tags.estimateMemory();
}
@Override
public long getRawDataSize() {
long result = 0;
for (TreeWriter writer : childrenWriters) {
result += writer.getRawDataSize();
}
return result;
}
@Override
public void writeFileStatistics() throws IOException {
super.writeFileStatistics();
for (TreeWriter child : childrenWriters) {
child.writeFileStatistics();
}
}
@Override
public void flushStreams() throws IOException {
super.flushStreams();
tags.flush();
for (TreeWriter child : childrenWriters) {
child.flushStreams();
}
}
@Override
public void getCurrentStatistics(ColumnStatistics[] output) {
super.getCurrentStatistics(output);
for(TreeWriter child: childrenWriters) {
child.getCurrentStatistics(output);
}
}
@Override
public void prepareStripe(int stripeId) {
super.prepareStripe(stripeId);
tags.changeIv(CryptoUtils.modifyIvForStripe(stripeId));
for (TreeWriter child: childrenWriters) {
child.prepareStripe(stripeId);
}
}
}
| 6,640 | 31.714286 | 80 | java |
null | orc-main/java/core/src/java/org/apache/orc/impl/writer/VarcharTreeWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.Utf8Utils;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
/**
* Under the covers, varchar is written to ORC the same way as string.
*/
public class VarcharTreeWriter extends StringBaseTreeWriter {
private final int maxLength;
VarcharTreeWriter(TypeDescription schema,
WriterEncryptionVariant encryption,
WriterContext context) throws IOException {
super(schema, encryption, context);
maxLength = schema.getMaxLength();
}
@Override
public void writeBatch(ColumnVector vector, int offset,
int length) throws IOException {
super.writeBatch(vector, offset, length);
BytesColumnVector vec = (BytesColumnVector) vector;
if (vector.isRepeating) {
if (vector.noNulls || !vector.isNull[0]) {
// 0, length times
writeTruncated(vec, 0, length);
}
} else {
for(int i=0; i < length; ++i) {
if (vec.noNulls || !vec.isNull[i + offset]) {
// offset + i, once per loop
writeTruncated(vec, i + offset, 1);
}
}
}
}
private void writeTruncated(BytesColumnVector vec, int row, int repeats)
throws IOException {
int itemLength =
Utf8Utils.truncateBytesTo(maxLength, vec.vector[row], vec.start[row], vec.length[row]);
if (useDictionaryEncoding) {
int id = dictionary.add(vec.vector[row], vec.start[row], itemLength);
for (int i = 0; i < repeats; ++i) {
rows.add(id);
}
} else {
for (int i = 0; i < repeats; ++i) {
directStreamOutput.write(vec.vector[row], vec.start[row], itemLength);
lengthOutput.write(itemLength);
}
}
indexStatistics.updateString(vec.vector[row], vec.start[row], itemLength, repeats);
if (createBloomFilter) {
if (bloomFilter != null) {
// translate from UTF-8 to the default charset
bloomFilter.addString(new String(vec.vector[row], vec.start[row], itemLength,
StandardCharsets.UTF_8));
}
bloomFilterUtf8.addBytes(vec.vector[row], vec.start[row], itemLength);
}
}
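  // Note: Utf8Utils.truncateBytesTo caps the value at maxLength characters (not bytes),
  // the varchar's declared length, without splitting a multi-byte UTF-8 sequence; the
  // dictionary, direct stream, statistics and bloom filters above all see the
  // already-truncated bytes.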
}
| 3,154 | 34.852273 | 95 | java |
null | orc-main/java/core/src/java/org/apache/orc/impl/writer/WriterContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.DataMask;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.StreamName;
import java.io.IOException;
public interface WriterContext {
/**
* Create a stream to store part of a column.
* @param name the name of the stream
 * @return the output stream that the section needs to be written to.
*/
OutStream createStream(StreamName name) throws IOException;
/**
* Get the stride rate of the row index.
*/
int getRowIndexStride();
/**
 * Whether the writer should be building the row index.
* @return true if we are building the index
*/
boolean buildIndex();
/**
* Is the ORC file compressed?
* @return are the streams compressed
*/
boolean isCompressed();
/**
* Get the encoding strategy to use.
* @return encoding strategy
*/
OrcFile.EncodingStrategy getEncodingStrategy();
/**
* Get the bloom filter columns
* @return bloom filter columns
*/
boolean[] getBloomFilterColumns();
/**
* Get bloom filter false positive percentage.
* @return fpp
*/
double getBloomFilterFPP();
/**
* Get the writer's configuration.
* @return configuration
*/
Configuration getConfiguration();
/**
* Get the version of the file to write.
*/
OrcFile.Version getVersion();
OrcFile.BloomFilterVersion getBloomFilterVersion();
void writeIndex(StreamName name,
OrcProto.RowIndex.Builder index) throws IOException;
void writeBloomFilter(StreamName name,
OrcProto.BloomFilterIndex.Builder bloom
) throws IOException;
/**
* Get the mask for the unencrypted variant.
* @param columnId the column id
* @return the mask to apply to the unencrypted data or null if there is none
*/
DataMask getUnencryptedMask(int columnId);
/**
* Get the encryption for the given column.
* @param columnId the root column id
* @return the column encryption or null if it isn't encrypted
*/
WriterEncryptionVariant getEncryption(int columnId);
/**
* Get the PhysicalWriter.
* @return the file's physical writer.
*/
PhysicalWriter getPhysicalWriter();
/**
* Set the encoding for the current stripe.
* @param column the column identifier
* @param variant the encryption variant
* @param encoding the encoding for this stripe
*/
void setEncoding(int column, WriterEncryptionVariant variant,
OrcProto.ColumnEncoding encoding);
/**
* Set the column statistics for the stripe or file.
* @param name the name of the statistics stream
* @param stats the statistics for this column in this stripe
*/
void writeStatistics(StreamName name,
OrcProto.ColumnStatistics.Builder stats
) throws IOException;
/**
* Should the writer use UTC as the timezone?
*/
boolean getUseUTCTimestamp();
/**
* Get the dictionary key size threshold.
* @param columnId the column id
* @return the minimum ratio for using a dictionary
*/
double getDictionaryKeySizeThreshold(int columnId);
/**
* Should we write the data using the proleptic Gregorian calendar?
* @return true if we should use the proleptic Gregorian calendar
*/
boolean getProlepticGregorian();
}
| 4,274 | 27.311258 | 79 | java |
null | orc-main/java/core/src/java/org/apache/orc/impl/writer/WriterEncryptionKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.EncryptionKey;
import org.apache.orc.impl.HadoopShims;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class WriterEncryptionKey implements EncryptionKey {
private final HadoopShims.KeyMetadata metadata;
private final List<WriterEncryptionVariant> roots = new ArrayList<>();
private int id;
public WriterEncryptionKey(HadoopShims.KeyMetadata key) {
this.metadata = key;
}
public void addRoot(WriterEncryptionVariant root) {
roots.add(root);
}
public HadoopShims.KeyMetadata getMetadata() {
return metadata;
}
public void setId(int id) {
this.id = id;
}
@Override
public String getKeyName() {
return metadata.getKeyName();
}
@Override
public int getKeyVersion() {
return metadata.getVersion();
}
@Override
public EncryptionAlgorithm getAlgorithm() {
return metadata.getAlgorithm();
}
@Override
public WriterEncryptionVariant[] getEncryptionRoots() {
return roots.toArray(new WriterEncryptionVariant[0]);
}
@Override
public boolean isAvailable() {
return true;
}
public int getId() {
return id;
}
public void sortRoots() {
Collections.sort(roots);
}
@Override
public int hashCode() {
return id;
}
@Override
public boolean equals(Object other) {
if (other == null || getClass() != other.getClass()) {
return false;
}
return compareTo((EncryptionKey) other) == 0;
}
@Override
public int compareTo(@NotNull EncryptionKey other) {
int result = getKeyName().compareTo(other.getKeyName());
if (result == 0) {
result = Integer.compare(getKeyVersion(), other.getKeyVersion());
}
return result;
}
@Override
public String toString() {
return metadata.toString();
}
}
| 2,739 | 23.684685 | 75 | java |
null | orc-main/java/core/src/java/org/apache/orc/impl/writer/WriterEncryptionVariant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.orc.EncryptionVariant;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.LocalKey;
import org.jetbrains.annotations.NotNull;
import java.security.Key;
import java.util.ArrayList;
import java.util.List;
public class WriterEncryptionVariant implements EncryptionVariant {
private int id;
private final WriterEncryptionKey key;
private final TypeDescription root;
private final LocalKey material;
private final OrcProto.FileStatistics.Builder fileStats =
OrcProto.FileStatistics.newBuilder();
private final List<OrcProto.ColumnEncoding> encodings = new ArrayList<>();
public WriterEncryptionVariant(WriterEncryptionKey key,
TypeDescription root,
LocalKey columnKey) {
this.key = key;
this.root = root;
this.material = columnKey;
}
@Override
public WriterEncryptionKey getKeyDescription() {
return key;
}
@Override
public TypeDescription getRoot() {
return root;
}
public void setId(int id) {
this.id = id;
}
@Override
public int getVariantId() {
return id;
}
@Override
public Key getFileFooterKey() {
return material.getDecryptedKey();
}
@Override
public Key getStripeKey(long stripe) {
return material.getDecryptedKey();
}
public LocalKey getMaterial() {
return material;
}
public void clearFileStatistics() {
fileStats.clearColumn();
}
public OrcProto.FileStatistics getFileStatistics() {
return fileStats.build();
}
public void addEncoding(OrcProto.ColumnEncoding encoding) {
encodings.add(encoding);
}
public List<OrcProto.ColumnEncoding> getEncodings() {
return encodings;
}
public void clearEncodings() {
encodings.clear();
}
@Override
public int hashCode() {
return key.hashCode() << 16 ^ root.getId();
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other == null || other.getClass() != getClass()) {
return false;
}
return compareTo((WriterEncryptionVariant) other) == 0;
}
@Override
public int compareTo(@NotNull EncryptionVariant other) {
int result = key.compareTo(other.getKeyDescription());
if (result == 0) {
result = Integer.compare(root.getId(), other.getRoot().getId());
}
return result;
}
}
| 3,261 | 24.888889 | 76 | java |
null | orc-main/java/core/src/java/org/apache/orc/impl/writer/WriterImplV2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.writer;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.OrcFile;
import org.apache.orc.impl.WriterImpl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* An ORCv2 file writer. The file is divided into stripes, which is the natural
* unit of work when reading. Each stripe is buffered in memory until the
* memory reaches the stripe size and then it is written out broken down by
* columns. Each column is written by a TreeWriter that is specific to that
* type of column. TreeWriters may have children TreeWriters that handle the
* sub-types. Each of the TreeWriters writes the column's data as a set of
* streams.
* <p>
 * This class is unsynchronized like most Stream objects, so the creation of
 * an OrcFile and all access to a single instance has to be from a single
 * thread.
* <p>
* There are no known cases where these happen between different threads today.
* <p>
 * Caveat: the MemoryManager is created during WriterOptions creation, and it
 * has to be confined to a single thread as well.
*
*/
public class WriterImplV2 extends WriterImpl {
private static final Logger LOG = LoggerFactory.getLogger(WriterImplV2.class);
public WriterImplV2(FileSystem fs,
Path path,
OrcFile.WriterOptions opts) throws IOException {
super(fs, path, opts);
LOG.warn("ORC files written in " +
OrcFile.Version.UNSTABLE_PRE_2_0.getName() + " will not be" +
" readable by other versions of the software. It is only for" +
" developer testing.");
}
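  // A minimal usage sketch (assuming the standard OrcFile factory API); requesting the
  // UNSTABLE_PRE_2_0 version is what selects this writer in OrcFile.createWriter:
  //
  //   Writer writer = OrcFile.createWriter(new Path("/tmp/example.orc"),
  //       OrcFile.writerOptions(conf)
  //           .setSchema(TypeDescription.fromString("struct<x:int,y:string>"))
  //           .version(OrcFile.Version.UNSTABLE_PRE_2_0));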
}
| 2,469 | 38.206349 | 80 | java |
null | orc-main/java/core/src/java/org/apache/orc/util/BloomFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import java.nio.charset.Charset;
import java.util.Arrays;
/**
 * BloomFilter is a probabilistic data structure for set membership checks. BloomFilters are
 * highly space efficient when compared to using a HashSet. Because of the probabilistic nature of
 * a bloom filter, false positives (element not present in the bloom filter but test() says true)
 * are possible, but false negatives are not (if an element is present then test() will never
 * say false). The false positive probability is configurable (default: 5%), and that choice drives
 * the storage requirement: the lower the false positive probability, the greater
 * the space requirement.
 * Bloom filters are sensitive to the number of elements that will be inserted into the bloom filter.
 * The expected number of entries must be specified when the bloom filter is created. If the number
 * of insertions exceeds the specified initial number of entries, the false positive probability
 * will increase accordingly.
* <p>
* Internally, this implementation of bloom filter uses Murmur3 fast non-cryptographic hash
* algorithm. Although Murmur2 is slightly faster than Murmur3 in Java, it suffers from hash
* collisions for specific sequence of repeating bytes. Check the following link for more info
* https://code.google.com/p/smhasher/wiki/MurmurHash2Flaw
* <p>
* Note that this class is here for backwards compatibility, because it uses
 * the JVM default character set for strings. All new users should use
 * BloomFilterUtf8, which always uses UTF-8 for the encoding.
*/
public class BloomFilter {
public static final double DEFAULT_FPP = 0.05;
private final BitSet bitSet;
private final int numBits;
private final int numHashFunctions;
static void checkArgument(boolean expression, String message) {
if (!expression) {
throw new IllegalArgumentException(message);
}
}
public BloomFilter(long expectedEntries) {
this(expectedEntries, DEFAULT_FPP);
}
public BloomFilter(long expectedEntries, double fpp) {
expectedEntries = Math.max(expectedEntries, 1);
checkArgument(fpp > 0.0 && fpp < 1.0, "False positive probability should be > 0.0 & < 1.0");
int nb = optimalNumOfBits(expectedEntries, fpp);
// make 'm' multiple of 64
this.numBits = nb + (Long.SIZE - (nb % Long.SIZE));
this.numHashFunctions = optimalNumOfHashFunctions(expectedEntries, numBits);
this.bitSet = new BitSet(numBits);
}
/**
* A constructor to support rebuilding the BloomFilter from a serialized representation.
* @param bits the serialized bits
* @param numFuncs the number of functions used
*/
public BloomFilter(long[] bits, int numFuncs) {
super();
bitSet = new BitSet(bits);
this.numBits = (int) bitSet.bitSize();
numHashFunctions = numFuncs;
}
static int optimalNumOfHashFunctions(long n, long m) {
return Math.max(1, (int) Math.round((double) m / n * Math.log(2)));
}
static int optimalNumOfBits(long n, double p) {
return (int) (-n * Math.log(p) / (Math.log(2) * Math.log(2)));
}
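  // Worked example: expectedEntries = 10,000 and fpp = 0.05 make optimalNumOfBits return
  // roughly 62,350 bits; rounding up to a multiple of 64 gives numBits = 62,400, and
  // optimalNumOfHashFunctions then picks round(62,400 / 10,000 * ln 2) = 4 hash functions.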
@Override
public boolean equals(Object other) {
return other != null &&
other.getClass() == getClass() &&
numBits == ((BloomFilter) other).numBits &&
numHashFunctions == ((BloomFilter) other).numHashFunctions &&
bitSet.equals(((BloomFilter) other).bitSet);
}
@Override
public int hashCode() {
return bitSet.hashCode() + numHashFunctions * 5;
}
public void add(byte[] val) {
addBytes(val, 0, val == null ? 0 : val.length);
}
public void addBytes(byte[] val, int offset, int length) {
    // We use the trick mentioned in "Less Hashing, Same Performance: Building a Better Bloom Filter"
    // by Kirsch et al. From the abstract: 'only two hash functions are necessary to effectively
    // implement a Bloom filter without any loss in the asymptotic false positive probability'.
    // Let's split the 64-bit hashcode into two 32-bit hash codes and employ the technique mentioned
    // in the above paper.
long hash64 = val == null ? Murmur3.NULL_HASHCODE :
Murmur3.hash64(val, offset, length);
addHash(hash64);
}
private void addHash(long hash64) {
int hash1 = (int) hash64;
int hash2 = (int) (hash64 >>> 32);
for (int i = 1; i <= numHashFunctions; i++) {
int combinedHash = hash1 + (i * hash2);
// hashcode should be positive, flip all the bits if it's negative
if (combinedHash < 0) {
combinedHash = ~combinedHash;
}
int pos = combinedHash % numBits;
bitSet.set(pos);
}
}
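  // The loop above is the Kirsch-Mitzenmacher construction: the k probe positions are
  // derived as (hash1 + i * hash2) mod numBits for i = 1..k (with negative values
  // bit-flipped first), so one 64-bit Murmur3 value, split into two 32-bit halves,
  // stands in for k independent hash functions.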
public void addString(String val) {
if (val == null) {
add(null);
} else {
add(val.getBytes(Charset.defaultCharset()));
}
}
public void addLong(long val) {
addHash(getLongHash(val));
}
public void addDouble(double val) {
addLong(Double.doubleToLongBits(val));
}
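  // A minimal usage sketch (values are made up):
  //   BloomFilter bf = new BloomFilter(10_000, 0.05);
  //   bf.addLong(42);
  //   bf.testLong(42);   // always true once added
  //   bf.testLong(43);   // usually false; true only on a false positive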
public boolean test(byte[] val) {
return testBytes(val, 0, val == null ? 0 : val.length);
}
public boolean testBytes(byte[] val, int offset, int length) {
long hash64 = val == null ? Murmur3.NULL_HASHCODE :
Murmur3.hash64(val, offset, length);
return testHash(hash64);
}
private boolean testHash(long hash64) {
int hash1 = (int) hash64;
int hash2 = (int) (hash64 >>> 32);
for (int i = 1; i <= numHashFunctions; i++) {
int combinedHash = hash1 + (i * hash2);
// hashcode should be positive, flip all the bits if it's negative
if (combinedHash < 0) {
combinedHash = ~combinedHash;
}
int pos = combinedHash % numBits;
if (!bitSet.get(pos)) {
return false;
}
}
return true;
}
public boolean testString(String val) {
if (val == null) {
return test(null);
} else {
return test(val.getBytes(Charset.defaultCharset()));
}
}
public boolean testLong(long val) {
return testHash(getLongHash(val));
}
// Thomas Wang's integer hash function
// http://web.archive.org/web/20071223173210/http://www.concentric.net/~Ttwang/tech/inthash.htm
static long getLongHash(long key) {
key = (~key) + (key << 21); // key = (key << 21) - key - 1;
key = key ^ (key >> 24);
key = (key + (key << 3)) + (key << 8); // key * 265
key = key ^ (key >> 14);
key = (key + (key << 2)) + (key << 4); // key * 21
key = key ^ (key >> 28);
key = key + (key << 31);
return key;
}
public boolean testDouble(double val) {
return testLong(Double.doubleToLongBits(val));
}
public long sizeInBytes() {
return getBitSize() / 8;
}
public int getBitSize() {
return bitSet.getData().length * Long.SIZE;
}
public int getNumHashFunctions() {
return numHashFunctions;
}
public long[] getBitSet() {
return bitSet.getData();
}
@Override
public String toString() {
return "m: " + numBits + " k: " + numHashFunctions;
}
/**
* Merge the specified bloom filter with current bloom filter.
*
* @param that - bloom filter to merge
*/
public void merge(BloomFilter that) {
if (this != that && numBits == that.numBits && numHashFunctions == that.numHashFunctions) {
this.bitSet.putAll(that.bitSet);
} else {
throw new IllegalArgumentException("BloomFilters are not compatible for merging." +
" this - " + this + " that - " + that);
}
}
public void reset() {
this.bitSet.clear();
}
/**
 * Helper method that is only used for tests. Check if the given position in the bitSet is
* true. Use default visibility.
*/
boolean testBitSetPos(int pos) {
return this.bitSet.get(pos);
}
/**
* Bare metal bit set implementation. For performance reasons, this implementation does not check
* for index bounds nor expand the bit set size if the specified index is greater than the size.
*/
public static class BitSet {
private final long[] data;
public BitSet(long bits) {
this(new long[(int) Math.ceil((double) bits / (double) Long.SIZE)]);
}
/**
* Deserialize long array as bit set.
*
* @param data - bit array
*/
public BitSet(long[] data) {
assert data.length > 0 : "data length is zero!";
this.data = data;
}
/**
* Sets the bit at specified index.
*
* @param index - position
*/
public void set(int index) {
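      // index >>> 6 selects the 64-bit word; Java masks shift distances to the low 6 bits,
      // so (1L << index) is effectively 1L << (index & 63) within that word.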
data[index >>> 6] |= (1L << index);
}
/**
* Returns true if the bit is set in the specified index.
*
* @param index - position
* @return - value at the bit position
*/
public boolean get(int index) {
return (data[index >>> 6] & (1L << index)) != 0;
}
/**
* Number of bits
*/
public long bitSize() {
return (long) data.length * Long.SIZE;
}
public long[] getData() {
return data;
}
/**
* Combines the two BitArrays using bitwise OR.
*/
public void putAll(BitSet array) {
assert data.length == array.data.length :
"BitArrays must be of equal length (" + data.length + "!= " + array.data.length + ")";
for (int i = 0; i < data.length; i++) {
data[i] |= array.data[i];
}
}
/**
* Clear the bit set.
*/
public void clear() {
Arrays.fill(data, 0);
}
@Override
public boolean equals(Object other) {
return other != null &&
other.getClass() == getClass() &&
Arrays.equals(data, ((BitSet) other).data);
}
@Override
public int hashCode() {
return Arrays.hashCode(data);
}
}
}
| 10,391 | 29.654867 | 101 | java |
null | orc-main/java/core/src/java/org/apache/orc/util/BloomFilterIO.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import com.google.protobuf.ByteString;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
public class BloomFilterIO {
public enum Encoding {
ORIGINAL(0),
UTF8_UTC(1),
FUTURE(Integer.MAX_VALUE);
public static final Encoding CURRENT = UTF8_UTC;
private final int id;
Encoding(int id) {
this.id = id;
}
public int getId() {
return id;
}
public static Encoding from(OrcProto.ColumnEncoding encoding) {
if (!encoding.hasBloomEncoding()) {
return ORIGINAL;
}
switch (encoding.getBloomEncoding()) {
case 0:
return ORIGINAL;
case 1:
return UTF8_UTC;
default:
return FUTURE;
}
}
}
private BloomFilterIO() {
// never called
}
/**
* Deserialize a bloom filter from the ORC file.
*/
public static BloomFilter deserialize(OrcProto.Stream.Kind kind,
OrcProto.ColumnEncoding encoding,
OrcFile.WriterVersion fileVersion,
TypeDescription.Category type,
OrcProto.BloomFilter bloomFilter) {
if (bloomFilter == null) {
return null;
}
int numFuncs = bloomFilter.getNumHashFunctions();
switch (kind) {
case BLOOM_FILTER: {
long[] values = new long[bloomFilter.getBitsetCount()];
for (int i = 0; i < values.length; ++i) {
values[i] = bloomFilter.getBitset(i);
}
// After HIVE-12055 the bloom filters for strings correctly use
// UTF8.
if (fileVersion.includes(OrcFile.WriterVersion.HIVE_12055) &&
(type == TypeDescription.Category.STRING ||
type == TypeDescription.Category.CHAR ||
type == TypeDescription.Category.VARCHAR)) {
return new BloomFilterUtf8(values, numFuncs);
}
return new BloomFilter(values, numFuncs);
}
case BLOOM_FILTER_UTF8: {
// make sure we don't use unknown encodings or original timestamp encodings
Encoding version = Encoding.from(encoding);
if (version == Encoding.FUTURE ||
(type == TypeDescription.Category.TIMESTAMP &&
version == Encoding.ORIGINAL)) {
return null;
}
ByteString bits = bloomFilter.getUtf8Bitset();
long[] values = new long[bits.size() / 8];
bits.asReadOnlyByteBuffer().order(ByteOrder.LITTLE_ENDIAN)
.asLongBuffer().get(values);
return new BloomFilterUtf8(values, numFuncs);
}
default:
throw new IllegalArgumentException("Unknown bloom filter kind " + kind);
}
}
/**
* Serialize the BloomFilter to the ORC file.
* @param builder the builder to write to
* @param bloomFilter the bloom filter to serialize
*/
public static void serialize(OrcProto.BloomFilter.Builder builder,
BloomFilter bloomFilter) {
builder.clear();
builder.setNumHashFunctions(bloomFilter.getNumHashFunctions());
long[] bitset = bloomFilter.getBitSet();
if (bloomFilter instanceof BloomFilterUtf8) {
ByteBuffer buffer = ByteBuffer.allocate(bitset.length * 8);
buffer.order(ByteOrder.LITTLE_ENDIAN);
buffer.asLongBuffer().put(bitset);
builder.setUtf8Bitset(ByteString.copyFrom(buffer));
} else {
for(int i=0; i < bitset.length; ++i) {
builder.addBitset(bitset[i]);
}
}
}
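  // The UTF8 variant above stores the bitset as a single little-endian ByteString
  // (8 bytes per word), which matches what deserialize() reads back for
  // Stream.Kind.BLOOM_FILTER_UTF8; the ORIGINAL variant writes one repeated fixed64
  // entry per 64-bit word instead.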
}
| 4,460 | 31.801471 | 83 | java |
null | orc-main/java/core/src/java/org/apache/orc/util/BloomFilterUtf8.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import java.nio.charset.StandardCharsets;
/**
* This class represents the fix from ORC-101 where we fixed the bloom filter
* from using the JVM's default character set to always using UTF-8.
*/
public class BloomFilterUtf8 extends BloomFilter {
public BloomFilterUtf8(long expectedEntries, double fpp) {
super(expectedEntries, fpp);
}
public BloomFilterUtf8(long[] bits, int numFuncs) {
super(bits, numFuncs);
}
@Override
public void addString(String val) {
if (val == null) {
add(null);
} else {
add(val.getBytes(StandardCharsets.UTF_8));
}
}
@Override
public boolean testString(String val) {
if (val == null) {
return test(null);
} else {
return test(val.getBytes(StandardCharsets.UTF_8));
}
}
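  // The distinction matters for non-ASCII data: when the JVM default charset is not UTF-8,
  // a string such as "é" encodes to different bytes in the parent class and in this class,
  // so the two can set different bits for the same logical value.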
}
| 1,620 | 27.946429 | 77 | java |
null | orc-main/java/core/src/java/org/apache/orc/util/CuckooSetBytes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
import java.util.Random;
/**
* A high-performance set implementation used to support fast set membership testing,
* using Cuckoo hashing. This is used to support fast tests of the form
* <p>
* column IN ( list-of-values )
* <p>
* For details on the algorithm, see R. Pagh and F. F. Rodler, "Cuckoo Hashing,"
* Elsevier Science preprint, Dec. 2003. http://www.itu.dk/people/pagh/papers/cuckoo-jour.pdf.
* <p>
* Copied from CuckooSetBytes@Apache Hive project for convenience
*/
public class CuckooSetBytes {
private byte[][] t1;
private byte[][] t2;
private byte[][] prev1 = null; // used for rehashing to get last set of values
private byte[][] prev2 = null; // " "
private int n; // current array size
private static final double PADDING_FACTOR = 1.0/0.40; // have minimum 40% fill factor
private int salt = 0;
private final Random gen = new Random(676983475);
private int rehashCount = 0;
private static final long INT_MASK = 0x00000000ffffffffL;
private static final long BYTE_MASK = 0x00000000000000ffL;
// some prime numbers spaced about at powers of 2 in magnitude
static final int[] primes = {7, 13, 17, 23, 31, 53, 67, 89, 127, 269, 571, 1019, 2089,
4507, 8263, 16361, 32327, 65437, 131111, 258887, 525961, 999983, 2158909, 4074073,
8321801, 15485863, 32452867, 67867967, 122949829, 256203221, 553105253, 982451653,
1645333507, 2147483647};
/**
* Allocate a new set to hold expectedSize values. Re-allocation to expand
* the set is not implemented, so the expected size must be at least the
* size of the set to be inserted.
* @param expectedSize At least the size of the set of values that will be inserted.
*/
public CuckooSetBytes(int expectedSize) {
// Choose array size. We have two hash tables to hold entries, so the sum
// of the two should have a bit more than twice as much space as the
// minimum required.
n = (int) (expectedSize * PADDING_FACTOR / 2.0);
// some prime numbers spaced about at powers of 2 in magnitude
// try to get prime number table size to have less dependence on good hash function
for (int i = 0; i != primes.length; i++) {
if (n <= primes[i]) {
n = primes[i];
break;
}
}
t1 = new byte[n][];
t2 = new byte[n][];
updateHashSalt();
}
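  // A minimal usage sketch (values are made up):
  //   byte[][] inList = { "a".getBytes(StandardCharsets.UTF_8),
  //                       "b".getBytes(StandardCharsets.UTF_8) };
  //   CuckooSetBytes set = new CuckooSetBytes(inList.length);
  //   set.load(inList);
  //   boolean hit = set.lookup(candidate, 0, candidate.length);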
/**
* Return true if and only if the value in byte array b beginning at start
* and ending at start+len is present in the set.
*/
public boolean lookup(byte[] b, int start, int len) {
return entryEqual(t1, h1(b, start, len), b, start, len) ||
entryEqual(t2, h2(b, start, len), b, start, len);
}
private static boolean entryEqual(byte[][] t, int hash, byte[] b, int start, int len) {
return t[hash] != null && StringExpr.equal(t[hash], 0, t[hash].length, b, start, len);
}
public void insert(byte[] x) {
byte[] temp;
if (lookup(x, 0, x.length)) {
return;
}
// Try to insert up to n times. Rehash if that fails.
for(int i = 0; i != n; i++) {
int hash1 = h1(x, 0, x.length);
if (t1[hash1] == null) {
t1[hash1] = x;
return;
}
// swap x and t1[h1(x)]
temp = t1[hash1];
t1[hash1] = x;
x = temp;
int hash2 = h2(x, 0, x.length);
if (t2[hash2] == null) {
t2[hash2] = x;
return;
}
// swap x and t2[h2(x)]
temp = t2[hash2];
t2[hash2] = x;
x = temp;
}
rehash();
insert(x);
}
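  // Each pass of the loop above evicts the current occupant of the probed slot and tries to
  // re-place it in its alternate table; if nothing settles after n displacements, rehash()
  // rebuilds both tables with a fresh salt for h2 and the insert is retried.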
/**
* Insert all values in the input array into the set.
*/
public void load(byte[][] a) {
for (byte[] x : a) {
insert(x);
}
}
/**
 * Try to insert with up to n values "poked out". Return the last value poked out.
* If the value is not blank then we assume there was a cycle.
* Don't try to insert the same value twice. This is for use in rehash only,
* so you won't see the same value twice.
*/
private byte[] tryInsert(byte[] x) {
byte[] temp;
for(int i = 0; i != n; i++) {
int hash1 = h1(x, 0, x.length);
if (t1[hash1] == null) {
t1[hash1] = x;
return null;
}
// swap x and t1[h1(x)]
temp = t1[hash1];
t1[hash1] = x;
x = temp;
int hash2 = h2(x, 0, x.length);
if (t2[hash2] == null) {
t2[hash2] = x;
return null;
}
// swap x and t2[h2(x)]
temp = t2[hash2];
t2[hash2] = x;
x = temp;
if (x == null) {
break;
}
}
return x;
}
/**
* first hash function
*/
private int h1(byte[] b, int start, int len) {
// AND hash with mask to 0 out sign bit to make sure it's positive.
// Then we know taking the result mod n is in the range (0..n-1).
return (hash(b, start, len, 0) & 0x7FFFFFFF) % n;
}
/**
* second hash function
*/
private int h2(byte[] b, int start, int len) {
// AND hash with mask to 0 out sign bit to make sure it's positive.
// Then we know taking the result mod n is in the range (0..n-1).
// Include salt as argument so this hash function can be varied
// if we need to rehash.
return (hash(b, start, len, salt) & 0x7FFFFFFF) % n;
}
/**
* In case of rehash, hash function h2 is changed by updating the
* salt value passed in to the function hash().
*/
private void updateHashSalt() {
salt = gen.nextInt(0x7FFFFFFF);
}
private void rehash() {
rehashCount++;
if (rehashCount > 20) {
throw new RuntimeException("Too many rehashes");
}
updateHashSalt();
// Save original values
if (prev1 == null) {
prev1 = t1;
prev2 = t2;
}
t1 = new byte[n][];
t2 = new byte[n][];
for (byte[] v : prev1) {
if (v != null) {
byte[] x = tryInsert(v);
if (x != null) {
rehash();
return;
}
}
}
for (byte[] v : prev2) {
if (v != null) {
byte[] x = tryInsert(v);
if (x != null) {
rehash();
return;
}
}
}
// We succeeded in adding all the values, so
// clear the previous values recorded.
prev1 = null;
prev2 = null;
}
/**
* This is adapted from the org.apache.hadoop.util.hash.JenkinsHash package.
* The interface needed to be modified to suit the use here, by adding
* a start offset parameter to the hash function.
*
* In the future, folding this back into the original Hadoop package should
 * be considered. This code could then import that package and use it.
* The original comments from the source are below.
*
* taken from hashlittle() -- hash a variable-length key into a 32-bit value
*
* @param key the key (the unaligned variable-length array of bytes)
* @param nbytes number of bytes to include in hash
* @param initval can be any integer value
* @return a 32-bit value. Every bit of the key affects every bit of the
* return value. Two keys differing by one or two bits will have totally
* different hash values.
*
* <p>The best hash table sizes are powers of 2. There is no need to do mod
* a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask.
* For example, if you need only 10 bits, do
* <code>h = (h & hashmask(10));</code>
* In which case, the hash table should have hashsize(10) elements.
*
* <p>If you are hashing n strings byte[][] k, do it like this:
* for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
*
* <p>By Bob Jenkins, 2006. [email protected]. You may use this
* code any way you wish, private, educational, or commercial. It's free.
*
* <p>Use for hash table lookup, or anything where one collision in 2^^32 is
* acceptable. Do NOT use for cryptographic purposes.
*/
@SuppressWarnings("fallthrough")
private int hash(byte[] key, int start, int nbytes, int initval) {
int length = nbytes;
long a, b, c; // We use longs because we don't have unsigned ints
a = b = c = (0x00000000deadbeefL + length + initval) & INT_MASK;
int offset = start;
for (; length > 12; offset += 12, length -= 12) {
a = (a + (key[offset] & BYTE_MASK)) & INT_MASK;
a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK;
b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK;
c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
/*
* mix -- mix 3 32-bit values reversibly.
* This is reversible, so any information in (a,b,c) before mix() is
* still in (a,b,c) after mix().
*
* If four pairs of (a,b,c) inputs are run through mix(), or through
* mix() in reverse, there are at least 32 bits of the output that
* are sometimes the same for one pair and different for another pair.
*
* This was tested for:
* - pairs that differed by one bit, by two bits, in any combination
* of top bits of (a,b,c), or in any combination of bottom bits of
* (a,b,c).
* - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
* the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
* is commonly produced by subtraction) look like a single 1-bit
* difference.
* - the base values were pseudorandom, all zero but one bit set, or
* all zero plus a counter that starts at zero.
*
* Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
* satisfy this are
* 4 6 8 16 19 4
* 9 15 3 18 27 15
* 14 9 3 7 17 3
* Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for
* "differ" defined as + with a one-bit base and a two-bit delta. I
* used http://burtleburtle.net/bob/hash/avalanche.html to choose
* the operations, constants, and arrangements of the variables.
*
* This does not achieve avalanche. There are input bits of (a,b,c)
* that fail to affect some output bits of (a,b,c), especially of a.
* The most thoroughly mixed value is c, but it doesn't really even
* achieve avalanche in c.
*
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the
* opposite direction as the goal of parallelism. I did what I could.
* Rotates seem to cost as much as shifts on every machine I could lay
* my hands on, and rotates are much kinder to the top and bottom bits,
* so I used rotates.
*
* #define mix(a,b,c) \
* { \
* a -= c; a ^= rot(c, 4); c += b; \
* b -= a; b ^= rot(a, 6); a += c; \
* c -= b; c ^= rot(b, 8); b += a; \
* a -= c; a ^= rot(c,16); c += b; \
* b -= a; b ^= rot(a,19); a += c; \
* c -= b; c ^= rot(b, 4); b += a; \
* }
*
* mix(a,b,c);
*/
a = (a - c) & INT_MASK;
a ^= rot(c, 4);
c = (c + b) & INT_MASK;
b = (b - a) & INT_MASK;
b ^= rot(a, 6);
a = (a + c) & INT_MASK;
c = (c - b) & INT_MASK;
c ^= rot(b, 8);
b = (b + a) & INT_MASK;
a = (a - c) & INT_MASK;
a ^= rot(c,16);
c = (c + b) & INT_MASK;
b = (b - a) & INT_MASK;
b ^= rot(a,19);
a = (a + c) & INT_MASK;
c = (c - b) & INT_MASK;
c ^= rot(b, 4);
b = (b + a) & INT_MASK;
}
//-------------------------------- last block: affect all 32 bits of (c)
switch (length) { // all the case statements fall through
case 12:
c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
case 11:
c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
case 10:
c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
case 9:
c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK;
case 8:
b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
case 7:
b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
case 6:
b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
case 5:
b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK;
case 4:
a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
case 3:
a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
case 2:
a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
case 1:
a = (a + (key[offset] & BYTE_MASK)) & INT_MASK;
break;
case 0:
return (int)(c & INT_MASK);
}
/*
* final -- final mixing of 3 32-bit values (a,b,c) into c
*
* Pairs of (a,b,c) values differing in only a few bits will usually
* produce values of c that look totally different. This was tested for
* - pairs that differed by one bit, by two bits, in any combination
* of top bits of (a,b,c), or in any combination of bottom bits of
* (a,b,c).
*
* - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
* the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
* is commonly produced by subtraction) look like a single 1-bit
* difference.
*
* - the base values were pseudorandom, all zero but one bit set, or
* all zero plus a counter that starts at zero.
*
* These constants passed:
* 14 11 25 16 4 14 24
* 12 14 25 16 4 14 24
* and these came close:
* 4 8 15 26 3 22 24
* 10 8 15 26 3 22 24
* 11 8 15 26 3 22 24
*
* #define final(a,b,c) \
* {
* c ^= b; c -= rot(b,14); \
* a ^= c; a -= rot(c,11); \
* b ^= a; b -= rot(a,25); \
* c ^= b; c -= rot(b,16); \
* a ^= c; a -= rot(c,4); \
* b ^= a; b -= rot(a,14); \
* c ^= b; c -= rot(b,24); \
* }
*
*/
c ^= b;
c = (c - rot(b,14)) & INT_MASK;
a ^= c;
a = (a - rot(c,11)) & INT_MASK;
b ^= a;
b = (b - rot(a,25)) & INT_MASK;
c ^= b;
c = (c - rot(b,16)) & INT_MASK;
a ^= c;
a = (a - rot(c,4)) & INT_MASK;
b ^= a;
b = (b - rot(a,14)) & INT_MASK;
c ^= b;
c = (c - rot(b,24)) & INT_MASK;
return (int)(c & INT_MASK);
}
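  // Illustrative sketch (not part of the original source): the chaining and
  // masking idioms described in the method comment above, adapted to this
  // method's (key, start, nbytes, initval) signature. "keys" is a hypothetical
  // byte[][] supplied by the caller.
  //
  //   int h = 0;
  //   for (byte[] k : keys) {
  //     h = hash(k, 0, k.length, h);
  //   }
  //   int tenBits = h & ((1 << 10) - 1);   // equivalent of hashmask(10)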
private static long rot(long val, int pos) {
return ((Integer.rotateLeft(
(int)(val & INT_MASK), pos)) & INT_MASK);
}
}
| 16,161 | 33.907127 | 94 | java |
null | orc-main/java/core/src/java/org/apache/orc/util/Murmur3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
/**
 * Murmur3 is the successor to Murmur2, a family of fast non-cryptographic hash algorithms.
* <p>
* Murmur3 32 and 128 bit variants.
* 32-bit Java port of https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp#94
* 128-bit Java port of https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp#255
* <p>
* This is a public domain code with no copyrights.
* From homepage of MurmurHash (https://code.google.com/p/smhasher/),
* "All MurmurHash versions are public domain software, and the author disclaims all copyright
* to their code."
*/
public class Murmur3 {
// from 64-bit linear congruential generator
public static final long NULL_HASHCODE = 2862933555777941757L;
// Constants for 32 bit variant
private static final int C1_32 = 0xcc9e2d51;
private static final int C2_32 = 0x1b873593;
private static final int R1_32 = 15;
private static final int R2_32 = 13;
private static final int M_32 = 5;
private static final int N_32 = 0xe6546b64;
// Constants for 128 bit variant
private static final long C1 = 0x87c37b91114253d5L;
private static final long C2 = 0x4cf5ad432745937fL;
private static final int R1 = 31;
private static final int R2 = 27;
private static final int R3 = 33;
private static final int M = 5;
private static final int N1 = 0x52dce729;
private static final int N2 = 0x38495ab5;
private static final int DEFAULT_SEED = 104729;
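  // Illustrative sketch (not part of the original source): hashing a byte array
  // with the public variants below; the overloads without a seed argument use
  // DEFAULT_SEED.
  //
  //   byte[] data = "orc".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  //   int h32 = Murmur3.hash32(data);       // 32-bit variant
  //   long h64 = Murmur3.hash64(data);      // MSB 8 bytes of the 128-bit variant
  //   long[] h128 = Murmur3.hash128(data);  // full 128-bit result as two longs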
/**
* Murmur3 32-bit variant.
*
* @param data - input byte array
* @return - hashcode
*/
public static int hash32(byte[] data) {
return hash32(data, data.length, DEFAULT_SEED);
}
/**
* Murmur3 32-bit variant.
*
* @param data - input byte array
* @param length - length of array
   * @param seed - seed (the overloads that omit it use DEFAULT_SEED)
* @return - hashcode
*/
public static int hash32(byte[] data, int length, int seed) {
int hash = seed;
final int nblocks = length >> 2;
// body
for (int i = 0; i < nblocks; i++) {
int i_4 = i << 2;
int k = (data[i_4] & 0xff)
| ((data[i_4 + 1] & 0xff) << 8)
| ((data[i_4 + 2] & 0xff) << 16)
| ((data[i_4 + 3] & 0xff) << 24);
// mix functions
k *= C1_32;
k = Integer.rotateLeft(k, R1_32);
k *= C2_32;
hash ^= k;
hash = Integer.rotateLeft(hash, R2_32) * M_32 + N_32;
}
// tail
int idx = nblocks << 2;
int k1 = 0;
switch (length - idx) {
case 3:
k1 ^= data[idx + 2] << 16;
case 2:
k1 ^= data[idx + 1] << 8;
case 1:
k1 ^= data[idx];
// mix functions
k1 *= C1_32;
k1 = Integer.rotateLeft(k1, R1_32);
k1 *= C2_32;
hash ^= k1;
}
// finalization
hash ^= length;
hash ^= (hash >>> 16);
hash *= 0x85ebca6b;
hash ^= (hash >>> 13);
hash *= 0xc2b2ae35;
hash ^= (hash >>> 16);
return hash;
}
/**
* Murmur3 64-bit variant. This is essentially MSB 8 bytes of Murmur3 128-bit variant.
*
* @param data - input byte array
* @return - hashcode
*/
public static long hash64(byte[] data) {
return hash64(data, 0, data.length, DEFAULT_SEED);
}
public static long hash64(byte[] data, int offset, int length) {
return hash64(data, offset, length, DEFAULT_SEED);
}
/**
* Murmur3 64-bit variant. This is essentially MSB 8 bytes of Murmur3 128-bit variant.
*
* @param data - input byte array
   * @param offset - offset into the byte array
   * @param length - length of array
   * @param seed - seed (the overloads that omit it use DEFAULT_SEED)
* @return - hashcode
*/
public static long hash64(byte[] data, int offset, int length, int seed) {
long hash = seed;
final int nblocks = length >> 3;
// body
for (int i = 0; i < nblocks; i++) {
final int i8 = i << 3;
long k = ((long) data[offset + i8] & 0xff)
| (((long) data[offset + i8 + 1] & 0xff) << 8)
| (((long) data[offset + i8 + 2] & 0xff) << 16)
| (((long) data[offset + i8 + 3] & 0xff) << 24)
| (((long) data[offset + i8 + 4] & 0xff) << 32)
| (((long) data[offset + i8 + 5] & 0xff) << 40)
| (((long) data[offset + i8 + 6] & 0xff) << 48)
| (((long) data[offset + i8 + 7] & 0xff) << 56);
// mix functions
k *= C1;
k = Long.rotateLeft(k, R1);
k *= C2;
hash ^= k;
hash = Long.rotateLeft(hash, R2) * M + N1;
}
// tail
long k1 = 0;
int tailStart = nblocks << 3;
switch (length - tailStart) {
case 7:
k1 ^= ((long) data[offset + tailStart + 6] & 0xff) << 48;
case 6:
k1 ^= ((long) data[offset + tailStart + 5] & 0xff) << 40;
case 5:
k1 ^= ((long) data[offset + tailStart + 4] & 0xff) << 32;
case 4:
k1 ^= ((long) data[offset + tailStart + 3] & 0xff) << 24;
case 3:
k1 ^= ((long) data[offset + tailStart + 2] & 0xff) << 16;
case 2:
k1 ^= ((long) data[offset + tailStart + 1] & 0xff) << 8;
case 1:
k1 ^= ((long) data[offset + tailStart] & 0xff);
k1 *= C1;
k1 = Long.rotateLeft(k1, R1);
k1 *= C2;
hash ^= k1;
}
// finalization
hash ^= length;
hash = fmix64(hash);
return hash;
}
/**
* Murmur3 128-bit variant.
*
* @param data - input byte array
* @return - hashcode (2 longs)
*/
public static long[] hash128(byte[] data) {
return hash128(data, 0, data.length, DEFAULT_SEED);
}
/**
* Murmur3 128-bit variant.
*
* @param data - input byte array
* @param offset - the first element of array
* @param length - length of array
   * @param seed - seed (the overloads that omit it use DEFAULT_SEED)
* @return - hashcode (2 longs)
*/
public static long[] hash128(byte[] data, int offset, int length, int seed) {
long h1 = seed;
long h2 = seed;
final int nblocks = length >> 4;
// body
for (int i = 0; i < nblocks; i++) {
final int i16 = i << 4;
long k1 = ((long) data[offset + i16] & 0xff)
| (((long) data[offset + i16 + 1] & 0xff) << 8)
| (((long) data[offset + i16 + 2] & 0xff) << 16)
| (((long) data[offset + i16 + 3] & 0xff) << 24)
| (((long) data[offset + i16 + 4] & 0xff) << 32)
| (((long) data[offset + i16 + 5] & 0xff) << 40)
| (((long) data[offset + i16 + 6] & 0xff) << 48)
| (((long) data[offset + i16 + 7] & 0xff) << 56);
long k2 = ((long) data[offset + i16 + 8] & 0xff)
| (((long) data[offset + i16 + 9] & 0xff) << 8)
| (((long) data[offset + i16 + 10] & 0xff) << 16)
| (((long) data[offset + i16 + 11] & 0xff) << 24)
| (((long) data[offset + i16 + 12] & 0xff) << 32)
| (((long) data[offset + i16 + 13] & 0xff) << 40)
| (((long) data[offset + i16 + 14] & 0xff) << 48)
| (((long) data[offset + i16 + 15] & 0xff) << 56);
// mix functions for k1
k1 *= C1;
k1 = Long.rotateLeft(k1, R1);
k1 *= C2;
h1 ^= k1;
h1 = Long.rotateLeft(h1, R2);
h1 += h2;
h1 = h1 * M + N1;
// mix functions for k2
k2 *= C2;
k2 = Long.rotateLeft(k2, R3);
k2 *= C1;
h2 ^= k2;
h2 = Long.rotateLeft(h2, R1);
h2 += h1;
h2 = h2 * M + N2;
}
// tail
long k1 = 0;
long k2 = 0;
int tailStart = nblocks << 4;
switch (length - tailStart) {
case 15:
k2 ^= (long) (data[offset + tailStart + 14] & 0xff) << 48;
case 14:
k2 ^= (long) (data[offset + tailStart + 13] & 0xff) << 40;
case 13:
k2 ^= (long) (data[offset + tailStart + 12] & 0xff) << 32;
case 12:
k2 ^= (long) (data[offset + tailStart + 11] & 0xff) << 24;
case 11:
k2 ^= (long) (data[offset + tailStart + 10] & 0xff) << 16;
case 10:
k2 ^= (long) (data[offset + tailStart + 9] & 0xff) << 8;
case 9:
k2 ^= (long) (data[offset + tailStart + 8] & 0xff);
k2 *= C2;
k2 = Long.rotateLeft(k2, R3);
k2 *= C1;
h2 ^= k2;
case 8:
k1 ^= (long) (data[offset + tailStart + 7] & 0xff) << 56;
case 7:
k1 ^= (long) (data[offset + tailStart + 6] & 0xff) << 48;
case 6:
k1 ^= (long) (data[offset + tailStart + 5] & 0xff) << 40;
case 5:
k1 ^= (long) (data[offset + tailStart + 4] & 0xff) << 32;
case 4:
k1 ^= (long) (data[offset + tailStart + 3] & 0xff) << 24;
case 3:
k1 ^= (long) (data[offset + tailStart + 2] & 0xff) << 16;
case 2:
k1 ^= (long) (data[offset + tailStart + 1] & 0xff) << 8;
case 1:
k1 ^= (long) (data[offset + tailStart] & 0xff);
k1 *= C1;
k1 = Long.rotateLeft(k1, R1);
k1 *= C2;
h1 ^= k1;
}
// finalization
h1 ^= length;
h2 ^= length;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
return new long[]{h1, h2};
}
private static long fmix64(long h) {
h ^= (h >>> 33);
h *= 0xff51afd7ed558ccdL;
h ^= (h >>> 33);
h *= 0xc4ceb9fe1a85ec53L;
h ^= (h >>> 33);
return h;
}
}
| 9,962 | 28.651786 | 98 | java |
null | orc-main/java/core/src/java/org/apache/orc/util/StreamWrapperFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import org.apache.orc.OrcConf;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
/**
* This class provides an adaptor so that tools that want to read an ORC
* file from an FSDataInputStream can do so. Create an instance with the
* stream, path, and fileSize and pass it in to the reader as the FileSystem.
*/
public class StreamWrapperFileSystem extends FileSystem {
private final FSDataInputStream stream;
private final FileStatus status;
/**
* Create a FileSystem that only has information about the given stream.
* @param stream the data of the stream
* @param status the file status of the stream
* @param conf the configuration to use
*/
public StreamWrapperFileSystem(FSDataInputStream stream,
FileStatus status,
Configuration conf) {
this.stream = stream;
this.status = status;
setConf(conf);
}
/**
* Create a FileSystem that only has information about the given stream.
* @param stream the data of the stream
* @param path the file name of the stream
* @param fileSize the length of the stream in bytes
* @param conf the configuration to use
*/
public StreamWrapperFileSystem(FSDataInputStream stream,
Path path,
long fileSize,
Configuration conf) {
this(stream,
new FileStatus(fileSize, false, 1, OrcConf.BLOCK_SIZE.getInt(conf), 0, path),
conf);
}
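  // Illustrative sketch (not part of the original source): reading ORC data from
  // an already-open stream by handing this wrapper to the reader. "stream",
  // "path", "fileSize", and "conf" are assumed to be supplied by the caller.
  //
  //   FileSystem fs = new StreamWrapperFileSystem(stream, path, fileSize, conf);
  //   Reader reader = OrcFile.createReader(path,
  //       OrcFile.readerOptions(conf).filesystem(fs));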
@Override
public URI getUri() {
return URI.create("stream://" + status.getPath());
}
@Override
public FSDataInputStream open(Path path, int bufferSize) throws IOException {
if (status.getPath().equals(path)) {
return stream;
} else {
throw new FileNotFoundException(path.toString());
}
}
@Override
public FSDataOutputStream create(Path path, FsPermission fsPermission,
boolean b, int i, short i1, long l,
Progressable progressable) {
throw new UnsupportedOperationException("Write operations on " +
getClass().getName());
}
@Override
public FSDataOutputStream append(Path path, int i,
Progressable progressable) {
throw new UnsupportedOperationException("Write operations on " +
getClass().getName());
}
@Override
public boolean rename(Path path, Path path1) {
throw new UnsupportedOperationException("Write operations on " +
getClass().getName());
}
@Override
public boolean delete(Path path, boolean b) {
throw new UnsupportedOperationException("Write operations on " +
getClass().getName());
}
@Override
public void setWorkingDirectory(Path path) {
throw new UnsupportedOperationException("Write operations on " +
getClass().getName());
}
@Override
public Path getWorkingDirectory() {
return status.getPath().getParent();
}
@Override
public boolean mkdirs(Path path, FsPermission fsPermission) {
throw new UnsupportedOperationException("Write operations on " +
getClass().getName());
}
@Override
public FileStatus[] listStatus(Path path) throws IOException {
return new FileStatus[]{getFileStatus(path)};
}
@Override
public FileStatus getFileStatus(Path path) throws IOException {
if (status.getPath().equals(path)) {
return status;
} else {
throw new FileNotFoundException(path.toString());
}
}
}
| 4,752 | 31.554795 | 85 | java |
null | orc-main/java/core/src/java/org/threeten/extra/chrono/HybridChronology.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.threeten.extra.chrono;
import java.io.Serializable;
import java.time.Clock;
import java.time.DateTimeException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.chrono.AbstractChronology;
import java.time.chrono.ChronoLocalDateTime;
import java.time.chrono.ChronoZonedDateTime;
import java.time.chrono.Chronology;
import java.time.chrono.Era;
import java.time.chrono.IsoChronology;
import java.time.format.ResolverStyle;
import java.time.temporal.ChronoField;
import java.time.temporal.TemporalAccessor;
import java.time.temporal.TemporalField;
import java.time.temporal.ValueRange;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
* The Julian-Gregorian hybrid calendar system.
* <p>
* The British calendar system follows the rules of the Julian calendar
* until 1752 and the rules of the Gregorian (ISO) calendar since then.
* The Julian differs from the Gregorian only in terms of the leap year rule.
* <p>
* The Julian and Gregorian calendar systems are linked to Rome and the Vatican
* with the Julian preceding the Gregorian. The Gregorian was introduced to
* handle the drift of the seasons through the year due to the inaccurate
* Julian leap year rules. When first introduced by the Vatican in 1582,
* the cutover resulted in a "gap" of 10 days.
* <p>
* While the calendar was introduced in 1582, it was not adopted everywhere.
 * Britain did not adopt it until 1752, when Wednesday 2nd September 1752
* was followed by Thursday 14th September 1752.
* <p>
* This chronology implements the proleptic Julian calendar system followed by
* the proleptic Gregorian calendar system (identical to the ISO calendar system).
* Dates are aligned such that {@code 0001-01-01 (British)} is {@code 0000-12-30 (ISO)}.
* <p>
* This class implements a calendar where January 1st is the start of the year.
* The history of the start of the year is complex and using the current standard
* is the most consistent.
* <p>
* The eras of this calendar system are defined by {@link JulianEra} to avoid unnecessary duplication.
* <p>
* The fields are defined as follows:
* <ul>
* <li>era - There are two eras, the current 'Anno Domini' (AD) and the previous era 'Before Christ' (BC).
* <li>year-of-era - The year-of-era for the current era increases uniformly from the epoch at year one.
* For the previous era the year increases from one as time goes backwards.
* <li>proleptic-year - The proleptic year is the same as the year-of-era for the
* current era. For the previous era, years have zero, then negative values.
* <li>month-of-year - There are 12 months in a year, numbered from 1 to 12.
* <li>day-of-month - There are between 28 and 31 days in each month, numbered from 1 to 31.
* Months 4, 6, 9 and 11 have 30 days, Months 1, 3, 5, 7, 8, 10 and 12 have 31 days.
* Month 2 has 28 days, or 29 in a leap year.
 *  The cutover month has a value range from 1 to 30, but a length of 19.
* <li>day-of-year - There are 365 days in a standard year and 366 in a leap year.
* The days are numbered from 1 to 365 or 1 to 366.
 *  The cutover year has values from 1 to 355 and a length of 355 days.
* </ul>
*
* <h3>Implementation Requirements</h3>
* This class is immutable and thread-safe.
*/
public final class HybridChronology
extends AbstractChronology
implements Serializable {
/**
   * Singleton instance for the Hybrid chronology.
*/
public static final HybridChronology INSTANCE = new HybridChronology();
/**
* The cutover date, October 15, 1582.
*/
public static final LocalDate CUTOVER = LocalDate.of(1582, 10, 15);
/**
* The number of cutover days.
*/
static final int CUTOVER_DAYS = 10;
/**
* The cutover year.
*/
static final int CUTOVER_YEAR = 1582;
/**
* Serialization version.
*/
private static final long serialVersionUID = 87235724675472658L;
/**
* Range of day-of-year.
*/
static final ValueRange DOY_RANGE = ValueRange.of(1, 355, 366);
/**
* Range of aligned-week-of-month.
*/
static final ValueRange ALIGNED_WOM_RANGE = ValueRange.of(1, 3, 5);
/**
* Range of aligned-week-of-year.
*/
static final ValueRange ALIGNED_WOY_RANGE = ValueRange.of(1, 51, 53);
/**
* Range of proleptic-year.
*/
static final ValueRange YEAR_RANGE = ValueRange.of(-999_998, 999_999);
/**
* Range of year.
*/
static final ValueRange YOE_RANGE = ValueRange.of(1, 999_999);
/**
* Range of proleptic month.
*/
static final ValueRange PROLEPTIC_MONTH_RANGE =
ValueRange.of(-999_998 * 12L, 999_999 * 12L + 11);
/**
   * Constructor that is public only to satisfy the {@code ServiceLoader}.
*
* @deprecated Use the singleton {@link #INSTANCE} instead.
*/
@Deprecated
public HybridChronology() {
}
/**
* Resolve singleton.
*
* @return the singleton instance, not null
*/
private Object readResolve() {
return INSTANCE;
}
//-------------------------------------------------------------------------
/**
* Gets the cutover date between the Julian and Gregorian calendar.
* <p>
* The date returned is the first date that the Gregorian (ISO) calendar applies,
   * which is Friday 15th October 1582.
*
* @return the first date after the cutover, not null
*/
public LocalDate getCutover() {
return CUTOVER;
}
//-----------------------------------------------------------------------
/**
* Gets the ID of the chronology - 'Hybrid'.
* <p>
* The ID uniquely identifies the {@code Chronology}.
* It can be used to lookup the {@code Chronology} using {@link Chronology#of(String)}.
*
* @return the chronology ID - 'Hybrid'
* @see #getCalendarType()
*/
@Override
public String getId() {
return "Hybrid";
}
/**
* Gets the calendar type of the underlying calendar system, which returns null.
* <p>
* The <em>Unicode Locale Data Markup Language (LDML)</em> specification
* does not define an identifier for this calendar system, thus null is returned.
*
* @return the calendar system type, null
* @see #getId()
*/
@Override
public String getCalendarType() {
return null;
}
//-----------------------------------------------------------------------
/**
* Obtains a local date in British Cutover calendar system from the
* era, year-of-era, month-of-year and day-of-month fields.
* <p>
   * Dates in the middle of the cutover gap, such as the 10th October 1582,
   * will not throw an exception. Instead, the date will be treated as a Julian date
   * and converted to an ISO date, with the day of month shifted by 10 days.
*
* @param era the British Cutover era, not null
* @param yearOfEra the year-of-era
* @param month the month-of-year
* @param dayOfMonth the day-of-month
* @return the British Cutover local date, not null
* @throws DateTimeException if unable to create the date
* @throws ClassCastException if the {@code era} is not a {@code JulianEra}
*/
@Override
public HybridDate date(Era era, int yearOfEra, int month, int dayOfMonth) {
return date(prolepticYear(era, yearOfEra), month, dayOfMonth);
}
/**
* Obtains a local date in British Cutover calendar system from the
* proleptic-year, month-of-year and day-of-month fields.
* <p>
   * Dates in the middle of the cutover gap, such as the 10th October 1582,
   * will not throw an exception. Instead, the date will be treated as a Julian date
   * and converted to an ISO date, with the day of month shifted by 10 days.
*
* @param prolepticYear the proleptic-year
* @param month the month-of-year
* @param dayOfMonth the day-of-month
* @return the British Cutover local date, not null
* @throws DateTimeException if unable to create the date
*/
@Override
public HybridDate date(int prolepticYear, int month, int dayOfMonth) {
return HybridDate.of(prolepticYear, month, dayOfMonth);
}
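  // Illustrative sketch (not part of the original source): obtaining dates on
  // either side of the 1582 cutover through this chronology.
  //
  //   HybridDate julianSide = HybridChronology.INSTANCE.date(1582, 10, 4);
  //   HybridDate isoSide = HybridChronology.INSTANCE.date(1582, 10, 15);
  //   LocalDate iso = LocalDate.from(isoSide);   // 1582-10-15 in the ISO calendar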
/**
* Obtains a local date in British Cutover calendar system from the
* era, year-of-era and day-of-year fields.
* <p>
   * The day-of-year takes into account the cutover, thus there are only 355 days in the cutover year.
*
* @param era the British Cutover era, not null
* @param yearOfEra the year-of-era
* @param dayOfYear the day-of-year
* @return the British Cutover local date, not null
* @throws DateTimeException if unable to create the date
* @throws ClassCastException if the {@code era} is not a {@code JulianEra}
*/
@Override
public HybridDate dateYearDay(Era era, int yearOfEra, int dayOfYear) {
return dateYearDay(prolepticYear(era, yearOfEra), dayOfYear);
}
/**
* Obtains a local date in British Cutover calendar system from the
* proleptic-year and day-of-year fields.
* <p>
   * The day-of-year takes into account the cutover, thus there are only 355 days in the cutover year.
*
* @param prolepticYear the proleptic-year
* @param dayOfYear the day-of-year
* @return the British Cutover local date, not null
* @throws DateTimeException if unable to create the date
*/
@Override
public HybridDate dateYearDay(int prolepticYear, int dayOfYear) {
return HybridDate.ofYearDay(prolepticYear, dayOfYear);
}
/**
* Obtains a local date in the British Cutover calendar system from the epoch-day.
*
* @param epochDay the epoch day
* @return the British Cutover local date, not null
* @throws DateTimeException if unable to create the date
*/
@Override // override with covariant return type
public HybridDate dateEpochDay(long epochDay) {
return HybridDate.ofEpochDay(epochDay);
}
//-------------------------------------------------------------------------
/**
* Obtains the current British Cutover local date from the system clock in the default time-zone.
* <p>
* This will query the {@link Clock#systemDefaultZone() system clock} in the default
* time-zone to obtain the current date.
* <p>
* Using this method will prevent the ability to use an alternate clock for testing
* because the clock is hard-coded.
*
* @return the current British Cutover local date using the system clock and default time-zone, not null
* @throws DateTimeException if unable to create the date
*/
@Override // override with covariant return type
public HybridDate dateNow() {
return HybridDate.now();
}
/**
* Obtains the current British Cutover local date from the system clock in the specified time-zone.
* <p>
* This will query the {@link Clock#system(ZoneId) system clock} to obtain the current date.
* Specifying the time-zone avoids dependence on the default time-zone.
* <p>
* Using this method will prevent the ability to use an alternate clock for testing
* because the clock is hard-coded.
*
* @param zone the zone ID to use, not null
* @return the current British Cutover local date using the system clock, not null
* @throws DateTimeException if unable to create the date
*/
@Override // override with covariant return type
public HybridDate dateNow(ZoneId zone) {
return HybridDate.now(zone);
}
/**
* Obtains the current British Cutover local date from the specified clock.
* <p>
* This will query the specified clock to obtain the current date - today.
* Using this method allows the use of an alternate clock for testing.
* The alternate clock may be introduced using {@link Clock dependency injection}.
*
* @param clock the clock to use, not null
* @return the current British Cutover local date, not null
* @throws DateTimeException if unable to create the date
*/
@Override // override with covariant return type
public HybridDate dateNow(Clock clock) {
return HybridDate.now(clock);
}
//-------------------------------------------------------------------------
/**
* Obtains a British Cutover local date from another date-time object.
*
* @param temporal the date-time object to convert, not null
* @return the British Cutover local date, not null
* @throws DateTimeException if unable to create the date
*/
@Override
public HybridDate date(TemporalAccessor temporal) {
return HybridDate.from(temporal);
}
/**
* Obtains a British Cutover local date-time from another date-time object.
*
* @param temporal the date-time object to convert, not null
* @return the British Cutover local date-time, not null
* @throws DateTimeException if unable to create the date-time
*/
@Override
@SuppressWarnings("unchecked")
public ChronoLocalDateTime<HybridDate> localDateTime(TemporalAccessor temporal) {
return (ChronoLocalDateTime<HybridDate>) super.localDateTime(temporal);
}
/**
* Obtains a British Cutover zoned date-time from another date-time object.
*
* @param temporal the date-time object to convert, not null
* @return the British Cutover zoned date-time, not null
* @throws DateTimeException if unable to create the date-time
*/
@Override
@SuppressWarnings("unchecked")
public ChronoZonedDateTime<HybridDate> zonedDateTime(TemporalAccessor temporal) {
return (ChronoZonedDateTime<HybridDate>) super.zonedDateTime(temporal);
}
/**
* Obtains a British Cutover zoned date-time in this chronology from an {@code Instant}.
*
* @param instant the instant to create the date-time from, not null
* @param zone the time-zone, not null
* @return the British Cutover zoned date-time, not null
* @throws DateTimeException if the result exceeds the supported range
*/
@Override
@SuppressWarnings("unchecked")
public ChronoZonedDateTime<HybridDate> zonedDateTime(Instant instant, ZoneId zone) {
return (ChronoZonedDateTime<HybridDate>) super.zonedDateTime(instant, zone);
}
//-----------------------------------------------------------------------
/**
* Checks if the specified year is a leap year.
* <p>
   * The result is the same as {@link JulianChronology#isLeapYear(long)} for
   * the cutover year (1582) and earlier, and {@link IsoChronology#isLeapYear(long)} otherwise.
* This method does not validate the year passed in, and only has a
* well-defined result for years in the supported range.
*
* @param prolepticYear the proleptic-year to check, not validated for range
* @return true if the year is a leap year
*/
@Override
public boolean isLeapYear(long prolepticYear) {
if (prolepticYear <= CUTOVER_YEAR) {
return JulianChronology.INSTANCE.isLeapYear(prolepticYear);
}
return IsoChronology.INSTANCE.isLeapYear(prolepticYear);
}
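  // Illustrative sketch (not part of the original source): the leap-year rule
  // switches at the cutover year, so 1500 is a leap year under the Julian rule
  // even though the ISO rule rejects it.
  //
  //   boolean hybrid = HybridChronology.INSTANCE.isLeapYear(1500);   // true
  //   boolean iso = IsoChronology.INSTANCE.isLeapYear(1500);         // false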
@Override
public int prolepticYear(Era era, int yearOfEra) {
if (era instanceof JulianEra == false) {
throw new ClassCastException("Era must be JulianEra");
}
return (era == JulianEra.AD ? yearOfEra : 1 - yearOfEra);
}
@Override
public JulianEra eraOf(int eraValue) {
return JulianEra.of(eraValue);
}
@Override
public List<Era> eras() {
return Arrays.<Era>asList(JulianEra.values());
}
//-----------------------------------------------------------------------
@Override
public ValueRange range(ChronoField field) {
switch (field) {
case DAY_OF_YEAR:
return DOY_RANGE;
case ALIGNED_WEEK_OF_MONTH:
return ALIGNED_WOM_RANGE;
case ALIGNED_WEEK_OF_YEAR:
return ALIGNED_WOY_RANGE;
case PROLEPTIC_MONTH:
return PROLEPTIC_MONTH_RANGE;
case YEAR_OF_ERA:
return YOE_RANGE;
case YEAR:
return YEAR_RANGE;
default:
break;
}
return field.range();
}
//-----------------------------------------------------------------------
@Override // override for return type
public HybridDate resolveDate(
Map<TemporalField, Long> fieldValues, ResolverStyle resolverStyle) {
return (HybridDate) super.resolveDate(fieldValues, resolverStyle);
}
}
| 16,845 | 35.227957 | 106 | java |
null | orc-main/java/core/src/java/org/threeten/extra/chrono/HybridDate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.threeten.extra.chrono;
import java.io.Serializable;
import java.time.Clock;
import java.time.DateTimeException;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.chrono.ChronoLocalDate;
import java.time.chrono.ChronoLocalDateTime;
import java.time.chrono.ChronoPeriod;
import java.time.temporal.ChronoField;
import java.time.temporal.Temporal;
import java.time.temporal.TemporalAccessor;
import java.time.temporal.TemporalAdjuster;
import java.time.temporal.TemporalAmount;
import java.time.temporal.TemporalField;
import java.time.temporal.TemporalQueries;
import java.time.temporal.TemporalQuery;
import java.time.temporal.TemporalUnit;
import java.time.temporal.ValueRange;
import java.util.Objects;
import static org.threeten.extra.chrono.HybridChronology.CUTOVER;
import static org.threeten.extra.chrono.HybridChronology.CUTOVER_DAYS;
import static org.threeten.extra.chrono.HybridChronology.CUTOVER_YEAR;
/**
* A date in the British Cutover calendar system.
* <p>
* This date operates using the {@linkplain HybridChronology British Cutover calendar}.
*
* <h3>Implementation Requirements</h3>
* This class is immutable and thread-safe.
* <p>
* This class must be treated as a value type. Do not synchronize, rely on the
* identity hash code or use the distinction between equals() and ==.
*/
public final class HybridDate
extends AbstractDate
implements ChronoLocalDate, Serializable {
/**
* Serialization version.
*/
private static final long serialVersionUID = -9626278512674L;
/**
* The underlying date.
*/
private final LocalDate isoDate;
/**
* The underlying Julian date if before the cutover.
*/
private final transient JulianDate julianDate;
//-----------------------------------------------------------------------
/**
* Obtains the current {@code HybridDate} from the system clock in the default time-zone.
* <p>
* This will query the {@link Clock#systemDefaultZone() system clock} in the default
* time-zone to obtain the current date.
* <p>
* Using this method will prevent the ability to use an alternate clock for testing
* because the clock is hard-coded.
*
* @return the current date using the system clock and default time-zone, not null
*/
public static HybridDate now() {
return now(Clock.systemDefaultZone());
}
/**
* Obtains the current {@code HybridDate} from the system clock in the specified time-zone.
* <p>
* This will query the {@link Clock#system(ZoneId) system clock} to obtain the current date.
* Specifying the time-zone avoids dependence on the default time-zone.
* <p>
* Using this method will prevent the ability to use an alternate clock for testing
* because the clock is hard-coded.
*
* @param zone the zone ID to use, not null
* @return the current date using the system clock, not null
*/
public static HybridDate now(ZoneId zone) {
return now(Clock.system(zone));
}
/**
* Obtains the current {@code HybridDate} from the specified clock.
* <p>
* This will query the specified clock to obtain the current date - today.
* Using this method allows the use of an alternate clock for testing.
* The alternate clock may be introduced using {@linkplain Clock dependency injection}.
*
* @param clock the clock to use, not null
* @return the current date, not null
* @throws DateTimeException if the current date cannot be obtained
*/
public static HybridDate now(Clock clock) {
return new HybridDate(LocalDate.now(clock));
}
/**
* Obtains a {@code HybridDate} representing a date in the British Cutover calendar
* system from the proleptic-year, month-of-year and day-of-month fields.
* <p>
* This returns a {@code HybridDate} with the specified fields.
* <p>
   * Dates in the middle of the cutover gap, such as the 10th October 1582,
   * will not throw an exception. Instead, the date will be treated as a Julian date
   * and converted to an ISO date, with the day of month shifted by 10 days.
* <p>
* Invalid dates, such as September 31st will throw an exception.
*
* @param prolepticYear the British Cutover proleptic-year
* @param month the British Cutover month-of-year, from 1 to 12
* @param dayOfMonth the British Cutover day-of-month, from 1 to 31
* @return the date in British Cutover calendar system, not null
* @throws DateTimeException if the value of any field is out of range,
* or if the day-of-month is invalid for the month-year
*/
public static HybridDate of(int prolepticYear, int month, int dayOfMonth) {
return HybridDate.create(prolepticYear, month, dayOfMonth);
}
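  // Illustrative sketch (not part of the original source): a date inside the
  // cutover gap is interpreted as a Julian date, assuming the standard 10-day
  // Julian/Gregorian offset at the 1582 cutover.
  //
  //   HybridDate gapDate = HybridDate.of(1582, 10, 10);
  //   LocalDate iso = LocalDate.from(gapDate);   // 1582-10-20 in the ISO calendar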
/**
* Obtains a {@code HybridDate} from a temporal object.
* <p>
* This obtains a date in the British Cutover calendar system based on the specified temporal.
* A {@code TemporalAccessor} represents an arbitrary set of date and time information,
* which this factory converts to an instance of {@code HybridDate}.
* <p>
* The conversion uses the {@link ChronoField#EPOCH_DAY EPOCH_DAY}
* field, which is standardized across calendar systems.
* <p>
* This method matches the signature of the functional interface {@link TemporalQuery}
* allowing it to be used as a query via method reference, {@code HybridDate::from}.
*
* @param temporal the temporal object to convert, not null
* @return the date in British Cutover calendar system, not null
* @throws DateTimeException if unable to convert to a {@code HybridDate}
*/
public static HybridDate from(TemporalAccessor temporal) {
if (temporal instanceof HybridDate) {
return (HybridDate) temporal;
}
return new HybridDate(LocalDate.from(temporal));
}
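  // Illustrative sketch (not part of the original source): using this factory as
  // a TemporalQuery via a method reference, as noted in the javadoc above.
  //
  //   HybridDate d = LocalDate.of(1700, 3, 1).query(HybridDate::from);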
//-----------------------------------------------------------------------
/**
* Obtains a {@code HybridDate} representing a date in the British Cutover calendar
* system from the proleptic-year and day-of-year fields.
* <p>
* This returns a {@code HybridDate} with the specified fields.
* The day must be valid for the year, otherwise an exception will be thrown.
*
* @param prolepticYear the British Cutover proleptic-year
* @param dayOfYear the British Cutover day-of-year, from 1 to 366
* @return the date in British Cutover calendar system, not null
* @throws DateTimeException if the value of any field is out of range,
* or if the day-of-year is invalid for the year
*/
static HybridDate ofYearDay(int prolepticYear, int dayOfYear) {
if (prolepticYear < CUTOVER_YEAR || (prolepticYear == CUTOVER_YEAR && dayOfYear <= 246)) {
JulianDate julian = JulianDate.ofYearDay(prolepticYear, dayOfYear);
return new HybridDate(julian);
} else if (prolepticYear == CUTOVER_YEAR) {
LocalDate iso = LocalDate.ofYearDay(prolepticYear, dayOfYear + CUTOVER_DAYS);
return new HybridDate(iso);
} else {
LocalDate iso = LocalDate.ofYearDay(prolepticYear, dayOfYear);
return new HybridDate(iso);
}
}
/**
* Obtains a {@code HybridDate} representing a date in the British Cutover calendar
* system from the epoch-day.
*
* @param epochDay the epoch day to convert based on 1970-01-01 (ISO)
* @return the date in British Cutover calendar system, not null
* @throws DateTimeException if the epoch-day is out of range
*/
static HybridDate ofEpochDay(final long epochDay) {
return new HybridDate(LocalDate.ofEpochDay(epochDay));
}
/**
* Creates a {@code HybridDate} validating the input.
*
* @param prolepticYear the British Cutover proleptic-year
* @param month the British Cutover month-of-year, from 1 to 12
* @param dayOfMonth the British Cutover day-of-month, from 1 to 31
* @return the date in British Cutover calendar system, not null
* @throws DateTimeException if the value of any field is out of range,
* or if the day-of-month is invalid for the month-year
*/
static HybridDate create(int prolepticYear, int month, int dayOfMonth) {
if (prolepticYear < CUTOVER_YEAR) {
JulianDate julian = JulianDate.of(prolepticYear, month, dayOfMonth);
return new HybridDate(julian);
} else {
LocalDate iso = LocalDate.of(prolepticYear, month, dayOfMonth);
if (iso.isBefore(CUTOVER)) {
JulianDate julian = JulianDate.of(prolepticYear, month, dayOfMonth);
return new HybridDate(julian);
}
return new HybridDate(iso);
}
}
//-----------------------------------------------------------------------
/**
* Creates an instance from an ISO date.
*
* @param isoDate the standard local date, not null
*/
HybridDate(LocalDate isoDate) {
Objects.requireNonNull(isoDate, "isoDate");
this.isoDate = isoDate;
this.julianDate = (isoDate.isBefore(CUTOVER) ? JulianDate.from(isoDate) : null);
}
/**
* Creates an instance from a Julian date.
*
* @param julianDate the Julian date before the cutover, not null
*/
HybridDate(JulianDate julianDate) {
Objects.requireNonNull(julianDate, "julianDate");
this.isoDate = LocalDate.from(julianDate);
this.julianDate = (isoDate.isBefore(CUTOVER) ? julianDate : null);
}
/**
* Validates the object.
*
* @return the resolved date, not null
*/
private Object readResolve() {
return new HybridDate(isoDate);
}
//-----------------------------------------------------------------------
private boolean isCutoverYear() {
return isoDate.getYear() == CUTOVER_YEAR && isoDate.getDayOfYear() > CUTOVER_DAYS;
}
private boolean isCutoverMonth() {
return isoDate.getYear() == CUTOVER_YEAR &&
isoDate.getMonthValue() == 9 && isoDate.getDayOfMonth() > CUTOVER_DAYS;
}
//-------------------------------------------------------------------------
@Override
int getAlignedDayOfWeekInMonth() {
if (isCutoverMonth() && julianDate == null) {
return ((getDayOfMonth() - 1 - CUTOVER_DAYS) % lengthOfWeek()) + 1;
}
return super.getAlignedDayOfWeekInMonth();
}
@Override
int getAlignedWeekOfMonth() {
if (isCutoverMonth() && julianDate == null) {
return ((getDayOfMonth() - 1 - CUTOVER_DAYS) / lengthOfWeek()) + 1;
}
return super.getAlignedWeekOfMonth();
}
@Override
int getProlepticYear() {
return (julianDate != null ? julianDate.getProlepticYear() : isoDate.getYear());
}
@Override
int getMonth() {
return (julianDate != null ? julianDate.getMonth() : isoDate.getMonthValue());
}
@Override
int getDayOfMonth() {
return (julianDate != null ? julianDate.getDayOfMonth() : isoDate.getDayOfMonth());
}
@Override
int getDayOfYear() {
if (julianDate != null) {
return julianDate.getDayOfYear();
}
if (isoDate.getYear() == CUTOVER_YEAR) {
return isoDate.getDayOfYear() - CUTOVER_DAYS;
}
return isoDate.getDayOfYear();
}
@Override
public ValueRange rangeChrono(ChronoField field) {
switch (field) {
case DAY_OF_MONTH:
// short length, but value range still 1 to 30
if (isCutoverMonth()) {
return ValueRange.of(1, 30);
}
return ValueRange.of(1, lengthOfMonth());
case DAY_OF_YEAR:
// 1 to 355 in cutover year, otherwise 1 to 365/366
return ValueRange.of(1, lengthOfYear());
case ALIGNED_WEEK_OF_MONTH:
// 1 to 3 in cutover month, otherwise 1 to 4/5
return rangeAlignedWeekOfMonth();
case ALIGNED_WEEK_OF_YEAR:
// 1 to 51 in cutover year, otherwise 1 to 53
if (isCutoverYear()) {
return ValueRange.of(1, 51);
}
return ChronoField.ALIGNED_WEEK_OF_YEAR.range();
default:
return getChronology().range(field);
}
}
@Override
ValueRange rangeAlignedWeekOfMonth() {
if (isCutoverMonth()) {
return ValueRange.of(1, 3);
}
return ValueRange.of(1, getMonth() == 2 && isLeapYear() == false ? 4 : 5);
}
@Override
HybridDate resolvePrevious(int year, int month, int dayOfMonth) {
switch (month) {
case 2:
dayOfMonth = Math.min(dayOfMonth, getChronology().isLeapYear(year) ? 29 : 28);
break;
case 4:
case 6:
case 9:
case 11:
dayOfMonth = Math.min(dayOfMonth, 30);
break;
default:
break;
}
return create(year, month, dayOfMonth);
}
//-----------------------------------------------------------------------
/**
* Gets the chronology of this date, which is the British Cutover calendar system.
* <p>
* The {@code Chronology} represents the calendar system in use.
* The era and other fields in {@link ChronoField} are defined by the chronology.
*
* @return the British Cutover chronology, not null
*/
@Override
public HybridChronology getChronology() {
return HybridChronology.INSTANCE;
}
/**
* Gets the era applicable at this date.
* <p>
* The British Cutover calendar system has two eras, 'AD' and 'BC',
* defined by {@link JulianEra}.
*
* @return the era applicable at this date, not null
*/
@Override
public JulianEra getEra() {
return (getProlepticYear() >= 1 ? JulianEra.AD : JulianEra.BC);
}
/**
* Returns the length of the month represented by this date.
* <p>
* This returns the length of the month in days.
   * This takes into account the cutover, returning 19 in the cutover month.
*
* @return the length of the month in days, from 19 to 31
*/
@Override
public int lengthOfMonth() {
if (isCutoverMonth()) {
return 19;
}
return (julianDate != null ? julianDate.lengthOfMonth() : isoDate.lengthOfMonth());
}
/**
* Returns the length of the year represented by this date.
* <p>
* This returns the length of the year in days.
   * This takes into account the cutover, returning 355 in the cutover year.
   *
   * @return the length of the year in days, from 355 to 366
*/
@Override
public int lengthOfYear() {
if (isCutoverYear()) {
return 355;
}
return (julianDate != null ? julianDate.lengthOfYear() : isoDate.lengthOfYear());
}
//-------------------------------------------------------------------------
@Override
public HybridDate with(TemporalAdjuster adjuster) {
return (HybridDate) adjuster.adjustInto(this);
}
@Override
public HybridDate with(TemporalField field, long newValue) {
return (HybridDate) super.with(field, newValue);
}
//-----------------------------------------------------------------------
@Override
public HybridDate plus(TemporalAmount amount) {
return (HybridDate) amount.addTo(this);
}
@Override
public HybridDate plus(long amountToAdd, TemporalUnit unit) {
return (HybridDate) super.plus(amountToAdd, unit);
}
@Override
public HybridDate minus(TemporalAmount amount) {
return (HybridDate) amount.subtractFrom(this);
}
@Override
public HybridDate minus(long amountToSubtract, TemporalUnit unit) {
return (amountToSubtract == Long.MIN_VALUE ?
plus(Long.MAX_VALUE, unit).plus(1, unit) : plus(-amountToSubtract, unit));
}
//-------------------------------------------------------------------------
@Override // for covariant return type
@SuppressWarnings("unchecked")
public ChronoLocalDateTime<HybridDate> atTime(LocalTime localTime) {
return (ChronoLocalDateTime<HybridDate>) super.atTime(localTime);
}
@Override
public long until(Temporal endExclusive, TemporalUnit unit) {
return super.until(HybridDate.from(endExclusive), unit);
}
@Override
public ChronoPeriod until(ChronoLocalDate endDateExclusive) {
HybridDate end = HybridDate.from(endDateExclusive);
long totalMonths = end.getProlepticMonth() - this.getProlepticMonth(); // safe
int days = end.getDayOfMonth() - this.getDayOfMonth();
if (totalMonths == 0 && isCutoverMonth()) {
if (julianDate != null && end.julianDate == null) {
days -= CUTOVER_DAYS;
} else if (julianDate == null && end.julianDate != null) {
days += CUTOVER_DAYS;
}
} else if (totalMonths > 0) {
if (julianDate != null && end.julianDate == null) {
AbstractDate calcDate = this.plusMonths(totalMonths);
days = (int) (end.toEpochDay() - calcDate.toEpochDay()); // safe
}
if (days < 0) {
totalMonths--;
AbstractDate calcDate = this.plusMonths(totalMonths);
days = (int) (end.toEpochDay() - calcDate.toEpochDay()); // safe
}
} else if (totalMonths < 0 && days > 0) {
totalMonths++;
AbstractDate calcDate = this.plusMonths(totalMonths);
days = (int) (end.toEpochDay() - calcDate.toEpochDay()); // safe
}
int years = Math.toIntExact(totalMonths / lengthOfYearInMonths()); // safe
int months = (int) (totalMonths % lengthOfYearInMonths()); // safe
return getChronology().period(years, months, days);
}
//-----------------------------------------------------------------------
@Override
public long toEpochDay() {
return isoDate.toEpochDay();
}
@SuppressWarnings("unchecked")
@Override
public <R> R query(TemporalQuery<R> query) {
if (query == TemporalQueries.localDate()) {
return (R) isoDate;
}
return super.query(query);
}
//-------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof HybridDate) {
HybridDate otherDate = (HybridDate) obj;
return this.isoDate.equals(otherDate.isoDate);
}
return false;
}
/**
* A hash code for this date.
*
* @return a suitable hash code based only on the Chronology and the date
*/
@Override
public int hashCode() {
return getChronology().getId().hashCode() ^ isoDate.hashCode();
}
}
| 18,818 | 33.59375 | 96 | java |
null | orc-main/java/core/src/test/org/apache/orc/StringDictTestingUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.orc.impl.Dictionary;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * The utility class for testing different implementations of dictionary.
*/
public class StringDictTestingUtils {
private StringDictTestingUtils() {
    // Avoid accidental instantiation
}
public static void checkContents(Dictionary dict, int[] order, String... params)
throws IOException {
dict.visit(new MyVisitor(params, order));
}
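  // Illustrative sketch (not part of the original source): a hypothetical check
  // of a Dictionary under test. The varargs give the words in the dictionary's
  // visit order and the int array gives each word's original insertion position.
  //
  //   StringDictTestingUtils.checkContents(dict, new int[]{1, 0}, "apple", "pear");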
public static class MyVisitor implements Dictionary.Visitor {
private final String[] words;
private final int[] order;
private final DataOutputBuffer buffer = new DataOutputBuffer();
int current = 0;
MyVisitor(String[] args, int[] order) {
words = args;
this.order = order;
}
@Override
public void visit(Dictionary.VisitorContext context)
throws IOException {
String word = context.getText().toString();
assertEquals(words[current], word, "in word " + current);
assertEquals(order[current], context.getOriginalPosition(), "in word " + current);
buffer.reset();
context.writeBytes(buffer);
assertEquals(word, new String(buffer.getData(), 0, buffer.getLength(), StandardCharsets.UTF_8));
current += 1;
}
}
}
| 2,219 | 32.636364 | 102 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestColumnStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.impl.ColumnStatisticsImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test ColumnStatisticsImpl for ORC.
*/
public class TestColumnStatistics {
@Test
public void testLongSumOverflow() {
TypeDescription schema = TypeDescription.createInt();
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(schema);
stats.updateInteger(1, 1);
assertTrue(((IntegerColumnStatistics) stats).isSumDefined());
stats.updateInteger(Long.MAX_VALUE, 3);
assertFalse(((IntegerColumnStatistics) stats).isSumDefined());
}
@Test
public void testLongMerge() throws Exception {
TypeDescription schema = TypeDescription.createInt();
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateInteger(10, 2);
stats2.updateInteger(1, 1);
stats2.updateInteger(1000, 1);
stats1.merge(stats2);
IntegerColumnStatistics typed = (IntegerColumnStatistics) stats1;
assertEquals(1, typed.getMinimum());
assertEquals(1000, typed.getMaximum());
stats1.reset();
stats1.updateInteger(-10, 1);
stats1.updateInteger(10000, 1);
stats1.merge(stats2);
assertEquals(-10, typed.getMinimum());
assertEquals(10000, typed.getMaximum());
}
@Test
public void testDoubleMerge() throws Exception {
TypeDescription schema = TypeDescription.createDouble();
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateDouble(10.0);
stats1.updateDouble(100.0);
stats2.updateDouble(1.0);
stats2.updateDouble(1000.0);
stats1.merge(stats2);
DoubleColumnStatistics typed = (DoubleColumnStatistics) stats1;
assertEquals(1.0, typed.getMinimum(), 0.001);
assertEquals(1000.0, typed.getMaximum(), 0.001);
stats1.reset();
stats1.updateDouble(-10);
stats1.updateDouble(10000);
stats1.merge(stats2);
assertEquals(-10, typed.getMinimum(), 0.001);
assertEquals(10000, typed.getMaximum(), 0.001);
}
@Test
public void testStringMerge() throws Exception {
TypeDescription schema = TypeDescription.createString();
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.increment(3);
stats1.updateString(new Text("bob"));
stats1.updateString(new Text("david"));
stats1.updateString(new Text("charles"));
stats2.increment(2);
stats2.updateString(new Text("anne"));
byte[] erin = new byte[]{0, 1, 2, 3, 4, 5, 101, 114, 105, 110};
stats2.increment();
stats2.updateString(erin, 6, 4, 5);
assertEquals(24, ((StringColumnStatistics)stats2).getSum());
stats1.merge(stats2);
StringColumnStatistics typed = (StringColumnStatistics) stats1;
assertEquals("anne", typed.getMinimum());
assertEquals("erin", typed.getMaximum());
assertEquals(39, typed.getSum());
stats1.reset();
stats1.increment(2);
stats1.updateString(new Text("aaa"));
stats1.updateString(new Text("zzz"));
stats1.merge(stats2);
assertEquals("aaa", typed.getMinimum());
assertEquals("zzz", typed.getMaximum());
}
@Test
public void testUpperAndLowerBounds() throws Exception {
final TypeDescription schema = TypeDescription.createString();
final String test = RandomStringUtils.random(1024+10);
final String fragment = "foo"+test;
final String fragmentLowerBound = "bar"+test;
final ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
final ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
/* test a scenario for the first max string */
stats1.increment();
stats1.updateString(new Text(test));
final StringColumnStatistics typed = (StringColumnStatistics) stats1;
final StringColumnStatistics typed2 = (StringColumnStatistics) stats2;
assertTrue(1024 >= typed.getUpperBound().getBytes(StandardCharsets.UTF_8).length,
"Upperbound cannot be more than 1024 bytes");
assertTrue(1024 >= typed.getLowerBound().getBytes(StandardCharsets.UTF_8).length,
"Lowerbound cannot be more than 1024 bytes");
assertNull(typed.getMinimum());
assertNull(typed.getMaximum());
stats1.reset();
/* test a scenario for the first max bytes */
stats1.increment();
stats1.updateString(test.getBytes(StandardCharsets.UTF_8), 0,
test.getBytes(StandardCharsets.UTF_8).length, 0);
assertTrue(1024 >= typed.getLowerBound().getBytes(StandardCharsets.UTF_8).length,
"Lowerbound cannot be more than 1024 bytes");
assertTrue(1024 >= typed.getUpperBound().getBytes(StandardCharsets.UTF_8).length,
"Upperbound cannot be more than 1024 bytes");
assertNull(typed.getMinimum());
assertNull(typed.getMaximum());
stats1.reset();
/* test upper bound - merging */
stats1.increment(3);
stats1.updateString(new Text("bob"));
stats1.updateString(new Text("david"));
stats1.updateString(new Text("charles"));
stats2.increment(2);
stats2.updateString(new Text("anne"));
stats2.updateString(new Text(fragment));
assertEquals("anne", typed2.getMinimum());
assertNull(typed2.getMaximum());
stats1.merge(stats2);
assertEquals("anne", typed.getMinimum());
assertNull(typed.getMaximum());
/* test lower bound - merging */
stats1.reset();
stats2.reset();
stats1.increment(2);
stats1.updateString(new Text("david"));
stats1.updateString(new Text("charles"));
stats2.increment(2);
stats2.updateString(new Text("jane"));
stats2.updateString(new Text(fragmentLowerBound));
stats1.merge(stats2);
assertNull(typed.getMinimum());
assertEquals("jane", typed.getMaximum());
}
/**
* Test the string truncation with 1 byte characters. The last character
* of the truncated string is 0x7f so that it will expand into a 2 byte
* utf-8 character.
*/
@Test
public void testBoundsAscii() {
StringBuilder buffer = new StringBuilder();
for(int i=0; i < 256; ++i) {
buffer.append("Owe\u007fn");
}
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(
TypeDescription.createString());
stats.increment();
stats.updateString(new Text(buffer.toString()));
StringColumnStatistics stringStats = (StringColumnStatistics) stats;
// make sure that the min/max are null
assertNull(stringStats.getMinimum());
assertNull(stringStats.getMaximum());
assertEquals(5 * 256, stringStats.getSum());
// and that the lower and upper bound are correct
assertEquals(buffer.substring(0, 1024), stringStats.getLowerBound());
assertEquals("Owe\u0080", stringStats.getUpperBound().substring(1020));
assertEquals("count: 1 hasNull: false lower: " + stringStats.getLowerBound()
+ " upper: " + stringStats.getUpperBound() + " sum: 1280",
stringStats.toString());
// make sure that when we replace the min & max the flags get cleared.
stats.increment();
stats.updateString(new Text("xxx"));
assertEquals("xxx", stringStats.getMaximum());
assertEquals("xxx", stringStats.getUpperBound());
stats.increment();
stats.updateString(new Text("A"));
assertEquals("A", stringStats.getMinimum());
assertEquals("A", stringStats.getLowerBound());
assertEquals("count: 3 hasNull: false min: A max: xxx sum: 1284",
stats.toString());
}
/**
* Test truncation with 2 byte utf-8 characters.
*/
@Test
public void testBoundsTwoByte() {
StringBuilder buffer = new StringBuilder();
final String PATTERN = "\u0080\u07ff\u0432\u0246\u0123";
for(int i=0; i < 256; ++i) {
buffer.append(PATTERN);
}
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(
TypeDescription.createString());
stats.increment();
stats.updateString(new Text(buffer.toString()));
StringColumnStatistics stringStats = (StringColumnStatistics) stats;
// make sure that the min/max are null
assertNull(stringStats.getMinimum());
assertNull(stringStats.getMaximum());
assertEquals(2 * 5 * 256, stringStats.getSum());
// and that the lower and upper bound are correct
// 512 two byte characters fit in 1024 bytes
assertEquals(buffer.substring(0, 512), stringStats.getLowerBound());
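    // the upper bound keeps the first 511 characters unchanged and increments the
    // 512th character from \u07ff to \u0800, as verified below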
assertEquals(buffer.substring(0, 511),
stringStats.getUpperBound().substring(0, 511));
assertEquals("\u0800", stringStats.getUpperBound().substring(511));
}
/**
* Test truncation with 3 byte utf-8 characters.
*/
@Test
public void testBoundsThreeByte() {
StringBuilder buffer = new StringBuilder();
final String PATTERN = "\uffff\u0800\u4321\u1234\u3137";
for(int i=0; i < 256; ++i) {
buffer.append(PATTERN);
}
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(
TypeDescription.createString());
stats.increment();
stats.updateString(new Text(buffer.toString()));
StringColumnStatistics stringStats = (StringColumnStatistics) stats;
// make sure that the min/max are null
assertNull(stringStats.getMinimum());
assertNull(stringStats.getMaximum());
assertEquals(3 * 5 * 256, stringStats.getSum());
// and that the lower and upper bound are correct
// 341 three byte characters fit in 1024 bytes
assertEquals(buffer.substring(0, 341), stringStats.getLowerBound());
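    // incrementing the last kept character \uffff rolls over into the supplementary
    // code point U+10000, which Java represents as the surrogate pair \ud800\udc00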
assertEquals(buffer.substring(0, 340),
stringStats.getUpperBound().substring(0,340));
assertEquals("\ud800\udc00", stringStats.getUpperBound().substring(340));
}
/**
* Test truncation with 4 byte utf-8 characters.
*/
@Test
public void testBoundsFourByte() {
StringBuilder buffer = new StringBuilder();
final String PATTERN = "\ud800\udc00\ud801\udc01\ud802\udc02\ud803\udc03\ud804\udc04";
for(int i=0; i < 256; ++i) {
buffer.append(PATTERN);
}
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(
TypeDescription.createString());
stats.increment();
stats.updateString(new Text(buffer.toString()));
StringColumnStatistics stringStats = (StringColumnStatistics) stats;
// make sure that the min/max are null
assertNull(stringStats.getMinimum());
assertNull(stringStats.getMaximum());
assertEquals(4 * 5 * 256, stringStats.getSum());
// and that the lower and upper bound are correct
// 256 four byte characters fit in 1024 bytes
assertEquals(buffer.substring(0, 512), stringStats.getLowerBound());
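    // the upper bound keeps the first 255 code points and increments the next one
    // from U+10000 to U+10001 (escaped below as \uD800\uDC01)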
assertEquals(buffer.substring(0, 510),
stringStats.getUpperBound().substring(0, 510));
assertEquals("\\uD800\\uDC01",
StringEscapeUtils.escapeJava(stringStats.getUpperBound().substring(510)));
}
@Test
public void testUpperBoundCodepointIncrement() {
/* test with characters that use more than one byte */
final String fragment = "載記応存環敢辞月発併際岩。外現抱疑曲旧持九柏先済索。"
+ "富扁件戒程少交文相修宮由改価苦。位季供幾日本求知集機所江取号均下犯変第勝。"
+ "管今文図石職常暮海営感覧果賞挙。難加判郵年太願会周面市害成産。"
+ "内分載函取片領披見復来車必教。元力理関未法会伊団万球幕点帳幅為都話間。"
+ "親禁感栗合開注読月島月紀間卒派伏闘。幕経阿刊間都紹知禁追半業。"
+ "根案協話射格治位相機遇券外野何。話第勝平当降負京複掲書変痛。"
+ "博年群辺軽妻止和真権暑着要質在破応。"
+ "नीचे मुक्त बिन्दुओ समस्याओ आंतरकार्यक्षमता सुना प्रति सभीकुछ यायेका दिनांक वातावरण ";
final String input = fragment
+ "मुश्किले केन्द्रिय "
+ "लगती नवंबर प्रमान गयेगया समस्याओ विश्व लिये समजते आपके एकत्रित विकेन्द्रित स्वतंत्र "
+ "व्याख्यान भेदनक्षमता शीघ्र होभर मुखय करता। दर्शाता वातावरण विस्तरणक्षमता दोषसके प्राप्त समाजो "
+ "।क तकनीकी दर्शाता कार्यकर्ता बाधा औषधिक समस्याओ समस्याए गोपनीयता प्राण पसंद "
+ "भीयह नवंबर दोषसके अनुवादक सोफ़तवेर समस्याए क्षमता। कार्य होभर\n";
final String lowerBound = fragment +
"मुश्किले केन्द्रिय लगती नवंबर प्रमान गयेगया समस्याओ विश्व लिये ";
final String upperbound = fragment +
"मुश्किले केन्द्रिय लगती नवंबर प्रमान गयेगया समस्याओ विश्व लिये!";
final TypeDescription schema = TypeDescription.createString();
final ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
byte[] utf8 = input.getBytes(StandardCharsets.UTF_8);
stats1.updateString(utf8, 0, utf8.length, 1);
stats1.increment();
final StringColumnStatistics typed = (StringColumnStatistics) stats1;
assertEquals(354, typed.getUpperBound().length());
assertEquals(354, typed.getLowerBound().length());
assertEquals(1764L, typed.getSum());
assertEquals(upperbound, typed.getUpperBound());
assertEquals(lowerBound, typed.getLowerBound());
OrcProto.ColumnStatistics serial = stats1.serialize().build();
ColumnStatisticsImpl stats2 =
ColumnStatisticsImpl.deserialize(schema, serial);
StringColumnStatistics typed2 = (StringColumnStatistics) stats2;
assertNull(typed2.getMinimum());
assertNull(typed2.getMaximum());
assertEquals(lowerBound, typed2.getLowerBound());
assertEquals(upperbound, typed2.getUpperBound());
assertEquals(1764L, typed2.getSum());
}
@Test
public void testDateMerge() throws Exception {
TypeDescription schema = TypeDescription.createDate();
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateDate(new DateWritable(1000));
stats1.updateDate(new DateWritable(100));
stats1.increment(2);
stats2.updateDate(new DateWritable(10));
stats2.updateDate(new DateWritable(2000));
stats2.increment(2);
stats1.merge(stats2);
DateColumnStatistics typed = (DateColumnStatistics) stats1;
assertEquals(new DateWritable(10).get(), typed.getMinimum());
assertEquals(new DateWritable(2000).get(), typed.getMaximum());
stats1.reset();
stats1.updateDate(new DateWritable(-10));
stats1.updateDate(new DateWritable(10000));
stats1.increment(2);
stats1.merge(stats2);
assertEquals(new DateWritable(-10).get(), typed.getMinimum());
assertEquals(new DateWritable(10000).get(), typed.getMaximum());
}
@Test
public void testLocalDateMerge() throws Exception {
TypeDescription schema = TypeDescription.createDate();
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateDate(1000);
stats1.updateDate(100);
stats1.increment(2);
stats2.updateDate(10);
stats2.updateDate(2000);
stats2.increment(2);
stats1.merge(stats2);
DateColumnStatistics typed = (DateColumnStatistics) stats1;
assertEquals(10, typed.getMinimumDayOfEpoch());
assertEquals(2000, typed.getMaximumDayOfEpoch());
stats1.reset();
stats1.updateDate(-10);
stats1.updateDate(10000);
stats1.increment(2);
stats1.merge(stats2);
assertEquals(-10, typed.getMinimumLocalDate().toEpochDay());
assertEquals(10000, typed.getMaximumLocalDate().toEpochDay());
}
@Test
public void testTimestampMergeUTC() throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateTimestamp(new Timestamp(10));
stats1.updateTimestamp(new Timestamp(100));
stats2.updateTimestamp(new Timestamp(1));
stats2.updateTimestamp(new Timestamp(1000));
stats1.increment(2);
stats2.increment(2);
stats1.merge(stats2);
TimestampColumnStatistics typed = (TimestampColumnStatistics) stats1;
assertEquals(1, typed.getMinimum().getTime());
assertEquals(1000, typed.getMaximum().getTime());
stats1.reset();
stats1.updateTimestamp(new Timestamp(-10));
stats1.updateTimestamp(new Timestamp(10000));
stats1.increment(2);
stats1.merge(stats2);
assertEquals(-10, typed.getMinimum().getTime());
assertEquals(10000, typed.getMaximum().getTime());
TimeZone.setDefault(original);
}
private static final String TIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
private static Timestamp parseTime(SimpleDateFormat format, String value) {
try {
return new Timestamp(format.parse(value).getTime());
} catch (ParseException e) {
throw new IllegalArgumentException("bad time parse for " + value, e);
}
}
@Test
public void testTimestampMergeLA() throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
SimpleDateFormat format = new SimpleDateFormat(TIME_FORMAT);
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateTimestamp(parseTime(format, "2000-04-02 03:30:00"));
stats1.updateTimestamp(parseTime(format, "2000-04-02 01:30:00"));
stats1.increment(2);
stats2.updateTimestamp(parseTime(format, "2000-10-29 01:30:00"));
stats2.updateTimestamp(parseTime(format, "2000-10-29 03:30:00"));
stats2.increment(2);
TimestampColumnStatistics typed = (TimestampColumnStatistics) stats1;
assertEquals("2000-04-02 01:30:00.0", typed.getMinimum().toString());
assertEquals("2000-04-02 03:30:00.0", typed.getMaximum().toString());
stats1.merge(stats2);
assertEquals("2000-04-02 01:30:00.0", typed.getMinimum().toString());
assertEquals("2000-10-29 03:30:00.0", typed.getMaximum().toString());
stats1.reset();
stats1.updateTimestamp(parseTime(format, "1999-04-04 00:00:00"));
stats1.updateTimestamp(parseTime(format, "2009-03-08 12:00:00"));
stats1.increment(2);
stats1.merge(stats2);
assertEquals("1999-04-04 00:00:00.0", typed.getMinimum().toString());
assertEquals("2009-03-08 12:00:00.0", typed.getMaximum().toString());
// serialize and read back in with phoenix timezone
OrcProto.ColumnStatistics serial = stats2.serialize().build();
TimeZone.setDefault(TimeZone.getTimeZone("America/Phoenix"));
ColumnStatisticsImpl stats3 = ColumnStatisticsImpl.deserialize(schema, serial);
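    // the deserialized statistics should report the same wall-clock min/max even though
    // the reader's default time zone is now America/Phoenix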
assertEquals("2000-10-29 01:30:00.0",
((TimestampColumnStatistics) stats3).getMinimum().toString());
assertEquals("2000-10-29 03:30:00.0",
((TimestampColumnStatistics) stats3).getMaximum().toString());
TimeZone.setDefault(original);
}
@Test
public void testTimestampNanoPrecision() {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
stats1.updateTimestamp(Timestamp.valueOf( "2000-04-02 03:31:00.000"));
stats1.increment(1);
TimestampColumnStatistics typed = (TimestampColumnStatistics) stats1;
// Base case no nanos
assertEquals("2000-04-02 03:31:00.0", typed.getMinimum().toString());
assertEquals("2000-04-02 03:31:00.0", typed.getMaximum().toString());
// Add nano precision to min
stats1.updateTimestamp(Timestamp.valueOf( "2000-04-01 03:30:00.0005"));
stats1.increment(1);
assertEquals("2000-04-01 03:30:00.0005", typed.getMinimum().toString());
assertEquals("2000-04-02 03:31:00.0", typed.getMaximum().toString());
// Change max with precision
stats1.updateTimestamp(Timestamp.valueOf( "2000-04-04 03:30:00.0008"));
stats1.increment(1);
assertEquals("2000-04-01 03:30:00.0005", typed.getMinimum().toString());
assertEquals("2000-04-04 03:30:00.0008", typed.getMaximum().toString());
// Equal min with nano diff
stats1.updateTimestamp(Timestamp.valueOf( "2000-04-04 03:30:00.0009"));
stats1.increment(1);
assertEquals("2000-04-01 03:30:00.0005", typed.getMinimum().toString());
assertEquals("2000-04-04 03:30:00.0009", typed.getMaximum().toString());
// Test serialisation/deserialisation
OrcProto.ColumnStatistics serial = stats1.serialize().build();
ColumnStatisticsImpl stats2 =
ColumnStatisticsImpl.deserialize(schema, serial);
TimestampColumnStatistics typed2 = (TimestampColumnStatistics) stats2;
assertEquals("2000-04-01 03:30:00.0005", typed2.getMinimum().toString());
assertEquals("2000-04-04 03:30:00.0009", typed2.getMaximum().toString());
assertEquals(4L, typed2.getNumberOfValues());
TimeZone.setDefault(original);
}
@Test
public void testTimestampNanoPrecisionMerge() {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateTimestamp(Timestamp.valueOf("2000-04-02 03:30:00.0001"));
stats1.updateTimestamp(Timestamp.valueOf( "2000-04-02 01:30:00.0009"));
stats1.increment(2);
stats2.updateTimestamp(Timestamp.valueOf( "2000-04-02 01:30:00.00088"));
stats2.updateTimestamp(Timestamp.valueOf( "2000-04-02 03:30:00.00001"));
stats2.increment(2);
TimestampColumnStatistics typed = (TimestampColumnStatistics) stats1;
assertEquals("2000-04-02 01:30:00.0009", typed.getMinimum().toString());
assertEquals("2000-04-02 03:30:00.0001", typed.getMaximum().toString());
TimestampColumnStatistics typed2 = (TimestampColumnStatistics) stats2;
assertEquals("2000-04-02 03:30:00.00001", typed2.getMaximum().toString());
assertEquals("2000-04-02 01:30:00.00088", typed2.getMinimum().toString());
// make sure merge goes down to ns precision
stats1.merge(stats2);
assertEquals("2000-04-02 01:30:00.00088", typed.getMinimum().toString());
assertEquals("2000-04-02 03:30:00.0001", typed.getMaximum().toString());
stats1.reset();
assertNull(typed.getMinimum());
assertNull(typed.getMaximum());
stats1.updateTimestamp(Timestamp.valueOf( "1999-04-04 00:00:00.000231"));
stats1.updateTimestamp(Timestamp.valueOf( "2009-03-08 12:00:00.000654"));
stats1.increment(2);
stats1.merge(stats2);
assertEquals("1999-04-04 00:00:00.000231", typed.getMinimum().toString());
assertEquals("2009-03-08 12:00:00.000654", typed.getMaximum().toString());
TimeZone.setDefault(original);
}
@Test
public void testNegativeTimestampNanoPrecision() {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateTimestamp(Timestamp.valueOf("1960-04-02 03:30:00.0001"));
stats1.updateTimestamp(Timestamp.valueOf( "1969-12-31 16:00:00.0009"));
stats1.increment(2);
stats2.updateTimestamp(Timestamp.valueOf( "1962-04-02 01:30:00.00088"));
stats2.updateTimestamp(Timestamp.valueOf( "1969-12-31 16:00:00.00001"));
stats2.increment(2);
stats1.merge(stats2);
TimestampColumnStatistics typed = (TimestampColumnStatistics) stats1;
assertEquals("1960-04-02 03:30:00.0001", typed.getMinimum().toString());
assertEquals("1969-12-31 16:00:00.0009", typed.getMaximum().toString());
stats1.reset();
assertNull(typed.getMinimum());
assertNull(typed.getMaximum());
stats1.updateTimestamp(Timestamp.valueOf("1969-12-31 15:00:00.0005"));
stats1.increment(1);
assertEquals("1969-12-31 15:00:00.0005", typed.getMinimum().toString());
assertEquals("1969-12-31 15:00:00.0005", typed.getMaximum().toString());
stats1.updateTimestamp(Timestamp.valueOf("1969-12-31 15:00:00.00055"));
stats1.increment(1);
assertEquals("1969-12-31 15:00:00.0005", typed.getMinimum().toString());
assertEquals("1969-12-31 15:00:00.00055", typed.getMaximum().toString());
TimeZone.setDefault(original);
}
@Test
public void testDecimalMerge() throws Exception {
TypeDescription schema = TypeDescription.createDecimal()
.withPrecision(38).withScale(16);
ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(schema);
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(schema);
stats1.updateDecimal(new HiveDecimalWritable(10));
stats1.updateDecimal(new HiveDecimalWritable(100));
stats2.updateDecimal(new HiveDecimalWritable(1));
stats2.updateDecimal(new HiveDecimalWritable(1000));
stats1.merge(stats2);
DecimalColumnStatistics typed = (DecimalColumnStatistics) stats1;
assertEquals(1, typed.getMinimum().longValue());
assertEquals(1000, typed.getMaximum().longValue());
stats1.reset();
stats1.updateDecimal(new HiveDecimalWritable(-10));
stats1.updateDecimal(new HiveDecimalWritable(10000));
stats1.merge(stats2);
assertEquals(-10, typed.getMinimum().longValue());
assertEquals(10000, typed.getMaximum().longValue());
}
@Test
public void testDecimalMinMaxStatistics() throws Exception {
TypeDescription schema = TypeDescription.fromString("decimal(7,2)");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema));
VectorizedRowBatch batch = schema.createRowBatch();
DecimalColumnVector decimalColumnVector = (DecimalColumnVector) batch.cols[0];
batch.size = 2;
decimalColumnVector.set(0, new HiveDecimalWritable("-99999.99"));
decimalColumnVector.set(1, new HiveDecimalWritable("-88888.88"));
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
DecimalColumnStatistics statistics = (DecimalColumnStatistics) reader.getStatistics()[0];
assertEquals(new BigDecimal("-99999.99"), statistics.getMinimum().bigDecimalValue(),
"Incorrect maximum value");
assertEquals(new BigDecimal("-88888.88"), statistics.getMaximum().bigDecimalValue(),
"Incorrect minimum value");
}
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path(
"TestOrcFile." + testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
}
| 28,685 | 38.841667 | 110 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestCorruptTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
public class TestCorruptTypes {
@Test
public void testIllType() {
testCorruptHelper(OrcProto.Type.Kind.LIST, 0,
"LIST type should contain exactly one subtype but has 0");
testCorruptHelper(OrcProto.Type.Kind.LIST, 2,
"LIST type should contain exactly one subtype but has 2");
testCorruptHelper(OrcProto.Type.Kind.MAP, 1,
"MAP type should contain exactly two subtypes but has 1");
testCorruptHelper(OrcProto.Type.Kind.MAP, 3,
"MAP type should contain exactly two subtypes but has 3");
testCorruptHelper(OrcProto.Type.Kind.UNION, 0,
"UNION type should contain at least one subtype but has none");
}
private void testCorruptHelper(OrcProto.Type.Kind type,
int subTypesCnt,
String errMsg) {
List<OrcProto.Type> types = new ArrayList<OrcProto.Type>();
OrcProto.Type.Builder builder = OrcProto.Type.newBuilder().setKind(type);
for (int i = 0; i < subTypesCnt; ++i) {
builder.addSubtypes(i + 2);
}
types.add(builder.build());
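    // converting the malformed type should be rejected with the expected message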
try {
OrcUtils.convertTypeFromProtobuf(types, 0);
fail("Should throw FileFormatException for ill types");
} catch (FileFormatException e) {
assertEquals(errMsg, e.getMessage());
} catch (Throwable e) {
fail("Should only trow FileFormatException for ill types");
}
}
}
| 2,446 | 37.234375 | 77 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestInMemoryKeystore.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.io.BytesWritable;
import org.apache.orc.impl.HadoopShims;
import org.apache.orc.impl.LocalKey;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.Key;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
* Test {@link InMemoryKeystore} class
*/
public class TestInMemoryKeystore {
private InMemoryKeystore memoryKeystore;
public TestInMemoryKeystore() {
super();
}
@BeforeEach
public void init() throws IOException {
// For testing, use a fixed random number generator so that everything
// is repeatable.
Random random = new Random(2);
memoryKeystore =
new InMemoryKeystore(random)
.addKey("key128", EncryptionAlgorithm.AES_CTR_128, "123".getBytes(StandardCharsets.UTF_8))
.addKey("key256", EncryptionAlgorithm.AES_CTR_256, "secret123".getBytes(StandardCharsets.UTF_8))
.addKey("key256short", EncryptionAlgorithm.AES_CTR_256, "5".getBytes(StandardCharsets.UTF_8));
}
private static String stringify(byte[] buffer) {
return new BytesWritable(buffer).toString();
}
@Test
public void testGetKeyNames() {
assertTrue(memoryKeystore.getKeyNames().contains("key128"));
assertTrue(memoryKeystore.getKeyNames().contains("key256"));
assertTrue(memoryKeystore.getKeyNames().contains("key256short"));
}
@Test
public void testGetCurrentKeyVersion() {
final HadoopShims.KeyMetadata metadata = memoryKeystore
.getCurrentKeyVersion("key256");
assertEquals("key256", metadata.getKeyName());
if (InMemoryKeystore.SUPPORTS_AES_256) {
assertEquals(EncryptionAlgorithm.AES_CTR_256, metadata.getAlgorithm());
} else {
assertEquals(EncryptionAlgorithm.AES_CTR_128, metadata.getAlgorithm());
}
assertEquals(0, metadata.getVersion());
}
@Test
public void testGetLocalKey() {
HadoopShims.KeyMetadata metadata128 = memoryKeystore
.getCurrentKeyVersion("key128");
LocalKey key128 = memoryKeystore.createLocalKey(metadata128);
// we are sure the key is the same because of the random generator.
assertEquals("39 72 2c bb f8 b9 1a 4b 90 45 c5 e6 17 5f 10 01",
stringify(key128.getEncryptedKey()));
assertEquals("46 33 66 fd 79 57 66 9a ba 4a 28 df bf 16 f2 88",
stringify(key128.getDecryptedKey().getEncoded()));
// used online aes/cbc calculator to encrypt key
assertEquals("AES", key128.getDecryptedKey().getAlgorithm());
// now decrypt the key again
Key decryptKey = memoryKeystore.decryptLocalKey(metadata128,
key128.getEncryptedKey());
assertEquals(stringify(key128.getDecryptedKey().getEncoded()),
stringify(decryptKey.getEncoded()));
HadoopShims.KeyMetadata metadata256 = memoryKeystore
.getCurrentKeyVersion("key256");
LocalKey key256 = memoryKeystore.createLocalKey(metadata256);
// this is forced by the fixed Random in the keystore for this test
if (InMemoryKeystore.SUPPORTS_AES_256) {
assertEquals("ea c3 2f 7f cd 5e cc da 5c 6e 62 fc 4e 63 85 08 0f " +
"7b 6c db 79 e5 51 ec 9c 9c c7 fc bd 60 ee 73",
stringify(key256.getEncryptedKey()));
// used online aes/cbc calculator to encrypt key
assertEquals("00 b0 1c 24 d9 03 bc 02 63 87 b3 f9 65 4e e7 a8 b8" +
" 58 eb a0 81 06 b3 61 cf f8 06 ba 30 d4 c5 36",
stringify(key256.getDecryptedKey().getEncoded()));
} else {
assertEquals("ea c3 2f 7f cd 5e cc da 5c 6e 62 fc 4e 63 85 08",
stringify(key256.getEncryptedKey()));
assertEquals("6d 1c ff 55 a5 44 75 11 fb e6 8e 08 cd 2a 10 e8",
stringify(key256.getDecryptedKey().getEncoded()));
}
assertEquals("AES", key256.getDecryptedKey().getAlgorithm());
// now decrypt the key again
decryptKey = memoryKeystore.decryptLocalKey(metadata256, key256.getEncryptedKey());
assertEquals(stringify(key256.getDecryptedKey().getEncoded()),
stringify(decryptKey.getEncoded()));
}
@Test
public void testRollNewVersion() throws IOException {
assertEquals(0,
memoryKeystore.getCurrentKeyVersion("key128").getVersion());
memoryKeystore.addKey("key128", 1, EncryptionAlgorithm.AES_CTR_128, "NewSecret".getBytes(StandardCharsets.UTF_8));
assertEquals(1,
memoryKeystore.getCurrentKeyVersion("key128").getVersion());
}
@Test
public void testDuplicateKeyNames() {
try {
memoryKeystore.addKey("key128", 0, EncryptionAlgorithm.AES_CTR_128,
"exception".getBytes(StandardCharsets.UTF_8));
fail("Keys with same name cannot be added.");
} catch (IOException e) {
assertTrue(e.toString().contains("equal or higher version"));
}
}
/**
* This will test:
   * 1. Scenario where a key with a smaller version than the existing one should not be allowed
* 2. Test multiple versions of the key
* 3. Test get current version
* 4. Ensure the different versions of the key have different material.
*/
@Test
public void testMultipleVersion() throws IOException {
assertEquals(0,
memoryKeystore.getCurrentKeyVersion("key256").getVersion());
memoryKeystore.addKey("key256", 1, EncryptionAlgorithm.AES_CTR_256,
"NewSecret".getBytes(StandardCharsets.UTF_8));
assertEquals(1,
memoryKeystore.getCurrentKeyVersion("key256").getVersion());
try {
memoryKeystore.addKey("key256", 1, EncryptionAlgorithm.AES_CTR_256,
"BadSecret".getBytes(StandardCharsets.UTF_8));
fail("Keys with smaller version should not be added.");
} catch (final IOException e) {
assertTrue(e.toString().contains("equal or higher version"));
}
memoryKeystore.addKey("key256", 2, EncryptionAlgorithm.AES_CTR_256,
"NewerSecret".getBytes(StandardCharsets.UTF_8));
assertEquals(2,
memoryKeystore.getCurrentKeyVersion("key256").getVersion());
// make sure that all 3 versions of key256 exist and have different secrets
Key key0 = memoryKeystore.decryptLocalKey(
new HadoopShims.KeyMetadata("key256", 0, EncryptionAlgorithm.AES_CTR_256),
new byte[16]);
Key key1 = memoryKeystore.decryptLocalKey(
new HadoopShims.KeyMetadata("key256", 1, EncryptionAlgorithm.AES_CTR_256),
new byte[16]);
Key key2 = memoryKeystore.decryptLocalKey(
new HadoopShims.KeyMetadata("key256", 2, EncryptionAlgorithm.AES_CTR_256),
new byte[16]);
assertNotEquals(new BytesWritable(key0.getEncoded()).toString(),
new BytesWritable(key1.getEncoded()).toString());
assertNotEquals(new BytesWritable(key1.getEncoded()).toString(),
new BytesWritable(key2.getEncoded()).toString());
}
}
| 7,873 | 37.409756 | 118 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestMinSeekSize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestMinSeekSize {
private static final Logger LOG = LoggerFactory.getLogger(TestMinSeekSize.class);
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static final Path filePath = new Path(workDir, "min_seek_size_file.orc");
private static Configuration conf;
private static FileSystem fs;
private static final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
.addField("f3", TypeDescription.createLong())
.addField("f4", TypeDescription.createString())
.addField("ridx", TypeDescription.createLong());
  private static final boolean[] AlternateColumns =
      new boolean[] {true, true, false, true, false, true};
private static final long RowCount = 16384;
private static final int scale = 3;
@BeforeAll
public static void setup() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
LOG.info("Creating file {} with schema {}", filePath, schema);
try (Writer writer = OrcFile.createWriter(filePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.overwrite(true)
.rowIndexStride(8192)
.setSchema(schema))) {
Random rnd = new Random(1024);
VectorizedRowBatch b = schema.createRowBatch();
for (int rowIdx = 0; rowIdx < RowCount; rowIdx++) {
long v = rnd.nextLong();
for (int colIdx = 0; colIdx < schema.getChildren().size() - 1; colIdx++) {
switch (schema.getChildren().get(colIdx).getCategory()) {
case LONG:
((LongColumnVector) b.cols[colIdx]).vector[b.size] = v;
break;
case DECIMAL:
HiveDecimalWritable d = new HiveDecimalWritable();
d.setFromLongAndScale(v, scale);
((DecimalColumnVector) b.cols[colIdx]).vector[b.size] = d;
break;
case STRING:
((BytesColumnVector) b.cols[colIdx]).setVal(b.size,
String.valueOf(v)
.getBytes(StandardCharsets.UTF_8));
break;
default:
throw new IllegalArgumentException();
}
}
// Populate the rowIdx
((LongColumnVector) b.cols[4]).vector[b.size] = rowIdx;
b.size += 1;
if (b.size == b.getMaxSize()) {
writer.addRowBatch(b);
b.reset();
}
}
if (b.size > 0) {
writer.addRowBatch(b);
b.reset();
}
}
LOG.info("Created file {}", filePath);
}
@Test
public void writeIsSuccessful() throws IOException {
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(RowCount, r.getNumberOfRows());
}
private long validateFilteredRecordReader(RecordReader rr, VectorizedRowBatch b)
throws IOException {
long rowCount = 0;
while (rr.nextBatch(b)) {
validateBatch(b, rowCount);
rowCount += b.size;
}
return rowCount;
}
private void validateColumnNull(VectorizedRowBatch b, int colIdx) {
assertFalse(b.cols[colIdx].noNulls);
assertTrue(b.cols[colIdx].isRepeating);
assertTrue(b.cols[colIdx].isNull[0]);
}
private void validateBatch(VectorizedRowBatch b, long expRowNum) {
HiveDecimalWritable d = new HiveDecimalWritable();
validateColumnNull(b, 1);
validateColumnNull(b, 3);
for (int i = 0; i < b.size; i++) {
int rowIdx;
if (b.selectedInUse) {
rowIdx = b.selected[i];
} else {
rowIdx = i;
}
long expValue = ((LongColumnVector) b.cols[0]).vector[rowIdx];
d.setFromLongAndScale(expValue, scale);
assertEquals(expValue, ((LongColumnVector) b.cols[2]).vector[rowIdx]);
if (expRowNum != -1) {
assertEquals(expRowNum + i, ((LongColumnVector) b.cols[4]).vector[rowIdx]);
}
}
}
@Test
public void readAlternateColumnsWOMinSeekSize() throws IOException {
readStart();
OrcConf.ORC_MIN_DISK_SEEK_SIZE.setInt(conf, 0);
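    // with no minimum seek size, only the streams of the selected columns are read,
    // so well under the whole file should be touched (asserted as < 60% below)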
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
Reader.Options opts = r.options().include(AlternateColumns);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(opts)) {
rowCount = validateFilteredRecordReader(rr, b);
}
FileSystem.Statistics stats = readEnd();
double p = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p < 60);
}
@Test
public void readAlternateColumnsWMinSeekSize() throws IOException {
readStart();
OrcConf.ORC_MIN_DISK_SEEK_SIZE.setInt(conf, 1024 * 1024);
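    // a 1MB minimum seek size makes the reader read through small gaps between the
    // needed streams instead of seeking past them, so effectively the whole file is read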
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
Reader.Options opts = r.options().include(AlternateColumns);
assertEquals(opts.minSeekSize(), 1024 * 1024);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(opts)) {
rowCount = validateFilteredRecordReader(rr, b);
}
FileSystem.Statistics stats = readEnd();
double p = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
// Read all bytes
assertTrue(p >= 100);
}
private double readPercentage(FileSystem.Statistics stats, long fileSize) {
double p = stats.getBytesRead() * 100.0 / fileSize;
LOG.info(String.format("%nFileSize: %d%nReadSize: %d%nRead %%: %.2f",
fileSize,
stats.getBytesRead(),
p));
return p;
}
private static void readStart() {
FileSystem.clearStatistics();
}
private static FileSystem.Statistics readEnd() {
return FileSystem.getAllStatistics().get(0);
}
}
| 8,161 | 37.682464 | 100 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestNewIntegerEncoding.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.sql.Timestamp;
import java.util.List;
import java.util.Random;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestNewIntegerEncoding {
private static Stream<Arguments> data() {
return Stream.of(
Arguments.of(OrcFile.EncodingStrategy.COMPRESSION),
Arguments.of(OrcFile.EncodingStrategy.SPEED));
}
public static TypeDescription getRowSchema() {
return TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("long1", TypeDescription.createLong());
}
public static void appendRow(VectorizedRowBatch batch,
int int1, long long1) {
int row = batch.size++;
((LongColumnVector) batch.cols[0]).vector[row] = int1;
((LongColumnVector) batch.cols[1]).vector[row] = long1;
}
public static void appendLong(VectorizedRowBatch batch,
long long1) {
int row = batch.size++;
((LongColumnVector) batch.cols[0]).vector[row] = long1;
}
Path workDir = new Path(System.getProperty("test.tmp.dir", "target"
+ File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile."
+ testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@ParameterizedTest
@MethodSource("data")
public void testBasicRow(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema= getRowSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
appendRow(batch, 111, 1111L);
appendRow(batch, 111, 1111L);
appendRow(batch, 111, 1111L);
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(111, ((LongColumnVector) batch.cols[0]).vector[r]);
assertEquals(1111, ((LongColumnVector) batch.cols[1]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testBasicOld(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 10, 9, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1,
2, 5, 1, 3, 7, 1, 9, 2, 6, 3, 7, 1, 9, 2, 6, 3, 7, 1, 9, 2, 6, 3, 7, 1,
9, 2, 6, 3, 7, 1, 9, 2, 6, 2000, 2, 1, 1, 1, 1, 1, 3, 7, 1, 9, 2, 6, 1,
1, 1, 1, 1 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.NONE)
.version(OrcFile.Version.V_0_11)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
int idx = 0;
batch = reader.getSchema().createRowBatch();
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testBasicNew(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 10, 9, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1,
2, 5, 1, 3, 7, 1, 9, 2, 6, 3, 7, 1, 9, 2, 6, 3, 7, 1, 9, 2, 6, 3, 7, 1,
9, 2, 6, 3, 7, 1, 9, 2, 6, 2000, 2, 1, 1, 1, 1, 1, 3, 7, 1, 9, 2, 6, 1,
1, 1, 1, 1 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
int idx = 0;
batch = reader.getSchema().createRowBatch();
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testBasicDelta1(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { -500, -400, -350, -325, -310 };
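    // a monotonically increasing sequence with varying deltas (100, 50, 25, 15),
    // which exercises the delta-style integer encoding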
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testBasicDelta2(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { -500, -600, -650, -675, -710 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testBasicDelta3(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 500, 400, 350, 325, 310 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testBasicDelta4(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 500, 600, 650, 675, 710 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@Test
public void testDeltaOverflow() throws Exception {
TypeDescription schema = TypeDescription.createLong();
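    // some of the differences between adjacent values overflow a signed 64-bit long,
    // so the writer must still round-trip the values correctly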
    long[] inp = new long[]{4513343538618202719L, 4513343538618202711L,
        2911390882471569739L,
        -9181829309989854913L};
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.compress(CompressionKind.NONE).bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile
.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@Test
public void testDeltaOverflow2() throws Exception {
TypeDescription schema = TypeDescription.createLong();
    long[] inp = new long[]{Long.MAX_VALUE, 4513343538618202711L,
        2911390882471569739L,
        Long.MIN_VALUE};
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.compress(CompressionKind.NONE).bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile
.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@Test
public void testDeltaOverflow3() throws Exception {
TypeDescription schema = TypeDescription.createLong();
    long[] inp = new long[]{-4513343538618202711L, -2911390882471569739L, -2,
        Long.MAX_VALUE};
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.compress(CompressionKind.NONE).bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile
.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testIntegerMin(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
input.add((long) Integer.MIN_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testIntegerMax(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
input.add((long) Integer.MAX_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testLongMin(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
input.add(Long.MIN_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testLongMax(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
input.add(Long.MAX_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testRandomInt(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 100000; i++) {
input.add((long) rand.nextInt());
}
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(100000);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testRandomLong(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 100000; i++) {
input.add(rand.nextLong());
}
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(100000);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseNegativeMin(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
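    // mostly small values with a few large outliers (7244, 11813, 25501) and a negative
    // minimum (-13), which is meant to exercise the PATCHED_BASE integer encoding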
long[] inp = new long[] { 20, 2, 3, 2, 1, 3, 17, 71, 35, 2, 1, 139, 2, 2,
3, 1783, 475, 2, 1, 1, 3, 1, 3, 2, 32, 1, 2, 3, 1, 8, 30, 1, 3, 414, 1,
1, 135, 3, 3, 1, 414, 2, 1, 2, 2, 594, 2, 5, 6, 4, 11, 1, 2, 2, 1, 1,
52, 4, 1, 2, 7, 1, 17, 334, 1, 2, 1, 2, 2, 6, 1, 266, 1, 2, 217, 2, 6,
2, 13, 2, 2, 1, 2, 3, 5, 1, 2, 1, 7244, 11813, 1, 33, 2, -13, 1, 2, 3,
13, 1, 92, 3, 13, 5, 14, 9, 141, 12, 6, 15, 25, 1, 1, 1, 46, 2, 1, 1,
141, 3, 1, 1, 1, 1, 2, 1, 4, 34, 5, 78, 8, 1, 2, 2, 1, 9, 10, 2, 1, 4,
13, 1, 5, 4, 4, 19, 5, 1, 1, 1, 68, 33, 399, 1, 1885, 25, 5, 2, 4, 1,
1, 2, 16, 1, 2966, 3, 1, 1, 25501, 1, 1, 1, 66, 1, 3, 8, 131, 14, 5, 1,
2, 2, 1, 1, 8, 1, 1, 2, 1, 5, 9, 2, 3, 112, 13, 2, 2, 1, 5, 10, 3, 1,
1, 13, 2, 3, 4, 1, 3, 1, 1, 2, 1, 1, 2, 4, 2, 207, 1, 1, 2, 4, 3, 3, 2,
2, 16 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseNegativeMin2(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 20, 2, 3, 2, 1, 3, 17, 71, 35, 2, 1, 139, 2, 2,
3, 1783, 475, 2, 1, 1, 3, 1, 3, 2, 32, 1, 2, 3, 1, 8, 30, 1, 3, 414, 1,
1, 135, 3, 3, 1, 414, 2, 1, 2, 2, 594, 2, 5, 6, 4, 11, 1, 2, 2, 1, 1,
52, 4, 1, 2, 7, 1, 17, 334, 1, 2, 1, 2, 2, 6, 1, 266, 1, 2, 217, 2, 6,
2, 13, 2, 2, 1, 2, 3, 5, 1, 2, 1, 7244, 11813, 1, 33, 2, -1, 1, 2, 3,
13, 1, 92, 3, 13, 5, 14, 9, 141, 12, 6, 15, 25, 1, 1, 1, 46, 2, 1, 1,
141, 3, 1, 1, 1, 1, 2, 1, 4, 34, 5, 78, 8, 1, 2, 2, 1, 9, 10, 2, 1, 4,
13, 1, 5, 4, 4, 19, 5, 1, 1, 1, 68, 33, 399, 1, 1885, 25, 5, 2, 4, 1,
1, 2, 16, 1, 2966, 3, 1, 1, 25501, 1, 1, 1, 66, 1, 3, 8, 131, 14, 5, 1,
2, 2, 1, 1, 8, 1, 1, 2, 1, 5, 9, 2, 3, 112, 13, 2, 2, 1, 5, 10, 3, 1,
1, 13, 2, 3, 4, 1, 3, 1, 1, 2, 1, 1, 2, 4, 2, 207, 1, 1, 2, 4, 3, 3, 2,
2, 16 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseNegativeMin3(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 20, 2, 3, 2, 1, 3, 17, 71, 35, 2, 1, 139, 2, 2,
3, 1783, 475, 2, 1, 1, 3, 1, 3, 2, 32, 1, 2, 3, 1, 8, 30, 1, 3, 414, 1,
1, 135, 3, 3, 1, 414, 2, 1, 2, 2, 594, 2, 5, 6, 4, 11, 1, 2, 2, 1, 1,
52, 4, 1, 2, 7, 1, 17, 334, 1, 2, 1, 2, 2, 6, 1, 266, 1, 2, 217, 2, 6,
2, 13, 2, 2, 1, 2, 3, 5, 1, 2, 1, 7244, 11813, 1, 33, 2, 0, 1, 2, 3,
13, 1, 92, 3, 13, 5, 14, 9, 141, 12, 6, 15, 25, 1, 1, 1, 46, 2, 1, 1,
141, 3, 1, 1, 1, 1, 2, 1, 4, 34, 5, 78, 8, 1, 2, 2, 1, 9, 10, 2, 1, 4,
13, 1, 5, 4, 4, 19, 5, 1, 1, 1, 68, 33, 399, 1, 1885, 25, 5, 2, 4, 1,
1, 2, 16, 1, 2966, 3, 1, 1, 25501, 1, 1, 1, 66, 1, 3, 8, 131, 14, 5, 1,
2, 2, 1, 1, 8, 1, 1, 2, 1, 5, 9, 2, 3, 112, 13, 2, 2, 1, 5, 10, 3, 1,
1, 13, 2, 3, 4, 1, 3, 1, 1, 2, 1, 1, 2, 4, 2, 207, 1, 1, 2, 4, 3, 3, 2,
2, 16 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseNegativeMin4(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { 13, 13, 11, 8, 13, 10, 10, 11, 11, 14, 11, 7, 13,
12, 12, 11, 15, 12, 12, 9, 8, 10, 13, 11, 8, 6, 5, 6, 11, 7, 15, 10, 7,
6, 8, 7, 9, 9, 11, 33, 11, 3, 7, 4, 6, 10, 14, 12, 5, 14, 7, 6 };
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
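  // The PatchedBaseAt* tests plant a 20000 outlier among values < 100 and move it to
  // positions 0, 1, 255, 256, 510 and 511, presumably to straddle the 8-bit
  // patch-gap boundary and the end of a 512-value patched-base run.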
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseAt0(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(100));
}
input.set(0, 20000L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseAt1(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(100));
}
input.set(1, 20000L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseAt255(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(100));
}
input.set(255, 20000L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseAt256(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(100));
}
input.set(256, 20000L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBase510(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(100));
}
input.set(510, 20000L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBase511(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(100));
}
input.set(511, 20000L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
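  // The PatchedBaseMax* tests replace selected entries with Long.MAX_VALUE so the
  // patch values themselves need the widest possible bit width.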
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseMax1(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for (int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(60));
}
input.set(511, Long.MAX_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseMax2(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for (int i = 0; i < 5120; i++) {
input.add((long) rand.nextInt(60));
}
input.set(128, Long.MAX_VALUE);
input.set(256, Long.MAX_VALUE);
input.set(511, Long.MAX_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseMax3(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
input.add(371946367L);
input.add(11963367L);
input.add(68639400007L);
input.add(100233367L);
input.add(6367L);
input.add(10026367L);
input.add(3670000L);
input.add(3602367L);
input.add(4719226367L);
input.add(7196367L);
input.add(444442L);
input.add(210267L);
input.add(21033L);
input.add(160267L);
input.add(400267L);
input.add(23634347L);
input.add(16027L);
input.add(46026367L);
input.add(Long.MAX_VALUE);
input.add(33333L);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseMax4(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
for (int i = 0; i < 25; i++) {
input.add(371292224226367L);
input.add(119622332222267L);
input.add(686329400222007L);
input.add(100233333222367L);
input.add(636272333322222L);
input.add(10202633223267L);
input.add(36700222022230L);
input.add(36023226224227L);
input.add(47192226364427L);
input.add(71963622222447L);
input.add(22244444222222L);
input.add(21220263327442L);
input.add(21032233332232L);
input.add(16026322232227L);
input.add(40022262272212L);
input.add(23634342227222L);
input.add(16022222222227L);
input.add(46026362222227L);
input.add(46026362222227L);
input.add(33322222222323L);
}
input.add(Long.MAX_VALUE);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
for (Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
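  // Timestamps are persisted as integer seconds/nanos streams, so this skewed set of
  // dates exercises the same integer-encoding paths through the timestamp writer.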
@ParameterizedTest
@MethodSource("data")
public void testPatchedBaseTimestamp(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("ts", TypeDescription.createTimestamp());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
List<Timestamp> tslist = Lists.newArrayList();
tslist.add(Timestamp.valueOf("2099-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2003-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("1999-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("1995-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2002-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2010-03-02 00:00:00"));
tslist.add(Timestamp.valueOf("2005-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2006-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2003-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("1996-08-02 00:00:00"));
tslist.add(Timestamp.valueOf("1998-11-02 00:00:00"));
tslist.add(Timestamp.valueOf("2008-10-02 00:00:00"));
tslist.add(Timestamp.valueOf("1993-08-02 00:00:00"));
tslist.add(Timestamp.valueOf("2008-01-02 00:00:00"));
tslist.add(Timestamp.valueOf("2007-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2004-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2008-10-02 00:00:00"));
tslist.add(Timestamp.valueOf("2003-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2004-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2008-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2005-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("1994-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2006-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2004-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2001-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2000-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2000-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2002-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2006-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2011-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2002-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("2005-01-01 00:00:00"));
tslist.add(Timestamp.valueOf("1974-01-01 00:00:00"));
int idx = 0;
for (Timestamp ts : tslist) {
((TimestampColumnVector) batch.cols[0]).set(idx++, ts);
}
batch.size = tslist.size();
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
idx = 0;
while (rows.nextBatch(batch)) {
assertEquals(tslist.size(), batch.size);
for(int r=0; r < batch.size; ++r) {
assertEquals(tslist.get(idx++),
((TimestampColumnVector) batch.cols[0]).asScratchTimestamp(r));
}
}
}
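  // Very large negative values need close to the full 64-bit range after zigzag
  // encoding; the five rows are verified individually rather than via a generated list.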
@ParameterizedTest
@MethodSource("data")
public void testDirectLargeNegatives(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch();
appendLong(batch, -7486502418706614742L);
appendLong(batch, 0L);
appendLong(batch, 1L);
appendLong(batch, 1L);
appendLong(batch, -5535739865598783616L);
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(5, batch.size);
assertEquals(-7486502418706614742L,
((LongColumnVector) batch.cols[0]).vector[0]);
assertEquals(0L,
((LongColumnVector) batch.cols[0]).vector[1]);
assertEquals(1L,
((LongColumnVector) batch.cols[0]).vector[2]);
assertEquals(1L,
((LongColumnVector) batch.cols[0]).vector[3]);
assertEquals(-5535739865598783616L,
((LongColumnVector) batch.cols[0]).vector[4]);
assertFalse(rows.nextBatch(batch));
}
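  // Written with version 0.11 (RLE v1), then seekToRow jumps into the middle of the
  // 100k rows to verify that index-based skipping lands on the expected value.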
@ParameterizedTest
@MethodSource("data")
public void testSeek(OrcFile.EncodingStrategy encodingStrategy) throws Exception {
TypeDescription schema = TypeDescription.createLong();
List<Long> input = Lists.newArrayList();
Random rand = new Random();
for(int i = 0; i < 100000; i++) {
input.add((long) rand.nextInt());
}
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.NONE)
.stripeSize(100000)
.bufferSize(10000)
.version(OrcFile.Version.V_0_11)
.encodingStrategy(encodingStrategy));
VectorizedRowBatch batch = schema.createRowBatch(100000);
for(Long l : input) {
appendLong(batch, l);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 55555;
rows.seekToRow(idx);
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
}
| 49,240 | 34.527417 | 103 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcConf.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.Properties;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestOrcConf {
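  // One representative OrcConf entry is pinned per value type (long, boolean, int,
  // String, double) so that each typed getter/setter can be exercised below.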
// Long
public final OrcConf ORC_STRIPE_SIZE_CONF = OrcConf.STRIPE_SIZE;
// Boolean
public final OrcConf ORC_ENABLE_INDEXES_CONF = OrcConf.ENABLE_INDEXES;
  // int
  public final OrcConf ORC_BLOCK_SIZE_CONF = OrcConf.BLOCK_SIZE;
// String
public final OrcConf ORC_COMPRESS_CONF = OrcConf.COMPRESS;
// Double
public final OrcConf ORC_BLOOM_FILTER_FPP_CONF = OrcConf.BLOOM_FILTER_FPP;
@Test
public void testLoadConfFromProperties() {
Properties tableProperties = new Properties();
Configuration conf = new Configuration();
tableProperties.setProperty(ORC_STRIPE_SIZE_CONF.getAttribute(), "1000");
conf.setInt(ORC_STRIPE_SIZE_CONF.getAttribute(), 2000);
assertEquals(1000, ORC_STRIPE_SIZE_CONF.getLong(tableProperties, conf));
tableProperties.setProperty(ORC_BLOCK_SIZE_CONF.getAttribute(), "1000");
conf.setInt(ORC_BLOCK_SIZE_CONF.getAttribute(), 2000);
assertEquals(1000, ORC_BLOCK_SIZE_CONF.getInt(tableProperties, conf));
tableProperties.setProperty(ORC_ENABLE_INDEXES_CONF.getAttribute(), "true");
conf.setBoolean(ORC_ENABLE_INDEXES_CONF.getAttribute(), false);
assertTrue(ORC_ENABLE_INDEXES_CONF.getBoolean(tableProperties, conf));
tableProperties.setProperty(ORC_COMPRESS_CONF.getAttribute(), "snappy2");
conf.set(ORC_COMPRESS_CONF.getAttribute(), "snappy3");
assertEquals("snappy2", ORC_COMPRESS_CONF.getString(tableProperties, conf));
tableProperties.setProperty(ORC_BLOOM_FILTER_FPP_CONF.getAttribute(), "0.5");
conf.setDouble(ORC_BLOOM_FILTER_FPP_CONF.getAttribute(), 0.4);
assertEquals(0.5, ORC_BLOOM_FILTER_FPP_CONF.getDouble(tableProperties, conf));
}
@Test
public void testLoadConfFromConfiguration() {
Properties tableProperties = new Properties();
Configuration conf = new Configuration();
conf.setInt(ORC_STRIPE_SIZE_CONF.getAttribute(), 2000);
assertEquals(2000, ORC_STRIPE_SIZE_CONF.getLong(tableProperties, conf));
conf.setInt(ORC_BLOCK_SIZE_CONF.getAttribute(), 2000);
assertEquals(2000, ORC_BLOCK_SIZE_CONF.getInt(tableProperties, conf));
conf.setBoolean(ORC_ENABLE_INDEXES_CONF.getAttribute(), false);
assertFalse(ORC_ENABLE_INDEXES_CONF.getBoolean(tableProperties, conf));
conf.set(ORC_COMPRESS_CONF.getAttribute(), "snappy3");
assertEquals("snappy3", ORC_COMPRESS_CONF.getString(tableProperties, conf));
conf.setDouble(ORC_BLOOM_FILTER_FPP_CONF.getAttribute(), 0.4);
assertEquals(0.4, ORC_BLOOM_FILTER_FPP_CONF.getDouble(tableProperties, conf));
}
@Test
public void testDefaultValue() {
Properties tableProperties = new Properties();
Configuration conf = new Configuration();
for (OrcConf orcConf : OrcConf.values()) {
if (orcConf.getDefaultValue() instanceof String) {
assertEquals(orcConf.getString(tableProperties, conf), orcConf.getDefaultValue());
}
if (orcConf.getDefaultValue() instanceof Long) {
assertEquals(orcConf.getLong(tableProperties, conf), orcConf.getDefaultValue());
}
if (orcConf.getDefaultValue() instanceof Integer) {
assertEquals(orcConf.getInt(tableProperties, conf), orcConf.getDefaultValue());
}
if (orcConf.getDefaultValue() instanceof Boolean) {
assertEquals(orcConf.getBoolean(tableProperties, conf), orcConf.getDefaultValue());
}
if (orcConf.getDefaultValue() instanceof Double) {
assertEquals(orcConf.getDouble(tableProperties, conf), orcConf.getDefaultValue());
}
}
}
@Test
public void testSetValue() {
Properties tableProperties = new Properties();
Configuration conf = new Configuration();
ORC_STRIPE_SIZE_CONF.setLong(conf, 1000);
assertEquals(1000, ORC_STRIPE_SIZE_CONF.getLong(tableProperties, conf));
ORC_BLOCK_SIZE_CONF.setInt(conf, 2000);
assertEquals(2000, ORC_BLOCK_SIZE_CONF.getLong(tableProperties, conf));
ORC_ENABLE_INDEXES_CONF.setBoolean(conf, false);
assertFalse(ORC_ENABLE_INDEXES_CONF.getBoolean(tableProperties, conf));
ORC_COMPRESS_CONF.setString(conf, "snappy3");
assertEquals("snappy3", ORC_COMPRESS_CONF.getString(tableProperties, conf));
    ORC_BLOOM_FILTER_FPP_CONF.setDouble(conf, 0.4);
    assertEquals(0.4, ORC_BLOOM_FILTER_FPP_CONF.getDouble(tableProperties, conf));
}
@Test
public void testGetHiveConfValue() {
Properties tableProperties = new Properties();
Configuration conf = new Configuration();
conf.setInt(ORC_STRIPE_SIZE_CONF.getHiveConfName(), 2000);
assertEquals(2000, ORC_STRIPE_SIZE_CONF.getLong(tableProperties, conf));
}
@Test
public void testGetStringAsList() {
Configuration conf = new Configuration();
conf.set(ORC_COMPRESS_CONF.getHiveConfName(), "a,,b,c, ,d,");
List<String> valueList1 = ORC_COMPRESS_CONF.getStringAsList(conf);
    assertEquals(4, valueList1.size());
    assertEquals("a", valueList1.get(0));
    assertEquals("b", valueList1.get(1));
    assertEquals("c", valueList1.get(2));
    assertEquals("d", valueList1.get(3));
conf.set(ORC_COMPRESS_CONF.getHiveConfName(), "");
List<String> valueList2 = ORC_COMPRESS_CONF.getStringAsList(conf);
    assertEquals(0, valueList2.size());
conf.set(ORC_COMPRESS_CONF.getHiveConfName(), " abc, efg, ");
List<String> valueList3 = ORC_COMPRESS_CONF.getStringAsList(conf);
    assertEquals(2, valueList3.size());
    assertEquals("abc", valueList3.get(0));
    assertEquals("efg", valueList3.get(1));
}
}
| 6,621 | 38.652695 | 91 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcDSTNoTimezone.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Test over an ORC file that does not store time zone information in the footer
 * and that was written from a time zone observing DST for one of the stored
 * timestamp values ('2014-06-06 12:34:56.0').
*/
public class TestOrcDSTNoTimezone {
Configuration conf;
FileSystem fs;
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S");
static TimeZone defaultTimeZone = TimeZone.getDefault();
@BeforeEach
public void openFileSystem() throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
@ParameterizedTest
@ValueSource(strings = {"America/Los_Angeles", "Europe/Berlin", "Asia/Jerusalem"})
public void testReadOldTimestampFormat(String readerTimeZone) throws Exception {
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Path oldFilePath = new Path(getClass().getClassLoader().
getSystemResource("orc-file-dst-no-timezone.orc").getPath());
Reader reader = OrcFile.createReader(oldFilePath,
OrcFile.readerOptions(conf).filesystem(fs).useUTCTimestamp(true));
formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
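    // useUTCTimestamp(true) plus a UTC formatter keeps the expected strings
    // independent of the reader's default time zone set above.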
TypeDescription schema = reader.getSchema();
VectorizedRowBatch batch = schema.createRowBatch(10);
TimestampColumnVector ts = (TimestampColumnVector) batch.cols[0];
boolean[] include = new boolean[schema.getMaximumId() + 1];
include[schema.getChildren().get(0).getId()] = true;
RecordReader rows = reader.rows
(reader.options().include(include));
assertTrue(rows.nextBatch(batch));
Timestamp timestamp = ts.asScratchTimestamp(0);
assertEquals(Timestamp.valueOf("2014-01-01 12:34:56.0").toString(),
formatter.format(timestamp));
// check the contents of second row
rows.seekToRow(1);
assertTrue(rows.nextBatch(batch));
assertEquals(1, batch.size);
timestamp = ts.asScratchTimestamp(0);
assertEquals(Timestamp.valueOf("2014-06-06 12:34:56.0").toString(),
formatter.format(timestamp));
// handle the close up
assertFalse(rows.nextBatch(batch));
rows.close();
}
}
| 3,729 | 38.263158 | 84 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcFilterContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.apache.orc.impl.SchemaEvolution;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestOrcFilterContext {
private final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString())
.addField("f3",
TypeDescription.createStruct()
.addField("a", TypeDescription.createInt())
.addField("b", TypeDescription.createLong())
.addField("c",
TypeDescription.createMap(TypeDescription.createInt(),
TypeDescription.createDate())))
.addField("f4",
TypeDescription.createList(TypeDescription.createStruct()
.addField("a", TypeDescription.createChar())
.addField("b", TypeDescription.createBoolean())))
.addField("f5",
TypeDescription.createMap(TypeDescription.createInt(),
TypeDescription.createDate()))
.addField("f6",
TypeDescription.createUnion()
.addUnionChild(TypeDescription.createInt())
.addUnionChild(TypeDescription.createStruct()
.addField("a", TypeDescription.createDate())
.addField("b",
TypeDescription.createList(TypeDescription.createChar()))
)
);
private static Configuration configuration;
private static FileSystem fileSystem;
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static final Path filePath = new Path(workDir, "orc_filter_file.orc");
private static final int RowCount = 400;
private final OrcFilterContext filterContext = new OrcFilterContextImpl(schema, false)
.setBatch(schema.createRowBatch());
TypeDescription typeDescriptionACID =
TypeDescription.fromString("struct<int1:int,string1:string>");
TypeDescription acidSchema = SchemaEvolution.createEventSchema(typeDescriptionACID);
private final OrcFilterContext filterContextACID = new OrcFilterContextImpl(acidSchema, true)
.setBatch(acidSchema.createRowBatch());
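  // createEventSchema wraps the user schema in the ACID event layout (operation,
  // originalTransaction, bucket, rowId, currentTransaction, row), which is why the
  // ACID tests below reach the user columns through cols[5] as well as by name.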
@BeforeEach
public void setup() {
filterContext.reset();
}
@Test
public void testTopLevelElementaryType() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f1");
assertEquals(1, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof LongColumnVector);
}
@Test
public void testTopLevelElementaryTypeCaseInsensitive() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("F1");
assertEquals(1, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof LongColumnVector);
}
@Test
public void testTopLevelCompositeType() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f3");
assertEquals(1, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof StructColumnVector);
vectorBranch = filterContext.findColumnVector("f4");
assertEquals(1, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof ListColumnVector);
vectorBranch = filterContext.findColumnVector("f5");
assertEquals(1, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof MapColumnVector);
vectorBranch = filterContext.findColumnVector("f6");
assertEquals(1, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof UnionColumnVector);
}
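  // Path syntax exercised here and below: '.' descends struct fields, a numeric
  // component selects a union branch (f6.1 is the struct branch), and _elem addresses
  // a list's element column.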
@Test
public void testNestedType() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f3.a");
assertEquals(2, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof StructColumnVector);
assertTrue(vectorBranch[1] instanceof LongColumnVector);
vectorBranch = filterContext.findColumnVector("f3.c");
assertEquals(2, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof StructColumnVector);
assertTrue(vectorBranch[1] instanceof MapColumnVector);
vectorBranch = filterContext.findColumnVector("f6.1.b");
assertEquals(3, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof UnionColumnVector);
assertTrue(vectorBranch[1] instanceof StructColumnVector);
assertTrue(vectorBranch[2] instanceof ListColumnVector);
}
@Test
public void testTopLevelVector() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f3");
vectorBranch[0].noNulls = true;
assertTrue(OrcFilterContext.noNulls(vectorBranch));
assertFalse(OrcFilterContext.isNull(vectorBranch, 0));
vectorBranch[0].noNulls = false;
vectorBranch[0].isNull[0] = true;
assertFalse(OrcFilterContext.noNulls(vectorBranch));
assertTrue(OrcFilterContext.isNull(vectorBranch, 0));
assertFalse(OrcFilterContext.isNull(vectorBranch, 1));
}
@Test
public void testNestedVector() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f3.a");
vectorBranch[0].noNulls = true;
vectorBranch[1].noNulls = true;
assertTrue(OrcFilterContext.noNulls(vectorBranch));
assertFalse(OrcFilterContext.isNull(vectorBranch, 0));
assertFalse(OrcFilterContext.isNull(vectorBranch, 1));
assertFalse(OrcFilterContext.isNull(vectorBranch, 2));
vectorBranch = filterContext.findColumnVector("f3.a");
vectorBranch[0].noNulls = false;
vectorBranch[0].isNull[0] = true;
vectorBranch[1].noNulls = true;
assertFalse(OrcFilterContext.noNulls(vectorBranch));
assertTrue(OrcFilterContext.isNull(vectorBranch, 0));
assertFalse(OrcFilterContext.isNull(vectorBranch, 1));
assertFalse(OrcFilterContext.isNull(vectorBranch, 2));
vectorBranch = filterContext.findColumnVector("f3.a");
vectorBranch[0].noNulls = true;
vectorBranch[1].noNulls = false;
vectorBranch[1].isNull[2] = true;
assertFalse(OrcFilterContext.noNulls(vectorBranch));
assertFalse(OrcFilterContext.isNull(vectorBranch, 0));
assertFalse(OrcFilterContext.isNull(vectorBranch, 1));
assertTrue(OrcFilterContext.isNull(vectorBranch, 2));
vectorBranch = filterContext.findColumnVector("f3.a");
vectorBranch[0].noNulls = false;
vectorBranch[0].isNull[0] = true;
vectorBranch[1].noNulls = false;
vectorBranch[1].isNull[2] = true;
assertFalse(OrcFilterContext.noNulls(vectorBranch));
assertTrue(OrcFilterContext.isNull(vectorBranch, 0));
assertFalse(OrcFilterContext.isNull(vectorBranch, 1));
assertTrue(OrcFilterContext.isNull(vectorBranch, 2));
}
@Test
public void testTopLevelList() {
TypeDescription topListSchema = TypeDescription.createList(
TypeDescription.createStruct()
.addField("a", TypeDescription.createChar())
.addField("b", TypeDescription
.createBoolean()));
OrcFilterContext fc = new OrcFilterContextImpl(topListSchema, false)
.setBatch(topListSchema.createRowBatch());
ColumnVector[] vectorBranch = fc.findColumnVector("_elem");
assertEquals(2, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof ListColumnVector);
assertTrue(vectorBranch[1] instanceof StructColumnVector);
}
@Test
public void testUnsupportedIsNullUse() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f4._elem.a");
assertEquals(3, vectorBranch.length);
assertTrue(vectorBranch[0] instanceof ListColumnVector);
assertTrue(vectorBranch[1] instanceof StructColumnVector);
assertTrue(vectorBranch[2] instanceof BytesColumnVector);
assertTrue(OrcFilterContext.noNulls(vectorBranch));
IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
() -> OrcFilterContext.isNull(vectorBranch,
0));
assertTrue(exception.getMessage().contains("ListColumnVector"));
assertTrue(exception.getMessage().contains("List and Map vectors are not supported"));
}
@Test
public void testRepeatingVector() {
ColumnVector[] vectorBranch = filterContext.findColumnVector("f3.a");
vectorBranch[0].noNulls = true;
vectorBranch[0].isRepeating = true;
vectorBranch[1].noNulls = true;
assertTrue(OrcFilterContext.noNulls(vectorBranch));
assertFalse(OrcFilterContext.isNull(vectorBranch, 0));
assertFalse(OrcFilterContext.isNull(vectorBranch, 1));
assertFalse(OrcFilterContext.isNull(vectorBranch, 2));
vectorBranch[0].noNulls = false;
vectorBranch[0].isRepeating = true;
vectorBranch[0].isNull[0] = true;
vectorBranch[1].noNulls = true;
assertFalse(OrcFilterContext.noNulls(vectorBranch));
assertTrue(OrcFilterContext.isNull(vectorBranch, 0));
assertTrue(OrcFilterContext.isNull(vectorBranch, 1));
assertTrue(OrcFilterContext.isNull(vectorBranch, 2));
}
@Test
public void testACIDTable() {
ColumnVector[] columnVector = filterContextACID.findColumnVector("string1");
assertEquals(1, columnVector.length);
assertTrue(columnVector[0] instanceof BytesColumnVector, "Expected a BytesColumnVector, but found "+ columnVector[0].getClass());
columnVector = filterContextACID.findColumnVector("int1");
assertEquals(1, columnVector.length);
assertTrue(columnVector[0] instanceof LongColumnVector, "Expected a LongColumnVector, but found "+ columnVector[0].getClass());
}
@Test
public void testRowFilterWithACIDTable() throws IOException {
createAcidORCFile();
readSingleRowWithFilter(new Random().nextInt(RowCount));
fileSystem.delete(filePath, false);
}
private void createAcidORCFile() throws IOException {
configuration = new Configuration();
fileSystem = FileSystem.get(configuration);
try (Writer writer = OrcFile.createWriter(filePath,
OrcFile.writerOptions(configuration)
.fileSystem(fileSystem)
.overwrite(true)
.rowIndexStride(8192)
.setSchema(acidSchema))) {
Random random = new Random(1024);
VectorizedRowBatch vectorizedRowBatch = acidSchema.createRowBatch();
for (int rowId = 0; rowId < RowCount; rowId++) {
long v = random.nextLong();
populateColumnValues(acidSchema, vectorizedRowBatch.cols,vectorizedRowBatch.size, v);
// Populate the rowId
((LongColumnVector) vectorizedRowBatch.cols[3]).vector[vectorizedRowBatch.size] = rowId;
StructColumnVector row = (StructColumnVector) vectorizedRowBatch.cols[5];
((LongColumnVector) row.fields[0]).vector[vectorizedRowBatch.size] = rowId;
vectorizedRowBatch.size += 1;
if (vectorizedRowBatch.size == vectorizedRowBatch.getMaxSize()) {
writer.addRowBatch(vectorizedRowBatch);
vectorizedRowBatch.reset();
}
}
if (vectorizedRowBatch.size > 0) {
writer.addRowBatch(vectorizedRowBatch);
vectorizedRowBatch.reset();
}
}
}
private void populateColumnValues(TypeDescription typeDescription, ColumnVector[] columnVectors, int index, long value) {
    for (int columnId = 0; columnId < typeDescription.getChildren().size(); columnId++) {
      switch (typeDescription.getChildren().get(columnId).getCategory()) {
        case INT:
        case LONG:
          ((LongColumnVector)columnVectors[columnId]).vector[index] = value;
          break;
case STRING:
((BytesColumnVector) columnVectors[columnId]).setVal(index,
("String-"+ index).getBytes(StandardCharsets.UTF_8));
break;
case STRUCT:
populateColumnValues(typeDescription.getChildren().get(columnId), ((StructColumnVector)columnVectors[columnId]).fields, index, value);
break;
default:
throw new IllegalArgumentException();
}
}
}
private void readSingleRowWithFilter(int id) throws IOException {
Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(configuration).filesystem(fileSystem));
SearchArgument searchArgument = SearchArgumentFactory.newBuilder()
.in("int1", PredicateLeaf.Type.LONG, new Long(id))
.build();
Reader.Options readerOptions = reader.options()
.searchArgument(searchArgument, new String[] {"int1"})
.useSelected(true)
.allowSARGToFilter(true);
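    // allowSARGToFilter turns the SARG into a row-level filter and useSelected exposes
    // the matches through batch.selected, so exactly one selected row is expected below.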
VectorizedRowBatch vectorizedRowBatch = acidSchema.createRowBatch();
long rowCount = 0;
try (RecordReader recordReader = reader.rows(readerOptions)) {
assertTrue(recordReader.nextBatch(vectorizedRowBatch));
rowCount += vectorizedRowBatch.size;
assertEquals(6, vectorizedRowBatch.cols.length);
assertTrue(vectorizedRowBatch.cols[5] instanceof StructColumnVector);
assertTrue(((StructColumnVector) vectorizedRowBatch.cols[5]).fields[0] instanceof LongColumnVector);
assertTrue(((StructColumnVector) vectorizedRowBatch.cols[5]).fields[1] instanceof BytesColumnVector);
assertEquals(id, ((LongColumnVector) ((StructColumnVector) vectorizedRowBatch.cols[5]).fields[0]).vector[vectorizedRowBatch.selected[0]]);
checkStringColumn(id, vectorizedRowBatch);
assertFalse(recordReader.nextBatch(vectorizedRowBatch));
}
assertEquals(1, rowCount);
}
private static void checkStringColumn(int id, VectorizedRowBatch vectorizedRowBatch) {
BytesColumnVector bytesColumnVector = (BytesColumnVector) ((StructColumnVector) vectorizedRowBatch.cols[5]).fields[1];
assertEquals("String-"+ id, bytesColumnVector.toString(id));
}
}
| 15,935 | 43.389972 | 144 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcNoTimezone.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Test over an ORC file that does not store time zone information in the footer
 * and that was written from a time zone that does not observe DST.
*/
public class TestOrcNoTimezone {
Configuration conf;
FileSystem fs;
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S");
static TimeZone defaultTimeZone = TimeZone.getDefault();
@BeforeEach
public void openFileSystem() throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
@ParameterizedTest
@ValueSource(strings = {"GMT-12:00", "UTC", "GMT+8:00"})
public void testReadOldTimestampFormat(String readerTimeZone) throws Exception {
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Path oldFilePath = new Path(getClass().getClassLoader().
getSystemResource("orc-file-no-timezone.orc").getPath());
Reader reader = OrcFile.createReader(oldFilePath,
OrcFile.readerOptions(conf).filesystem(fs).useUTCTimestamp(true));
formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
TypeDescription schema = reader.getSchema();
VectorizedRowBatch batch = schema.createRowBatch(10);
TimestampColumnVector ts = (TimestampColumnVector) batch.cols[0];
boolean[] include = new boolean[schema.getMaximumId() + 1];
include[schema.getChildren().get(0).getId()] = true;
RecordReader rows = reader.rows
(reader.options().include(include));
assertTrue(rows.nextBatch(batch));
Timestamp timestamp = ts.asScratchTimestamp(0);
assertEquals(
Timestamp.valueOf("2014-01-01 12:34:56.0").toString(),
formatter.format(timestamp),
"For timezone : " + TimeZone.getTimeZone(readerTimeZone));
// check the contents of second row
rows.seekToRow(1);
assertTrue(rows.nextBatch(batch));
assertEquals(1, batch.size);
timestamp = ts.asScratchTimestamp(0);
assertEquals(
Timestamp.valueOf("2014-06-06 12:34:56.0").toString(),
formatter.format(timestamp),
"For timezone : " + TimeZone.getTimeZone(readerTimeZone));
// handle the close up
assertFalse(rows.nextBatch(batch));
rows.close();
}
}
| 3,786 | 37.642857 | 82 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcNullOptimization.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.RecordReaderImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Random;
import static org.apache.orc.TestVectorOrcFile.assertEmptyStats;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestOrcNullOptimization {
TypeDescription createMyStruct() {
return TypeDescription.createStruct()
.addField("a", TypeDescription.createInt())
.addField("b", TypeDescription.createString())
.addField("c", TypeDescription.createBoolean())
.addField("d", TypeDescription.createList(
TypeDescription.createStruct()
.addField("z", TypeDescription.createInt())));
}
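  // Appends one row to the batch, flushing to the writer whenever the batch fills up;
  // null arguments become column-level nulls, which drives the PRESENT-stream
  // assertions below.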
void addRow(Writer writer, VectorizedRowBatch batch,
Integer a, String b, Boolean c,
Integer... d) throws IOException {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
int row = batch.size++;
LongColumnVector aColumn = (LongColumnVector) batch.cols[0];
BytesColumnVector bColumn = (BytesColumnVector) batch.cols[1];
LongColumnVector cColumn = (LongColumnVector) batch.cols[2];
ListColumnVector dColumn = (ListColumnVector) batch.cols[3];
StructColumnVector dStruct = (StructColumnVector) dColumn.child;
LongColumnVector dInt = (LongColumnVector) dStruct.fields[0];
if (a == null) {
aColumn.noNulls = false;
aColumn.isNull[row] = true;
} else {
aColumn.vector[row] = a;
}
if (b == null) {
bColumn.noNulls = false;
bColumn.isNull[row] = true;
} else {
bColumn.setVal(row, b.getBytes(StandardCharsets.UTF_8));
}
if (c == null) {
cColumn.noNulls = false;
cColumn.isNull[row] = true;
} else {
cColumn.vector[row] = c ? 1 : 0;
}
if (d == null) {
dColumn.noNulls = false;
dColumn.isNull[row] = true;
} else {
dColumn.offsets[row] = dColumn.childCount;
dColumn.lengths[row] = d.length;
dColumn.childCount += d.length;
for(int e=0; e < d.length; ++e) {
dInt.vector[(int) dColumn.offsets[row] + e] = d[e];
}
}
}
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcNullOptimization." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@Test
public void testMultiStripeWithNull() throws Exception {
TypeDescription schema = createMyStruct();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000));
Random rand = new Random(100);
VectorizedRowBatch batch = schema.createRowBatch();
addRow(writer, batch, null, null, true, 100);
for (int i = 2; i < 20000; i++) {
addRow(writer, batch, rand.nextInt(1), "a", true, 100);
}
addRow(writer, batch, null, null, true, 100);
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(20000, reader.getNumberOfRows());
assertEquals(20000, stats[0].getNumberOfValues());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getMaximum());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getMinimum());
assertTrue(((IntegerColumnStatistics) stats[1]).isSumDefined());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getSum());
assertEquals("count: 19998 hasNull: true bytesOnDisk: 184 min: 0 max: 0 sum: 0",
stats[1].toString());
assertEquals("a", ((StringColumnStatistics) stats[2]).getMaximum());
assertEquals("a", ((StringColumnStatistics) stats[2]).getMinimum());
assertEquals(19998, stats[2].getNumberOfValues());
assertEquals("count: 19998 hasNull: true bytesOnDisk: 200 min: a max: a sum: 19998",
stats[2].toString());
// check the inspectors
assertEquals("struct<a:int,b:string,c:boolean,d:array<struct<z:int>>>",
reader.getSchema().toString());
RecordReader rows = reader.rows();
List<Boolean> expected = Lists.newArrayList();
for (StripeInformation sinfo : reader.getStripes()) {
expected.add(false);
}
// only the first and last stripe will have PRESENT stream
expected.set(0, true);
expected.set(expected.size() - 1, true);
List<Boolean> got = Lists.newArrayList();
// check if the strip footer contains PRESENT stream
for (StripeInformation sinfo : reader.getStripes()) {
OrcProto.StripeFooter sf =
((RecordReaderImpl) rows).readStripeFooter(sinfo);
got.add(sf.toString().indexOf(OrcProto.Stream.Kind.PRESENT.toString())
!= -1);
}
assertEquals(expected, got);
batch = reader.getSchema().createRowBatch();
LongColumnVector aColumn = (LongColumnVector) batch.cols[0];
BytesColumnVector bColumn = (BytesColumnVector) batch.cols[1];
LongColumnVector cColumn = (LongColumnVector) batch.cols[2];
ListColumnVector dColumn = (ListColumnVector) batch.cols[3];
LongColumnVector dElements =
(LongColumnVector)(((StructColumnVector) dColumn.child).fields[0]);
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
// row 1
assertTrue(aColumn.isNull[0]);
assertTrue(bColumn.isNull[0]);
assertEquals(1, cColumn.vector[0]);
assertEquals(0, dColumn.offsets[0]);
    assertEquals(1, dColumn.lengths[0]);
assertEquals(100, dElements.vector[0]);
rows.seekToRow(19998);
rows.nextBatch(batch);
assertEquals(2, batch.size);
// last-1 row
assertEquals(0, aColumn.vector[0]);
assertEquals("a", bColumn.toString(0));
assertEquals(1, cColumn.vector[0]);
assertEquals(0, dColumn.offsets[0]);
assertEquals(1, dColumn.lengths[0]);
assertEquals(100, dElements.vector[0]);
// last row
assertTrue(aColumn.isNull[1]);
assertTrue(bColumn.isNull[1]);
assertEquals(1, cColumn.vector[1]);
assertEquals(1, dColumn.offsets[1]);
assertEquals(1, dColumn.lengths[1]);
assertEquals(100, dElements.vector[1]);
assertFalse(rows.nextBatch(batch));
rows.close();
}
@Test
public void testMultiStripeWithoutNull() throws Exception {
TypeDescription schema = createMyStruct();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000));
Random rand = new Random(100);
int batchSize = 5000;
VectorizedRowBatch batch = schema.createRowBatch(batchSize);
ColumnStatistics[] writerStats = writer.getStatistics();
assertEmptyStats(writerStats);
int count = 0;
for (int i = 1; i < 20000; i++) {
addRow(writer, batch, rand.nextInt(1), "a", true, 100);
count++;
if (count % batchSize == 1) {
writerStats = writer.getStatistics();
} else {
assertArrayEquals(writerStats, writer.getStatistics());
}
}
addRow(writer, batch, 0, "b", true, 100);
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertArrayEquals(stats, writer.getStatistics());
assertEquals(20000, reader.getNumberOfRows());
assertEquals(20000, stats[0].getNumberOfValues());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getMaximum());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getMinimum());
assertTrue(((IntegerColumnStatistics) stats[1]).isSumDefined());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getSum());
assertEquals("count: 20000 hasNull: false bytesOnDisk: 160 min: 0 max: 0 sum: 0",
stats[1].toString());
assertEquals("b", ((StringColumnStatistics) stats[2]).getMaximum());
assertEquals("a", ((StringColumnStatistics) stats[2]).getMinimum());
assertEquals(20000, stats[2].getNumberOfValues());
assertEquals("count: 20000 hasNull: false bytesOnDisk: 180 min: a max: b sum: 20000",
stats[2].toString());
// check the inspectors
assertEquals("struct<a:int,b:string,c:boolean,d:array<struct<z:int>>>",
reader.getSchema().toString());
RecordReader rows = reader.rows();
    // none of the stripes will have a PRESENT stream
List<Boolean> expected = Lists.newArrayList();
for (StripeInformation sinfo : reader.getStripes()) {
expected.add(false);
}
List<Boolean> got = Lists.newArrayList();
    // check if the stripe footer contains a PRESENT stream
for (StripeInformation sinfo : reader.getStripes()) {
OrcProto.StripeFooter sf =
((RecordReaderImpl) rows).readStripeFooter(sinfo);
got.add(sf.toString().indexOf(OrcProto.Stream.Kind.PRESENT.toString())
!= -1);
}
assertEquals(expected, got);
rows.seekToRow(19998);
batch = reader.getSchema().createRowBatch();
LongColumnVector aColumn = (LongColumnVector) batch.cols[0];
BytesColumnVector bColumn = (BytesColumnVector) batch.cols[1];
LongColumnVector cColumn = (LongColumnVector) batch.cols[2];
ListColumnVector dColumn = (ListColumnVector) batch.cols[3];
LongColumnVector dElements =
(LongColumnVector)(((StructColumnVector) dColumn.child).fields[0]);
assertTrue(rows.nextBatch(batch));
assertEquals(2, batch.size);
// last-1 row
assertEquals(0, aColumn.vector[0]);
assertEquals("a", bColumn.toString(0));
assertEquals(1, cColumn.vector[0]);
assertEquals(0, dColumn.offsets[0]);
assertEquals(1, dColumn.lengths[0]);
assertEquals(100, dElements.vector[0]);
// last row
assertEquals(0, aColumn.vector[1]);
assertEquals("b", bColumn.toString(1));
assertEquals(1, cColumn.vector[1]);
assertEquals(1, dColumn.offsets[1]);
assertEquals(1, dColumn.lengths[1]);
assertEquals(100, dElements.vector[1]);
rows.close();
}
@Test
public void testColumnsWithNullAndCompression() throws Exception {
TypeDescription schema = createMyStruct();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
addRow(writer, batch, 3, "a", true, 100);
addRow(writer, batch, null, "b", true, 100);
addRow(writer, batch, 3, null, false, 100);
addRow(writer, batch, 3, "d", true, 100);
addRow(writer, batch, 2, "e", true, 100);
addRow(writer, batch, 2, "f", true, 100);
addRow(writer, batch, 2, "g", true, 100);
addRow(writer, batch, 2, "h", true, 100);
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertArrayEquals(stats, writer.getStatistics());
assertEquals(8, reader.getNumberOfRows());
assertEquals(8, stats[0].getNumberOfValues());
assertEquals(3, ((IntegerColumnStatistics) stats[1]).getMaximum());
assertEquals(2, ((IntegerColumnStatistics) stats[1]).getMinimum());
assertTrue(((IntegerColumnStatistics) stats[1]).isSumDefined());
assertEquals(17, ((IntegerColumnStatistics) stats[1]).getSum());
assertEquals("count: 7 hasNull: true bytesOnDisk: 12 min: 2 max: 3 sum: 17",
stats[1].toString());
assertEquals("h", ((StringColumnStatistics) stats[2]).getMaximum());
assertEquals("a", ((StringColumnStatistics) stats[2]).getMinimum());
assertEquals(7, stats[2].getNumberOfValues());
assertEquals("count: 7 hasNull: true bytesOnDisk: 20 min: a max: h sum: 7",
stats[2].toString());
// check the inspectors
batch = reader.getSchema().createRowBatch();
LongColumnVector aColumn = (LongColumnVector) batch.cols[0];
BytesColumnVector bColumn = (BytesColumnVector) batch.cols[1];
LongColumnVector cColumn = (LongColumnVector) batch.cols[2];
ListColumnVector dColumn = (ListColumnVector) batch.cols[3];
LongColumnVector dElements =
(LongColumnVector)(((StructColumnVector) dColumn.child).fields[0]);
assertEquals("struct<a:int,b:string,c:boolean,d:array<struct<z:int>>>",
reader.getSchema().toString());
RecordReader rows = reader.rows();
    // only the last stripe will have a PRESENT stream
List<Boolean> expected = Lists.newArrayList();
for (StripeInformation sinfo : reader.getStripes()) {
expected.add(false);
}
expected.set(expected.size() - 1, true);
List<Boolean> got = Lists.newArrayList();
    // check if the stripe footer contains a PRESENT stream
for (StripeInformation sinfo : reader.getStripes()) {
OrcProto.StripeFooter sf =
((RecordReaderImpl) rows).readStripeFooter(sinfo);
got.add(sf.toString().indexOf(OrcProto.Stream.Kind.PRESENT.toString())
!= -1);
}
assertEquals(expected, got);
assertTrue(rows.nextBatch(batch));
assertEquals(8, batch.size);
// row 1
assertEquals(3, aColumn.vector[0]);
assertEquals("a", bColumn.toString(0));
assertEquals(1, cColumn.vector[0]);
assertEquals(0, dColumn.offsets[0]);
assertEquals(1, dColumn.lengths[0]);
assertEquals(100, dElements.vector[0]);
// row 2
assertTrue(aColumn.isNull[1]);
assertEquals("b", bColumn.toString(1));
assertEquals(1, cColumn.vector[1]);
assertEquals(1, dColumn.offsets[1]);
assertEquals(1, dColumn.lengths[1]);
assertEquals(100, dElements.vector[1]);
// row 3
assertEquals(3, aColumn.vector[2]);
assertTrue(bColumn.isNull[2]);
assertEquals(0, cColumn.vector[2]);
assertEquals(2, dColumn.offsets[2]);
assertEquals(1, dColumn.lengths[2]);
assertEquals(100, dElements.vector[2]);
rows.close();
}
}
| 16,583 | 37.929577 | 89 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcTimestampPPD.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl;
import org.apache.orc.impl.RecordReaderImpl;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.List;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestOrcTimestampPPD {
Path workDir =
new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
static TimeZone defaultTimeZone = TimeZone.getDefault();
public TestOrcTimestampPPD() {
}
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir,
"TestOrcTimestampPPD." + testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
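  // Convenience wrapper so the tests below can build individual predicate leaves directly,
  // without going through a full SearchArgument builder.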
public static PredicateLeaf createPredicateLeaf(PredicateLeaf.Operator operator,
PredicateLeaf.Type type,
String columnName,
Object literal,
List<Object> literalList) {
return new SearchArgumentImpl.PredicateLeafImpl(operator, type, columnName,
literal, literalList);
}
@Test
// ORC-611 : PPD evaluation with min-max stats for sub-millisecond timestamps
public void testSubMsTimestampWriterStats() throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000).bufferSize(10000)
.version(OrcFile.Version.CURRENT));
List<Timestamp> tslist = Lists.newArrayList();
tslist.add(Timestamp.valueOf("1970-01-01 00:00:00.0005"));
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (Timestamp t : tslist) {
times.set(batch.size++, t);
}
times.isRepeating = true;
writer.addRowBatch(batch);
// Done writing to file
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
// Now reading
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
while (rows.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
assertEquals(tslist.get(0), times.asScratchTimestamp(r));
assertEquals(tslist.get(0).getNanos(), times.asScratchTimestamp(r).getNanos());
}
}
rows.close();
ColumnStatistics[] colStats = reader.getStatistics();
Timestamp gotMin = ((TimestampColumnStatistics) colStats[0]).getMinimum();
assertEquals("1970-01-01 00:00:00.0005", gotMin.toString());
Timestamp gotMax = ((TimestampColumnStatistics) colStats[0]).getMaximum();
assertEquals("1970-01-01 00:00:00.0005", gotMax.toString());
PredicateLeaf pred = createPredicateLeaf(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("1970-01-01 00:00:00.0005"), null);
// Make sure PPD is now passing
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
pred = createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN_EQUALS, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("1970-01-01 00:00:00.0005"), null);
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
pred = createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("1970-01-01 00:00:00.0005"), null);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
}
@Test
public void testSubMsComplexStats() throws IOException {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000).bufferSize(10000)
.version(OrcFile.Version.CURRENT));
List<Timestamp> tslist = Lists.newArrayList();
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.001109"));
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.001279"));
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.001499"));
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.0067891"));
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.005199"));
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.006789"));
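    // All of these values fall within the same handful of milliseconds; they differ only in
    // their sub-millisecond digits, which is exactly what the min/max statistics and the PPD
    // checks below have to distinguish.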
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (Timestamp ts: tslist) {
times.set(batch.size++, ts);
}
times.isRepeating = false;
writer.addRowBatch(batch);
// Done writing to file
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
// Now reading
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
while (rows.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
assertEquals(tslist.get(r), times.asScratchTimestamp(r));
assertEquals(tslist.get(r).getNanos(), times.asScratchTimestamp(r).getNanos());
}
}
rows.close();
ColumnStatistics[] colStats = reader.getStatistics();
Timestamp gotMin = ((TimestampColumnStatistics) colStats[0]).getMinimum();
assertEquals("2037-01-01 00:00:00.001109", gotMin.toString());
Timestamp gotMax = ((TimestampColumnStatistics) colStats[0]).getMaximum();
assertEquals("2037-01-01 00:00:00.0067891", gotMax.toString());
// PPD EQUALS with nano precision passing
PredicateLeaf pred = createPredicateLeaf(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("2037-01-01 00:00:00.001109"), null);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
// PPD EQUALS with ms precision NOT passing
pred = createPredicateLeaf(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("2037-01-01 00:00:001"), null);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
// PPD LESS_THAN with ns precision passing
pred = createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("2037-01-01 00:00:00.006789"), null);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
// PPD LESS_THAN with ms precision passing
pred = createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.TIMESTAMP, "c",
Timestamp.valueOf("2037-01-01 00:00:00.002"), null);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[0], pred, null));
}
}
| 8,861 | 42.229268 | 112 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcTimezone1.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.sql.Timestamp;
import java.util.List;
import java.util.TimeZone;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
*
*/
public class TestOrcTimezone1 {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
static TimeZone defaultTimeZone = TimeZone.getDefault();
private static Stream<Arguments> data() {
return Stream.of(/* Extreme timezones */
Arguments.of("GMT-12:00", "GMT+14:00"),
/* No difference in DST */
Arguments.of("America/Los_Angeles", "America/Los_Angeles"), /* same timezone both with DST */
Arguments.of("Europe/Berlin", "Europe/Berlin"), /* same as above but europe */
Arguments.of("America/Phoenix", "Asia/Kolkata") /* Writer no DST, Reader no DST */,
Arguments.of("Europe/Berlin", "America/Los_Angeles") /* Writer DST, Reader DST */,
Arguments.of("Europe/Berlin", "America/Chicago") /* Writer DST, Reader DST */,
/* With DST difference */
Arguments.of("Europe/Berlin", "UTC"),
Arguments.of("UTC", "Europe/Berlin") /* Writer no DST, Reader DST */,
Arguments.of("America/Los_Angeles", "Asia/Kolkata") /* Writer DST, Reader no DST */,
Arguments.of("Europe/Berlin", "Asia/Kolkata") /* Writer DST, Reader no DST */,
        /* Timezone offsets for the reader have changed historically */
Arguments.of("Asia/Saigon", "Pacific/Enderbury"),
Arguments.of("UTC", "Asia/Jerusalem")
        // NOTE:
        // "1995-01-01 03:00:00.688888888" is not a valid time in the Pacific/Enderbury timezone.
        // At 1995-01-01 00:00:00 its GMT offset moved from -11:00 to +13:00, which makes every
        // local time on 1995-01-01 invalid (hence the test data below uses 1995-01-02 instead).
        // To see this with Joda-Time, try:
        // new MutableDateTime("1995-01-01", DateTimeZone.forTimeZone(readerTimeZone));
);
}
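  // Illustrative sketch (not part of the original test): the NOTE above can also be checked
  // with java.time instead of Joda-Time. ZoneRules.getValidOffsets() returns an empty list
  // when a local date-time falls inside a gap such as Pacific/Enderbury's skipped 1995-01-01,
  // so (per the NOTE) a call like
  //   isValidLocalDateTime("Pacific/Enderbury", java.time.LocalDateTime.parse("1995-01-01T03:00:00"))
  // should report false.
  private static boolean isValidLocalDateTime(String zoneId, java.time.LocalDateTime local) {
    java.time.zone.ZoneRules rules = java.time.ZoneId.of(zoneId).getRules();
    return !rules.getValidOffsets(local).isEmpty();
  }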
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampWriter(String writerTimeZone, String readerTimeZone) throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
List<String> ts = Lists.newArrayList();
ts.add("2003-01-01 01:00:00.000000222");
ts.add("1996-08-02 09:00:00.723100809");
ts.add("1999-01-01 02:00:00.999999999");
ts.add("1995-01-02 03:00:00.688888888");
ts.add("2002-01-01 04:00:00.1");
ts.add("2010-03-02 05:00:00.000009001");
ts.add("2005-01-01 06:00:00.000002229");
ts.add("2006-01-01 07:00:00.900203003");
ts.add("2003-01-01 08:00:00.800000007");
ts.add("1998-11-02 10:00:00.857340643");
ts.add("2008-10-02 11:00:00.0");
ts.add("2037-01-01 00:00:00.000999");
ts.add("2014-03-28 00:00:00.0");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
times.set(batch.size++, Timestamp.valueOf(t));
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(ts.get(idx++), times.asScratchTimestamp(r).toString());
}
}
rows.close();
}
@ParameterizedTest
@MethodSource("data")
public void testReadTimestampFormat_0_11(String writerTimeZone, String readerTimeZone) throws Exception {
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Path oldFilePath = new Path(getClass().getClassLoader().
getSystemResource("orc-file-11-format.orc").getPath());
Reader reader = OrcFile.createReader(oldFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schema = reader.getSchema();
int col = schema.getFieldNames().indexOf("ts");
VectorizedRowBatch batch = schema.createRowBatch(10);
TimestampColumnVector ts = (TimestampColumnVector) batch.cols[col];
boolean[] include = new boolean[schema.getMaximumId() + 1];
include[schema.getChildren().get(col).getId()] = true;
RecordReader rows = reader.rows
(reader.options().include(include));
assertTrue(rows.nextBatch(batch));
assertEquals(Timestamp.valueOf("2000-03-12 15:00:00"),
ts.asScratchTimestamp(0));
// check the contents of second row
rows.seekToRow(7499);
assertTrue(rows.nextBatch(batch));
assertEquals(1, batch.size);
assertEquals(Timestamp.valueOf("2000-03-12 15:00:01"),
ts.asScratchTimestamp(0));
// handle the close up
assertFalse(rows.nextBatch(batch));
rows.close();
}
}
| 7,215 | 40.234286 | 107 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcTimezone2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.sql.Timestamp;
import java.util.List;
import java.util.Random;
import java.util.TimeZone;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
*
*/
public class TestOrcTimezone2 {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
static TimeZone defaultTimeZone = TimeZone.getDefault();
private static Stream<Arguments> data() {
String[] allTimeZones = TimeZone.getAvailableIDs();
Random rand = new Random(123);
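    // The fixed seed keeps the 500 randomly chosen writer/reader timezone pairs below
    // reproducible from run to run.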
int len = allTimeZones.length;
int n = 500;
Arguments[] data = new Arguments[n];
for (int i = 0; i < n; i++) {
int wIdx = rand.nextInt(len);
int rIdx = rand.nextInt(len);
data[i] = Arguments.of(allTimeZones[wIdx], allTimeZones[rIdx]);
}
return Stream.of(data);
}
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampWriter(String writerTimeZone, String readerTimeZone) throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.stripeSize(100000).bufferSize(10000));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
List<String> ts = Lists.newArrayList();
ts.add("2003-01-01 01:00:00.000000222");
ts.add("1999-01-01 02:00:00.999999999");
ts.add("1995-01-02 03:00:00.688888888");
ts.add("2002-01-01 04:00:00.1");
ts.add("2010-03-02 05:00:00.000009001");
ts.add("2005-01-01 06:00:00.000002229");
ts.add("2006-01-01 07:00:00.900203003");
ts.add("2003-01-01 08:00:00.800000007");
ts.add("1996-08-02 09:00:00.723100809");
ts.add("1998-11-02 10:00:00.857340643");
ts.add("2008-10-02 11:00:00.0");
ts.add("2037-01-01 00:00:00.000999");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector tsc = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
tsc.set(batch.size++, Timestamp.valueOf(t));
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
int idx = 0;
batch = reader.getSchema().createRowBatch();
tsc = (TimestampColumnVector) batch.cols[0];
while (rows.nextBatch(batch)) {
for (int r=0; r < batch.size; ++r) {
assertEquals(ts.get(idx++), tsc.asScratchTimestamp(r).toString());
}
}
rows.close();
}
}
| 4,661 | 35.421875 | 98 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcTimezone3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.sql.Timestamp;
import java.util.List;
import java.util.TimeZone;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
*
*/
public class TestOrcTimezone3 {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
static TimeZone defaultTimeZone = TimeZone.getDefault();
private static Stream<Arguments> data() {
return Stream.of(Arguments.of("America/Chicago", "America/Los_Angeles"));
}
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcTimezone3." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampWriter(String writerTimeZone, String readerTimeZone) throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
List<String> ts = Lists.newArrayList();
ts.add("1969-12-31 16:00:14.007");
ts.add("1969-12-31 16:00:06.021");
ts.add("1969-12-31 16:00:03.963");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
times.set(batch.size++, Timestamp.valueOf(t));
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(ts.get(idx++), times.asScratchTimestamp(r).toString());
}
}
rows.close();
}
}
| 3,928 | 35.37963 | 98 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcTimezone4.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
*
*/
public class TestOrcTimezone4 {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
static TimeZone defaultTimeZone = TimeZone.getDefault();
public TestOrcTimezone4() {
}
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcTimezone4." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
@Test
public void testTimestampWriter() throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
List<String> ts = Lists.newArrayList();
ts.add("1969-12-31 15:59:56.007");
ts.add("1969-12-31 16:00:14.007");
ts.add("1969-12-31 16:00:06.021");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
long time = formatter.parse(t).getTime();
times.set(batch.size++, new Timestamp(time));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs).useUTCTimestamp(true));
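    // useUTCTimestamp(true) makes the reader use UTC (instead of the JVM default zone) when
    // reconstructing the written wall-clock values, which is why the formatter is switched to
    // UTC below before comparing against the strings written under America/Los_Angeles.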
formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
Timestamp timestamp = times.asScratchTimestamp(r);
assertEquals(ts.get(idx++), formatter.format(timestamp));
}
}
rows.close();
}
}
| 3,703 | 33.943396 | 79 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcTimezonePPD.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl;
import org.apache.orc.impl.OrcIndex;
import org.apache.orc.impl.RecordReaderImpl;
import org.apache.orc.impl.SerializationUtils;
import org.apache.orc.util.BloomFilter;
import org.apache.orc.util.BloomFilterIO;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.sql.Timestamp;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.List;
import java.util.TimeZone;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
/**
*
*/
public class TestOrcTimezonePPD {
private static final Logger LOG = LoggerFactory.getLogger(TestOrcTimezonePPD.class);
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
static TimeZone defaultTimeZone = TimeZone.getDefault();
TimeZone utcTz = TimeZone.getTimeZone("UTC");
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private static Stream<Arguments> data() {
return Stream.of(
Arguments.of("US/Eastern", "America/Los_Angeles"),
Arguments.of("US/Eastern", "UTC"),
/* Extreme timezones */
Arguments.of("GMT-12:00", "GMT+14:00"),
/* No difference in DST */
Arguments.of("America/Los_Angeles", "America/Los_Angeles"), /* same timezone both with DST */
Arguments.of("Europe/Berlin", "Europe/Berlin"), /* same as above but europe */
Arguments.of("America/Phoenix", "Asia/Kolkata") /* Writer no DST, Reader no DST */,
Arguments.of("Europe/Berlin", "America/Los_Angeles") /* Writer DST, Reader DST */,
Arguments.of("Europe/Berlin", "America/Chicago") /* Writer DST, Reader DST */,
/* With DST difference */
Arguments.of("Europe/Berlin", "UTC"),
Arguments.of("UTC", "Europe/Berlin") /* Writer no DST, Reader DST */,
Arguments.of("America/Los_Angeles", "Asia/Kolkata") /* Writer DST, Reader no DST */,
Arguments.of("Europe/Berlin", "Asia/Kolkata") /* Writer DST, Reader no DST */,
        /* Timezone offsets for the reader have changed historically */
Arguments.of("Asia/Saigon", "Pacific/Enderbury"),
Arguments.of("UTC", "Asia/Jerusalem"));
}
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@AfterEach
public void restoreTimeZone() {
TimeZone.setDefault(defaultTimeZone);
}
public static PredicateLeaf createPredicateLeaf(PredicateLeaf.Operator operator,
PredicateLeaf.Type type,
String columnName,
Object literal,
List<Object> literalList) {
return new SearchArgumentImpl.PredicateLeafImpl(operator, type, columnName,
literal, literalList);
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampPPDMinMax(String writerTimeZone, String readerTimeZone) throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
List<String> ts = Lists.newArrayList();
ts.add("2007-08-01 00:00:00.0");
ts.add("2007-08-01 04:00:00.0");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
times.set(batch.size++, Timestamp.valueOf(t));
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
assertEquals(ts.get(idx++), times.asScratchTimestamp(r).toString());
}
}
rows.close();
ColumnStatistics[] colStats = reader.getStatistics();
Timestamp gotMin = ((TimestampColumnStatistics) colStats[0]).getMinimum();
assertEquals("2007-08-01 00:00:00.0", gotMin.toString());
Timestamp gotMax = ((TimestampColumnStatistics) colStats[0]).getMaximum();
assertEquals("2007-08-01 04:00:00.0", gotMax.toString());
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[0],
SearchArgumentFactory.newBuilder().equals
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-01 00:00:00.0")).build().getLeaves().get(0),
null));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[0],
SearchArgumentFactory.newBuilder().equals
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-02 00:00:00.0")).build().getLeaves().get(0),
null));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[0],
SearchArgumentFactory.newBuilder().between
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-01 05:00:00.0"),
Timestamp.valueOf("2007-08-01 06:00:00.0")).build().getLeaves().get(0),
null));
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[0],
SearchArgumentFactory.newBuilder().between
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-01 00:00:00.0"),
Timestamp.valueOf("2007-08-01 03:00:00.0")).build().getLeaves().get(0),
null));
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[0],
SearchArgumentFactory.newBuilder().in
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-01 00:00:00.0"),
Timestamp.valueOf("2007-08-01 03:00:00.0")).build().getLeaves().get(0),
null));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[0],
SearchArgumentFactory.newBuilder().in
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-02 00:00:00.0"),
Timestamp.valueOf("2007-08-02 03:00:00.0")).build().getLeaves().get(0),
null));
}
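  // Helper for the bloom-filter tests below: BloomFilterIO.deserialize() needs a column
  // encoding carrying the bloom-filter encoding id; UTF8_UTC marks bloom filters whose string
  // values were hashed as UTF-8 and whose timestamp entries were hashed in UTC.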
static OrcProto.ColumnEncoding buildEncoding() {
OrcProto.ColumnEncoding.Builder result =
OrcProto.ColumnEncoding.newBuilder();
result.setKind(OrcProto.ColumnEncoding.Kind.DIRECT)
.setBloomEncoding(BloomFilterIO.Encoding.UTF8_UTC.getId());
return result.build();
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampPPDBloomFilter(String writerTimeZone, String readerTimeZone) throws Exception {
LOG.info("Writer = " + writerTimeZone + " reader = " + readerTimeZone);
TypeDescription schema = TypeDescription.createStruct().addField("ts", TypeDescription.createTimestamp());
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).bloomFilterColumns("ts").writerVersion(OrcFile.WriterVersion.ORC_101));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
List<String> ts = Lists.newArrayList();
ts.add("2007-08-01 00:00:00.0");
ts.add("2007-08-01 04:00:00.0");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
times.set(batch.size++, Timestamp.valueOf(t));
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
assertEquals(ts.get(idx++), times.asScratchTimestamp(r).toString());
}
}
boolean[] sargColumns = new boolean[2];
Arrays.fill(sargColumns, true);
OrcIndex indices = ((RecordReaderImpl) rows).readRowIndex(0, null, sargColumns);
rows.close();
ColumnStatistics[] colStats = reader.getStatistics();
Timestamp gotMin = ((TimestampColumnStatistics) colStats[1]).getMinimum();
assertEquals("2007-08-01 00:00:00.0", gotMin.toString());
Timestamp gotMax = ((TimestampColumnStatistics) colStats[1]).getMaximum();
assertEquals("2007-08-01 04:00:00.0", gotMax.toString());
OrcProto.BloomFilterIndex[] bloomFilterIndices = indices.getBloomFilterIndex();
OrcProto.BloomFilter bloomFilter = bloomFilterIndices[1].getBloomFilter(0);
BloomFilter bf = BloomFilterIO.deserialize(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8,
buildEncoding(), reader.getWriterVersion(),
TypeDescription.Category.TIMESTAMP, bloomFilter);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[1],
SearchArgumentFactory.newBuilder().equals
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-01 00:00:00.0")).build().getLeaves().get(0),
bf));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[1],
SearchArgumentFactory.newBuilder().equals
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-02 00:00:00.0")).build().getLeaves().get(0),
bf));
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[1],
SearchArgumentFactory.newBuilder().in
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-01 00:00:00.0"),
Timestamp.valueOf("2007-08-01 03:00:00.0")).build().getLeaves().get(0),
bf));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[1],
SearchArgumentFactory.newBuilder().in
("c", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2007-08-02 00:00:00.0"),
Timestamp.valueOf("2007-08-02 03:00:00.0")).build().getLeaves().get(0),
bf));
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampMinMaxAndBloomFilter(String writerTimeZone, String readerTimeZone) throws Exception {
TypeDescription schema = TypeDescription.createStruct().addField("ts", TypeDescription.createTimestamp());
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).bloomFilterColumns("ts"));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
List<String> ts = Lists.newArrayList();
ts.add("2007-08-01 00:00:00.0");
ts.add("2007-08-01 04:00:00.0");
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (String t : ts) {
times.set(batch.size++, Timestamp.valueOf(t));
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
times = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
assertEquals(ts.get(idx++), times.asScratchTimestamp(r).toString());
}
}
boolean[] sargColumns = new boolean[2];
Arrays.fill(sargColumns, true);
OrcIndex indices = ((RecordReaderImpl) rows).readRowIndex(0, null, sargColumns);
rows.close();
ColumnStatistics[] colStats = reader.getStatistics();
Timestamp gotMin = ((TimestampColumnStatistics) colStats[1]).getMinimum();
assertEquals("2007-08-01 00:00:00.0", gotMin.toString());
Timestamp gotMax = ((TimestampColumnStatistics) colStats[1]).getMaximum();
assertEquals("2007-08-01 04:00:00.0", gotMax.toString());
OrcProto.BloomFilterIndex[] bloomFilterIndices = indices.getBloomFilterIndex();
OrcProto.BloomFilter bloomFilter = bloomFilterIndices[1].getBloomFilter(0);
BloomFilter bf = BloomFilterIO.deserialize(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8,
buildEncoding(), reader.getWriterVersion(),
TypeDescription.Category.TIMESTAMP, bloomFilter);
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x",
Timestamp.valueOf("2007-08-01 00:00:00.0"), null);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x",
Timestamp.valueOf("2007-08-01 02:00:00.0"), null);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
bf.addLong(SerializationUtils.convertToUtc(TimeZone.getDefault(),
Timestamp.valueOf("2007-08-01 02:00:00.0").getTime()));
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
pred = createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.TIMESTAMP, "x",
Timestamp.valueOf("2007-08-01 00:00:00.0"), null);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
pred = createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x",
Timestamp.valueOf("2007-08-01 00:00:00.0"), null);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
pred = createPredicateLeaf(PredicateLeaf.Operator.IS_NULL, PredicateLeaf.Type.TIMESTAMP, "x", null, null);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampAllNulls(String writerTimeZone, String readerTimeZone) throws Exception {
TypeDescription schema = TypeDescription.createStruct().addField("ts", TypeDescription.createTimestamp());
TimeZone.setDefault(TimeZone.getTimeZone(writerTimeZone));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).bloomFilterColumns("ts"));
assertEquals(writerTimeZone, TimeZone.getDefault().getID());
VectorizedRowBatch batch = schema.createRowBatch();
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
for (int i = 0; i < 3; i++) {
times.set(batch.size++, null);
}
writer.addRowBatch(batch);
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone(readerTimeZone));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(readerTimeZone, TimeZone.getDefault().getID());
RecordReader rows = reader.rows();
boolean[] sargColumns = new boolean[2];
Arrays.fill(sargColumns, true);
OrcIndex indices = ((RecordReaderImpl) rows).readRowIndex(0, null, sargColumns);
rows.close();
ColumnStatistics[] colStats = reader.getStatistics();
Timestamp gotMin = ((TimestampColumnStatistics) colStats[1]).getMinimum();
assertNull(gotMin);
Timestamp gotMax = ((TimestampColumnStatistics) colStats[1]).getMaximum();
assertNull(gotMax);
OrcProto.BloomFilterIndex[] bloomFilterIndices = indices.getBloomFilterIndex();
OrcProto.BloomFilter bloomFilter = bloomFilterIndices[1].getBloomFilter(0);
BloomFilter bf = BloomFilterIO.deserialize(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8,
buildEncoding(), reader.getWriterVersion(),
TypeDescription.Category.TIMESTAMP, bloomFilter);
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x",
Timestamp.valueOf("2007-08-01 00:00:00.0"), null);
assertEquals(SearchArgument.TruthValue.NULL, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
pred = createPredicateLeaf(PredicateLeaf.Operator.IS_NULL, PredicateLeaf.Type.TIMESTAMP, "x", null, null);
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl.evaluatePredicate(colStats[1], pred, bf));
}
}
| 18,806 | 46.492424 | 115 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestOrcWithLargeStripeStatistics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import java.io.IOException;
import java.util.Arrays;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for operations on Orc file with very large stripe statistics.
* <p>
 * The test is disabled by default because it is rather slow (approx. 14 minutes) and memory
 * hungry (it requires about 4g of heap space when creating the files). If you want to run it,
 * remove the {@code Disabled} annotation and ensure that the max heap (Xmx) is at least 4g.
* </p>
*/
@Disabled("ORC-1361")
public class TestOrcWithLargeStripeStatistics {
@ParameterizedTest
@EnumSource(value = OrcFile.Version.class, mode = EnumSource.Mode.EXCLUDE, names = "FUTURE")
public void testGetStripeStatisticsNoProtocolBufferExceptions(OrcFile.Version version)
throws Exception {
    // Use a metadata size that exceeds the protobuf message limit (e.g., 1GB); the reader is
    // expected to skip the oversized stripe statistics rather than fail with a protobuf exception.
Path p = createOrcFile(1024L << 20, version);
try (Reader reader = OrcFile.createReader(p, OrcFile.readerOptions(new Configuration()))) {
assertTrue(reader.getStripeStatistics().isEmpty());
}
}
/**
* Creates an Orc file with a metadata section of the specified size and return its path in the
* filesystem.
*
   * The file has a fixed schema (500 string columns) and content (every column contains 200
   * characters, roughly 200 bytes). Each row is therefore roughly 100KB uncompressed, and each
   * stripe holds exactly one row, so the stripe metadata (column statistics) per row is about
   * 200KB (100KB for the min, 100KB for the max, plus a few bytes for the sum).
*
* @param metadataSize the desired size of the resulting metadata section in bytes
* @param version the desired version to create the file
* @return the path to filesystem where the file was created.
* @throws IOException if an IO problem occurs while creating the file
*/
private static Path createOrcFile(long metadataSize, OrcFile.Version version) throws IOException {
// Calculate the number of rows/stripes to create based on the size of one row (200KB).
final long ROW_STRIPE_NUM = metadataSize / 200_000L;
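    // For example, the 1GB target used by the test above (1024L << 20 bytes) works out to
    // roughly 5,368 single-row stripes.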
Path p = new Path(System.getProperty("test.tmp.dir"),
TestOrcWithLargeStripeStatistics.class.getSimpleName()
+ "_" + ROW_STRIPE_NUM + "_" + version + ".orc");
// Modify defaults to force one row per stripe.
Configuration conf = new Configuration();
conf.set(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(), "0");
TypeDescription schema = createTypeDescription();
OrcFile.WriterOptions writerOptions =
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(1)
.encodingStrategy(OrcFile.EncodingStrategy.SPEED)
.version(version);
try (Writer writer = OrcFile.createWriter(p, writerOptions)) {
VectorizedRowBatch batch = createSingleRowBatch(schema);
for (long i = 0; i < ROW_STRIPE_NUM; i++) {
writer.addRowBatch(batch);
}
}
return p;
}
private static VectorizedRowBatch createSingleRowBatch(TypeDescription schema) {
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1;
byte[] bigString = new byte[200];
Arrays.fill(bigString, (byte) 'A');
for (int i = 0; i < batch.numCols; i++) {
BytesColumnVector col = (BytesColumnVector) batch.cols[i];
col.setVal(0, bigString);
}
return batch;
}
private static TypeDescription createTypeDescription() {
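    // Builds the fixed 500-string-column schema described in the class javadoc:
    // "struct<col0:string,col1:string,...,col499:string>".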
String strCols = IntStream.range(0, 500)
.mapToObj(i -> "col" + i + ":string")
.collect(Collectors.joining(","));
return TypeDescription.fromString("struct<" + strCols + ">");
}
}
| 4,888 | 41.146552 | 100 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestProlepticConversions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.DateUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.threeten.extra.chrono.HybridChronology;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.text.SimpleDateFormat;
import java.time.chrono.Chronology;
import java.time.chrono.IsoChronology;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This class tests all of the combinations of reading and writing the hybrid
* and proleptic calendars.
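 * <p>
 * The two calendars agree for modern dates but diverge before the Gregorian cutover: in the
 * hybrid (Julian/Gregorian) calendar 1582-10-04 is followed directly by 1582-10-15, while the
 * proleptic Gregorian calendar also contains 1582-10-05 through 1582-10-14, so early dates map
 * to different epoch-day values under the two calendars.
 * </p>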
*/
public class TestProlepticConversions {
private static Stream<Arguments> data() {
return Stream.of(
Arguments.of(false, false),
Arguments.of(false, true),
Arguments.of(true, false),
Arguments.of(true, true));
}
private Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
private final Configuration conf;
private final TimeZone UTC = TimeZone.getTimeZone("UTC");
private final GregorianCalendar PROLEPTIC = new GregorianCalendar();
private final GregorianCalendar HYBRID = new GregorianCalendar();
{
conf = new Configuration();
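    // Moving the Gregorian changeover back to Long.MIN_VALUE makes PROLEPTIC a pure proleptic
    // Gregorian calendar; HYBRID keeps the default Julian/Gregorian cutover of 1582-10-15.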
PROLEPTIC.setTimeZone(UTC);
PROLEPTIC.setGregorianChange(new Date(Long.MIN_VALUE));
HYBRID.setTimeZone(UTC);
}
private FileSystem fs;
private Path testFilePath;
@BeforeEach
public void setupPath(TestInfo testInfo) throws Exception {
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestProlepticConversion." +
testInfo.getTestMethod().get().getName().replaceFirst("\\[[0-9]+]", "") + ".orc");
fs.delete(testFilePath, false);
}
public static SimpleDateFormat createParser(String format, GregorianCalendar calendar) {
SimpleDateFormat result = new SimpleDateFormat(format);
result.setCalendar(calendar);
return result;
}
@ParameterizedTest
@MethodSource("data")
public void testReadWrite(
boolean writerProlepticGregorian, boolean readerProlepticGregorian) throws Exception {
TypeDescription schema = TypeDescription.fromString(
"struct<d:date,t:timestamp,i:timestamp with local time zone>");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.fileSystem(fs)
.setProlepticGregorian(writerProlepticGregorian))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
batch.size = 1024;
DateColumnVector d = (DateColumnVector) batch.cols[0];
TimestampColumnVector t = (TimestampColumnVector) batch.cols[1];
TimestampColumnVector i = (TimestampColumnVector) batch.cols[2];
d.changeCalendar(writerProlepticGregorian, false);
t.changeCalendar(writerProlepticGregorian, false);
i.changeCalendar(writerProlepticGregorian, false);
GregorianCalendar cal = writerProlepticGregorian ? PROLEPTIC : HYBRID;
SimpleDateFormat timeFormat = createParser("yyyy-MM-dd HH:mm:ss", cal);
Chronology writerChronology = writerProlepticGregorian
? IsoChronology.INSTANCE : HybridChronology.INSTANCE;
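      // Row r is written with year 2*r+1 (0001, 0003, ..., 2047): dates fall on Jan 23 and
      // timestamps on Mar 21 at hour r % 24, matching the min/max statistics asserted below.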
for(int r=0; r < batch.size; ++r) {
d.vector[r] = writerChronology.date(r * 2 + 1, 1, 23)
.toEpochDay();
Date val = timeFormat.parse(
String.format("%04d-03-21 %02d:12:34", 2 * r + 1, r % 24));
t.time[r] = val.getTime();
t.nanos[r] = 0;
i.time[r] = val.getTime();
i.nanos[r] = 0;
}
writer.addRowBatch(batch);
}
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.filesystem(fs)
.convertToProlepticGregorian(readerProlepticGregorian));
RecordReader rows = reader.rows(reader.options())) {
assertEquals(writerProlepticGregorian, reader.writerUsedProlepticGregorian());
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
DateColumnVector d = (DateColumnVector) batch.cols[0];
TimestampColumnVector t = (TimestampColumnVector) batch.cols[1];
TimestampColumnVector i = (TimestampColumnVector) batch.cols[2];
GregorianCalendar cal = readerProlepticGregorian ? PROLEPTIC : HYBRID;
SimpleDateFormat timeFormat = createParser("yyyy-MM-dd HH:mm:ss", cal);
Chronology readerChronology = readerProlepticGregorian
? IsoChronology.INSTANCE : HybridChronology.INSTANCE;
DateTimeFormatter dateFormat = DateTimeFormatter.ISO_LOCAL_DATE.withChronology(readerChronology);
// Check the file statistics
ColumnStatistics[] colStats = reader.getStatistics();
DateColumnStatistics dStats = (DateColumnStatistics) colStats[1];
TimestampColumnStatistics tStats = (TimestampColumnStatistics) colStats[2];
TimestampColumnStatistics iStats = (TimestampColumnStatistics) colStats[3];
assertEquals("0001-01-23", dStats.getMinimumLocalDate().format(dateFormat));
assertEquals("2047-01-23", dStats.getMaximumLocalDate().format(dateFormat));
assertEquals("0001-03-21 00:12:34", timeFormat.format(tStats.getMinimum()));
assertEquals("2047-03-21 15:12:34", timeFormat.format(tStats.getMaximum()));
assertEquals("0001-03-21 00:12:34", timeFormat.format(iStats.getMinimum()));
assertEquals("2047-03-21 15:12:34", timeFormat.format(iStats.getMaximum()));
// Check the stripe stats
List<StripeStatistics> stripeStats = reader.getStripeStatistics();
assertEquals(1, stripeStats.size());
colStats = stripeStats.get(0).getColumnStatistics();
dStats = (DateColumnStatistics) colStats[1];
tStats = (TimestampColumnStatistics) colStats[2];
iStats = (TimestampColumnStatistics) colStats[3];
assertEquals("0001-01-23", dStats.getMinimumLocalDate().format(dateFormat));
assertEquals("2047-01-23", dStats.getMaximumLocalDate().format(dateFormat));
assertEquals("0001-03-21 00:12:34", timeFormat.format(tStats.getMinimum()));
assertEquals("2047-03-21 15:12:34", timeFormat.format(tStats.getMaximum()));
assertEquals("0001-03-21 00:12:34", timeFormat.format(iStats.getMinimum()));
assertEquals("2047-03-21 15:12:34", timeFormat.format(iStats.getMaximum()));
// Check the data
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
// Ensure the column vectors are using the right calendar
assertEquals(readerProlepticGregorian, d.isUsingProlepticCalendar());
assertEquals(readerProlepticGregorian, t.usingProlepticCalendar());
assertEquals(readerProlepticGregorian, i.usingProlepticCalendar());
for(int r=0; r < batch.size; ++r) {
String expectedD = String.format("%04d-01-23", r * 2 + 1);
String expectedT = String.format("%04d-03-21 %02d:12:34", 2 * r + 1, r % 24);
assertEquals(expectedD,
readerChronology.dateEpochDay(d.vector[r]).format(dateFormat),
"row " + r);
assertEquals(expectedT, timeFormat.format(t.asScratchTimestamp(r)),
"row " + r);
assertEquals(expectedT, timeFormat.format(i.asScratchTimestamp(r)),
"row " + r);
}
}
}
/**
* Test all of the type conversions from/to date.
*/
@ParameterizedTest
@MethodSource("data")
public void testSchemaEvolutionDate(
boolean writerProlepticGregorian, boolean readerProlepticGregorian) throws Exception {
TypeDescription schema = TypeDescription.fromString(
"struct<d2s:date,d2t:date,s2d:string,t2d:timestamp>");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.fileSystem(fs)
.setProlepticGregorian(writerProlepticGregorian)
.useUTCTimestamp(true))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
batch.size = 1024;
DateColumnVector d2s = (DateColumnVector) batch.cols[0];
DateColumnVector d2t = (DateColumnVector) batch.cols[1];
BytesColumnVector s2d = (BytesColumnVector) batch.cols[2];
TimestampColumnVector t2d = (TimestampColumnVector) batch.cols[3];
d2s.changeCalendar(writerProlepticGregorian, false);
d2t.changeCalendar(writerProlepticGregorian, false);
t2d.changeCalendar(writerProlepticGregorian, false);
GregorianCalendar cal = writerProlepticGregorian ? PROLEPTIC : HYBRID;
SimpleDateFormat dateFormat = createParser("yyyy-MM-dd", cal);
SimpleDateFormat timeFormat = createParser("yyyy-MM-dd HH:mm:ss", cal);
for(int r=0; r < batch.size; ++r) {
String date = String.format("%04d-01-23", r * 2 + 1);
String time = String.format("%04d-03-21 %02d:12:34", 2 * r + 1, r % 24);
d2s.vector[r] = TimeUnit.MILLISECONDS.toDays(dateFormat.parse(date).getTime());
d2t.vector[r] = d2s.vector[r];
s2d.setVal(r, date.getBytes(StandardCharsets.UTF_8));
t2d.time[r] = timeFormat.parse(time).getTime();
t2d.nanos[r] = 0;
}
writer.addRowBatch(batch);
}
TypeDescription readerSchema = TypeDescription.fromString(
"struct<d2s:string,d2t:timestamp,s2d:date,t2d:date>");
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.filesystem(fs)
.convertToProlepticGregorian(readerProlepticGregorian)
.useUTCTimestamp(true));
RecordReader rows = reader.rows(reader.options()
.schema(readerSchema))) {
assertEquals(writerProlepticGregorian, reader.writerUsedProlepticGregorian());
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
BytesColumnVector d2s = (BytesColumnVector) batch.cols[0];
TimestampColumnVector d2t = (TimestampColumnVector) batch.cols[1];
DateColumnVector s2d = (DateColumnVector) batch.cols[2];
DateColumnVector t2d = (DateColumnVector) batch.cols[3];
GregorianCalendar cal = readerProlepticGregorian ? PROLEPTIC : HYBRID;
SimpleDateFormat dateFormat = createParser("yyyy-MM-dd", cal);
SimpleDateFormat timeFormat = createParser("yyyy-MM-dd HH:mm:ss", cal);
// Check the data
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
// Ensure the column vectors are using the right calendar
assertEquals(readerProlepticGregorian, d2t.usingProlepticCalendar());
assertEquals(readerProlepticGregorian, s2d.isUsingProlepticCalendar());
assertEquals(readerProlepticGregorian, t2d.isUsingProlepticCalendar());
for(int r=0; r < batch.size; ++r) {
String expectedD1 = String.format("%04d-01-23", 2 * r + 1);
String expectedD2 = expectedD1 + " 00:00:00";
String expectedT = String.format("%04d-03-21", 2 * r + 1);
assertEquals(expectedD1, d2s.toString(r), "row " + r);
assertEquals(expectedD2, timeFormat.format(d2t.asScratchTimestamp(r)), "row " + r);
assertEquals(expectedD1, DateUtils.printDate((int) s2d.vector[r],
readerProlepticGregorian), "row " + r);
assertEquals(expectedT, dateFormat.format(
new Date(TimeUnit.DAYS.toMillis(t2d.vector[r]))), "row " + r);
}
assertFalse(rows.nextBatch(batch));
}
}
/**
* Test all of the type conversions from/to timestamp, except for date,
* which was handled above.
*/
@ParameterizedTest
@MethodSource("data")
public void testSchemaEvolutionTimestamp(
boolean writerProlepticGregorian, boolean readerProlepticGregorian) throws Exception {
TypeDescription schema = TypeDescription.fromString(
"struct<t2i:timestamp,t2d:timestamp,t2D:timestamp,t2s:timestamp,"
+ "i2t:bigint,d2t:decimal(18,2),D2t:double,s2t:string>");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.fileSystem(fs)
.setProlepticGregorian(writerProlepticGregorian)
.useUTCTimestamp(true))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
batch.size = 1024;
TimestampColumnVector t2i = (TimestampColumnVector) batch.cols[0];
TimestampColumnVector t2d = (TimestampColumnVector) batch.cols[1];
TimestampColumnVector t2D = (TimestampColumnVector) batch.cols[2];
TimestampColumnVector t2s = (TimestampColumnVector) batch.cols[3];
LongColumnVector i2t = (LongColumnVector) batch.cols[4];
Decimal64ColumnVector d2t = (Decimal64ColumnVector) batch.cols[5];
DoubleColumnVector D2t = (DoubleColumnVector) batch.cols[6];
BytesColumnVector s2t = (BytesColumnVector) batch.cols[7];
t2i.changeCalendar(writerProlepticGregorian, false);
t2d.changeCalendar(writerProlepticGregorian, false);
t2D.changeCalendar(writerProlepticGregorian, false);
t2s.changeCalendar(writerProlepticGregorian, false);
for(int r=0; r < batch.size; ++r) {
String time = String.format("%04d-03-21 %02d:12:34.12", 2 * r + 1, r % 24);
long millis = DateUtils.parseTime(time, writerProlepticGregorian, true);
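        // The ".12" fraction of the formatted time is 120 ms; floorMod keeps the sub-second
        // part non-negative even for pre-1970 (negative) millis before converting it to nanos.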
int nanos = (int) Math.floorMod(millis, 1000) * 1_000_000;
t2i.time[r] = millis;
t2i.nanos[r] = nanos;
t2d.time[r] = millis;
t2d.nanos[r] = nanos;
t2D.time[r] = millis;
t2D.nanos[r] = nanos;
t2s.time[r] = millis;
t2s.nanos[r] = nanos;
i2t.vector[r] = Math.floorDiv(millis, 1000);
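        // decimal(18,2): the Decimal64 vector holds the value scaled by 100, so millis / 10
        // represents millis / 1000 seconds with two decimal places.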
d2t.vector[r] = Math.floorDiv(millis, 10);
D2t.vector[r] = millis / 1000.0;
s2t.setVal(r, time.getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
}
TypeDescription readerSchema = TypeDescription.fromString(
"struct<i2t:timestamp,d2t:timestamp,D2t:timestamp,s2t:timestamp,"
+ "t2i:bigint,t2d:decimal(18,2),t2D:double,t2s:string>");
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.filesystem(fs)
.convertToProlepticGregorian(readerProlepticGregorian)
.useUTCTimestamp(true));
RecordReader rows = reader.rows(reader.options()
.schema(readerSchema))) {
assertEquals(writerProlepticGregorian, reader.writerUsedProlepticGregorian());
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
TimestampColumnVector i2t = (TimestampColumnVector) batch.cols[0];
TimestampColumnVector d2t = (TimestampColumnVector) batch.cols[1];
TimestampColumnVector D2t = (TimestampColumnVector) batch.cols[2];
TimestampColumnVector s2t = (TimestampColumnVector) batch.cols[3];
LongColumnVector t2i = (LongColumnVector) batch.cols[4];
Decimal64ColumnVector t2d = (Decimal64ColumnVector) batch.cols[5];
DoubleColumnVector t2D = (DoubleColumnVector) batch.cols[6];
BytesColumnVector t2s = (BytesColumnVector) batch.cols[7];
// Check the data
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
// Ensure the column vectors are using the right calendar
assertEquals(readerProlepticGregorian, i2t.usingProlepticCalendar());
assertEquals(readerProlepticGregorian, d2t.usingProlepticCalendar());
assertEquals(readerProlepticGregorian, D2t.usingProlepticCalendar());
assertEquals(readerProlepticGregorian, s2t.usingProlepticCalendar());
for(int r=0; r < batch.size; ++r) {
String time = String.format("%04d-03-21 %02d:12:34.12", 2 * r + 1, r % 24);
long millis = DateUtils.parseTime(time, readerProlepticGregorian, true);
assertEquals(time.substring(0, time.length() - 3),
DateUtils.printTime(i2t.time[r], readerProlepticGregorian, true),
"row " + r);
assertEquals(time,
DateUtils.printTime(d2t.time[r], readerProlepticGregorian, true),
"row " + r);
assertEquals(time,
DateUtils.printTime(D2t.time[r], readerProlepticGregorian, true),
"row " + r);
assertEquals(time,
DateUtils.printTime(s2t.time[r], readerProlepticGregorian, true),
"row " + r);
assertEquals(Math.floorDiv(millis, 1000), t2i.vector[r], "row " + r);
assertEquals(Math.floorDiv(millis, 10), t2d.vector[r], "row " + r);
assertEquals(millis/1000.0, t2D.vector[r], 0.1, "row " + r);
assertEquals(time, t2s.toString(r), "row " + r);
}
assertFalse(rows.nextBatch(batch));
}
}
}
| 18,484 | 47.263708 | 103 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class TestReader {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, TestReader.class.getSimpleName() + "." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@Test
public void testReadZeroLengthFile() throws Exception {
FSDataOutputStream fout = fs.create(testFilePath);
fout.close();
assertEquals(0, fs.getFileStatus(testFilePath).getLen());
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(0, reader.getNumberOfRows());
}
@Test
public void testReadFileLengthLessThanMagic() throws Exception {
assertThrows(FileFormatException.class, () -> {
FSDataOutputStream fout = fs.create(testFilePath);
fout.writeBoolean(true);
fout.close();
assertEquals(1, fs.getFileStatus(testFilePath).getLen());
OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
});
}
@Test
public void testReadFileInvalidHeader() throws Exception {
assertThrows(FileFormatException.class, () -> {
FSDataOutputStream fout = fs.create(testFilePath);
fout.writeLong(1);
fout.close();
assertEquals(8, fs.getFileStatus(testFilePath).getLen());
OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
});
}
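  // The "col.dot.orc" test resource contains a field literally named "col.dot";
  // the dot is part of the field name rather than a nested-field separator.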
@Test
public void testReadDocColumn() throws Exception {
Path path = new Path(getClass().getClassLoader().getSystemResource("col.dot.orc").getPath());
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals("col.dot", reader.getSchema().getFieldNames().get(0));
}
}
| 3,298 | 35.655556 | 97 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestRowFilteringComplexTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.RecordReaderImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestRowFilteringComplexTypes {
private Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
private Configuration conf;
private FileSystem fs;
private Path testFilePath;
private static final int ColumnBatchRows = 1024;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
OrcConf.READER_USE_SELECTED.setBoolean(conf, true);
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir,
"TestRowFilteringComplexTypes." + testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@Test
  // Inner Struct should receive the filterContext and propagate it to its subtypes
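  // The shared filter (TestRowFilteringSkip::intRoundRobbinRowFilter, defined in
  // TestRowFilteringSkip) is assumed to keep every other row of each 1024-row batch,
  // which is what the assertions on batch.size and batch.selected below rely on.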
public void testInnerStructRowFilter() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 2;
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("innerStruct", TypeDescription.createStruct()
.addField("a", TypeDescription.createDecimal())
.addField("b", TypeDescription.createDecimal())
);
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
StructColumnVector col2 = (StructColumnVector) batch.cols[1];
DecimalColumnVector innerCol1 = (DecimalColumnVector) col2.fields[0];
DecimalColumnVector innerCol2 = (DecimalColumnVector) col2.fields[1];
for (int b = 0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0) {
innerCol1.vector[row] = new HiveDecimalWritable(101 + row);
innerCol2.vector[row] = new HiveDecimalWritable(100 + row);
} else {
innerCol1.vector[row] = new HiveDecimalWritable(999 + row);
innerCol2.vector[row] = new HiveDecimalWritable(998 + row);
}
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options().setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
StructColumnVector col2 = (StructColumnVector) batch.cols[1];
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertEquals(ColumnBatchRows / 2, batch.size);
for (int r = 0; r < ColumnBatchRows; ++r) {
StringBuilder sb = new StringBuilder();
col2.stringifyValue(sb, r);
if (sb.toString().compareTo("[0, 0]") != 0) {
noNullCnt++;
}
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
}
}
@Test
// Inner UNION should make use of the filterContext
public void testInnerUnionRowFilter() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 2;
TypeDescription schema = TypeDescription.fromString(
"struct<int1:int,innerUnion:uniontype<decimal(16,3),decimal(16,3)>>");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
UnionColumnVector col2 = (UnionColumnVector) batch.cols[1];
Decimal64ColumnVector innerCol1 = (Decimal64ColumnVector) col2.fields[0];
Decimal64ColumnVector innerCol2 = (Decimal64ColumnVector) col2.fields[1];
for (int b = 0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
int totalRow = ColumnBatchRows * b + row;
col1.vector[row] = totalRow;
col2.tags[row] = totalRow % 2;
if (col2.tags[row] == 0) {
innerCol1.vector[row] = totalRow * 1000;
} else {
innerCol2.vector[row] = totalRow * 3 * 1000;
}
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options().setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
UnionColumnVector col2 = (UnionColumnVector) batch.cols[1];
Decimal64ColumnVector innerCol1 = (Decimal64ColumnVector) col2.fields[0];
Decimal64ColumnVector innerCol2 = (Decimal64ColumnVector) col2.fields[1];
int previousBatchRows = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertEquals(ColumnBatchRows / 2, batch.size);
for (int r = 0; r < batch.size; ++r) {
int row = batch.selected[r];
int originalRow = (r + previousBatchRows) * 2;
String msg = "row " + originalRow;
assertEquals(originalRow, col1.vector[row], msg);
assertEquals(0, col2.tags[row], msg);
assertEquals(originalRow * 1000, innerCol1.vector[row], msg);
}
// check to make sure that we didn't read innerCol2
for(int r = 1; r < ColumnBatchRows; r += 2) {
assertEquals(0, innerCol2.vector[r], "row " + r);
}
previousBatchRows += batch.size;
}
}
}
@Test
// Inner MAP should NOT make use of the filterContext
// TODO: selected rows should be combined with map offsets
public void testInnerMapRowFilter() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 2;
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("innerMap", TypeDescription.createMap(
TypeDescription.createDecimal(),
TypeDescription.createDecimal()
)
);
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
MapColumnVector mapCol = (MapColumnVector) batch.cols[1];
DecimalColumnVector keyCol = (DecimalColumnVector) mapCol.keys;
DecimalColumnVector valCol = (DecimalColumnVector) mapCol.values;
for (int b = 0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
// Insert 2 kv pairs in each row
for (int i = 0; i < 2; i++) {
keyCol.vector[i] = new HiveDecimalWritable(i);
valCol.vector[i] = new HiveDecimalWritable(i * 10);
}
mapCol.lengths[row] = 2;
mapCol.offsets[row] = 0;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options().setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
MapColumnVector col2 = (MapColumnVector) batch.cols[1];
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertEquals(ColumnBatchRows / 2, batch.size);
for (int r = 0; r < ColumnBatchRows; ++r) {
StringBuilder sb = new StringBuilder();
col2.stringifyValue(sb, r);
if (sb.toString().equals("[{\"key\": 0, \"value\": 0}, {\"key\": 1, \"value\": 10}]")) {
noNullCnt++;
}
}
}
// Make sure that we did NOT skip any rows
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
      // Even though the selected array is still used, it is not propagated
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
}
}
@Test
// Inner LIST should NOT make use of the filterContext
// TODO: selected rows should be combined with list offsets
public void testInnerListRowFilter() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 2;
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("innerList", TypeDescription
.createList(TypeDescription.createDecimal())
);
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
ListColumnVector listCol = (ListColumnVector) batch.cols[1];
DecimalColumnVector listValues = (DecimalColumnVector) listCol.child;
for (int b = 0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
          // Insert 10 values into the innerList per row
for (int i = 0; i < 10; i++) {
listValues.vector[i] = new HiveDecimalWritable(i);
}
listCol.lengths[row] = 10;
listCol.offsets[row] = 0;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options().setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
ListColumnVector col2 = (ListColumnVector) batch.cols[1];
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertEquals(ColumnBatchRows / 2, batch.size);
for (int r = 0; r < ColumnBatchRows; ++r) {
StringBuilder sb = new StringBuilder();
col2.stringifyValue(sb, r);
if (sb.toString().equals("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]")) {
noNullCnt++;
}
}
}
// Make sure that we did NOT skip any rows
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
      // Even though the selected array is still used, it is not propagated
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
}
}
}
| 15,251 | 45.642202 | 118 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestRowFilteringComplexTypesNulls.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestRowFilteringComplexTypesNulls {
private static final Logger LOG =
LoggerFactory.getLogger(TestRowFilteringComplexTypesNulls.class);
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static final Path filePath = new Path(workDir, "complex_null_file.orc");
private static Configuration conf;
private static FileSystem fs;
private static final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("s2", TypeDescription.createStruct()
.addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
.addField("f2f", TypeDescription.createString())
)
.addField("u3", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createLong())
.addUnionChild(TypeDescription.createString())
)
.addField("f4", TypeDescription.createString())
.addField("ridx", TypeDescription.createLong());
private static final long RowCount = 4000000L;
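  // The filter columns include nested paths: "s2.f2" is the struct child, and "u3.0" appears
  // to address the first (LONG) child of the union column u3 by position.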
private static final String[] FilterColumns = new String[] {"ridx", "s2.f2", "u3.0"};
private static final int scale = 3;
@BeforeAll
public static void setup() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
LOG.info("Creating file {} with schema {}", filePath, schema);
try (Writer writer = OrcFile.createWriter(filePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.overwrite(true)
.rowIndexStride(8192)
.setSchema(schema))) {
Random rnd = new Random(1024);
VectorizedRowBatch b = schema.createRowBatch();
for (int rowIdx = 0; rowIdx < RowCount; rowIdx++) {
long v = rnd.nextLong();
addRow(b, rowIdx, v);
b.size += 1;
if (b.size == b.getMaxSize()) {
writer.addRowBatch(b);
b.reset();
}
}
if (b.size > 0) {
writer.addRowBatch(b);
b.reset();
}
}
LOG.info("Created file {}", filePath);
}
private static void addRow(VectorizedRowBatch b, long rowIdx, long rndValue) {
//rndValue = rowIdx;
// column f1: LONG
((LongColumnVector) b.cols[0]).vector[b.size] = rndValue;
    // column s2: STRUCT<f2: DECIMAL(20, 6), f2f: STRING> alternate null values at the STRUCT
    // level, no nulls on the children
if (rowIdx % 2 == 0) {
b.cols[1].noNulls = false;
b.cols[1].isNull[b.size] = true;
} else {
HiveDecimalWritable d = new HiveDecimalWritable();
d.setFromLongAndScale(rndValue, scale);
((DecimalColumnVector) ((StructColumnVector) b.cols[1]).fields[0]).vector[b.size] = d;
((BytesColumnVector) ((StructColumnVector) b.cols[1]).fields[1])
.setVal(b.size, String.valueOf(rndValue).getBytes(StandardCharsets.UTF_8));
}
// column u3: UNION<LONG, STRING> repeat, NULL, LONG, STRING
if (rowIdx % 3 == 0) {
b.cols[2].noNulls = false;
b.cols[2].isNull[b.size] = true;
} else if (rowIdx % 3 == 1) {
((UnionColumnVector) b.cols[2]).tags[b.size] = 0;
((LongColumnVector) ((UnionColumnVector) b.cols[2]).fields[0]).vector[b.size] = rndValue;
} else {
((UnionColumnVector) b.cols[2]).tags[b.size] = 1;
((BytesColumnVector) ((UnionColumnVector) b.cols[2]).fields[1])
.setVal(b.size, String.valueOf(rndValue).getBytes(StandardCharsets.UTF_8));
}
// column f4: STRING
((BytesColumnVector) b.cols[3])
.setVal(b.size, String.valueOf(rndValue).getBytes(StandardCharsets.UTF_8));
// column ridx: LONG
((LongColumnVector) b.cols[4]).vector[b.size] = rowIdx;
}
private static HiveDecimalWritable getF2(VectorizedRowBatch b, int idx) {
return ((DecimalColumnVector) ((StructColumnVector) b.cols[1]).fields[0]).vector[idx];
}
private static String getF2F(VectorizedRowBatch b, int idx) {
return ((BytesColumnVector) ((StructColumnVector) b.cols[1]).fields[1]).toString(idx);
}
private static Object getF3(VectorizedRowBatch b, int idx) {
UnionColumnVector v = (UnionColumnVector) b.cols[2];
if (v.tags[idx] == 0) {
return ((LongColumnVector) v.fields[0]).vector[idx];
} else {
return ((BytesColumnVector) v.fields[1]).toString(idx);
}
}
@Test
public void writeIsSuccessful() throws IOException {
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(RowCount, r.getNumberOfRows());
assertTrue(r.getStripes().size() > 1);
}
@Test
public void readEverything() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows()) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p >= 100);
}
@Test
public void filterAllRowsStructColumn() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
Reader.Options options = r.options()
.setRowFilter(new String[] {"s2.f2"}, new F2InFilter(new HashSet<>(0)));
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
while (rr.nextBatch(b)) {
rowCount += b.size;
}
}
FileSystem.Statistics stats = readEnd();
assertEquals(0, rowCount);
// We should read less than half the length of the file
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage < 50,
String.format("Bytes read %.2f%% should be less than 50%%", readPercentage));
}
private long validateFilteredRecordReader(RecordReader rr, VectorizedRowBatch b)
throws IOException {
long rowCount = 0;
while (rr.nextBatch(b)) {
validateBatch(b, -1);
rowCount += b.size;
}
return rowCount;
}
private void validateBatch(VectorizedRowBatch b, long expRowNum) {
HiveDecimalWritable d = new HiveDecimalWritable();
for (int i = 0; i < b.size; i++) {
int idx;
if (b.selectedInUse) {
idx = b.selected[i];
} else {
idx = i;
}
long expValue = ((LongColumnVector) b.cols[0]).vector[idx];
long rowIdx = ((LongColumnVector) b.cols[4]).vector[idx];
//ridx
if (expRowNum != -1) {
assertEquals(expRowNum + i, rowIdx);
}
//s2
if (rowIdx % 2 == 0) {
assertTrue(b.cols[1].isNull[idx]);
} else {
d.setFromLongAndScale(expValue, scale);
assertEquals(d, getF2(b, idx));
assertEquals(String.valueOf(expValue), getF2F(b, idx));
}
//u3
if (rowIdx % 3 == 0) {
assertTrue(b.cols[2].isNull[idx]);
} else if (rowIdx % 3 == 1) {
assertEquals(expValue, getF3(b, idx));
} else {
assertEquals(String.valueOf(expValue), getF3(b, idx));
}
//f4
BytesColumnVector sv = (BytesColumnVector) b.cols[3];
assertEquals(String.valueOf(expValue),
sv.toString(idx));
}
}
private double readPercentage(FileSystem.Statistics stats, long fileSize) {
double p = stats.getBytesRead() * 100.0 / fileSize;
LOG.info(String.format("%nFileSize: %d%nReadSize: %d%nRead %%: %.2f",
fileSize,
stats.getBytesRead(),
p));
return p;
}
@Test
public void readEverythingWithFilter() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(r.options()
.setRowFilter(FilterColumns, new AllowAllFilter()))) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p >= 100);
}
@Test
public void filterAlternateBatches() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
Reader.Options options = r.options()
.setRowFilter(FilterColumns, new AlternateFilter());
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
readEnd();
assertTrue(RowCount > rowCount);
}
@Test
public void filterWithSeek() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
Reader.Options options = r.options()
.setRowFilter(FilterColumns, new AlternateFilter());
long seekRow;
try (RecordReader rr = r.rows(options)) {
// Validate the first batch
assertTrue(rr.nextBatch(b));
validateBatch(b, 0);
assertEquals(b.size, rr.getRowNumber());
// Read the next batch, will skip a batch that is filtered
assertTrue(rr.nextBatch(b));
validateBatch(b, 2048);
assertEquals(2048 + 1024, rr.getRowNumber());
// Seek forward
seekToRow(rr, b, 4096);
// Seek back to the filtered batch
long bytesRead = readEnd().getBytesRead();
seekToRow(rr, b, 1024);
// No IO should have taken place
assertEquals(bytesRead, readEnd().getBytesRead());
// Seek forward to next row group, where the first batch is not filtered
seekToRow(rr, b, 8192);
// Seek forward to next row group but position on filtered batch
seekToRow(rr, b, (8192 * 2) + 1024);
// Seek forward to next stripe
seekRow = r.getStripes().get(0).getNumberOfRows();
seekToRow(rr, b, seekRow);
// Seek back to previous stripe, filtered row, it should require more IO as a result of
// stripe change
bytesRead = readEnd().getBytesRead();
seekToRow(rr, b, 1024);
assertTrue(readEnd().getBytesRead() > bytesRead,
"Change of stripe should require more IO");
}
FileSystem.Statistics stats = readEnd();
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage > 130);
}
private void seekToRow(RecordReader rr, VectorizedRowBatch b, long row) throws IOException {
rr.seekToRow(row);
assertTrue(rr.nextBatch(b));
long expRowNum;
if ((row / b.getMaxSize()) % 2 == 0) {
expRowNum = row;
} else {
// As the seek batch gets filtered
expRowNum = row + b.getMaxSize();
}
validateBatch(b, expRowNum);
assertEquals(expRowNum + b.getMaxSize(), rr.getRowNumber());
}
private static class F2InFilter implements Consumer<OrcFilterContext> {
private final Set<HiveDecimal> ids;
private F2InFilter(Set<HiveDecimal> ids) {
this.ids = ids;
}
@Override
public void accept(OrcFilterContext b) {
int newSize = 0;
ColumnVector[] f2Branch = b.findColumnVector("s2.f2");
DecimalColumnVector f2 = (DecimalColumnVector) f2Branch[f2Branch.length - 1];
for (int i = 0; i < b.getSelectedSize(); i++) {
if (!OrcFilterContext.isNull(f2Branch, i)
&& ids.contains(f2.vector[i].getHiveDecimal())) {
b.getSelected()[newSize] = i;
newSize += 1;
}
}
b.setSelectedInUse(true);
b.setSelectedSize(newSize);
}
}
/**
   * Filters out every other batch in a default (1024-row) read:
   * if ridx (rowIdx) / 1024 is even the batch is allowed, otherwise it is filtered out.
*/
private static class AlternateFilter implements Consumer<OrcFilterContext> {
@Override
public void accept(OrcFilterContext b) {
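      // cols[4] is the ridx column; the first value in the batch identifies the 1024-row
      // batch number, and every odd batch is dropped by clearing the selection.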
LongColumnVector v = (LongColumnVector) ((OrcFilterContextImpl) b).getCols()[4];
if ((v.vector[0] / 1024) % 2 == 1) {
b.setSelectedInUse(true);
b.setSelectedSize(0);
}
}
}
private static class AllowAllFilter implements Consumer<OrcFilterContext> {
@Override
public void accept(OrcFilterContext batch) {
// do nothing every row is allowed
}
}
private static void readStart() {
FileSystem.clearStatistics();
}
private static FileSystem.Statistics readEnd() {
return FileSystem.getAllStatistics().get(0);
}
}
| 14,938 | 35.795567 | 100 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestRowFilteringIOSkip.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestRowFilteringIOSkip {
private static final Logger LOG = LoggerFactory.getLogger(TestRowFilteringIOSkip.class);
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static final Path filePath = new Path(workDir, "skip_file.orc");
private static Configuration conf;
private static FileSystem fs;
private static final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
.addField("f3", TypeDescription.createLong())
.addField("f4", TypeDescription.createString())
.addField("ridx", TypeDescription.createLong());
private static final boolean[] FirstColumnOnly = new boolean[] {true, true, false, false, false
, false};
private static final long RowCount = 4000000L;
private static final String[] FilterColumns = new String[] {"f1", "ridx"};
private static final int scale = 3;
@BeforeAll
public static void setup() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
LOG.info("Creating file {} with schema {}", filePath, schema);
try (Writer writer = OrcFile.createWriter(filePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.overwrite(true)
.rowIndexStride(8192)
.setSchema(schema))) {
Random rnd = new Random(1024);
VectorizedRowBatch b = schema.createRowBatch();
for (int rowIdx = 0; rowIdx < RowCount; rowIdx++) {
long v = rnd.nextLong();
for (int colIdx = 0; colIdx < schema.getChildren().size() - 1; colIdx++) {
switch (schema.getChildren().get(colIdx).getCategory()) {
case LONG:
((LongColumnVector) b.cols[colIdx]).vector[b.size] = v;
break;
case DECIMAL:
HiveDecimalWritable d = new HiveDecimalWritable();
d.setFromLongAndScale(v, scale);
((DecimalColumnVector) b.cols[colIdx]).vector[b.size] = d;
break;
case STRING:
((BytesColumnVector) b.cols[colIdx]).setVal(b.size,
String.valueOf(v)
.getBytes(StandardCharsets.UTF_8));
break;
default:
throw new IllegalArgumentException();
}
}
// Populate the rowIdx
((LongColumnVector) b.cols[4]).vector[b.size] = rowIdx;
b.size += 1;
if (b.size == b.getMaxSize()) {
writer.addRowBatch(b);
b.reset();
}
}
if (b.size > 0) {
writer.addRowBatch(b);
b.reset();
}
}
LOG.info("Created file {}", filePath);
}
@Test
public void writeIsSuccessful() throws IOException {
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(RowCount, r.getNumberOfRows());
assertTrue(r.getStripes().size() > 1);
}
@Test
public void readFirstColumn() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(r.options().include(FirstColumnOnly))) {
while (rr.nextBatch(b)) {
assertTrue(((LongColumnVector) b.cols[0]).vector[0] != 0);
rowCount += b.size;
}
}
FileSystem.Statistics stats = readEnd();
assertEquals(RowCount, rowCount);
// We should read less than half the length of the file
assertTrue(stats.getBytesRead() < r.getContentLength() / 2,
String.format("Bytes read %d is not half of file size %d",
stats.getBytesRead(),
r.getContentLength()));
}
@Test
public void readSingleRowWithFilter() throws IOException {
int cnt = 100;
Random r = new Random(cnt);
long ridx;
while (cnt > 0) {
ridx = r.nextInt((int) RowCount);
readSingleRowWithFilter(ridx);
readSingleRowWithPluginFilter(ridx);
cnt--;
}
}
private void readSingleRowWithFilter(long idx) throws IOException {
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.LONG, idx)
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"ridx"})
.useSelected(true)
.allowSARGToFilter(true);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
assertTrue(rr.nextBatch(b));
validateBatch(b, idx);
rowCount += b.size;
assertFalse(rr.nextBatch(b));
}
assertEquals(1, rowCount);
}
private void readSingleRowWithPluginFilter(long idx) throws IOException {
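    // "my_long_abs_eq" is a test-only plugin filter defined outside this file; it is assumed
    // to match rows where the named column's absolute value equals the absolute value of the
    // configured value, so configuring -idx still selects row idx.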
Configuration localConf = new Configuration(conf);
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(localConf, true);
localConf.set("my.filter.name", "my_long_abs_eq");
localConf.set("my.filter.col.name", "ridx");
localConf.set("my.filter.col.value", String.valueOf(-idx));
localConf.set("my.filter.scope", fs.makeQualified(filePath.getParent()) + "/.*");
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(localConf).filesystem(fs));
Reader.Options options = r.options()
.useSelected(true)
.allowSARGToFilter(true);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
assertTrue(rr.nextBatch(b));
validateBatch(b, idx);
rowCount += b.size;
assertFalse(rr.nextBatch(b));
}
assertEquals(1, rowCount);
}
@Test
public void readWithoutSelectedSupport() throws IOException {
// When selected vector is not supported we will read more rows than just the filtered rows.
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
long rowIdx = 12345;
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.LONG, rowIdx)
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"ridx"})
.useSelected(false)
.allowSARGToFilter(true);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount = 0;
HiveDecimalWritable d = new HiveDecimalWritable();
readStart();
try (RecordReader rr = r.rows(options)) {
while (rr.nextBatch(b)) {
rowCount += b.size;
for (int i = 0; i < b.size; i++) {
if (i == b.selected[0]) {
// All the values are expected to match only for the selected row
long expValue = ((LongColumnVector) b.cols[0]).vector[i];
d.setFromLongAndScale(expValue, scale);
assertEquals(d, ((DecimalColumnVector) b.cols[1]).vector[i]);
assertEquals(expValue, ((LongColumnVector) b.cols[2]).vector[i]);
BytesColumnVector sv = (BytesColumnVector) b.cols[3];
assertEquals(String.valueOf(expValue),
sv.toString(i));
assertEquals(rowIdx, ((LongColumnVector) b.cols[4]).vector[i]);
}
}
}
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertTrue(rowCount > 0 && rowCount <= b.getMaxSize(),
String.format("RowCount: %s should be between 1 and 1024", rowCount));
assertTrue(p <= 3, String.format("Read p: %s should be less than 3", p));
}
@Test
public void readWithSArg() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 0L)
.build();
Reader.Options options = r.options()
.allowSARGToFilter(false)
.useSelected(true)
.searchArgument(sarg, new String[] {"f1"});
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p >= 100);
}
@Test
public void readWithSArgAsFilter() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 0L)
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"f1"})
.useSelected(true)
.allowSARGToFilter(true);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(0, rowCount);
assertTrue(p < 30);
}
@Test
public void readWithInvalidSArgAs() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("f1", PredicateLeaf.Type.LONG)
.end()
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"f1"})
.useSelected(true)
.allowSARGToFilter(true);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p > 100);
}
private long validateFilteredRecordReader(RecordReader rr, VectorizedRowBatch b)
throws IOException {
long rowCount = 0;
while (rr.nextBatch(b)) {
validateBatch(b, -1);
rowCount += b.size;
}
return rowCount;
}
private void validateBatch(VectorizedRowBatch b, long expRowNum) {
HiveDecimalWritable d = new HiveDecimalWritable();
for (int i = 0; i < b.size; i++) {
int rowIdx;
if (b.selectedInUse) {
rowIdx = b.selected[i];
} else {
rowIdx = i;
}
long expValue = ((LongColumnVector) b.cols[0]).vector[rowIdx];
d.setFromLongAndScale(expValue, scale);
assertEquals(d, ((DecimalColumnVector) b.cols[1]).vector[rowIdx]);
assertEquals(expValue, ((LongColumnVector) b.cols[2]).vector[rowIdx]);
BytesColumnVector sv = (BytesColumnVector) b.cols[3];
assertEquals(String.valueOf(expValue),
sv.toString(rowIdx));
if (expRowNum != -1) {
assertEquals(expRowNum + i, ((LongColumnVector) b.cols[4]).vector[rowIdx]);
}
}
}
@Test
public void filterAllRowsWithFilter() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
filterAllRows(r,
r.options()
.useSelected(true)
.setRowFilter(FilterColumns,
new InFilter(new HashSet<>(0), 0)));
}
@Test
public void filterAllRowsWPluginFilter() throws IOException {
readStart();
Configuration localConf = new Configuration(conf);
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(localConf, true);
localConf.set("my.filter.name", "my_long_abs_eq");
localConf.set("my.filter.col.name", "f1");
localConf.set("my.filter.col.value", String.valueOf(Long.MIN_VALUE));
localConf.set("my.filter.scope", fs.makeQualified(filePath.getParent()) + "/.*");
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(localConf).filesystem(fs));
filterAllRows(r, r.options());
}
private void filterAllRows(Reader r, Reader.Options options) throws IOException {
VectorizedRowBatch b = schema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
while (rr.nextBatch(b)) {
assertTrue(((LongColumnVector) b.cols[0]).vector[0] != 0);
rowCount += b.size;
}
}
FileSystem.Statistics stats = readEnd();
assertEquals(0, rowCount);
// We should read less than half the length of the file
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage < 50,
String.format("Bytes read %.2f%% should be less than 50%%", readPercentage));
}
@Test
public void readEverything() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(r.options().useSelected(true))) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p >= 100);
}
private double readPercentage(FileSystem.Statistics stats, long fileSize) {
double p = stats.getBytesRead() * 100.0 / fileSize;
LOG.info(String.format("%nFileSize: %d%nReadSize: %d%nRead %%: %.2f",
fileSize,
stats.getBytesRead(),
p));
return p;
}
@Test
public void readEverythingWithFilter() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
long rowCount;
try (RecordReader rr = r.rows(r.options()
.useSelected(true)
.setRowFilter(FilterColumns, new AllowAllFilter()))) {
rowCount = validateFilteredRecordReader(rr, b);
}
double p = readPercentage(readEnd(), fs.getFileStatus(filePath).getLen());
assertEquals(RowCount, rowCount);
assertTrue(p >= 100);
}
@Test
public void filterAlternateBatches() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
Reader.Options options = r.options()
.useSelected(true)
.setRowFilter(FilterColumns, new AlternateFilter());
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
FileSystem.Statistics stats = readEnd();
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage > 100);
assertTrue(RowCount > rowCount);
}
@Test
public void filterWithSeek() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
Reader.Options options = r.options()
.useSelected(true)
.setRowFilter(FilterColumns, new AlternateFilter());
long seekRow;
try (RecordReader rr = r.rows(options)) {
// Validate the first batch
assertTrue(rr.nextBatch(b));
validateBatch(b, 0);
assertEquals(b.size, rr.getRowNumber());
// Read the next batch, will skip a batch that is filtered
assertTrue(rr.nextBatch(b));
validateBatch(b, 2048);
assertEquals(2048 + 1024, rr.getRowNumber());
// Seek forward
seekToRow(rr, b, 4096);
// Seek back to the filtered batch
long bytesRead = readEnd().getBytesRead();
seekToRow(rr, b, 1024);
// No IO should have taken place
assertEquals(bytesRead, readEnd().getBytesRead());
// Seek forward to next row group, where the first batch is not filtered
seekToRow(rr, b, 8192);
// Seek forward to next row group but position on filtered batch
seekToRow(rr, b, (8192 * 2) + 1024);
// Seek forward to next stripe
seekRow = r.getStripes().get(0).getNumberOfRows();
seekToRow(rr, b, seekRow);
// Seek back to previous stripe, filtered row, it should require more IO as a result of
// stripe change
bytesRead = readEnd().getBytesRead();
seekToRow(rr, b, 1024);
assertTrue(readEnd().getBytesRead() > bytesRead,
"Change of stripe should require more IO");
}
FileSystem.Statistics stats = readEnd();
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage > 130);
}
@Test
public void readFewRGWithSArg() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath,
OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.LONG, 0L, 1000000L, 2000000L, 3000000L)
.build();
Reader.Options options = r.options()
.allowSARGToFilter(false)
.useSelected(true)
.searchArgument(sarg, new String[] {"ridx"});
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
assertEquals(8192 * 4, rowCount);
FileSystem.Statistics stats = readEnd();
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage < 10);
}
@Test
public void readFewRGWithSArgAndFilter() throws IOException {
readStart();
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch b = schema.createRowBatch();
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.LONG, 0L, 1000000L, 2000000L, 3000000L)
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"ridx"})
.useSelected(true)
.allowSARGToFilter(true);
long rowCount;
try (RecordReader rr = r.rows(options)) {
rowCount = validateFilteredRecordReader(rr, b);
}
assertEquals(4, rowCount);
FileSystem.Statistics stats = readEnd();
double readPercentage = readPercentage(stats, fs.getFileStatus(filePath).getLen());
assertTrue(readPercentage < 10);
}
@Test
public void schemaEvolutionMissingFilterColumn() throws IOException {
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription readSchema = schema
.clone()
.addField("missing", TypeDescription.createLong());
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("missing", PredicateLeaf.Type.LONG)
.end()
.build();
Reader.Options options = r.options()
.schema(readSchema)
.searchArgument(sarg, new String[] {"missing"})
.useSelected(true)
.allowSARGToFilter(true);
VectorizedRowBatch b = readSchema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
assertFalse(rr.nextBatch(b));
}
assertEquals(0, rowCount);
}
@Test
public void schemaEvolutionLong2StringColumn() throws IOException {
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
// Change ridx column from long to string and swap the positions of ridx and f4 columns
TypeDescription readSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
.addField("f3", TypeDescription.createLong())
.addField("ridx", TypeDescription.createString())
.addField("f4", TypeDescription.createString());
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.STRING, "1")
.build();
Reader.Options options = r.options()
.schema(readSchema)
.searchArgument(sarg, new String[] {"ridx"})
.useSelected(true)
.allowSARGToFilter(true);
VectorizedRowBatch b = readSchema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
assertTrue(rr.nextBatch(b));
assertEquals(1, b.size);
rowCount += b.size;
HiveDecimalWritable d = new HiveDecimalWritable();
int rowIdx = 1;
long expValue = ((LongColumnVector) b.cols[0]).vector[rowIdx];
d.setFromLongAndScale(expValue, scale);
assertEquals(d, ((DecimalColumnVector) b.cols[1]).vector[rowIdx]);
assertEquals(expValue, ((LongColumnVector) b.cols[2]).vector[rowIdx]);
// The columns ridx and f4 are swapped, which is reflected in the updated index value
BytesColumnVector sv = (BytesColumnVector) b.cols[4];
assertEquals(String.valueOf(expValue),
sv.toString(rowIdx));
sv = (BytesColumnVector) b.cols[3];
assertEquals(String.valueOf(rowIdx),
sv.toString(rowIdx));
assertFalse(rr.nextBatch(b));
}
assertEquals(1, rowCount);
}
@Test
public void readWithCaseSensitivityOff() throws IOException {
// Use the ridx column input in UpperCase and flag case-sensitivity off
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("RIDX", PredicateLeaf.Type.LONG, 1L)
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"RIDX"})
.useSelected(true)
.allowSARGToFilter(true)
.isSchemaEvolutionCaseAware(false);
VectorizedRowBatch b = schema.createRowBatch();
long rowCount = 0;
try (RecordReader rr = r.rows(options)) {
assertTrue(rr.nextBatch(b));
validateBatch(b, 1L);
rowCount += b.size;
assertFalse(rr.nextBatch(b));
}
assertEquals(1, rowCount);
}
@Test
public void readFailureWithCaseSensitivityOn() throws IOException {
    // Use the ridx column input in UpperCase and flag case-sensitivity on, expecting the row reader creation to fail
Reader r = OrcFile.createReader(filePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.in("RIDX", PredicateLeaf.Type.LONG, 1L)
.build();
Reader.Options options = r.options()
.searchArgument(sarg, new String[] {"RIDX"})
.useSelected(true)
.allowSARGToFilter(true)
.isSchemaEvolutionCaseAware(true);
assertThrows(IllegalArgumentException.class,
() -> r.rows(options),
"Field RIDX not found in struct<f1:bigint,f2:decimal(20,6),f3:bigint,"
+ "f4:string,ridx:bigint>");
}
private void seekToRow(RecordReader rr, VectorizedRowBatch b, long row) throws IOException {
rr.seekToRow(row);
assertTrue(rr.nextBatch(b));
long expRowNum;
if ((row / b.getMaxSize()) % 2 == 0) {
expRowNum = row;
} else {
        // The batch containing the seek row is filtered out, so we land on the next surviving batch
expRowNum = row + b.getMaxSize();
}
validateBatch(b, expRowNum);
assertEquals(expRowNum + b.getMaxSize(), rr.getRowNumber());
}
private static class InFilter implements Consumer<OrcFilterContext> {
private final Set<Long> ids;
private final int colIdx;
private InFilter(Set<Long> ids, int colIdx) {
this.ids = ids;
this.colIdx = colIdx;
}
@Override
public void accept(OrcFilterContext b) {
int newSize = 0;
for (int i = 0; i < b.getSelectedSize(); i++) {
if (ids.contains(getValue(b, i))) {
b.getSelected()[newSize] = i;
newSize += 1;
}
}
b.setSelectedInUse(true);
b.setSelectedSize(newSize);
}
private Long getValue(OrcFilterContext b, int rowIdx) {
LongColumnVector v = ((LongColumnVector) ((OrcFilterContextImpl) b).getCols()[colIdx]);
int valIdx = rowIdx;
if (v.isRepeating) {
valIdx = 0;
}
if (!v.noNulls && v.isNull[valIdx]) {
return null;
} else {
return v.vector[valIdx];
}
}
}
/**
   * Filters out alternate batches in a default read:
   * if ridx(rowIdx) / 1024 is even the batch is allowed, otherwise all of its rows are dropped.
   * The predicate sketch below restates the same rule.
*/
private static class AlternateFilter implements Consumer<OrcFilterContext> {
@Override
public void accept(OrcFilterContext b) {
LongColumnVector v = (LongColumnVector) ((OrcFilterContextImpl) b).getCols()[4];
if ((v.vector[0] / 1024) % 2 == 1) {
b.setSelectedInUse(true);
b.setSelectedSize(0);
}
}
}
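  // A minimal sketch (not used by any test) restating the survival rule of
  // AlternateFilter above as a predicate over absolute row numbers, assuming
  // ridx holds the absolute row number and the default batch size of 1024:
  // batches 0, 2, 4, ... survive while batches 1, 3, 5, ... are dropped. This
  // is the same arithmetic that seekToRow() above relies on.
  @SuppressWarnings("unused")
  private static boolean survivesAlternateFilter(long rowNumber) {
    return (rowNumber / 1024) % 2 == 0;
  }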
private static class AllowAllFilter implements Consumer<OrcFilterContext> {
@Override
public void accept(OrcFilterContext batch) {
      // Do nothing; every row is allowed
}
}
private static void readStart() {
FileSystem.clearStatistics();
}
private static FileSystem.Statistics readEnd() {
return FileSystem.getAllStatistics().get(0);
}
}
| 27,751 | 36.757823 | 100 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestRowFilteringNoSkip.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.impl.RecordReaderImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.sql.Timestamp;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Types that are not skipped at row-level include: Long, Short, Int, Date, Binary.
 * As it turns out, it is more expensive to skip non-selected rows than to just decode everything and propagate the
 * selected array: skipping for these types breaks instruction pipelining and introduces more branch mispredictions.
 * A short sketch of how a consumer iterates the propagated selected array follows the setup method below.
*/
public class TestRowFilteringNoSkip {
private Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
private Configuration conf;
private FileSystem fs;
private Path testFilePath;
private static final int ColumnBatchRows = 1024;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
OrcConf.READER_USE_SELECTED.setBoolean(conf, true);
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestRowFilteringNoSkip." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
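  // Illustrative sketch referenced from the class comment (not used by the
  // tests below): for these non-skipped types every value is decoded and the
  // surviving rows are published through batch.selected, so a consumer is
  // expected to iterate the selected indices instead of 0..batch.size. The
  // helper name and shape are an assumption about typical consumer code, not
  // part of the ORC API.
  @SuppressWarnings("unused")
  private static long sumSelected(VectorizedRowBatch batch, int colIdx) {
    LongColumnVector col = (LongColumnVector) batch.cols[colIdx];
    long sum = 0;
    for (int i = 0; i < batch.size; i++) {
      // When selectedInUse is set, batch.size is the number of surviving rows
      // and selected[i] is the physical position of the i-th survivor.
      int row = batch.selectedInUse ? batch.selected[i] : i;
      sum += col.vector[row];
    }
    return sum;
  }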
@Test
public void testLongRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("int2", TypeDescription.createLong());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) ==0 )
col2.vector[row] = 100;
else
col2.vector[row] = 999;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
// We applied the given filter so selected is true
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
        // The selected array is propagated -- so size is never 1024
        assertTrue(batch.size != ColumnBatchRows);
        // But since this column type is not actually row-skipped, there will be no nulls!
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] != 0)
noNullCnt ++;
}
}
// For Int type ColumnVector filtering does not remove any data!
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(col2.vector[0], 100);
assertEquals(col2.vector[511], 999);
assertEquals(col2.vector[1020], 100);
assertEquals(col2.vector[1021], 999);
}
}
@Test
public void testIntRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("int2", TypeDescription.createInt());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
col1.vector[1023] = b;
col2.vector[1023] = 101;
for (int row = 0; row < batch.size-1; row++) {
col1.vector[row] = 999;
col2.vector[row] = row+1;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intFirstRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCount = 0;
while (rows.nextBatch(batch)) {
// We applied the given filter so selected is true
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
        // The selected array is propagated -- so size is never 1024
        assertTrue(batch.size != ColumnBatchRows);
        // But since this column type is not actually row-skipped, there will be no nulls!
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] != 0)
noNullCount++;
}
}
// For Int type ColumnVector filtering does not remove any data!
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCount);
// check filter-selected output
assertEquals(0, batch.selected[0]);
assertEquals(0, batch.selected[1]);
assertEquals(0, batch.selected[1023]);
}
}
@Test
public void testShortRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("short2", TypeDescription.createShort());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = row*2+1;
else
col2.vector[row] = -1 * row*2;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
// We applied the given filter so selected is true
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
        // The selected array is propagated -- so size is never 1024
        assertTrue(batch.size != ColumnBatchRows);
        // But since this column type is not actually row-skipped, there will be no nulls!
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] != 0)
noNullCnt ++;
}
}
// For Short type ColumnVector filtering does not remove any data!
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
assertFalse(col2.isRepeating);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertTrue(col2.vector[0] > 0);
assertTrue(col2.vector[511] < 0);
assertTrue(col2.vector[1020] > 0);
assertTrue(col2.vector[1021] < 0);
}
}
@Test
public void testDateRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("dt2", TypeDescription.createDate());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = Timestamp.valueOf("2020-04-01 12:34:56.9").toInstant().getEpochSecond();
else
col2.vector[row] = Timestamp.valueOf("2019-04-01 12:34:56.9").toInstant().getEpochSecond();
}
col2.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
// We applied the given filter so selected is true
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
        // The selected array is propagated -- so size is never 1024
        assertTrue(batch.size != ColumnBatchRows);
        // But since this column type is not actually row-skipped, there will be no nulls!
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] != 0)
noNullCnt ++;
}
}
// For Date type ColumnVector filtering does not remove any data!
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
assertFalse(col2.isRepeating);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertTrue(col2.vector[0] != 0);
assertTrue(col2.vector[511] != 0);
assertTrue(col2.vector[1020] != 0);
assertTrue(col2.vector[1021] != 0);
}
}
@Test
public void testBinaryRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("binary2", TypeDescription.createBinary());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
      // Write NUM_BATCHES batches of binary values, alternating per row
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.setVal(row, TestVectorOrcFile.bytesArray(0, 1, 2, 3, row));
else
col2.setVal(row, TestVectorOrcFile.bytesArray(1, 2, 3, 4, row));
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
// We applied the given filter so selected is true
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
        // The selected array is propagated -- so size is never 1024
        assertTrue(batch.size != ColumnBatchRows);
        // But since this column type is not actually row-skipped, there will be no nulls!
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (!TestVectorOrcFile.getBinary(col2, r).equals(TestVectorOrcFile.bytes()))
noNullCnt ++;
}
}
// For Binary type ColumnVector filtering does not remove any data!
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertNotEquals(TestVectorOrcFile.getBinary(col2, 0), TestVectorOrcFile.bytes());
assertNotEquals(TestVectorOrcFile.getBinary(col2, 511), TestVectorOrcFile.bytes());
assertNotEquals(TestVectorOrcFile.getBinary(col2, 1020), TestVectorOrcFile.bytes());
assertNotEquals(TestVectorOrcFile.getBinary(col2, 1021), TestVectorOrcFile.bytes());
}
}
}
| 16,874 | 39.564904 | 115 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestRowFilteringSkip.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.apache.orc.impl.RecordReaderImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.sql.Timestamp;
import java.text.Format;
import java.text.SimpleDateFormat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Types that are skipped at row-level include: Decimal, Decimal64, Double, Float, Char, VarChar, String, Boolean, Timestamp
* For the remaining types that are not row-skipped see {@link TestRowFilteringNoSkip}
*/
public class TestRowFilteringSkip {
private Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
private Configuration conf;
private FileSystem fs;
private Path testFilePath;
private static final int ColumnBatchRows = 1024;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
OrcConf.READER_USE_SELECTED.setBoolean(conf, true);
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestRowFilteringSkip." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
public static String convertTime(long time){
Date date = new Date(time);
Format format = new SimpleDateFormat("yyyy-MM-d HH:mm:ss.SSS");
return format.format(date);
}
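  // Illustrative sketch only (not exercised by the tests below): for the
  // lazily-skipped types covered in this class, positions that were filtered
  // out are never decoded, so the column vector keeps its default value
  // (a zero HiveDecimalWritable, 0.0, an empty string, ...) at those slots.
  // The helper below is an assumption about how a consumer could count the
  // decoded, non-default decimal values of a batch; it is not part of the
  // ORC API.
  @SuppressWarnings("unused")
  private static int countDecodedDecimals(VectorizedRowBatch batch, int colIdx) {
    DecimalColumnVector col = (DecimalColumnVector) batch.cols[colIdx];
    int decoded = 0;
    for (int r = 0; r < col.vector.length; r++) {
      HiveDecimalWritable value = col.vector[r];
      if (value != null && value.getHiveDecimal().longValue() != 0) {
        decoded++;
      }
    }
    return decoded;
  }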
// Filter all rows except: 924 and 940
public static void intAnyRowFilter(OrcFilterContext batch) {
    // Dummy filter implementation passing just two rows per batch (924 and 940 for a 1024-row batch)
int newSize = 2;
batch.getSelected()[0] = batch.getSelectedSize()-100;
batch.getSelected()[1] = 940;
batch.setSelectedInUse(true);
batch.setSelectedSize(newSize);
}
// Filter all rows except the first one
public static void intFirstRowFilter(OrcFilterContext batch) {
int newSize = 0;
for (int row = 0; row <batch.getSelectedSize(); ++row) {
if (row == 0) {
batch.getSelected()[newSize++] = row;
}
}
batch.setSelectedInUse(true);
batch.setSelectedSize(newSize);
}
  // Filter out rows in a round-robin fashion, starting with a pass
public static void intRoundRobbinRowFilter(OrcFilterContext batch) {
int newSize = 0;
int[] selected = batch.getSelected();
for (int row = 0; row < batch.getSelectedSize(); ++row) {
if ((row % 2) == 0) {
selected[newSize++] = row;
}
}
batch.setSelectedInUse(true);
batch.setSelected(selected);
batch.setSelectedSize(newSize);
}
static int rowCount = 0;
public static void intCustomValueFilter(OrcFilterContext batch) {
LongColumnVector col1 = (LongColumnVector) ((OrcFilterContextImpl) batch).getCols()[0];
int newSize = 0;
for (int row = 0; row <batch.getSelectedSize(); ++row) {
long val = col1.vector[row];
if ((val == 2) || (val == 5) || (val == 13) || (val == 29) || (val == 70)) {
batch.getSelected()[newSize++] = row;
}
rowCount++;
}
batch.setSelectedInUse(true);
batch.setSelectedSize(newSize);
}
@Test
public void testDecimalRepeatingFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("decimal1", TypeDescription.createDecimal());
HiveDecimalWritable passDataVal = new HiveDecimalWritable("100");
HiveDecimalWritable nullDataVal = new HiveDecimalWritable("0");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
col2.vector[row] = passDataVal;
}
col1.isRepeating = true;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r].compareTo(passDataVal) == 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(col2.vector[0], passDataVal);
assertEquals(col2.vector[511], nullDataVal);
assertEquals(col2.vector[1020], passDataVal);
assertEquals(col2.vector[1021], nullDataVal);
}
}
@Test
public void testDecimalRoundRobbinFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("decimal1", TypeDescription.createDecimal());
HiveDecimalWritable failDataVal = new HiveDecimalWritable("-100");
HiveDecimalWritable nullDataVal = new HiveDecimalWritable("0");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = new HiveDecimalWritable(row+1);
else
col2.vector[row] = failDataVal;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r].getHiveDecimal().longValue() > 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(col2.vector[0].getHiveDecimal().longValue(), 1);
assertEquals(col2.vector[511], nullDataVal);
assertEquals(col2.vector[1020].getHiveDecimal().longValue(), 1021);
assertEquals(col2.vector[1021], nullDataVal);
}
}
@Test
public void testDecimalNullRoundRobbinFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("decimal1", TypeDescription.createDecimal());
HiveDecimalWritable nullDataVal = new HiveDecimalWritable("0");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = new HiveDecimalWritable(row+1);
}
// Make sure we trigger the nullCount path of DecimalTreeReader
col2.noNulls = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r].getHiveDecimal().longValue() > 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(col2.vector[0].getHiveDecimal().longValue(), 1);
assertEquals(col2.vector[511], nullDataVal);
assertEquals(col2.vector[1020].getHiveDecimal().longValue(), 1021);
assertEquals(col2.vector[1021], nullDataVal);
}
}
@Test
public void testMultiDecimalSingleFilterCallback() throws Exception {
    // Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("decimal1", TypeDescription.createDecimal())
.addField("decimal2", TypeDescription.createDecimal());
HiveDecimalWritable passDataVal = new HiveDecimalWritable("12");
HiveDecimalWritable failDataVal = new HiveDecimalWritable("100");
HiveDecimalWritable nullDataVal = new HiveDecimalWritable("0");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
      // Write NUM_BATCHES batches where only rows 924 and 940 carry the passing value
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
DecimalColumnVector col3 = (DecimalColumnVector) batch.cols[2];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if (row == 924 || row == 940) {
col2.vector[row] = passDataVal;
col3.vector[row] = passDataVal;
} else {
col2.vector[row] = failDataVal;
col3.vector[row] = failDataVal;
}
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intAnyRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DecimalColumnVector col2 = (DecimalColumnVector) batch.cols[1];
DecimalColumnVector col3 = (DecimalColumnVector) batch.cols[2];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r].compareTo(passDataVal) == 0 && col3.vector[r].compareTo(passDataVal) == 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 2, noNullCnt);
assertEquals(924, batch.selected[0]);
assertEquals(940, batch.selected[1]);
assertEquals(0, batch.selected[2]);
assertEquals(col2.vector[0], nullDataVal);
assertEquals(col3.vector[0], nullDataVal);
assertEquals(col2.vector[511], nullDataVal);
assertEquals(col3.vector[511], nullDataVal);
assertEquals(col2.vector[924], passDataVal);
assertEquals(col3.vector[940], passDataVal);
assertEquals(col2.vector[1020], nullDataVal);
assertEquals(col3.vector[1020], nullDataVal);
assertEquals(col2.vector[1021], nullDataVal);
assertEquals(col3.vector[1021], nullDataVal);
}
}
@Test
public void testDecimal64RoundRobbinFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("decimal1", TypeDescription.createDecimal().withPrecision(10).withScale(2));
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
Decimal64ColumnVector col2 = (Decimal64ColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = row + 1;
else
col2.vector[row] = -1 * row;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
Decimal64ColumnVector col2 = (Decimal64ColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] != 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(col2.vector[0], 1);
assertEquals(col2.vector[511], 0);
assertEquals(col2.vector[1020], 1021);
assertEquals(col2.vector[1021], 0);
}
}
@Test
public void testDecimal64NullRoundRobbinFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("decimal1", TypeDescription.createDecimal().withPrecision(10).withScale(2));
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
Decimal64ColumnVector col2 = (Decimal64ColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = row + 1;
}
col2.noNulls = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
Decimal64ColumnVector col2 = (Decimal64ColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] == 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(col2.vector[0], 1);
assertEquals(col2.vector[511], 0);
assertEquals(col2.vector[1020], 1021);
assertEquals(col2.vector[1021], 0);
}
}
@Test
public void testDoubleRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("double2", TypeDescription.createDouble());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DoubleColumnVector col2 = (DoubleColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) ==0 )
col2.vector[row] = 100;
else
col2.vector[row] = 999;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DoubleColumnVector col2 = (DoubleColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] == 100)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(100.0, col2.vector[0]);
assertEquals(0.0, col2.vector[511]);
assertEquals(100, col2.vector[1020]);
assertEquals(0, col2.vector[1021]);
}
}
@Test
public void testFloatRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("float2", TypeDescription.createFloat());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DoubleColumnVector col2 = (DoubleColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) ==0 )
col2.vector[row] = 100+row;
else
col2.vector[row] = 999;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
DoubleColumnVector col2 = (DoubleColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] != 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertTrue(col2.vector[0] != 999.0);
assertEquals(0.0, col2.vector[511]);
assertEquals(1120.0, col2.vector[1020]);
assertEquals(0, col2.vector[1021]);
}
}
@Test
public void testCharRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("char2", TypeDescription.createChar());
byte[] passData = ("p").getBytes(StandardCharsets.UTF_8);
byte[] failData = ("f").getBytes(StandardCharsets.UTF_8);
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.setVal(row, passData);
else
col2.setVal(row, failData);
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (!col2.toString(r).isEmpty())
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals("p", col2.toString(0));
assertTrue(col2.toString(511).isEmpty());
assertEquals("p", col2.toString(1020));
assertTrue(col2.toString(1021).isEmpty());
}
}
@Test
public void testVarCharRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("varchar2", TypeDescription.createVarchar());
byte[] passData = ("p").getBytes(StandardCharsets.UTF_8);
byte[] failData = ("f").getBytes(StandardCharsets.UTF_8);
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.setVal(row, passData);
else
col2.setVal(row, failData);
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (!col2.toString(r).isEmpty())
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals("p", col2.toString(0));
assertTrue(col2.toString(511).isEmpty());
assertEquals("p", col2.toString(1020));
assertTrue(col2.toString(1021).isEmpty());
}
}
@Test
public void testDirectStringRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 10 * ColumnBatchRows;
final int NUM_BATCHES = 10;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("string1", TypeDescription.createString());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
      // Write NUM_BATCHES batches where each row gets its own string value
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) ==0 )
col2.setVal(row, ("passData-" + row).getBytes(StandardCharsets.UTF_8));
else
col2.setVal(row, ("failData-" + row).getBytes(StandardCharsets.UTF_8));
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (!col2.toString(r).isEmpty())
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertTrue(col2.toString(0).startsWith("pass"));
assertTrue(col2.toString(511).isEmpty());
assertTrue(col2.toString(1020).startsWith("pass"));
}
}
@Test
public void testDictionaryStringRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 100 * ColumnBatchRows;
final int NUM_BATCHES = 100;
// ORC write some data (one PASSing row per batch)
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("string1", TypeDescription.createString());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
          if (row % 2 == 0)
col2.setVal(row, ("passData").getBytes(StandardCharsets.UTF_8));
else
col2.setVal(row, ("failData").getBytes(StandardCharsets.UTF_8));
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
BytesColumnVector col2 = (BytesColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (!col2.toString(r).isEmpty())
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertTrue(col2.toString(0).startsWith("pass"));
assertTrue(col2.toString(511).isEmpty());
assertTrue(col2.toString(1020).startsWith("pass"));
}
}
@Test
public void testRepeatingBooleanRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
    // Write some ORC data where the boolean column is false for every row
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("bool2", TypeDescription.createBoolean());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
col2.vector[row] = 0;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] == 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * ColumnBatchRows, noNullCnt);
assertFalse(col2.isRepeating);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(0, col2.vector[0]);
assertEquals(0, col2.vector[511]);
assertEquals(0, col2.vector[1020]);
assertEquals(0, col2.vector[1021]);
}
}
@Test
public void testBooleanRoundRobbinRowFilterCallback() throws Exception {
final int INDEX_STRIDE = 0;
final int NUM_BATCHES = 10;
    // Write some ORC data where even rows are true and odd rows are false
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("bool2", TypeDescription.createBoolean());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.vector[row] = 1;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] == 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(1, col2.vector[0]);
assertEquals(0, col2.vector[511]);
assertEquals(1, col2.vector[1020]);
assertEquals(0, col2.vector[1021]);
}
}
@Test
public void testBooleanAnyRowFilterCallback() throws Exception {
final int INDEX_STRIDE = 0;
final int NUM_BATCHES = 100;
    // Write some ORC data where only rows 924 and 940 of each batch are true
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("bool2", TypeDescription.createBoolean());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if (row == 924 || row == 940)
col2.vector[row] = 1;
}
col1.isRepeating = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intAnyRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
LongColumnVector col2 = (LongColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.vector[r] == 1)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 2, noNullCnt);
assertEquals(924, batch.selected[0]);
assertEquals(940, batch.selected[1]);
assertEquals(0, col2.vector[0]);
assertEquals(0, col2.vector[511]);
assertEquals(0, col2.vector[1020]);
assertEquals(1, col2.vector[924]);
assertEquals(1, col2.vector[940]);
}
}
@Test
public void testTimestampRoundRobbinRowFilterCallback() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
TypeDescription schema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("ts2", TypeDescription.createTimestamp());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = schema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
TimestampColumnVector col2 = (TimestampColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.set(row, Timestamp.valueOf((1900+row)+"-04-01 12:34:56.9"));
else {
col2.isNull[row] = true;
col2.set(row, null);
}
}
col1.isRepeating = true;
col1.noNulls = false;
col2.noNulls = false;
writer.addRowBatch(batch);
}
}
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"int1"}, TestRowFilteringSkip::intRoundRobbinRowFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
TimestampColumnVector col2 = (TimestampColumnVector) batch.cols[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col2.getTime(r) == 0)
noNullCnt ++;
}
}
// Make sure that our filter worked
assertEquals(NUM_BATCHES * 512, noNullCnt);
assertFalse(col2.isRepeating);
assertEquals(0, batch.selected[0]);
assertEquals(2, batch.selected[1]);
assertEquals(0, convertTime(col2.getTime(0)).compareTo("1900-04-1 12:34:56.900"));
assertEquals(0, col2.getTime(511));
assertEquals(0, convertTime(col2.getTime(1020)).compareTo("2920-04-1 12:34:56.900"));
assertEquals(0, col2.getTime(1021));
}
}
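  /**
   * Schema evolution: the read schema adds a column that does not exist in the file. A NOT NULL
   * filter on that missing column should select nothing, while an allow-all filter should return
   * every row with the missing column materialized as nulls.
   */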
@Test
public void testSchemaEvolutionMissingColumn() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
TypeDescription fileSchema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("ts2", TypeDescription.createTimestamp());
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(fileSchema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = fileSchema.createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
TimestampColumnVector col2 = (TimestampColumnVector) batch.cols[1];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
col1.vector[row] = row;
if ((row % 2) == 0)
col2.set(row, Timestamp.valueOf((1900+row)+"-04-01 12:34:56.9"));
else {
col2.isNull[row] = true;
col2.set(row, null);
}
}
col1.noNulls = true;
col2.noNulls = false;
writer.addRowBatch(batch);
}
}
TypeDescription readSchema = fileSchema
.clone()
.addField("missing", TypeDescription.createInt());
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
// Read nothing with NOT NULL filter on missing
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.schema(readSchema)
.setRowFilter(new String[]{"missing"}, TestRowFilteringSkip::notNullFilterMissing))) {
VectorizedRowBatch batch = readSchema.createRowBatchV2();
assertFalse(rows.nextBatch(batch));
}
// Read everything with select all filter on missing
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.schema(readSchema)
.setRowFilter(new String[]{"missing"}, TestRowFilteringSkip::allowAll))) {
VectorizedRowBatch batch = readSchema
.createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64, ColumnBatchRows);
long rowCount = 0;
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
TimestampColumnVector col2 = (TimestampColumnVector) batch.cols[1];
while (rows.nextBatch(batch)) {
rowCount += batch.size;
assertFalse(batch.cols[2].noNulls);
assertTrue(batch.cols[2].isRepeating);
assertTrue(batch.cols[2].isNull[0]);
for (int i = 0; i < batch.size; i++) {
assertEquals(i, col1.vector[i]);
if (i % 2 == 0) {
assertEquals(Timestamp.valueOf((1900+i)+"-04-01 12:34:56.9"),
col2.asScratchTimestamp(i));
} else {
assertTrue(col2.isNull[i]);
}
}
}
assertEquals(ColumnBatchRows * NUM_BATCHES, rowCount);
}
}
@Test
public void testSchemaEvolutionMissingNestedColumn() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
TypeDescription fileSchema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("s2", TypeDescription.createStruct()
.addField("ts2", TypeDescription.createTimestamp()));
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(fileSchema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = fileSchema.createRowBatchV2();
LongColumnVector int1 = (LongColumnVector) batch.cols[0];
StructColumnVector s2 = (StructColumnVector) batch.cols[1];
TimestampColumnVector ts2 = (TimestampColumnVector) s2.fields[0];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
int1.vector[row] = row;
if ((row % 2) == 0)
ts2.set(row, Timestamp.valueOf((1900+row)+"-04-01 12:34:56.9"));
else {
s2.isNull[row] = true;
}
}
int1.noNulls = true;
ts2.noNulls = false;
s2.noNulls = false;
writer.addRowBatch(batch);
}
}
TypeDescription readSchema = fileSchema.clone();
readSchema
.findSubtype("s2")
.addField("missing", TypeDescription.createInt());
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
// Read nothing with NOT NULL filter on missing
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.schema(readSchema)
.setRowFilter(new String[]{"s2.missing"},
TestRowFilteringSkip::notNullFilterNestedMissing))) {
VectorizedRowBatch batch = readSchema.createRowBatchV2();
assertFalse(rows.nextBatch(batch));
}
// Read everything with select all filter on missing
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.schema(readSchema)
.setRowFilter(new String[]{"s2.missing"}, TestRowFilteringSkip::allowAll))) {
VectorizedRowBatch batch = readSchema
.createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64, ColumnBatchRows);
long rowCount = 0;
LongColumnVector int1 = (LongColumnVector) batch.cols[0];
StructColumnVector s2 = (StructColumnVector) batch.cols[1];
TimestampColumnVector ts2 = (TimestampColumnVector) s2.fields[0];
while (rows.nextBatch(batch)) {
rowCount += batch.size;
// Validate that the missing column is null
assertFalse(s2.fields[1].noNulls);
assertTrue(s2.fields[1].isRepeating);
assertTrue(s2.fields[1].isNull[0]);
for (int i = 0; i < batch.size; i++) {
assertEquals(i, int1.vector[i]);
if (i % 2 == 0) {
assertEquals(Timestamp.valueOf((1900+i)+"-04-01 12:34:56.9"),
ts2.asScratchTimestamp(i));
} else {
assertTrue(s2.isNull[i]);
assertTrue(ts2.isNull[i]);
}
}
}
assertEquals(ColumnBatchRows * NUM_BATCHES, rowCount);
}
}
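  /**
   * Schema evolution: every child of the read struct is missing from the file. The struct's own
   * null pattern must still be reconstructed correctly even though none of its children exist.
   */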
@Test
public void testSchemaEvolutionMissingAllChildren() throws Exception {
// Set the row stride to a multiple of the batch size
final int INDEX_STRIDE = 16 * ColumnBatchRows;
final int NUM_BATCHES = 10;
TypeDescription fileSchema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("s2", TypeDescription.createStruct()
.addField("ts2", TypeDescription.createTimestamp()));
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(fileSchema)
.rowIndexStride(INDEX_STRIDE))) {
VectorizedRowBatch batch = fileSchema.createRowBatchV2();
LongColumnVector int1 = (LongColumnVector) batch.cols[0];
StructColumnVector s2 = (StructColumnVector) batch.cols[1];
TimestampColumnVector ts2 = (TimestampColumnVector) s2.fields[0];
for (int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = ColumnBatchRows;
for (int row = 0; row < batch.size; row++) {
int1.vector[row] = row;
if ((row % 2) == 0)
ts2.set(row, Timestamp.valueOf((1900+row)+"-04-01 12:34:56.9"));
else {
s2.isNull[row] = true;
}
}
int1.noNulls = true;
ts2.noNulls = false;
s2.noNulls = false;
writer.addRowBatch(batch);
}
}
TypeDescription readSchema = TypeDescription.createStruct()
.addField("int1", TypeDescription.createInt())
.addField("s2", TypeDescription.createStruct()
.addField("missing_other", TypeDescription.createString())
.addField("missing", TypeDescription.createInt()));
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
// Read nothing with NOT NULL filter on missing
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.schema(readSchema)
.setRowFilter(new String[]{"s2.missing"},
TestRowFilteringSkip::notNullFilterNestedMissing))) {
VectorizedRowBatch batch = readSchema.createRowBatchV2();
assertFalse(rows.nextBatch(batch));
}
// Read everything with select all filter on missing
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.schema(readSchema)
.setRowFilter(new String[]{"s2.missing"}, TestRowFilteringSkip::allowAll))) {
VectorizedRowBatch batch = readSchema
.createRowBatch(TypeDescription.RowBatchVersion.USE_DECIMAL64, ColumnBatchRows);
long rowCount = 0;
LongColumnVector int1 = (LongColumnVector) batch.cols[0];
StructColumnVector s2 = (StructColumnVector) batch.cols[1];
LongColumnVector missing = (LongColumnVector) s2.fields[1];
BytesColumnVector missingOther = (BytesColumnVector) s2.fields[0];
while (rows.nextBatch(batch)) {
rowCount += batch.size;
// Validate that the missing columns are null
assertFalse(missing.noNulls);
assertTrue(missing.isRepeating);
assertTrue(missing.isNull[0]);
assertFalse(missingOther.noNulls);
assertTrue(missingOther.isRepeating);
assertTrue(missingOther.isNull[0]);
        // The struct column vector should still alternate correctly between null and not-null rows
assertFalse(s2.isRepeating);
assertFalse(s2.noNulls);
for (int i = 0; i < batch.size; i++) {
assertEquals(i, int1.vector[i]);
assertEquals(i % 2 != 0, s2.isNull[i]);
}
}
assertEquals(ColumnBatchRows * NUM_BATCHES, rowCount);
}
}
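  // Filter helper: keeps only the rows where the top-level "missing" column (column index 2) is
  // not null, so it selects nothing when that column does not exist in the file.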
private static void notNullFilterMissing(OrcFilterContext batch) {
int selIdx = 0;
ColumnVector cv = ((OrcFilterContextImpl) batch).getCols()[2];
if (cv.isRepeating) {
if (!cv.isNull[0]) {
for (int i = 0; i < batch.getSelectedSize(); i++) {
batch.getSelected()[selIdx++] = i;
}
}
} else {
for (int i = 0; i < batch.getSelectedSize(); i++) {
if (!((OrcFilterContextImpl) batch).getCols()[2].isNull[i]) {
batch.getSelected()[selIdx++] = i;
}
}
}
batch.setSelectedInUse(true);
batch.setSelectedSize(selIdx);
}
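  // Filter helper: keeps only the rows where the nested "s2.missing" column is not null.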
private static void notNullFilterNestedMissing(OrcFilterContext batch) {
int selIdx = 0;
StructColumnVector scv = (StructColumnVector) ((OrcFilterContextImpl) batch).getCols()[1];
ColumnVector cv = scv.fields[1];
if (cv.isRepeating) {
if (!cv.isNull[0]) {
for (int i = 0; i < batch.getSelectedSize(); i++) {
batch.getSelected()[selIdx++] = i;
}
}
} else {
for (int i = 0; i < batch.getSelectedSize(); i++) {
if (!cv.isNull[i]) {
batch.getSelected()[selIdx++] = i;
}
}
}
batch.setSelectedInUse(true);
batch.setSelectedSize(selIdx);
}
private static void allowAll(OrcFilterContext batch) {
    // Do nothing; all rows are selected by default
}
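  /**
   * Filter a pre-generated example file (orc_split_elim.orc) on the userid column and verify
   * that each returned batch contains at most one of the five matching rows.
   */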
@Test
public void testCustomFileTimestampRoundRobbinRowFilterCallback() throws Exception {
testFilePath = new Path(getClass().getClassLoader().
getSystemResource("orc_split_elim.orc").getPath());
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(
reader.options()
.setRowFilter(new String[]{"userid"}, TestRowFilteringSkip::intCustomValueFilter))) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
TimestampColumnVector col5 = (TimestampColumnVector) batch.cols[4];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
int noNullCnt = 0;
while (rows.nextBatch(batch)) {
assertTrue(batch.selectedInUse);
assertNotNull(batch.selected);
// Rows are filtered so it should never be 1024
assertTrue(batch.size != ColumnBatchRows);
assertTrue(col1.noNulls);
for (int r = 0; r < ColumnBatchRows; ++r) {
if (col1.vector[r] != 100) noNullCnt ++;
}
        // The file's values are spaced so that each batch selects exactly one row; we could get 0
        // when all the rows in a batch are filtered out.
if (batch.size == 0) {
continue;
}
assertEquals(1, batch.size);
long val = col1.vector[batch.selected[0]] ;
// Check that we have read the valid value
assertTrue((val == 2) || (val == 5) || (val == 13) || (val == 29) || (val == 70));
if (val == 2) {
assertEquals(0, col5.getTime(batch.selected[0]));
} else {
assertNotEquals(0, col5.getTime(batch.selected[0]));
}
// Check that unselected is not populated
assertEquals(0, batch.selected[1]);
}
// Total rows of the file should be 25k
      assertEquals(25000, rows.getRowNumber());
      // Make sure that our filter worked (5 rows with userid != 100)
assertEquals(5, noNullCnt);
}
}
}
| 62,447 | 38.152351 | 124 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestSelectedVector.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.KeyProvider;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
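/**
 * Verifies that the writer honors a batch's selected vector (filter context): after
 * batch.setFilterContext(true, selected, selectedSize), only the selected rows of each
 * VectorizedRowBatch should be written and read back.
 */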
public class TestSelectedVector {
Path workDir = new Path(System.getProperty("test.tmp.dir"));
Configuration conf;
FileSystem fs;
Path testFilePath;
Random random = new Random();
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
conf.setInt(OrcConf.ROW_INDEX_STRIDE.getAttribute(), VectorizedRowBatch.DEFAULT_SIZE);
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path(workDir, TestSelectedVector.class.getSimpleName() + "." +
testInfo.getTestMethod().get().getName() + ".orc");
}
@AfterEach
public void deleteTestFile() throws Exception {
fs.delete(testFilePath, false);
}
@Test
public void testWriteBaseTypeUseSelectedVector() throws IOException {
TypeDescription schema =
TypeDescription.fromString("struct<a:boolean,b:tinyint,c:smallint,d:int,e:bigint," +
"f:float,g:double,h:string,i:date,j:timestamp,k:binary,l:decimal(20,5),m:varchar(5)," +
"n:char(5)>");
Writer writer = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf)
.setSchema(schema).overwrite(true));
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector a = (LongColumnVector) batch.cols[0];
LongColumnVector b = (LongColumnVector) batch.cols[1];
LongColumnVector c = (LongColumnVector) batch.cols[2];
LongColumnVector d = (LongColumnVector) batch.cols[3];
LongColumnVector e = (LongColumnVector) batch.cols[4];
DoubleColumnVector f = (DoubleColumnVector) batch.cols[5];
DoubleColumnVector g = (DoubleColumnVector) batch.cols[6];
BytesColumnVector h = (BytesColumnVector) batch.cols[7];
DateColumnVector i = (DateColumnVector) batch.cols[8];
TimestampColumnVector j = (TimestampColumnVector) batch.cols[9];
BytesColumnVector k = (BytesColumnVector) batch.cols[10];
DecimalColumnVector l = (DecimalColumnVector) batch.cols[11];
BytesColumnVector m = (BytesColumnVector) batch.cols[12];
BytesColumnVector n = (BytesColumnVector) batch.cols[13];
List<Integer> selectedRows = new ArrayList<>();
int[] selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
int selectedSize = 0;
int writeRowNum = 0;
for (int o = 0; o < VectorizedRowBatch.DEFAULT_SIZE * 2; o++) {
int row = batch.size++;
if (row % 5 == 0) {
a.noNulls = false;
a.isNull[row] = true;
b.noNulls = false;
b.isNull[row] = true;
c.noNulls = false;
c.isNull[row] = true;
d.noNulls = false;
d.isNull[row] = true;
e.noNulls = false;
e.isNull[row] = true;
f.noNulls = false;
f.isNull[row] = true;
g.noNulls = false;
g.isNull[row] = true;
h.noNulls = false;
h.isNull[row] = true;
i.noNulls = false;
i.isNull[row] = true;
j.noNulls = false;
j.isNull[row] = true;
k.noNulls = false;
k.isNull[row] = true;
l.noNulls = false;
l.isNull[row] = true;
m.noNulls = false;
m.isNull[row] = true;
n.noNulls = false;
n.isNull[row] = true;
} else {
a.vector[row] = row % 2;
b.vector[row] = row % 128;
c.vector[row] = row;
d.vector[row] = row;
e.vector[row] = row * 10000000L;
f.vector[row] = row * 1.0f;
g.vector[row] = row * 1.0d;
byte[] bytes = String.valueOf(row).getBytes(StandardCharsets.UTF_8);
h.setRef(row, bytes, 0, bytes.length);
i.vector[row] = row;
j.time[row] = row * 1000L;
j.nanos[row] = row;
k.setRef(row, bytes, 0, bytes.length);
l.vector[row] = new HiveDecimalWritable(row);
m.setRef(row, bytes, 0, bytes.length);
bytes = String.valueOf(10000 - row).getBytes(StandardCharsets.UTF_8);
n.setRef(row, bytes, 0, bytes.length);
}
if (random.nextInt() % 2 == 0) {
selectedRows.add(row);
selected[selectedSize ++] = row;
writeRowNum ++;
}
if (batch.size == batch.getMaxSize()) {
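        // Only the rows marked in the selected vector are expected to be written out.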
batch.setFilterContext(true, selected, selectedSize);
writer.addRowBatch(batch);
selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
selectedSize = 0;
batch.reset();
}
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
batch = schema.createRowBatch();
Reader.Options options = reader.options().schema(schema);
RecordReader rowIterator = reader.rows(options);
int readRowNum = 0;
a = (LongColumnVector) batch.cols[0];
b = (LongColumnVector) batch.cols[1];
c = (LongColumnVector) batch.cols[2];
d = (LongColumnVector) batch.cols[3];
e = (LongColumnVector) batch.cols[4];
f = (DoubleColumnVector) batch.cols[5];
g = (DoubleColumnVector) batch.cols[6];
h = (BytesColumnVector) batch.cols[7];
i = (DateColumnVector) batch.cols[8];
j = (TimestampColumnVector) batch.cols[9];
k = (BytesColumnVector) batch.cols[10];
l = (DecimalColumnVector) batch.cols[11];
m = (BytesColumnVector) batch.cols[12];
n = (BytesColumnVector) batch.cols[13];
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
int selectedRow = selectedRows.get(readRowNum);
readRowNum ++;
if (c.isNull[row]) {
assertTrue(a.isNull[row]);
assertTrue(b.isNull[row]);
assertTrue(d.isNull[row]);
assertTrue(e.isNull[row]);
assertTrue(f.isNull[row]);
assertTrue(g.isNull[row]);
assertTrue(h.isNull[row]);
assertTrue(i.isNull[row]);
assertTrue(j.isNull[row]);
assertTrue(k.isNull[row]);
assertTrue(l.isNull[row]);
assertTrue(m.isNull[row]);
assertTrue(n.isNull[row]);
}
else {
int rowNum = (int)c.vector[row];
assertEquals(selectedRow, rowNum);
assertTrue(rowNum % 5 != 0);
assertEquals(rowNum % 2, a.vector[row]);
assertEquals(rowNum % 128, b.vector[row]);
assertEquals(rowNum, d.vector[row]);
assertEquals(rowNum * 10000000L, e.vector[row]);
assertEquals(rowNum * 1.0f, f.vector[row]);
assertEquals(rowNum * 1.0d, g.vector[row]);
assertEquals(String.valueOf(rowNum), h.toString(row));
assertEquals(rowNum, i.vector[row]);
assertEquals(rowNum * 1000L, j.time[row]);
assertEquals(rowNum, j.nanos[row]);
assertEquals(String.valueOf(rowNum), k.toString(row));
assertEquals(new HiveDecimalWritable(rowNum), l.vector[row]);
assertEquals(String.valueOf(rowNum), m.toString(row));
assertEquals(String.valueOf(10000 - rowNum), n.toString(row));
}
}
}
rowIterator.close();
assertEquals(writeRowNum, readRowNum);
}
@Test
public void testWriteComplexTypeUseSelectedVector() throws IOException {
TypeDescription schema =
TypeDescription.fromString("struct<a:map<int,uniontype<int,string>>," +
"b:array<struct<c:int>>>");
Writer writer = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf)
.setSchema(schema).overwrite(true));
VectorizedRowBatch batch = schema.createRowBatch();
MapColumnVector a = (MapColumnVector) batch.cols[0];
LongColumnVector keys = (LongColumnVector) a.keys;
UnionColumnVector values = (UnionColumnVector) a.values;
LongColumnVector value1 = (LongColumnVector) values.fields[0];
BytesColumnVector value2 = (BytesColumnVector) values.fields[1];
ListColumnVector b = (ListColumnVector) batch.cols[1];
StructColumnVector child = (StructColumnVector) b.child;
LongColumnVector c = (LongColumnVector) child.fields[0];
int mapOffset = 0;
int arrayOffset = 0;
List<Integer> selectedRows = new ArrayList<>();
int[] selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
int selectedSize = 0;
int writeRowNum = 0;
for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE * 2; i++) {
int row = batch.size++;
a.offsets[row] = mapOffset;
b.offsets[row] = arrayOffset;
int tag = row % 2;
if (row % 5 == 0) {
a.lengths[row] = 1;
values.tags[mapOffset] = tag;
keys.noNulls = false;
keys.isNull[mapOffset] = true;
if (tag == 0) {
value1.noNulls = false;
value1.isNull[mapOffset] = true;
} else {
value2.noNulls = false;
value2.isNull[mapOffset] = true;
}
b.lengths[row] = 1;
c.noNulls = false;
c.isNull[arrayOffset] = true;
} else {
a.lengths[row] = 2;
values.tags[mapOffset] = tag;
values.tags[mapOffset + 1] = tag;
keys.vector[mapOffset] = row;
keys.vector[mapOffset + 1] = row + 1;
if (tag == 0) {
value1.vector[mapOffset] = row * 3L;
value1.vector[mapOffset + 1] = (row + 1) * 3L;
} else {
byte[] bytes = String.valueOf(row).getBytes(StandardCharsets.UTF_8);
value2.setRef(mapOffset, bytes, 0, bytes.length);
bytes = String.valueOf(row + 1).getBytes(StandardCharsets.UTF_8);
value2.setRef(mapOffset + 1, bytes, 0, bytes.length);
}
b.lengths[row] = 3;
c.vector[arrayOffset] = row;
c.vector[arrayOffset + 1] = row + 1;
c.vector[arrayOffset + 2] = row + 2;
}
mapOffset += a.lengths[row];
arrayOffset += b.lengths[row];
if (random.nextInt() % 2 == 0) {
selectedRows.add(row);
selected[selectedSize ++] = row;
writeRowNum ++;
}
if (arrayOffset + 3 >= batch.getMaxSize()) {
batch.setFilterContext(true, selected, selectedSize);
writer.addRowBatch(batch);
selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
selectedSize = 0;
mapOffset = 0;
arrayOffset = 0;
batch.reset();
}
}
if (batch.size != 0) {
batch.setFilterContext(true, selected, selectedSize);
writer.addRowBatch(batch);
batch.reset();
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
batch = schema.createRowBatch();
Reader.Options options = reader.options().schema(schema);
RecordReader rowIterator = reader.rows(options);
int readRowNum = 0;
a = (MapColumnVector) batch.cols[0];
keys = (LongColumnVector) a.keys;
values = (UnionColumnVector) a.values;
value1 = (LongColumnVector) values.fields[0];
value2 = (BytesColumnVector) values.fields[1];
b = (ListColumnVector) batch.cols[1];
child = (StructColumnVector) b.child;
c = (LongColumnVector) child.fields[0];
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
int selectedRow = selectedRows.get(readRowNum);
readRowNum ++;
mapOffset = (int)a.offsets[row];
int mapLen = (int)a.lengths[row];
arrayOffset = (int)b.offsets[row];
int arrayLen = (int)b.lengths[row];
if (mapLen == 1) {
assertEquals(1, arrayLen);
assertTrue(keys.isNull[mapOffset]);
if (values.tags[mapOffset] == 0) {
assertTrue(value1.isNull[mapOffset]);
} else {
assertTrue(value2.isNull[mapOffset]);
}
assertTrue(c.isNull[arrayOffset]);
}
else {
assertEquals(2, mapLen);
assertEquals(3, arrayLen);
long rowNum = keys.vector[mapOffset];
assertEquals(selectedRow, rowNum);
assertEquals(rowNum + 1, keys.vector[mapOffset + 1]);
if (values.tags[mapOffset] == 0) {
assertEquals(rowNum * 3, value1.vector[mapOffset]);
} else {
assertEquals(String.valueOf(rowNum), value2.toString(mapOffset));
}
if (values.tags[mapOffset + 1] == 0) {
assertEquals((rowNum + 1) * 3, value1.vector[mapOffset + 1]);
} else {
assertEquals(String.valueOf(rowNum + 1), value2.toString(mapOffset + 1));
}
}
}
}
rowIterator.close();
assertEquals(writeRowNum, readRowNum);
}
@Test
public void testWriteRepeatedUseSelectedVector() throws IOException {
TypeDescription schema =
TypeDescription.fromString("struct<a:int,b:string,c:decimal(20,5)>");
Writer writer = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf)
.setSchema(schema).overwrite(true));
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector a = (LongColumnVector) batch.cols[0];
BytesColumnVector b = (BytesColumnVector) batch.cols[1];
DecimalColumnVector c = (DecimalColumnVector) batch.cols[2];
b.fillWithNulls();
c.fill(new HiveDecimalWritable(42).getHiveDecimal());
Random random = new Random();
List<Integer> selectedRows = new ArrayList<>();
int[] selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
int selectedSize = 0;
int writeRowNum = 0;
for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
a.vector[i] = i;
if (random.nextInt() % 2 == 0) {
selectedRows.add(i);
selected[selectedSize ++] = i;
writeRowNum ++;
}
}
batch.setFilterContext(true, selected, selectedSize);
writer.addRowBatch(batch);
batch.reset();
selectedSize = 0;
b.fill(String.valueOf(42).getBytes(StandardCharsets.UTF_8));
c.noNulls = false;
c.isRepeating = true;
c.vector[0].setFromLong(0);
c.isNull[0] = true;
selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
a.vector[i] = i + 1024;
if (random.nextInt() % 2 == 0) {
selectedRows.add(i + 1024);
selected[selectedSize ++] = i;
writeRowNum ++;
}
}
batch.setFilterContext(true, selected, selectedSize);
writer.addRowBatch(batch);
batch.reset();
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
batch = schema.createRowBatch();
Reader.Options options = reader.options().schema(schema);
RecordReader rowIterator = reader.rows(options);
int readRowNum = 0;
a = (LongColumnVector) batch.cols[0];
b = (BytesColumnVector) batch.cols[1];
c = (DecimalColumnVector) batch.cols[2];
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
int selectedRow = selectedRows.get(readRowNum);
readRowNum ++;
long rowNum = a.vector[row];
assertEquals(selectedRow, rowNum);
if (rowNum < 1024) {
assertNull(b.toString(row));
assertEquals(new HiveDecimalWritable(42), c.vector[row]);
} else {
assertEquals("42", b.toString(row));
assertTrue(c.isNull[row]);
}
}
}
rowIterator.close();
assertEquals(writeRowNum, readRowNum);
}
@Test
public void testWriteEncryptionUseSelectedVector() throws IOException {
TypeDescription schema =
TypeDescription.fromString("struct<id:int,name:string>");
byte[] kmsKey = "secret123".getBytes(StandardCharsets.UTF_8);
KeyProvider keyProvider = new InMemoryKeystore()
.addKey("pii", EncryptionAlgorithm.AES_CTR_128, kmsKey);
String encryption = "pii:id,name";
String mask = "sha256:id,name";
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.overwrite(true)
.setKeyProvider(keyProvider)
.encrypt(encryption)
.masks(mask));
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector id = (LongColumnVector) batch.cols[0];
BytesColumnVector name = (BytesColumnVector) batch.cols[1];
Random random = new Random();
List<Integer> selectedRows = new ArrayList<>();
int[] selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
int selectedSize = 0;
int writeRowNum = 0;
for (int r = 0; r < VectorizedRowBatch.DEFAULT_SIZE * 2; ++r) {
int row = batch.size++;
id.vector[row] = r;
byte[] buffer = ("name-" + r).getBytes(StandardCharsets.UTF_8);
name.setRef(row, buffer, 0, buffer.length);
if (random.nextInt() % 2 == 0) {
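        // Remember the global row id for verification and record its position within the current batch.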
selectedRows.add(r);
selected[selectedSize ++] = r % VectorizedRowBatch.DEFAULT_SIZE;
writeRowNum ++;
}
if (batch.size == batch.getMaxSize()) {
batch.setFilterContext(true, selected, selectedSize);
writer.addRowBatch(batch);
selected = new int[VectorizedRowBatch.DEFAULT_SIZE];
selectedSize = 0;
batch.reset();
}
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).setKeyProvider(keyProvider));
batch = schema.createRowBatch();
Reader.Options options = reader.options().schema(schema);
RecordReader rowIterator = reader.rows(options);
int readRowNum = 0;
id = (LongColumnVector) batch.cols[0];
name = (BytesColumnVector) batch.cols[1];
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
int selectedRow = selectedRows.get(readRowNum);
readRowNum ++;
long value = id.vector[row];
assertEquals(selectedRow, value);
assertEquals("name-" + (value), name.toString(row));
}
}
rowIterator.close();
assertEquals(writeRowNum, readRowNum);
}
}
| 19,975 | 36.338318 | 99 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestStringDictionary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.impl.OutStream;
import org.apache.orc.impl.RecordReaderImpl;
import org.apache.orc.impl.StreamName;
import org.apache.orc.impl.TestInStream;
import org.apache.orc.impl.writer.StreamOptions;
import org.apache.orc.impl.writer.StringTreeWriter;
import org.apache.orc.impl.writer.TreeWriter;
import org.apache.orc.impl.writer.WriterContext;
import org.apache.orc.impl.writer.WriterEncryptionVariant;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
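/**
 * Tests the writer's choice between dictionary and direct string encodings (DICTIONARY_V2 vs
 * DIRECT_V2) under both the RBTREE and HASH dictionary implementations.
 */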
public class TestStringDictionary {
private Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
private Configuration conf;
private FileSystem fs;
private Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestStringDictionary." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
private static Stream<Arguments> data() {
return Stream.of(Arguments.of("RBTREE"), Arguments.of("HASH"));
}
@ParameterizedTest
@MethodSource("data")
public void testTooManyDistinct(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
TypeDescription schema = TypeDescription.createString();
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE)
.bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector col = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < 20000; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
col.setVal(batch.size++, String.valueOf(i).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
col = (BytesColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(String.valueOf(idx++), col.toString(r));
}
}
// make sure the encoding type is correct
for (StripeInformation stripe : reader.getStripes()) {
      // hacky but does the job, this casting will work as long as this test resides
// within the same package as ORC reader
OrcProto.StripeFooter footer = ((RecordReaderImpl) rows).readStripeFooter(stripe);
for (int i = 0; i < footer.getColumnsCount(); ++i) {
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
assertEquals(OrcProto.ColumnEncoding.Kind.DIRECT_V2, encoding.getKind());
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testHalfDistinct(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
final int totalSize = 20000;
final int bound = 10000;
TypeDescription schema = TypeDescription.createString();
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).compress(CompressionKind.NONE)
.bufferSize(bound));
Random rand = new Random(123);
int[] input = new int[totalSize];
for (int i = 0; i < totalSize; i++) {
input[i] = rand.nextInt(bound);
}
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector col = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < totalSize; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
col.setVal(batch.size++, String.valueOf(input[i]).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
col = (BytesColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(String.valueOf(input[idx++]), col.toString(r));
}
}
// make sure the encoding type is correct
for (StripeInformation stripe : reader.getStripes()) {
      // hacky but does the job, this casting will work as long as this test resides
// within the same package as ORC reader
OrcProto.StripeFooter footer = ((RecordReaderImpl) rows).readStripeFooter(stripe);
for (int i = 0; i < footer.getColumnsCount(); ++i) {
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
assertEquals(OrcProto.ColumnEncoding.Kind.DICTIONARY_V2, encoding.getKind());
}
}
}
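  // Minimal WriterContext stub that records each created stream's output so tests can inspect
  // the bytes the tree writers produce.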
static class WriterContextImpl implements WriterContext {
private final TypeDescription schema;
private final Configuration conf;
private final Map<StreamName, TestInStream.OutputCollector> streams =
new HashMap<>();
WriterContextImpl(TypeDescription schema, Configuration conf) {
this.schema = schema;
this.conf = conf;
}
@Override
public OutStream createStream(StreamName name) {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
streams.put(name, collect);
return new OutStream("test", new StreamOptions(1000), collect);
}
@Override
public int getRowIndexStride() {
return 10000;
}
@Override
public boolean buildIndex() {
return OrcConf.ENABLE_INDEXES.getBoolean(conf);
}
@Override
public boolean isCompressed() {
return false;
}
@Override
public OrcFile.EncodingStrategy getEncodingStrategy() {
return OrcFile.EncodingStrategy.SPEED;
}
@Override
public boolean[] getBloomFilterColumns() {
return new boolean[schema.getMaximumId() + 1];
}
@Override
public double getBloomFilterFPP() {
return 0;
}
@Override
public Configuration getConfiguration() {
return conf;
}
@Override
public OrcFile.Version getVersion() {
return OrcFile.Version.V_0_12;
}
@Override
public PhysicalWriter getPhysicalWriter() {
return null;
}
@Override
public void setEncoding(int column, WriterEncryptionVariant variant, OrcProto.ColumnEncoding encoding) {
}
@Override
public void writeStatistics(StreamName name, OrcProto.ColumnStatistics.Builder stats) {
}
@Override
public OrcFile.BloomFilterVersion getBloomFilterVersion() {
return OrcFile.BloomFilterVersion.UTF8;
}
@Override
public void writeIndex(StreamName name, OrcProto.RowIndex.Builder index) {
}
@Override
public void writeBloomFilter(StreamName name,
OrcProto.BloomFilterIndex.Builder bloom) {
}
@Override
public DataMask getUnencryptedMask(int columnId) {
return null;
}
@Override
public WriterEncryptionVariant getEncryption(int columnId) {
return null;
}
@Override
public boolean getUseUTCTimestamp() {
return true;
}
@Override
public double getDictionaryKeySizeThreshold(int column) {
return OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getDouble(conf);
}
@Override
public boolean getProlepticGregorian() {
return false;
}
}
@ParameterizedTest
@MethodSource("data")
public void testNonDistinctDisabled(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
TypeDescription schema = TypeDescription.createString();
conf.set(OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute(), "0.0");
WriterContextImpl writerContext = new WriterContextImpl(schema, conf);
StringTreeWriter writer = (StringTreeWriter)
TreeWriter.Factory.create(schema, null, writerContext);
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector col = (BytesColumnVector) batch.cols[0];
batch.size = 1024;
col.isRepeating = true;
col.setVal(0, "foobar".getBytes(StandardCharsets.UTF_8));
writer.writeBatch(col, 0, batch.size);
TestInStream.OutputCollector output = writerContext.streams.get(
new StreamName(0, OrcProto.Stream.Kind.DATA));
    // Check to make sure that the strings are being written to the stream even before we get to
    // the first row group: 6 bytes ("foobar") * 1024 rows = 6144 bytes, of which six full
    // 1000-byte buffers (6000 bytes) have been flushed so far.
assertEquals(6000, output.buffer.size());
}
@ParameterizedTest
@MethodSource("data")
public void testTooManyDistinctCheckDisabled(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
TypeDescription schema = TypeDescription.createString();
conf.setBoolean(OrcConf.ROW_INDEX_STRIDE_DICTIONARY_CHECK.getAttribute(), false);
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).compress(CompressionKind.NONE)
.bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector string = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < 20000; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
string.setVal(batch.size++, String.valueOf(i).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
string = (BytesColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(String.valueOf(idx++), string.toString(r));
}
}
// make sure the encoding type is correct
for (StripeInformation stripe : reader.getStripes()) {
      // hacky but does the job, this casting will work as long as this test resides
// within the same package as ORC reader
OrcProto.StripeFooter footer = ((RecordReaderImpl) rows).readStripeFooter(stripe);
for (int i = 0; i < footer.getColumnsCount(); ++i) {
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
assertEquals(OrcProto.ColumnEncoding.Kind.DIRECT_V2, encoding.getKind());
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testHalfDistinctCheckDisabled(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
TypeDescription schema = TypeDescription.createString();
conf.setBoolean(OrcConf.ROW_INDEX_STRIDE_DICTIONARY_CHECK.getAttribute(),
false);
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE)
.bufferSize(10000));
Random rand = new Random(123);
int[] input = new int[20000];
for (int i = 0; i < 20000; i++) {
input[i] = rand.nextInt(10000);
}
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector string = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < 20000; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
string.setVal(batch.size++, String.valueOf(input[i]).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
string = (BytesColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(String.valueOf(input[idx++]), string.toString(r));
}
}
// make sure the encoding type is correct
for (StripeInformation stripe : reader.getStripes()) {
      // hacky but does the job, this casting will work as long as this test resides
// within the same package as ORC reader
OrcProto.StripeFooter footer = ((RecordReaderImpl) rows).readStripeFooter(stripe);
for (int i = 0; i < footer.getColumnsCount(); ++i) {
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
assertEquals(OrcProto.ColumnEncoding.Kind.DICTIONARY_V2, encoding.getKind());
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testTooManyDistinctV11AlwaysDictionary(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
TypeDescription schema = TypeDescription.createString();
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE)
.version(OrcFile.Version.V_0_11).bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector string = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < 20000; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
string.setVal(batch.size++, String.valueOf(i).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
batch = reader.getSchema().createRowBatch();
string = (BytesColumnVector) batch.cols[0];
RecordReader rows = reader.rows();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(String.valueOf(idx++), string.toString(r));
}
}
// make sure the encoding type is correct
for (StripeInformation stripe : reader.getStripes()) {
      // hacky but does the job, this casting will work as long as this test resides
// within the same package as ORC reader
OrcProto.StripeFooter footer = ((RecordReaderImpl) rows).readStripeFooter(stripe);
for (int i = 0; i < footer.getColumnsCount(); ++i) {
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
assertEquals(OrcProto.ColumnEncoding.Kind.DICTIONARY, encoding.getKind());
}
}
}
/**
* Test that dictionaries can be disabled, per column. In this test, we want to disable DICTIONARY_V2 for the
* `longString` column (presumably for a low hit-ratio), while preserving DICTIONARY_V2 for `shortString`.
* @throws Exception on unexpected failure
*/
@ParameterizedTest
@MethodSource("data")
public void testDisableDictionaryForSpecificColumn(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
final String SHORT_STRING_VALUE = "foo";
final String LONG_STRING_VALUE = "BAAAAAAAAR!!";
TypeDescription schema =
TypeDescription.fromString("struct<shortString:string,longString:string>");
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.directEncodingColumns("longString"));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector shortStringColumnVector = (BytesColumnVector) batch.cols[0];
BytesColumnVector longStringColumnVector = (BytesColumnVector) batch.cols[1];
for (int i = 0; i < 20000; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
shortStringColumnVector.setVal(batch.size, SHORT_STRING_VALUE.getBytes(StandardCharsets.UTF_8));
      longStringColumnVector.setVal(batch.size, LONG_STRING_VALUE.getBytes(StandardCharsets.UTF_8));
++batch.size;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader recordReader = reader.rows();
batch = reader.getSchema().createRowBatch();
shortStringColumnVector = (BytesColumnVector) batch.cols[0];
longStringColumnVector = (BytesColumnVector) batch.cols[1];
while (recordReader.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(SHORT_STRING_VALUE, shortStringColumnVector.toString(r));
assertEquals(LONG_STRING_VALUE, longStringColumnVector.toString(r));
}
}
// make sure the encoding type is correct
for (StripeInformation stripe : reader.getStripes()) {
      // hacky but does the job, this casting will work as long as this test resides
// within the same package as ORC reader
OrcProto.StripeFooter footer = ((RecordReaderImpl) recordReader).readStripeFooter(stripe);
for (int i = 0; i < footer.getColumnsCount(); ++i) {
assertEquals(3, footer.getColumnsCount(),
"Expected 3 columns in the footer: One for the Orc Struct, and two for its members.");
assertEquals(
OrcProto.ColumnEncoding.Kind.DIRECT, footer.getColumns(0).getKind(),
"The ORC schema struct should be DIRECT encoded."
);
assertEquals(
OrcProto.ColumnEncoding.Kind.DICTIONARY_V2, footer.getColumns(1).getKind(),
"The shortString column must be DICTIONARY_V2 encoded"
);
assertEquals(
OrcProto.ColumnEncoding.Kind.DIRECT_V2, footer.getColumns(2).getKind(),
"The longString column must be DIRECT_V2 encoded"
);
}
}
}
@ParameterizedTest
@MethodSource("data")
public void testForcedNonDictionary(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
// Set the row stride to 16k so that it is a multiple of the batch size
final int INDEX_STRIDE = 16 * 1024;
final int NUM_BATCHES = 50;
// Explicitly turn off dictionary encoding.
OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.setDouble(conf, 0);
TypeDescription schema = TypeDescription.fromString("struct<str:string>");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(INDEX_STRIDE))) {
// Write 50 batches where each batch has a single value for str.
VectorizedRowBatch batch = schema.createRowBatchV2();
BytesColumnVector col = (BytesColumnVector) batch.cols[0];
for(int b=0; b < NUM_BATCHES; ++b) {
batch.reset();
batch.size = 1024;
col.setVal(0, ("Value for " + b).getBytes(StandardCharsets.UTF_8));
col.isRepeating = true;
writer.addRowBatch(batch);
}
}
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReaderImpl rows = (RecordReaderImpl) reader.rows()) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
BytesColumnVector col = (BytesColumnVector) batch.cols[0];
// Get the index for the str column
OrcProto.RowIndex index = rows.readRowIndex(0, null, null)
.getRowGroupIndex()[1];
// We assume that it fits in a single stripe
assertEquals(1, reader.getStripes().size());
// There are 4 entries, because ceil(NUM_BATCHES * 1024 / INDEX_STRIDE) = 4.
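      // (NUM_BATCHES * 1024 = 50 * 1024 = 51,200 rows; 51,200 / 16,384 = 3.125, rounded up to 4.)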
assertEquals(4, index.getEntryCount());
for(int e=0; e < index.getEntryCount(); ++e) {
OrcProto.RowIndexEntry entry = index.getEntry(e);
// For a string column with direct encoding, compression & no nulls, we
// should have 5 positions in each entry.
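        // (Most likely 2 positions for the compressed DATA stream and 3 for the compressed
        // RLE LENGTH stream, though the exact split is an implementation detail.)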
assertEquals(5, entry.getPositionsCount(), "position count entry " + e);
// make sure we can seek and get the right data
int row = e * INDEX_STRIDE;
rows.seekToRow(row);
assertTrue(rows.nextBatch(batch), "entry " + e);
assertEquals(1024, batch.size, "entry " + e);
assertTrue(col.noNulls, "entry " + e);
assertEquals("Value for " + (row / 1024), col.toString(0), "entry " + e);
}
}
}
/**
   * Test that when we disable dictionaries, we don't get broken row indexes.
*/
@ParameterizedTest
@MethodSource("data")
public void testRowIndex(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
TypeDescription schema =
TypeDescription.fromString("struct<str:string>");
// turn off the dictionaries
OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.setDouble(conf, 0);
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(4 * 1024));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector strVector = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < 32 * 1024; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
byte[] value = String.format("row %06d", i).getBytes(StandardCharsets.UTF_8);
strVector.setRef(batch.size, value, 0, value.length);
++batch.size;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder(conf)
.lessThan("str", PredicateLeaf.Type.STRING, "row 001000")
.build();
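    // The predicate matches only rows 0-999; with SARG-to-filter disabled below, pruning happens
    // at row-group granularity (stride 4096), so exactly the first 4096 rows should come back.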
Reader.Options options = reader.options().searchArgument(sarg, null).allowSARGToFilter(false);
RecordReader recordReader = reader.rows(options);
batch = reader.getSchema().createRowBatch();
strVector = (BytesColumnVector) batch.cols[0];
long base = 0;
while (recordReader.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
String value = String.format("row %06d", r + base);
assertEquals(value, strVector.toString(r), "row " + (r + base));
}
base += batch.size;
}
// We should only read the first row group.
assertEquals(4 * 1024, base);
}
/**
* Test that files written before ORC-569 are read correctly.
*/
@ParameterizedTest
@MethodSource("data")
public void testRowIndexPreORC569(String dictImpl) throws Exception {
OrcConf.DICTIONARY_IMPL.setString(conf, dictImpl);
testFilePath = new Path(System.getProperty("example.dir"), "TestStringDictionary.testRowIndex.orc");
SearchArgument sarg = SearchArgumentFactory.newBuilder(conf)
.lessThan("str", PredicateLeaf.Type.STRING, "row 001000")
.build();
try (Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs))) {
Reader.Options options = reader.options().searchArgument(sarg, null).allowSARGToFilter(false);
try (RecordReader recordReader = reader.rows(options)) {
VectorizedRowBatch batch = reader.getSchema().createRowBatch();
BytesColumnVector strVector = (BytesColumnVector) batch.cols[0];
long base = 0;
while (recordReader.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
String value = String.format("row %06d", r + base);
assertEquals(value, strVector.toString(r), "row " + (r + base));
}
base += batch.size;
}
// We should only read the first row group.
assertEquals(4 * 1024, base);
}
try (RecordReader recordReader = reader.rows()) {
VectorizedRowBatch batch = reader.getSchema().createRowBatch();
recordReader.seekToRow(4 * 1024);
assertTrue(recordReader.nextBatch(batch));
recordReader.seekToRow(0);
assertTrue(recordReader.nextBatch(batch));
}
}
}
}
| 25,771 | 37.011799 | 111 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestTypeDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
public class TestTypeDescription {
@Test
public void testJson() {
TypeDescription bin = TypeDescription.createBinary();
assertEquals("{\"category\": \"binary\", \"id\": 0, \"max\": 0}",
bin.toJson());
assertEquals("binary", bin.toString());
TypeDescription struct = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal());
assertEquals("struct<f1:int,f2:string,f3:decimal(38,10)>",
struct.toString());
assertEquals("{"
+ "\"category\": \"struct\", "
+ "\"id\": 0, \"max\": 3, "
+ "\"fields\": [\n"
+ "{ \"f1\": {\"category\": \"int\", \"id\": 1, \"max\": 1}},\n"
+ "{ \"f2\": {\"category\": \"string\", \"id\": 2, \"max\": 2}},\n"
+ "{ \"f3\": {\"category\": \"decimal\", \"id\": 3, \"max\": 3, \"precision\": 38, \"scale\": 10}}"
+ "]"
+ "}",
struct.toJson());
struct = TypeDescription.createStruct()
.addField("f1", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createByte())
.addUnionChild(TypeDescription.createDecimal()
.withPrecision(20).withScale(10)))
.addField("f2", TypeDescription.createStruct()
.addField("f3", TypeDescription.createDate())
.addField("f4", TypeDescription.createDouble())
.addField("f5", TypeDescription.createBoolean()))
.addField("f6", TypeDescription.createChar().withMaxLength(100));
assertEquals("struct<f1:uniontype<tinyint,decimal(20,10)>,f2:struct<f3:date,f4:double,f5:boolean>,f6:char(100)>",
struct.toString());
assertEquals(
"{\"category\": \"struct\", "
+ "\"id\": 0, "
+ "\"max\": 8, "
+ "\"fields\": [\n" +
"{ \"f1\": {\"category\": \"uniontype\", \"id\": 1, \"max\": 3, \"children\": [\n" +
" {\"category\": \"tinyint\", \"id\": 2, \"max\": 2},\n" +
" {\"category\": \"decimal\", \"id\": 3, \"max\": 3, \"precision\": 20, \"scale\": 10}]}},\n" +
"{ \"f2\": {\"category\": \"struct\", \"id\": 4, \"max\": 7, \"fields\": [\n" +
"{ \"f3\": {\"category\": \"date\", \"id\": 5, \"max\": 5}},\n" +
"{ \"f4\": {\"category\": \"double\", \"id\": 6, \"max\": 6}},\n" +
"{ \"f5\": {\"category\": \"boolean\", \"id\": 7, \"max\": 7}}]}},\n" +
"{ \"f6\": {\"category\": \"char\", \"id\": 8, \"max\": 8, \"length\": 100}}]}",
struct.toJson());
}
@Test
public void testSpecialFieldNames() {
TypeDescription type = TypeDescription.createStruct()
.addField("foo bar", TypeDescription.createInt())
.addField("`some`thing`", TypeDescription.createInt())
.addField("èœ", TypeDescription.createInt())
.addField("1234567890_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ", TypeDescription.createInt())
.addField("'!@#$%^&*()-=_+", TypeDescription.createInt());
assertEquals("struct<`foo bar`:int,```some``thing```:int,`èœ`:int," +
"1234567890_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ:int," +
"`'!@#$%^&*()-=_+`:int>", type.toString());
}
@Test
public void testParserSimple() {
TypeDescription expected = TypeDescription.createStruct()
.addField("b1", TypeDescription.createBinary())
.addField("b2", TypeDescription.createBoolean())
.addField("b3", TypeDescription.createByte())
.addField("c", TypeDescription.createChar().withMaxLength(10))
.addField("d1", TypeDescription.createDate())
.addField("d2", TypeDescription.createDecimal().withScale(5).withPrecision(20))
.addField("d3", TypeDescription.createDouble())
.addField("fff", TypeDescription.createFloat())
.addField("int", TypeDescription.createInt())
.addField("l", TypeDescription.createList
(TypeDescription.createLong()))
.addField("map", TypeDescription.createMap
(TypeDescription.createShort(), TypeDescription.createString()))
.addField("str", TypeDescription.createStruct()
.addField("u", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createTimestamp())
.addUnionChild(TypeDescription.createVarchar()
.withMaxLength(100))))
.addField("tz", TypeDescription.createTimestampInstant())
.addField("ts", TypeDescription.createTimestamp());
String expectedStr =
"struct<b1:binary,b2:boolean,b3:tinyint,c:char(10),d1:date," +
"d2:decimal(20,5),d3:double,fff:float,int:int,l:array<bigint>," +
"map:map<smallint,string>,str:struct<u:uniontype<timestamp," +
"varchar(100)>>,tz:timestamp with local time zone,ts:timestamp>";
assertEquals(expectedStr, expected.toString());
TypeDescription actual = TypeDescription.fromString(expectedStr);
assertEquals(expected, actual);
assertEquals(expectedStr, actual.toString());
}
@Test
public void testParserUpper() {
TypeDescription type = TypeDescription.fromString("BIGINT");
assertEquals(TypeDescription.Category.LONG, type.getCategory());
type = TypeDescription.fromString("STRUCT<MY_FIELD:INT>");
assertEquals(TypeDescription.Category.STRUCT, type.getCategory());
assertEquals("MY_FIELD", type.getFieldNames().get(0));
assertEquals(TypeDescription.Category.INT,
type.getChildren().get(0).getCategory());
type = TypeDescription.fromString("UNIONTYPE< TIMESTAMP WITH LOCAL TIME ZONE >");
assertEquals(TypeDescription.Category.UNION, type.getCategory());
assertEquals(TypeDescription.Category.TIMESTAMP_INSTANT,
type.getChildren().get(0).getCategory());
}
@Test
public void testSpecialFieldNameParser() {
TypeDescription type = TypeDescription.fromString("struct<`foo bar`:int," +
"```quotes```:double,`abc``def````ghi`:float>");
assertEquals(TypeDescription.Category.STRUCT, type.getCategory());
List<String> fields = type.getFieldNames();
assertEquals(3, fields.size());
assertEquals("foo bar", fields.get(0));
assertEquals("`quotes`", fields.get(1));
assertEquals("abc`def``ghi", fields.get(2));
}
@Test
public void testMissingField() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("struct<");
});
assertTrue(e.getMessage().contains("Missing name at 'struct<^'"));
}
@Test
public void testQuotedField1() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("struct<`abc");
});
assertTrue(e.getMessage().contains("Unmatched quote at 'struct<^`abc'"));
}
@Test
public void testQuotedField2() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("struct<``:int>");
});
assertTrue(e.getMessage().contains("Empty quoted field name at 'struct<``^:int>'"));
}
@Test
public void testParserUnknownCategory() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("FOOBAR");
});
assertTrue(e.getMessage().contains("Can't parse category at 'FOOBAR^'"));
}
@Test
public void testParserEmptyCategory() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("<int>");
});
assertTrue(e.getMessage().contains("Can't parse category at '^<int>'"));
}
@Test
public void testParserMissingInt() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("char()");
});
assertTrue(e.getMessage().contains("Missing integer at 'char(^)'"));
}
@Test
public void testParserMissingSize() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("struct<c:char>");
});
assertTrue(e.getMessage().contains("Missing required char '(' at 'struct<c:char^>'"));
}
@Test
public void testParserExtraStuff() {
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
TypeDescription.fromString("struct<i:int>,");
});
assertTrue(e.getMessage().contains("Extra characters at 'struct<i:int>^,'"));
}
@Test
public void testConnectedListSubtrees() {
TypeDescription type =
TypeDescription.fromString("struct<field1:array<struct<field2:int>>>");
TypeDescription leaf = type.getChildren().get(0)
.getChildren().get(0)
.getChildren().get(0);
assertEquals(3, leaf.getId());
assertEquals(0, type.getId());
assertEquals(3, leaf.getId());
}
@Test
public void testConnectedMapSubtrees() {
TypeDescription type =
TypeDescription.fromString("struct<field1:map<string,int>>");
TypeDescription leaf = type.getChildren().get(0).getChildren().get(0);
assertEquals(2, leaf.getId());
assertEquals(0, type.getId());
assertEquals(2, leaf.getId());
}
@Test
public void testFindSubtype() {
TypeDescription type = TypeDescription.fromString(
"struct<a:int," +
"b:struct<c:array<int>,d:map<string,struct<e:string>>>," +
"f:string," +
"g:uniontype<string,int>>");
assertEquals(0, type.findSubtype("0").getId());
assertEquals(1, type.findSubtype("a").getId());
assertEquals(2, type.findSubtype("b").getId());
assertEquals(3, type.findSubtype("b.c").getId());
assertEquals(4, type.findSubtype("b.c._elem").getId());
assertEquals(5, type.findSubtype("b.d").getId());
assertEquals(6, type.findSubtype("b.d._key").getId());
assertEquals(7, type.findSubtype("b.d._value").getId());
assertEquals(8, type.findSubtype("b.d._value.e").getId());
assertEquals(9, type.findSubtype("f").getId());
assertEquals(10, type.findSubtype("g").getId());
assertEquals(11, type.findSubtype("g.0").getId());
assertEquals(12, type.findSubtype("g.1").getId());
}
@Test
public void testBadFindSubtype() {
TypeDescription type = TypeDescription.fromString(
"struct<a:int," +
"b:struct<c:array<int>,d:map<string,struct<e:string>>>," +
"f:string," +
"g:uniontype<string,int>>");
try {
type.findSubtype("13");
fail();
} catch (IllegalArgumentException e) {
// PASS
}
try {
type.findSubtype("aa");
fail();
} catch (IllegalArgumentException e) {
// PASS
}
try {
type.findSubtype("b.a");
fail();
} catch (IllegalArgumentException e) {
// PASS
}
try {
type.findSubtype("g.2");
fail();
} catch (IllegalArgumentException e) {
// PASS
}
try {
type.findSubtype("b.c.d");
fail();
} catch (IllegalArgumentException e) {
// PASS
}
}
@Test
public void testFindSubtypes() {
TypeDescription type = TypeDescription.fromString(
"struct<a:int," +
"b:struct<c:array<int>,d:map<string,struct<e:string>>>," +
"f:string," +
"g:uniontype<string,int>>");
List<TypeDescription> results = type.findSubtypes("a");
assertEquals(1, results.size());
assertEquals(1, results.get(0).getId());
results = type.findSubtypes("b.d._value.e,3,g.0");
assertEquals(3, results.size());
assertEquals(8, results.get(0).getId());
assertEquals(3, results.get(1).getId());
assertEquals(11, results.get(2).getId());
results = type.findSubtypes("");
assertEquals(0, results.size());
}
@Test
public void testFindSubtypesAcid() {
TypeDescription type = TypeDescription.fromString(
"struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<col0:int,col1:struct<z:int,x:double,y:string>," +
"col2:double>>");
List<TypeDescription> results = type.findSubtypes("col0");
assertEquals(1, results.size());
assertEquals(7, results.get(0).getId());
results = type.findSubtypes("col1,col2,col1.x,col1.z");
assertEquals(4, results.size());
assertEquals(8, results.get(0).getId());
assertEquals(12, results.get(1).getId());
assertEquals(10, results.get(2).getId());
assertEquals(9, results.get(3).getId());
results = type.findSubtypes("");
assertEquals(0, results.size());
}
@Test
public void testAttributes() throws IOException {
TypeDescription schema = TypeDescription.fromString(
"struct<" +
"name:struct<first:string,last:string>," +
"address:struct<street:string,city:string,country:string,post_code:string>," +
"credit_cards:array<struct<card_number:string,expire:date,ccv:string>>>");
// set some attributes
schema.findSubtype("name").setAttribute("iceberg.id", "12");
schema.findSubtype("address.street").setAttribute("mask", "nullify")
.setAttribute("context", "pii");
TypeDescription clone = schema.clone();
assertEquals("12", clone.findSubtype("name").getAttributeValue("iceberg.id"));
clone.findSubtype("name").removeAttribute("iceberg.id");
assertEquals(0, clone.findSubtype("name").getAttributeNames().size());
assertEquals(1, schema.findSubtype("name").getAttributeNames().size());
// write a file with those attributes
Path path = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"), "attribute.orc");
Configuration conf = new Configuration();
Writer writer = OrcFile.createWriter(path,
OrcFile.writerOptions(conf).setSchema(schema).overwrite(true));
writer.close();
// read the file back again
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
TypeDescription readerSchema = reader.getSchema();
// make sure that the read types have the attributes
TypeDescription nameCol = readerSchema.findSubtype("name");
assertArrayEquals(new Object[]{"iceberg.id"},
nameCol.getAttributeNames().toArray());
assertEquals("12", nameCol.getAttributeValue("iceberg.id"));
TypeDescription street = readerSchema.findSubtype("address.street");
assertArrayEquals(new Object[]{"context", "mask"},
street.getAttributeNames().toArray());
assertEquals("pii", street.getAttributeValue("context"));
assertEquals("nullify", street.getAttributeValue("mask"));
assertNull(street.getAttributeValue("foobar"));
}
@Test
public void testAttributesEquality() {
TypeDescription schema = TypeDescription.fromString(
"struct<" +
"name:struct<first:string,last:string>," +
"address:struct<street:string,city:string,country:string,post_code:string>," +
"credit_cards:array<struct<card_number:string,expire:date,ccv:string>>>");
// set some attributes
schema.findSubtype("name").setAttribute("iceberg.id", "12");
schema.findSubtype("address.street").setAttribute("mask", "nullify")
.setAttribute("context", "pii");
TypeDescription clone = schema.clone();
assertEquals(3, clearAttributes(clone));
assertNotEquals(clone, schema);
assertTrue(clone.equals(schema, false));
}
static int clearAttributes(TypeDescription schema) {
int result = 0;
for(String attribute: schema.getAttributeNames()) {
schema.removeAttribute(attribute);
result += 1;
}
List<TypeDescription> children = schema.getChildren();
if (children != null) {
for (TypeDescription child : children) {
result += clearAttributes(child);
}
}
return result;
}
@Test
public void testEncryption() {
String schemaString = "struct<" +
"name:struct<first:string,last:string>," +
"address:struct<street:string,city:string,country:string,post_code:string>," +
"credit_cards:array<struct<card_number:string,expire:date,ccv:string>>>";
TypeDescription schema = TypeDescription.fromString(schemaString);
TypeDescription copy = TypeDescription.fromString(schemaString);
assertEquals(copy, schema);
// set some encryption
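    // The encryption spec is a semicolon-separated list of "keyName:col1,col2,..." clauses.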
schema.annotateEncryption("pii:name,address.street;credit:credit_cards", null);
assertEquals("pii",
schema.findSubtype("name").getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE));
assertEquals("pii",
schema.findSubtype("address.street").getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE));
assertEquals("credit",
schema.findSubtype("credit_cards").getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE));
assertNotEquals(copy, schema);
assertEquals(3, clearAttributes(schema));
assertEquals(copy, schema);
schema.annotateEncryption("pii:name.first", "redact,Yy:name.first");
// check that we ignore if already set
schema.annotateEncryption("pii:name.first", "redact,Yy:name.first,credit_cards");
assertEquals("pii",
schema.findSubtype("name.first").getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE));
assertEquals("redact,Yy",
schema.findSubtype("name.first").getAttributeValue(TypeDescription.MASK_ATTRIBUTE));
assertEquals("redact,Yy",
schema.findSubtype("credit_cards").getAttributeValue(TypeDescription.MASK_ATTRIBUTE));
assertEquals(3, clearAttributes(schema));
schema.annotateEncryption("pii:name", "redact:name.first;nullify:name.last");
assertEquals("pii",
schema.findSubtype("name").getAttributeValue(TypeDescription.ENCRYPT_ATTRIBUTE));
assertEquals("redact",
schema.findSubtype("name.first").getAttributeValue(TypeDescription.MASK_ATTRIBUTE));
assertEquals("nullify",
schema.findSubtype("name.last").getAttributeValue(TypeDescription.MASK_ATTRIBUTE));
assertEquals(3, clearAttributes(schema));
}
@Test
public void testEncryptionConflict() {
TypeDescription schema = TypeDescription.fromString(
"struct<" +
"name:struct<first:string,last:string>," +
"address:struct<street:string,city:string,country:string,post_code:string>," +
"credit_cards:array<struct<card_number:string,expire:date,ccv:string>>>");
// set some encryption
assertThrows(IllegalArgumentException.class, () ->
schema.annotateEncryption("pii:address,personal:address",null));
}
@Test
public void testMaskConflict() {
TypeDescription schema = TypeDescription.fromString(
"struct<" +
"name:struct<first:string,last:string>," +
"address:struct<street:string,city:string,country:string,post_code:string>," +
"credit_cards:array<struct<card_number:string,expire:date,ccv:string>>>");
// set some encryption
assertThrows(IllegalArgumentException.class, () ->
schema.annotateEncryption(null,"nullify:name;sha256:name"));
}
@Test
public void testGetFullFieldName() {
TypeDescription schema = TypeDescription.fromString(
"struct<" +
"name:struct<first:string,last:string>," +
"address:struct<street:string,city:string,country:string,post_code:string>," +
"credit_cards:array<struct<card_number:string,expire:date,ccv:string>>," +
"properties:map<string,uniontype<int,string>>>");
for (String column: new String[]{"0", "name", "name.first", "name.last",
"address.street", "address.city",
"credit_cards", "credit_cards._elem",
"credit_cards._elem.card_number",
"properties", "properties._key", "properties._value",
"properties._value.0", "properties._value.1"}) {
assertEquals(column,
schema.findSubtype(column, true).getFullFieldName());
}
}
@Test
public void testSetAttribute() {
TypeDescription type = TypeDescription.fromString("int");
type.setAttribute("key1", null);
assertEquals(0, type.getAttributeNames().size());
}
@Test
public void testHashCode() {
// Should not throw NPE
TypeDescription.fromString("int").hashCode();
}
}
| 21,903 | 40.563567 | 117 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestUnicode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestUnicode {
Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
private static Stream<Arguments> data() {
ArrayList<Arguments> data = new ArrayList<>();
for (int j = 0; j < 2; j++) {
for (int i = 1; i <= 5; i++) {
data.add(Arguments.of(j == 0 ? "char" : "varchar", i, true));
}
}
return data.stream();
}
static final String[] utf8strs = new String[] {
// Character.UnicodeBlock GREEK (2 bytes)
"\u03b1\u03b2\u03b3", "\u03b1\u03b2", "\u03b1\u03b2\u03b3\u03b4",
"\u03b1\u03b2\u03b3\u03b4",
// Character.UnicodeBlock MALAYALAM (3 bytes)
"\u0d06\u0d30\u0d3e", "\u0d0e\u0d28\u0d4d\u0d24\u0d3e", "\u0d13\u0d7c\u0d15\u0d4d",
// Unicode emoji (4 bytes)
"\u270f\ufe0f\ud83d\udcdd\u270f\ufe0f", "\ud83c\udf3b\ud83d\udc1d\ud83c\udf6f",
"\ud83c\udf7a\ud83e\udd43\ud83c\udf77" };
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@ParameterizedTest
@MethodSource("data")
public void testUtf8(String type, int maxLength, boolean hasRTrim) throws Exception {
if (type.equals("varchar")) {
testVarChar(maxLength);
} else {
testChar(maxLength, hasRTrim);
}
}
// copied from HiveBaseChar
public static String enforceMaxLength(String val, int maxLength) {
if (val == null) {
return null;
}
String value = val;
if (maxLength > 0) {
int valLength = val.codePointCount(0, val.length());
if (valLength > maxLength) {
// Truncate the excess chars to fit the character length.
// Also make sure we take supplementary chars into account.
value = val.substring(0, val.offsetByCodePoints(0, maxLength));
}
}
return value;
}
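  // For example, with maxLength = 1 a string of two 4-byte emoji (two code points, four Java
  // chars) is truncated to just the first emoji (one code point, two chars), because the limit
  // is applied to code points rather than chars.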
// copied from HiveBaseChar
public static String getPaddedValue(String val, int maxLength, boolean rtrim) {
if (val == null) {
return null;
}
if (maxLength < 0) {
return val;
}
int valLength = val.codePointCount(0, val.length());
if (valLength > maxLength) {
return enforceMaxLength(val, maxLength);
}
if (maxLength > valLength && rtrim == false) {
// Make sure we pad the right amount of spaces; valLength is in terms of code points,
// while StringUtils.rpad() is based on the number of java chars.
int padLength = val.length() + (maxLength - valLength);
val = StringUtils.rightPad(val, padLength);
}
return val;
}
public void testChar(int maxLength, boolean hasRTrim) throws Exception {
// char(n)
TypeDescription schema = TypeDescription.createChar().withMaxLength(maxLength);
String[] expected = new String[utf8strs.length];
for (int i = 0; i < utf8strs.length; i++) {
expected[i] = getPaddedValue(utf8strs[i], maxLength, hasRTrim);
}
verifyWrittenStrings(schema, utf8strs, expected, maxLength);
}
public void testVarChar(int maxLength) throws Exception {
    // varchar(n)
TypeDescription schema = TypeDescription.createVarchar().withMaxLength(maxLength);
String[] expected = new String[utf8strs.length];
for (int i = 0; i < utf8strs.length; i++) {
expected[i] = enforceMaxLength(utf8strs[i], maxLength);
}
verifyWrittenStrings(schema, utf8strs, expected, maxLength);
}
public void verifyWrittenStrings(TypeDescription schema, String[] inputs, String[] expected, int maxLength)
throws Exception {
Writer writer =
OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE).bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector col = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < inputs.length; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
col.setVal(batch.size++, inputs[i].getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader =
OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
col = (BytesColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
assertEquals(expected[idx], toString(col, r),
String.format("test for %s:%d", schema, maxLength));
idx++;
}
}
fs.delete(testFilePath, false);
}
static String toString(BytesColumnVector vector, int row) {
if (vector.isRepeating) {
row = 0;
}
if (!vector.noNulls && vector.isNull[row]) {
return null;
}
return new String(vector.vector[row], vector.start[row], vector.length[row],
StandardCharsets.UTF_8);
}
}
| 6,743 | 34.308901 | 109 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestUnrolledBitPack.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.File;
import java.util.List;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestUnrolledBitPack {
private static Stream<Arguments> data() {
return Stream.of(
Arguments.of(-1),
Arguments.of(1),
Arguments.of(7),
Arguments.of(-128),
Arguments.of(32000),
Arguments.of(8300000),
Arguments.of(Integer.MAX_VALUE),
Arguments.of(540000000000L),
Arguments.of(140000000000000L),
Arguments.of(36000000000000000L),
Arguments.of(Long.MAX_VALUE));
}
Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@ParameterizedTest
@MethodSource("data")
public void testBitPacking(long val) throws Exception {
TypeDescription schema = TypeDescription.createLong();
long[] inp = new long[] { val, 0, val, val, 0, val, 0, val, val, 0, val, 0, val, val, 0, 0,
val, val, 0, val, 0, 0, val, 0, val, 0, val, 0, 0, val, 0, val, 0, val, 0, 0, val, 0, val,
0, val, 0, 0, val, 0, val, 0, val, 0, 0, val, 0, val, 0, val, 0, 0, val, 0, val, 0, val, 0,
0, val, 0, val, 0, val, 0, 0, val, 0, val, 0, val, 0, 0, val, 0, val, 0, val, 0, 0, val, 0,
val, 0, val, 0, 0, val, 0, val, 0, 0, val, val };
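    // The zeros interleaved with val presumably keep the values from being run-length encoded,
    // so the writer has to bit-pack them at the width required by val, exercising the unrolled
    // bit-packing read paths for that width.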
List<Long> input = Lists.newArrayList(Longs.asList(inp));
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.compress(CompressionKind.NONE).bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch();
for (Long l : input) {
int row = batch.size++;
((LongColumnVector) batch.cols[0]).vector[row] = l;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(input.get(idx++).longValue(),
((LongColumnVector) batch.cols[0]).vector[r]);
}
}
}
}
| 4,029 | 35.636364 | 99 | java |
null | orc-main/java/core/src/test/org/apache/orc/TestVectorOrcFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.OrcFile.Version;
import org.apache.orc.OrcFile.WriterOptions;
import org.apache.orc.impl.DataReaderProperties;
import org.apache.orc.impl.InStream;
import org.apache.orc.impl.KeyProvider;
import org.apache.orc.impl.MemoryManagerImpl;
import org.apache.orc.impl.OrcCodecPool;
import org.apache.orc.impl.OrcIndex;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.impl.RecordReaderImpl;
import org.apache.orc.impl.RecordReaderUtils;
import org.apache.orc.impl.WriterImpl;
import org.apache.orc.impl.reader.ReaderEncryption;
import org.apache.orc.impl.reader.StripePlanner;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.Mockito;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.sql.Date;
import java.sql.Timestamp;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TimeZone;
import java.util.UUID;
import java.util.function.IntFunction;
import java.util.stream.Stream;
import static org.apache.orc.impl.ReaderImpl.DEFAULT_COMPRESSION_BLOCK_SIZE;
import static org.apache.orc.impl.mask.SHA256MaskFactory.printHexBinary;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
/**
* Tests for the vectorized reader and writer for ORC files.
*/
public class TestVectorOrcFile {
private static Stream<Arguments> data() {
return Stream.of(
Arguments.of(Version.V_0_11),
Arguments.of(Version.V_0_12),
Arguments.of(Version.UNSTABLE_PRE_2_0)
);
}
public static String getFileFromClasspath(String name) {
URL url = ClassLoader.getSystemResource(name);
if (url == null) {
throw new IllegalArgumentException("Could not find " + name);
}
return url.getPath();
}
public static class InnerStruct {
int int1;
Text string1 = new Text();
InnerStruct(int int1, Text string1) {
this.int1 = int1;
this.string1.set(string1);
}
InnerStruct(int int1, String string1) {
this.int1 = int1;
this.string1.set(string1);
}
public String toString() {
return "{" + int1 + ", " + string1 + "}";
}
}
public static class MiddleStruct {
List<InnerStruct> list = new ArrayList<InnerStruct>();
MiddleStruct(InnerStruct... items) {
list.clear();
list.addAll(Arrays.asList(items));
}
}
private static InnerStruct inner(int i, String s) {
return new InnerStruct(i, s);
}
private static Map<String, InnerStruct> map(InnerStruct... items) {
Map<String, InnerStruct> result = new HashMap<String, InnerStruct>();
for(InnerStruct i: items) {
result.put(i.string1.toString(), i);
}
return result;
}
private static List<InnerStruct> list(InnerStruct... items) {
List<InnerStruct> result = new ArrayList<InnerStruct>();
result.addAll(Arrays.asList(items));
return result;
}
protected static BytesWritable bytes(int... items) {
BytesWritable result = new BytesWritable();
result.setSize(items.length);
for(int i=0; i < items.length; ++i) {
result.getBytes()[i] = (byte) items[i];
}
return result;
}
protected static byte[] bytesArray(int... items) {
byte[] result = new byte[items.length];
for(int i=0; i < items.length; ++i) {
result[i] = (byte) items[i];
}
return result;
}
private static ByteBuffer byteBuf(int... items) {
ByteBuffer result = ByteBuffer.allocate(items.length);
for(int item: items) {
result.put((byte) item);
}
result.flip();
return result;
}
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestVectorOrcFile." +
testInfo.getTestMethod().get().getName().replaceFirst("\\[[0-9]+\\]", "")
+ "." + UUID.randomUUID() + ".orc");
fs.delete(testFilePath, false);
}
@ParameterizedTest
@MethodSource("data")
public void testReadFormat_0_11(Version fileFormat) throws Exception {
assumeTrue(fileFormat == Version.V_0_11);
Path oldFilePath =
new Path(getFileFromClasspath("orc-file-11-format.orc"));
Reader reader = OrcFile.createReader(oldFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
int stripeCount = 0;
int rowCount = 0;
long currentOffset = -1;
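    // Verify the stripes are laid out contiguously: each stripe should start where the
    // previous stripe's index, data, and footer ended.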
for(StripeInformation stripe : reader.getStripes()) {
stripeCount += 1;
rowCount += stripe.getNumberOfRows();
if (currentOffset < 0) {
currentOffset = stripe.getOffset() + stripe.getIndexLength()
+ stripe.getDataLength() + stripe.getFooterLength();
} else {
assertEquals(currentOffset, stripe.getOffset());
currentOffset += stripe.getIndexLength() + stripe.getDataLength()
+ stripe.getFooterLength();
}
}
assertEquals(reader.getNumberOfRows(), rowCount);
assertEquals(2, stripeCount);
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(7500, stats[1].getNumberOfValues());
assertEquals(3750, ((BooleanColumnStatistics) stats[1]).getFalseCount());
assertEquals(3750, ((BooleanColumnStatistics) stats[1]).getTrueCount());
assertEquals("count: 7500 hasNull: true true: 3750", stats[1].toString());
assertEquals(2048, ((IntegerColumnStatistics) stats[3]).getMaximum());
assertEquals(1024, ((IntegerColumnStatistics) stats[3]).getMinimum());
assertTrue(((IntegerColumnStatistics) stats[3]).isSumDefined());
assertEquals(11520000, ((IntegerColumnStatistics) stats[3]).getSum());
assertEquals("count: 7500 hasNull: true min: 1024 max: 2048 sum: 11520000",
stats[3].toString());
assertEquals(Long.MAX_VALUE,
((IntegerColumnStatistics) stats[5]).getMaximum());
assertEquals(Long.MAX_VALUE,
((IntegerColumnStatistics) stats[5]).getMinimum());
assertFalse(((IntegerColumnStatistics) stats[5]).isSumDefined());
assertEquals(
"count: 7500 hasNull: true min: 9223372036854775807 max: 9223372036854775807",
stats[5].toString());
assertEquals(-15.0, ((DoubleColumnStatistics) stats[7]).getMinimum(), 0.0001);
assertEquals(-5.0, ((DoubleColumnStatistics) stats[7]).getMaximum(), 0.0001);
assertEquals(-75000.0, ((DoubleColumnStatistics) stats[7]).getSum(),
0.00001);
assertEquals("count: 7500 hasNull: true min: -15.0 max: -5.0 sum: -75000.0",
stats[7].toString());
assertEquals("count: 7500 hasNull: true min: bye max: hi sum: 0", stats[9].toString());
// check the inspectors
TypeDescription schema = reader.getSchema();
assertEquals(TypeDescription.Category.STRUCT, schema.getCategory());
assertEquals("struct<boolean1:boolean,byte1:tinyint,short1:smallint,"
+ "int1:int,long1:bigint,float1:float,double1:double,bytes1:"
+ "binary,string1:string,middle:struct<list:array<struct<int1:int,"
+ "string1:string>>>,list:array<struct<int1:int,string1:string>>,"
+ "map:map<string,struct<int1:int,string1:string>>,ts:timestamp,"
+ "decimal1:decimal(38,10)>", schema.toString());
VectorizedRowBatch batch = schema.createRowBatch();
RecordReader rows = reader.rows();
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
// check the contents of the first row
assertFalse(getBoolean(batch, 0));
assertEquals(1, getByte(batch, 0));
assertEquals(1024, getShort(batch, 0));
assertEquals(65536, getInt(batch, 0));
assertEquals(Long.MAX_VALUE, getLong(batch, 0));
assertEquals(1.0, getFloat(batch, 0), 0.00001);
assertEquals(-15.0, getDouble(batch, 0), 0.00001);
assertEquals(bytes(0, 1, 2, 3, 4), getBinary(batch, 0));
assertEquals("hi", getText(batch, 0).toString());
StructColumnVector middle = (StructColumnVector) batch.cols[9];
ListColumnVector midList = (ListColumnVector) middle.fields[0];
StructColumnVector midListStruct = (StructColumnVector) midList.child;
LongColumnVector midListInt = (LongColumnVector) midListStruct.fields[0];
BytesColumnVector midListStr = (BytesColumnVector) midListStruct.fields[1];
ListColumnVector list = (ListColumnVector) batch.cols[10];
StructColumnVector listStruct = (StructColumnVector) list.child;
LongColumnVector listInts = (LongColumnVector) listStruct.fields[0];
BytesColumnVector listStrs = (BytesColumnVector) listStruct.fields[1];
MapColumnVector map = (MapColumnVector) batch.cols[11];
BytesColumnVector mapKey = (BytesColumnVector) map.keys;
StructColumnVector mapValue = (StructColumnVector) map.values;
LongColumnVector mapValueInts = (LongColumnVector) mapValue.fields[0];
BytesColumnVector mapValueStrs = (BytesColumnVector) mapValue.fields[1];
TimestampColumnVector timestamp = (TimestampColumnVector) batch.cols[12];
DecimalColumnVector decs = (DecimalColumnVector) batch.cols[13];
assertFalse(middle.isNull[0]);
assertEquals(2, midList.lengths[0]);
int start = (int) midList.offsets[0];
assertEquals(1, midListInt.vector[start]);
assertEquals("bye", midListStr.toString(start));
assertEquals(2, midListInt.vector[start + 1]);
assertEquals("sigh", midListStr.toString(start + 1));
assertEquals(2, list.lengths[0]);
start = (int) list.offsets[0];
assertEquals(3, listInts.vector[start]);
assertEquals("good", listStrs.toString(start));
assertEquals(4, listInts.vector[start + 1]);
assertEquals("bad", listStrs.toString(start + 1));
assertEquals(0, map.lengths[0]);
assertEquals(Timestamp.valueOf("2000-03-12 15:00:00"),
timestamp.asScratchTimestamp(0));
assertEquals(new HiveDecimalWritable(HiveDecimal.create("12345678.6547456")),
decs.vector[0]);
// check the contents of row 7499
rows.seekToRow(7499);
assertTrue(rows.nextBatch(batch));
assertTrue(getBoolean(batch, 0));
assertEquals(100, getByte(batch, 0));
assertEquals(2048, getShort(batch, 0));
assertEquals(65536, getInt(batch, 0));
assertEquals(Long.MAX_VALUE, getLong(batch, 0));
assertEquals(2.0, getFloat(batch, 0), 0.00001);
assertEquals(-5.0, getDouble(batch, 0), 0.00001);
assertEquals(bytes(), getBinary(batch, 0));
assertEquals("bye", getText(batch, 0).toString());
assertFalse(middle.isNull[0]);
assertEquals(2, midList.lengths[0]);
start = (int) midList.offsets[0];
assertEquals(1, midListInt.vector[start]);
assertEquals("bye", midListStr.toString(start));
assertEquals(2, midListInt.vector[start + 1]);
assertEquals("sigh", midListStr.toString(start + 1));
assertEquals(3, list.lengths[0]);
start = (int) list.offsets[0];
assertEquals(100000000, listInts.vector[start]);
assertEquals("cat", listStrs.toString(start));
assertEquals(-100000, listInts.vector[start + 1]);
assertEquals("in", listStrs.toString(start + 1));
assertEquals(1234, listInts.vector[start + 2]);
assertEquals("hat", listStrs.toString(start + 2));
assertEquals(2, map.lengths[0]);
start = (int) map.offsets[0];
assertEquals("chani", mapKey.toString(start));
assertEquals(5, mapValueInts.vector[start]);
assertEquals("chani", mapValueStrs.toString(start));
assertEquals("mauddib", mapKey.toString(start + 1));
assertEquals(1, mapValueInts.vector[start + 1]);
assertEquals("mauddib", mapValueStrs.toString(start + 1));
assertEquals(Timestamp.valueOf("2000-03-12 15:00:01"),
timestamp.asScratchTimestamp(0));
assertEquals(new HiveDecimalWritable(HiveDecimal.create("12345678.6547457")),
decs.vector[0]);
// handle the close up
assertFalse(rows.nextBatch(batch));
rows.close();
}
@ParameterizedTest
@MethodSource("data")
public void testTimestampBug(Version fileFormat) throws IOException {
TypeDescription schema = TypeDescription.createTimestamp();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).version(fileFormat));
int batchCount = 5;
    VectorizedRowBatch batch = schema.createRowBatch(batchCount * 2);
TimestampColumnVector vec = (TimestampColumnVector) batch.cols[0];
int[] seconds = new int[]{ -2, -1, 0, 1, 2 };
// write 1st batch with nanosecond <= 999999
int nanos = 999_999;
for (int i = 0; i < batchCount; i++) {
Timestamp curr = Timestamp.from(Instant.ofEpochSecond(seconds[i]));
curr.setNanos(nanos);
vec.set(i, curr);
}
batch.size = batchCount;
writer.addRowBatch(batch);
    // write 2nd batch with nanosecond > 999999
    nanos = 1_000_000;
for (int i = 0; i < batchCount; i++) {
Timestamp curr = Timestamp.from(Instant.ofEpochSecond(seconds[i]));
curr.setNanos(nanos);
vec.set(i, curr);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(batchCount);
TimestampColumnVector timestamps = (TimestampColumnVector) batch.cols[0];
rows.nextBatch(batch);
// read 1st batch with nanosecond <= 999999
for (int r=0; r < batchCount; ++r) {
assertEquals(seconds[r], timestamps.getTimestampAsLong(r));
assertEquals(999_999, timestamps.nanos[r]);
}
rows.nextBatch(batch);
// read 2nd batch with nanosecond > 999999
for (int r=0; r < batchCount; ++r) {
if (seconds[r] == -1) {
// reproduce the JDK bug of java.sql.Timestamp see ORC-763
// Wrong extra second: 1969-12-31 23.59.59.001 -> 1970-01-01 00.00.00.001
assertEquals(0, timestamps.getTimestampAsLong(r));
} else {
assertEquals(seconds[r], timestamps.getTimestampAsLong(r));
}
assertEquals(1_000_000, timestamps.nanos[r]);
}
}
@ParameterizedTest
@MethodSource("data")
public void testTimestamp(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createTimestamp();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).version(fileFormat));
List<Timestamp> tslist = Lists.newArrayList();
tslist.add(Timestamp.valueOf("2037-01-01 00:00:00.000999"));
tslist.add(Timestamp.valueOf("2003-01-01 00:00:00.000000222"));
tslist.add(Timestamp.valueOf("1999-01-01 00:00:00.999999999"));
tslist.add(Timestamp.valueOf("1995-01-01 00:00:00.688888888"));
tslist.add(Timestamp.valueOf("2002-01-01 00:00:00.1"));
tslist.add(Timestamp.valueOf("2010-03-02 00:00:00.000009001"));
tslist.add(Timestamp.valueOf("2005-01-01 00:00:00.000002229"));
tslist.add(Timestamp.valueOf("2006-01-01 00:00:00.900203003"));
tslist.add(Timestamp.valueOf("2003-01-01 00:00:00.800000007"));
tslist.add(Timestamp.valueOf("1996-08-02 00:00:00.723100809"));
tslist.add(Timestamp.valueOf("1998-11-02 00:00:00.857340643"));
tslist.add(Timestamp.valueOf("2008-10-02 00:00:00"));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
TimestampColumnVector vec = new TimestampColumnVector(1024);
batch.cols[0] = vec;
batch.reset();
batch.size = tslist.size();
for (int i=0; i < tslist.size(); ++i) {
Timestamp ts = tslist.get(i);
vec.set(i, ts);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
TimestampColumnVector timestamps = (TimestampColumnVector) batch.cols[0];
int idx = 0;
while (rows.nextBatch(batch)) {
for(int r=0; r < batch.size; ++r) {
assertEquals(tslist.get(idx++).getNanos(),
timestamps.asScratchTimestamp(r).getNanos());
}
}
assertEquals(tslist.size(), rows.getRowNumber());
assertEquals(0, writer.getSchema().getMaximumId());
boolean[] expected = new boolean[] {false};
boolean[] included = OrcUtils.includeColumns("", writer.getSchema());
assertTrue(Arrays.equals(expected, included));
}
@ParameterizedTest
@MethodSource("data")
public void testStringAndBinaryStatistics(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("bytes1", TypeDescription.createBinary())
.addField("string1", TypeDescription.createString());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 4;
BytesColumnVector field1 = (BytesColumnVector) batch.cols[0];
BytesColumnVector field2 = (BytesColumnVector) batch.cols[1];
field1.setVal(0, bytesArray(0, 1, 2, 3, 4));
field1.setVal(1, bytesArray(0, 1, 2, 3));
field1.setVal(2, bytesArray(0, 1, 2, 3, 4, 5));
field1.noNulls = false;
field1.isNull[3] = true;
field2.setVal(0, "foo".getBytes(StandardCharsets.UTF_8));
field2.setVal(1, "bar".getBytes(StandardCharsets.UTF_8));
field2.noNulls = false;
field2.isNull[2] = true;
field2.setVal(3, "hi".getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
writer.close();
schema = writer.getSchema();
assertEquals(2, schema.getMaximumId());
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
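    // includeColumns returns one flag per column id: 0 is the root struct, 1 is bytes1,
    // 2 is string1.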
boolean[] expected = new boolean[] {false, false, true};
boolean[] included = OrcUtils.includeColumns("string1", schema);
assertTrue(Arrays.equals(expected, included));
expected = new boolean[] {false, false, false};
included = OrcUtils.includeColumns("", schema);
assertTrue(Arrays.equals(expected, included));
expected = new boolean[] {false, false, false};
included = OrcUtils.includeColumns(null, schema);
assertTrue(Arrays.equals(expected, included));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertArrayEquals(stats, writer.getStatistics());
assertEquals(4, stats[0].getNumberOfValues());
assertEquals("count: 4 hasNull: false", stats[0].toString());
assertEquals(3, stats[1].getNumberOfValues());
assertEquals(15, ((BinaryColumnStatistics) stats[1]).getSum());
assertEquals("count: 3 hasNull: true bytesOnDisk: 28 sum: 15", stats[1].toString());
assertEquals(3, stats[2].getNumberOfValues());
assertEquals("bar", ((StringColumnStatistics) stats[2]).getMinimum());
assertEquals("hi", ((StringColumnStatistics) stats[2]).getMaximum());
assertEquals(8, ((StringColumnStatistics) stats[2]).getSum());
assertEquals("count: 3 hasNull: true bytesOnDisk: " +
(fileFormat == OrcFile.Version.V_0_11 ? "30" : "22") +
" min: bar max: hi sum: 8",
stats[2].toString());
// check the inspectors
batch = reader.getSchema().createRowBatch();
BytesColumnVector bytes = (BytesColumnVector) batch.cols[0];
BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
RecordReader rows = reader.rows();
assertTrue(rows.nextBatch(batch));
assertEquals(4, batch.size);
// check the contents of the first row
assertEquals(bytes(0,1,2,3,4), getBinary(bytes, 0));
assertEquals("foo", strs.toString(0));
// check the contents of second row
assertEquals(bytes(0,1,2,3), getBinary(bytes, 1));
assertEquals("bar", strs.toString(1));
// check the contents of third row
assertEquals(bytes(0,1,2,3,4,5), getBinary(bytes, 2));
assertNull(strs.toString(2));
// check the contents of fourth row
assertNull(getBinary(bytes, 3));
assertEquals("hi", strs.toString(3));
// handle the close up
assertFalse(rows.nextBatch(batch));
rows.close();
}
@ParameterizedTest
@MethodSource("data")
public void testHiveDecimalStatsAllNulls(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("dec1", TypeDescription.createDecimal());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 4;
DecimalColumnVector field1 = (DecimalColumnVector) batch.cols[0];
field1.noNulls = false;
field1.isNull[0] = true;
field1.isNull[1] = true;
field1.isNull[2] = true;
field1.isNull[3] = true;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(4, stats[0].getNumberOfValues());
assertEquals(0, stats[1].getNumberOfValues());
assertTrue(stats[1].hasNull());
assertNull(((DecimalColumnStatistics)stats[1]).getMinimum());
assertNull(((DecimalColumnStatistics)stats[1]).getMaximum());
assertEquals(new HiveDecimalWritable(0).getHiveDecimal(), ((DecimalColumnStatistics)stats[1]).getSum());
}
@ParameterizedTest
@MethodSource("data")
public void testStripeLevelStats(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<int1:int,string1:string>");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.enforceBufferSize()
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1000;
LongColumnVector field1 = (LongColumnVector) batch.cols[0];
BytesColumnVector field2 = (BytesColumnVector) batch.cols[1];
field1.isRepeating = true;
field2.isRepeating = true;
for (int b = 0; b < 11; b++) {
if (b >= 5) {
if (b >= 10) {
field1.vector[0] = 3;
field2.setVal(0, "three".getBytes(StandardCharsets.UTF_8));
} else {
field1.vector[0] = 2;
field2.setVal(0, "two".getBytes(StandardCharsets.UTF_8));
}
} else {
field1.vector[0] = 1;
field2.setVal(0, "one".getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
schema = writer.getSchema();
assertEquals(2, schema.getMaximumId());
boolean[] expected = new boolean[] {false, true, false};
boolean[] included = OrcUtils.includeColumns("int1", schema);
assertTrue(Arrays.equals(expected, included));
List<StripeStatistics> stats = reader.getStripeStatistics();
int numStripes = stats.size();
assertEquals(3, numStripes);
StripeStatistics ss1 = stats.get(0);
StripeStatistics ss2 = stats.get(1);
StripeStatistics ss3 = stats.get(2);
assertEquals(5000, ss1.getColumnStatistics()[0].getNumberOfValues());
assertEquals(5000, ss2.getColumnStatistics()[0].getNumberOfValues());
assertEquals(1000, ss3.getColumnStatistics()[0].getNumberOfValues());
assertEquals(5000, (ss1.getColumnStatistics()[1]).getNumberOfValues());
assertEquals(5000, (ss2.getColumnStatistics()[1]).getNumberOfValues());
assertEquals(1000, (ss3.getColumnStatistics()[1]).getNumberOfValues());
assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMinimum());
assertEquals(2, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMinimum());
assertEquals(3, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMinimum());
assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMaximum());
assertEquals(2, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMaximum());
assertEquals(3, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getMaximum());
assertEquals(5000, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getSum());
assertEquals(10000, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getSum());
assertEquals(3000, ((IntegerColumnStatistics)ss3.getColumnStatistics()[1]).getSum());
assertEquals(5000, (ss1.getColumnStatistics()[2]).getNumberOfValues());
assertEquals(5000, (ss2.getColumnStatistics()[2]).getNumberOfValues());
assertEquals(1000, (ss3.getColumnStatistics()[2]).getNumberOfValues());
assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMinimum());
assertEquals("two", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMinimum());
assertEquals("three", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMinimum());
assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMaximum());
assertEquals("two", ((StringColumnStatistics) ss2.getColumnStatistics()[2]).getMaximum());
assertEquals("three", ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getMaximum());
assertEquals(15000, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getSum());
assertEquals(15000, ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getSum());
assertEquals(5000, ((StringColumnStatistics)ss3.getColumnStatistics()[2]).getSum());
RecordReaderImpl recordReader = (RecordReaderImpl) reader.rows();
OrcProto.RowIndex[] index = recordReader.readRowIndex(0, null, null).getRowGroupIndex();
assertEquals(3, index.length);
List<OrcProto.RowIndexEntry> items = index[1].getEntryList();
assertEquals(1, items.size());
assertEquals(3, items.get(0).getPositionsCount());
assertEquals(0, items.get(0).getPositions(0));
assertEquals(0, items.get(0).getPositions(1));
assertEquals(0, items.get(0).getPositions(2));
assertEquals(1,
items.get(0).getStatistics().getIntStatistics().getMinimum());
index = recordReader.readRowIndex(1, null, null).getRowGroupIndex();
assertEquals(3, index.length);
items = index[1].getEntryList();
assertEquals(2,
items.get(0).getStatistics().getIntStatistics().getMaximum());
}
@ParameterizedTest
@MethodSource("data")
public void testStripeLevelStatsNoForce(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<int1:int,string1:string>");
OrcConf.DICTIONARY_IMPL.setString(conf, "hash");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1000;
LongColumnVector field1 = (LongColumnVector) batch.cols[0];
BytesColumnVector field2 = (BytesColumnVector) batch.cols[1];
field1.isRepeating = true;
field2.isRepeating = true;
for (int b = 0; b < 11; b++) {
if (b >= 5) {
if (b >= 10) {
field1.vector[0] = 3;
field2.setVal(0, "three".getBytes(StandardCharsets.UTF_8));
} else {
field1.vector[0] = 2;
field2.setVal(0, "two".getBytes(StandardCharsets.UTF_8));
}
} else {
field1.vector[0] = 1;
field2.setVal(0, "one".getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
schema = writer.getSchema();
assertEquals(2, schema.getMaximumId());
boolean[] expected = new boolean[] {false, true, false};
boolean[] included = OrcUtils.includeColumns("int1", schema);
assertTrue(Arrays.equals(expected, included));
List<StripeStatistics> stats = reader.getStripeStatistics();
int numStripes = stats.size();
assertEquals(2, numStripes);
StripeStatistics ss1 = stats.get(0);
StripeStatistics ss2 = stats.get(1);
assertEquals(10000, ss1.getColumnStatistics()[0].getNumberOfValues());
assertEquals(1000, ss2.getColumnStatistics()[0].getNumberOfValues());
assertEquals(10000, (ss1.getColumnStatistics()[1]).getNumberOfValues());
assertEquals(1000, (ss2.getColumnStatistics()[1]).getNumberOfValues());
assertEquals(1, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMinimum());
assertEquals(3, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMinimum());
assertEquals(2, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getMaximum());
assertEquals(3, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getMaximum());
assertEquals(15000, ((IntegerColumnStatistics)ss1.getColumnStatistics()[1]).getSum());
assertEquals(3000, ((IntegerColumnStatistics)ss2.getColumnStatistics()[1]).getSum());
assertEquals(10000, (ss1.getColumnStatistics()[2]).getNumberOfValues());
assertEquals(1000, (ss2.getColumnStatistics()[2]).getNumberOfValues());
assertEquals("one", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMinimum());
assertEquals("three", ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getMinimum());
assertEquals("two", ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getMaximum());
assertEquals("three", ((StringColumnStatistics) ss2.getColumnStatistics()[2]).getMaximum());
assertEquals(30000, ((StringColumnStatistics)ss1.getColumnStatistics()[2]).getSum());
assertEquals(5000, ((StringColumnStatistics)ss2.getColumnStatistics()[2]).getSum());
RecordReaderImpl recordReader = (RecordReaderImpl) reader.rows();
OrcProto.RowIndex[] index = recordReader.readRowIndex(0, null, null).getRowGroupIndex();
assertEquals(3, index.length);
List<OrcProto.RowIndexEntry> items = index[1].getEntryList();
assertEquals(1, items.size());
assertEquals(3, items.get(0).getPositionsCount());
assertEquals(0, items.get(0).getPositions(0));
assertEquals(0, items.get(0).getPositions(1));
assertEquals(0, items.get(0).getPositions(2));
assertEquals(1,
items.get(0).getStatistics().getIntStatistics().getMinimum());
index = recordReader.readRowIndex(1, null, null).getRowGroupIndex();
assertEquals(3, index.length);
items = index[1].getEntryList();
assertEquals(3, items.get(0).getStatistics().getIntStatistics().getMaximum());
}
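  /**
   * Fill one row of an inner struct (int1: int, string1: string) column:
   * the int goes into field 0 and the string into field 1; a null string
   * marks field 1 as null.
   */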
private static void setInner(StructColumnVector inner, int rowId,
int i, String value) {
((LongColumnVector) inner.fields[0]).vector[rowId] = i;
if (value != null) {
((BytesColumnVector) inner.fields[1]).setVal(rowId, value.getBytes(StandardCharsets.UTF_8));
} else {
inner.fields[1].isNull[rowId] = true;
inner.fields[1].noNulls = false;
}
}
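  /**
   * Assert that row rowInBatch of the inner struct column holds the expected
   * int and string, or that the string field is null when value is null.
   */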
private static void checkInner(StructColumnVector inner, int rowId,
int rowInBatch, int i, String value) {
assertEquals(i,
((LongColumnVector) inner.fields[0]).vector[rowInBatch],
"row " + rowId);
if (value != null) {
assertEquals(value,
((BytesColumnVector) inner.fields[1]).toString(rowInBatch),
"row " + rowId);
} else {
assertTrue(inner.fields[1].isNull[rowInBatch], "row " + rowId);
assertFalse(inner.fields[1].noNulls, "row " + rowId);
}
}
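  /**
   * Append the given InnerStruct values as the list for row rowId, growing
   * the child vector when needed; a null list marks the row as null.
   */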
private static void setInnerList(ListColumnVector list, int rowId,
List<InnerStruct> value) {
if (value != null) {
if (list.childCount + value.size() > list.child.isNull.length) {
list.child.ensureSize(list.childCount * 2, true);
}
list.lengths[rowId] = value.size();
list.offsets[rowId] = list.childCount;
for (int i = 0; i < list.lengths[rowId]; ++i) {
InnerStruct inner = value.get(i);
setInner((StructColumnVector) list.child, i + list.childCount,
inner.int1, inner.string1.toString());
}
list.childCount += value.size();
} else {
list.isNull[rowId] = true;
list.noNulls = false;
}
}
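  /**
   * Assert that the list at row rowInBatch matches the expected InnerStruct
   * values, or that the row is null when value is null.
   */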
private static void checkInnerList(ListColumnVector list, int rowId,
int rowInBatch, List<InnerStruct> value) {
if (value != null) {
assertEquals(value.size(), list.lengths[rowInBatch], "row " + rowId);
int start = (int) list.offsets[rowInBatch];
for (int i = 0; i < list.lengths[rowInBatch]; ++i) {
InnerStruct inner = value.get(i);
checkInner((StructColumnVector) list.child, rowId, i + start,
inner.int1, inner.string1.toString());
}
list.childCount += value.size();
} else {
assertTrue(list.isNull[rowInBatch], "row " + rowId);
assertFalse(list.noNulls, "row " + rowId);
}
}
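  /**
   * Write the given string-to-InnerStruct map into row rowId, growing the
   * key and value child vectors when needed; a null map marks the row as null.
   */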
private static void setInnerMap(MapColumnVector map, int rowId,
Map<String, InnerStruct> value) {
if (value != null) {
if (map.childCount >= map.keys.isNull.length) {
map.keys.ensureSize(map.childCount * 2, true);
map.values.ensureSize(map.childCount * 2, true);
}
map.lengths[rowId] = value.size();
int offset = map.childCount;
map.offsets[rowId] = offset;
for (Map.Entry<String, InnerStruct> entry : value.entrySet()) {
((BytesColumnVector) map.keys).setVal(offset, entry.getKey().getBytes(StandardCharsets.UTF_8));
InnerStruct inner = entry.getValue();
setInner((StructColumnVector) map.values, offset, inner.int1,
inner.string1.toString());
offset += 1;
}
map.childCount = offset;
} else {
map.isNull[rowId] = true;
map.noNulls = false;
}
}
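  /**
   * Assert that the map at row rowInBatch matches the expected
   * string-to-InnerStruct entries, or that the row is null when value is null.
   */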
private static void checkInnerMap(MapColumnVector map, int rowId,
int rowInBatch,
Map<String, InnerStruct> value) {
if (value != null) {
assertEquals(value.size(), map.lengths[rowInBatch], "row " + rowId);
int offset = (int) map.offsets[rowInBatch];
for(int i=0; i < value.size(); ++i) {
String key = ((BytesColumnVector) map.keys).toString(offset + i);
InnerStruct expected = value.get(key);
checkInner((StructColumnVector) map.values, rowId, offset + i,
expected.int1, expected.string1.toString());
}
} else {
assertTrue(map.isNull[rowId], "row " + rowId);
assertFalse(map.noNulls, "row " + rowId);
}
}
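  /**
   * Fill the middle struct's list field for row rowId, or mark the row as
   * null when value is null.
   */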
private static void setMiddleStruct(StructColumnVector middle, int rowId,
MiddleStruct value) {
if (value != null) {
setInnerList((ListColumnVector) middle.fields[0], rowId, value.list);
} else {
middle.isNull[rowId] = true;
middle.noNulls = false;
}
}
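  /**
   * Assert that the middle struct's list field at row rowInBatch matches the
   * expected value, or that the row is null when value is null.
   */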
private static void checkMiddleStruct(StructColumnVector middle, int rowId,
int rowInBatch, MiddleStruct value) {
if (value != null) {
checkInnerList((ListColumnVector) middle.fields[0], rowId, rowInBatch,
value.list);
} else {
assertTrue(middle.isNull[rowInBatch], "row " + rowId);
assertFalse(middle.noNulls, "row " + rowId);
}
}
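  /**
   * Fill one row of the big-row batch: primitive columns 0-8, the middle
   * struct (column 9), the list (column 10), and the map (column 11). Null
   * binary or string arguments mark the corresponding column as null.
   */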
private static void setBigRow(VectorizedRowBatch batch, int rowId,
Boolean b1, Byte b2, Short s1,
Integer i1, Long l1, Float f1,
Double d1, BytesWritable b3, String s2,
MiddleStruct m1, List<InnerStruct> l2,
Map<String, InnerStruct> m2) {
((LongColumnVector) batch.cols[0]).vector[rowId] = b1 ? 1 : 0;
((LongColumnVector) batch.cols[1]).vector[rowId] = b2;
((LongColumnVector) batch.cols[2]).vector[rowId] = s1;
((LongColumnVector) batch.cols[3]).vector[rowId] = i1;
((LongColumnVector) batch.cols[4]).vector[rowId] = l1;
((DoubleColumnVector) batch.cols[5]).vector[rowId] = f1;
((DoubleColumnVector) batch.cols[6]).vector[rowId] = d1;
if (b3 != null) {
((BytesColumnVector) batch.cols[7]).setVal(rowId, b3.getBytes(), 0,
b3.getLength());
} else {
batch.cols[7].isNull[rowId] = true;
batch.cols[7].noNulls = false;
}
if (s2 != null) {
((BytesColumnVector) batch.cols[8]).setVal(rowId, s2.getBytes(StandardCharsets.UTF_8));
} else {
batch.cols[8].isNull[rowId] = true;
batch.cols[8].noNulls = false;
}
setMiddleStruct((StructColumnVector) batch.cols[9], rowId, m1);
setInnerList((ListColumnVector) batch.cols[10], rowId, l2);
setInnerMap((MapColumnVector) batch.cols[11], rowId, m2);
}
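  /**
   * Assert that row rowInBatch of the big-row batch matches the expected
   * values, including the nested middle struct, list, and map columns.
   */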
private static void checkBigRow(VectorizedRowBatch batch,
int rowInBatch,
int rowId,
boolean b1, byte b2, short s1,
int i1, long l1, float f1,
double d1, BytesWritable b3, String s2,
MiddleStruct m1, List<InnerStruct> l2,
Map<String, InnerStruct> m2) {
String msg = "row " + rowId;
assertEquals(b1, getBoolean(batch, rowInBatch), msg);
assertEquals(b2, getByte(batch, rowInBatch), msg);
assertEquals(s1, getShort(batch, rowInBatch), msg);
assertEquals(i1, getInt(batch, rowInBatch), msg);
assertEquals(l1, getLong(batch, rowInBatch), msg);
assertEquals(f1, getFloat(batch, rowInBatch), 0.0001, msg);
assertEquals(d1, getDouble(batch, rowInBatch), 0.0001, msg);
if (b3 != null) {
BytesColumnVector bytes = (BytesColumnVector) batch.cols[7];
assertEquals(b3.getLength(), bytes.length[rowInBatch], msg);
for(int i=0; i < b3.getLength(); ++i) {
assertEquals(b3.getBytes()[i],
bytes.vector[rowInBatch][bytes.start[rowInBatch] + i],
"row " + rowId + " byte " + i);
}
} else {
assertTrue(batch.cols[7].isNull[rowInBatch], msg);
assertFalse(batch.cols[7].noNulls, msg);
}
if (s2 != null) {
assertEquals(s2, getText(batch, rowInBatch).toString(), "row " + rowId);
} else {
assertTrue(batch.cols[8].isNull[rowInBatch], msg);
assertFalse(batch.cols[8].noNulls, msg);
}
checkMiddleStruct((StructColumnVector) batch.cols[9], rowId, rowInBatch,
m1);
checkInnerList((ListColumnVector) batch.cols[10], rowId, rowInBatch, l2);
checkInnerMap((MapColumnVector) batch.cols[11], rowId, rowInBatch, m2);
}
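  // Typed readers for the primitive columns (0-6) of the big-row batch.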
private static boolean getBoolean(VectorizedRowBatch batch, int rowId) {
return ((LongColumnVector) batch.cols[0]).vector[rowId] != 0;
}
private static byte getByte(VectorizedRowBatch batch, int rowId) {
return (byte) ((LongColumnVector) batch.cols[1]).vector[rowId];
}
private static short getShort(VectorizedRowBatch batch, int rowId) {
return (short) ((LongColumnVector) batch.cols[2]).vector[rowId];
}
private static int getInt(VectorizedRowBatch batch, int rowId) {
return (int) ((LongColumnVector) batch.cols[3]).vector[rowId];
}
private static long getLong(VectorizedRowBatch batch, int rowId) {
return ((LongColumnVector) batch.cols[4]).vector[rowId];
}
private static float getFloat(VectorizedRowBatch batch, int rowId) {
return (float) ((DoubleColumnVector) batch.cols[5]).vector[rowId];
}
private static double getDouble(VectorizedRowBatch batch, int rowId) {
return ((DoubleColumnVector) batch.cols[6]).vector[rowId];
}
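  // Null-aware readers that copy bytes out of a BytesColumnVector (repeating
  // vectors always read row 0); the batch variants read columns 7 (bytes1)
  // and 8 (string1).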
protected static BytesWritable getBinary(BytesColumnVector column, int rowId) {
if (column.isRepeating) {
rowId = 0;
}
if (column.noNulls || !column.isNull[rowId]) {
return new BytesWritable(Arrays.copyOfRange(column.vector[rowId],
column.start[rowId], column.start[rowId] + column.length[rowId]));
} else {
return null;
}
}
private static BytesWritable getBinary(VectorizedRowBatch batch, int rowId) {
return getBinary((BytesColumnVector) batch.cols[7], rowId);
}
private static Text getText(BytesColumnVector vector, int rowId) {
if (vector.isRepeating) {
rowId = 0;
}
if (vector.noNulls || !vector.isNull[rowId]) {
return new Text(Arrays.copyOfRange(vector.vector[rowId],
vector.start[rowId], vector.start[rowId] + vector.length[rowId]));
} else {
return null;
}
}
private static Text getText(VectorizedRowBatch batch, int rowId) {
return getText((BytesColumnVector) batch.cols[8], rowId);
}
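  // Readers that rebuild InnerStruct values from the nested struct, list,
  // and map columns of the big-row batch.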
private static InnerStruct getInner(StructColumnVector vector,
int rowId) {
return new InnerStruct(
(int) ((LongColumnVector) vector.fields[0]).vector[rowId],
getText((BytesColumnVector) vector.fields[1], rowId));
}
private static List<InnerStruct> getList(ListColumnVector cv,
int rowId) {
if (cv.isRepeating) {
rowId = 0;
}
if (cv.noNulls || !cv.isNull[rowId]) {
List<InnerStruct> result =
new ArrayList<InnerStruct>((int) cv.lengths[rowId]);
for(long i=cv.offsets[rowId];
i < cv.offsets[rowId] + cv.lengths[rowId]; ++i) {
result.add(getInner((StructColumnVector) cv.child, (int) i));
}
return result;
} else {
return null;
}
}
private static List<InnerStruct> getMidList(VectorizedRowBatch batch,
int rowId) {
return getList((ListColumnVector) ((StructColumnVector) batch.cols[9])
.fields[0], rowId);
}
private static List<InnerStruct> getList(VectorizedRowBatch batch,
int rowId) {
return getList((ListColumnVector) batch.cols[10], rowId);
}
private static Map<Text, InnerStruct> getMap(VectorizedRowBatch batch,
int rowId) {
MapColumnVector cv = (MapColumnVector) batch.cols[11];
if (cv.isRepeating) {
rowId = 0;
}
if (cv.noNulls || !cv.isNull[rowId]) {
Map<Text, InnerStruct> result =
new HashMap<Text, InnerStruct>((int) cv.lengths[rowId]);
for(long i=cv.offsets[rowId];
i < cv.offsets[rowId] + cv.lengths[rowId]; ++i) {
result.put(getText((BytesColumnVector) cv.keys, (int) i),
getInner((StructColumnVector) cv.values, (int) i));
}
return result;
} else {
return null;
}
}
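  // Schema factories shared by the tests below.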
private static TypeDescription createInnerSchema() {
return TypeDescription.fromString("struct<int1:int,string1:string>");
}
  private static TypeDescription createComplexInnerSchema() {
return TypeDescription.fromString("struct<int1:int,"
+ "complex:struct<int2:int,String1:string>>");
}
private static TypeDescription createQuotedSchema() {
return TypeDescription.createStruct()
.addField("`int1`", TypeDescription.createInt())
.addField("`string1`", TypeDescription.createString());
}
private static TypeDescription createQuotedSchemaFromString() {
return TypeDescription.fromString("struct<```int1```:int,```string1```:string>");
}
private static TypeDescription createBigRowSchema() {
return TypeDescription.createStruct()
.addField("boolean1", TypeDescription.createBoolean())
.addField("byte1", TypeDescription.createByte())
.addField("short1", TypeDescription.createShort())
.addField("int1", TypeDescription.createInt())
.addField("long1", TypeDescription.createLong())
.addField("float1", TypeDescription.createFloat())
.addField("double1", TypeDescription.createDouble())
.addField("bytes1", TypeDescription.createBinary())
.addField("string1", TypeDescription.createString())
.addField("middle", TypeDescription.createStruct()
.addField("list", TypeDescription.createList(createInnerSchema())))
.addField("list", TypeDescription.createList(createInnerSchema()))
.addField("map", TypeDescription.createMap(
TypeDescription.createString(),
createInnerSchema()));
}
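  /**
   * Assert that two boolean arrays are equal, printing every index that
   * differs before failing.
   */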
static void assertArrayBooleanEquals(boolean[] expected, boolean[] actual) {
assertEquals(expected.length, actual.length);
boolean diff = false;
for(int i=0; i < expected.length; ++i) {
if (expected[i] != actual[i]) {
System.out.println("Difference at " + i + " expected: " + expected[i] +
" actual: " + actual[i]);
diff = true;
}
}
assertFalse(diff);
}
@ParameterizedTest
@MethodSource("data")
public void test1(Version fileFormat) throws Exception {
TypeDescription schema = createBigRowSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.version(fileFormat));
assertEmptyStats(writer.getStatistics());
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 2;
setBigRow(batch, 0, false, (byte) 1, (short) 1024, 65536,
Long.MAX_VALUE, (float) 1.0, -15.0, bytes(0, 1, 2, 3, 4), "hi",
new MiddleStruct(inner(1, "bye"), inner(2, "sigh")),
list(inner(3, "good"), inner(4, "bad")),
map());
setBigRow(batch, 1, true, (byte) 100, (short) 2048, 65536,
Long.MAX_VALUE, (float) 2.0, -5.0, bytes(), "bye",
new MiddleStruct(inner(1, "bye"), inner(2, "sigh")),
list(inner(100000000, "cat"), inner(-100000, "in"), inner(1234, "hat")),
map(inner(5, "chani"), inner(1, "mauddib")));
writer.addRowBatch(batch);
assertEmptyStats(writer.getStatistics());
writer.close();
ColumnStatistics[] closeStatistics = writer.getStatistics();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
schema = writer.getSchema();
assertEquals(23, schema.getMaximumId());
boolean[] expected = new boolean[] {false, false, false, false, false,
false, false, false, false, false,
false, false, false, false, false,
false, false, false, false, false,
false, false, false, false};
boolean[] included = OrcUtils.includeColumns("", schema);
assertTrue(Arrays.equals(expected, included));
expected = new boolean[] {false, true, false, false, false,
false, false, false, false, true,
true, true, true, true, true,
false, false, false, false, true,
true, true, true, true};
included = OrcUtils.includeColumns("boolean1,string1,middle,map", schema);
assertArrayBooleanEquals(expected, included);
expected = new boolean[] {false, true, false, false, false,
false, false, false, false, true,
true, true, true, true, true,
false, false, false, false, true,
true, true, true, true};
included = OrcUtils.includeColumns("boolean1,string1,middle,map", schema);
assertArrayBooleanEquals(expected, included);
expected = new boolean[] {false, true, true, true, true,
true, true, true, true, true,
true, true, true, true, true,
true, true, true, true, true,
true, true, true, true};
included = OrcUtils.includeColumns(
"boolean1,byte1,short1,int1,long1,float1,double1,bytes1,string1,middle,list,map",
schema);
assertTrue(Arrays.equals(expected, included));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertArrayEquals(stats, closeStatistics);
assertEquals(2, stats[1].getNumberOfValues());
assertEquals(1, ((BooleanColumnStatistics) stats[1]).getFalseCount());
assertEquals(1, ((BooleanColumnStatistics) stats[1]).getTrueCount());
assertEquals("count: 2 hasNull: false bytesOnDisk: 5 true: 1", stats[1].toString());
assertEquals(2048, ((IntegerColumnStatistics) stats[3]).getMaximum());
assertEquals(1024, ((IntegerColumnStatistics) stats[3]).getMinimum());
assertTrue(((IntegerColumnStatistics) stats[3]).isSumDefined());
assertEquals(3072, ((IntegerColumnStatistics) stats[3]).getSum());
assertEquals("count: 2 hasNull: false bytesOnDisk: " +
(fileFormat == OrcFile.Version.V_0_11 ? "8" : "9") +
" min: 1024 max: 2048 sum: 3072", stats[3].toString());
StripeStatistics ss = reader.getStripeStatistics().get(0);
assertEquals(2, ss.getColumnStatistics()[0].getNumberOfValues());
assertEquals(1, ((BooleanColumnStatistics) ss.getColumnStatistics()[1]).getTrueCount());
assertEquals(1024, ((IntegerColumnStatistics) ss.getColumnStatistics()[3]).getMinimum());
assertEquals(2048, ((IntegerColumnStatistics) ss.getColumnStatistics()[3]).getMaximum());
assertEquals(3072, ((IntegerColumnStatistics) ss.getColumnStatistics()[3]).getSum());
assertEquals(-15.0, ((DoubleColumnStatistics) stats[7]).getMinimum(), 0.0001);
assertEquals(-5.0, ((DoubleColumnStatistics) stats[7]).getMaximum(), 0.0001);
assertEquals(-20.0, ((DoubleColumnStatistics) stats[7]).getSum(), 0.00001);
assertEquals("count: 2 hasNull: false bytesOnDisk: 15 min: -15.0 max: -5.0 sum: -20.0",
stats[7].toString());
assertEquals("count: 2 hasNull: false bytesOnDisk: " +
(fileFormat == OrcFile.Version.V_0_11 ? "20" : "14") +
" min: bye max: hi sum: 5", stats[9].toString());
// check the schema
TypeDescription readerSchema = reader.getSchema();
assertEquals(TypeDescription.Category.STRUCT, readerSchema.getCategory());
assertEquals("struct<boolean1:boolean,byte1:tinyint,short1:smallint,"
+ "int1:int,long1:bigint,float1:float,double1:double,bytes1:"
+ "binary,string1:string,middle:struct<list:array<struct<int1:int,"
+ "string1:string>>>,list:array<struct<int1:int,string1:string>>,"
+ "map:map<string,struct<int1:int,string1:string>>>",
readerSchema.toString());
List<String> fieldNames = readerSchema.getFieldNames();
List<TypeDescription> fieldTypes = readerSchema.getChildren();
assertEquals("boolean1", fieldNames.get(0));
assertEquals(TypeDescription.Category.BOOLEAN, fieldTypes.get(0).getCategory());
assertEquals("byte1", fieldNames.get(1));
assertEquals(TypeDescription.Category.BYTE, fieldTypes.get(1).getCategory());
assertEquals("short1", fieldNames.get(2));
assertEquals(TypeDescription.Category.SHORT, fieldTypes.get(2).getCategory());
assertEquals("int1", fieldNames.get(3));
assertEquals(TypeDescription.Category.INT, fieldTypes.get(3).getCategory());
assertEquals("long1", fieldNames.get(4));
assertEquals(TypeDescription.Category.LONG, fieldTypes.get(4).getCategory());
assertEquals("float1", fieldNames.get(5));
assertEquals(TypeDescription.Category.FLOAT, fieldTypes.get(5).getCategory());
assertEquals("double1", fieldNames.get(6));
assertEquals(TypeDescription.Category.DOUBLE, fieldTypes.get(6).getCategory());
assertEquals("bytes1", fieldNames.get(7));
assertEquals(TypeDescription.Category.BINARY, fieldTypes.get(7).getCategory());
assertEquals("string1", fieldNames.get(8));
assertEquals(TypeDescription.Category.STRING, fieldTypes.get(8).getCategory());
assertEquals("middle", fieldNames.get(9));
TypeDescription middle = fieldTypes.get(9);
assertEquals(TypeDescription.Category.STRUCT, middle.getCategory());
TypeDescription midList = middle.getChildren().get(0);
assertEquals(TypeDescription.Category.LIST, midList.getCategory());
TypeDescription inner = midList.getChildren().get(0);
assertEquals(TypeDescription.Category.STRUCT, inner.getCategory());
assertEquals("int1", inner.getFieldNames().get(0));
assertEquals("string1", inner.getFieldNames().get(1));
RecordReader rows = reader.rows();
// create a new batch
batch = readerSchema.createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(2, batch.size);
assertFalse(rows.nextBatch(batch));
// check the contents of the first row
assertFalse(getBoolean(batch, 0));
assertEquals(1, getByte(batch, 0));
assertEquals(1024, getShort(batch, 0));
assertEquals(65536, getInt(batch, 0));
assertEquals(Long.MAX_VALUE, getLong(batch, 0));
assertEquals(1.0, getFloat(batch, 0), 0.00001);
assertEquals(-15.0, getDouble(batch, 0), 0.00001);
assertEquals(bytes(0,1,2,3,4), getBinary(batch, 0));
assertEquals("hi", getText(batch, 0).toString());
List<InnerStruct> midRow = getMidList(batch, 0);
assertNotNull(midRow);
assertEquals(2, midRow.size());
assertEquals(1, midRow.get(0).int1);
assertEquals("bye", midRow.get(0).string1.toString());
assertEquals(2, midRow.get(1).int1);
assertEquals("sigh", midRow.get(1).string1.toString());
List<InnerStruct> list = getList(batch, 0);
assertEquals(2, list.size());
assertEquals(3, list.get(0).int1);
assertEquals("good", list.get(0).string1.toString());
assertEquals(4, list.get(1).int1);
assertEquals("bad", list.get(1).string1.toString());
Map<Text, InnerStruct> map = getMap(batch, 0);
assertEquals(0, map.size());
// check the contents of second row
assertTrue(getBoolean(batch, 1));
assertEquals(100, getByte(batch, 1));
assertEquals(2048, getShort(batch, 1));
assertEquals(65536, getInt(batch, 1));
assertEquals(Long.MAX_VALUE, getLong(batch, 1));
assertEquals(2.0, getFloat(batch, 1), 0.00001);
assertEquals(-5.0, getDouble(batch, 1), 0.00001);
assertEquals(bytes(), getBinary(batch, 1));
assertEquals("bye", getText(batch, 1).toString());
midRow = getMidList(batch, 1);
assertNotNull(midRow);
assertEquals(2, midRow.size());
assertEquals(1, midRow.get(0).int1);
assertEquals("bye", midRow.get(0).string1.toString());
assertEquals(2, midRow.get(1).int1);
assertEquals("sigh", midRow.get(1).string1.toString());
list = getList(batch, 1);
assertEquals(3, list.size());
assertEquals(100000000, list.get(0).int1);
assertEquals("cat", list.get(0).string1.toString());
assertEquals(-100000, list.get(1).int1);
assertEquals("in", list.get(1).string1.toString());
assertEquals(1234, list.get(2).int1);
assertEquals("hat", list.get(2).string1.toString());
map = getMap(batch, 1);
assertEquals(2, map.size());
InnerStruct value = map.get(new Text("chani"));
assertEquals(5, value.int1);
assertEquals("chani", value.string1.toString());
value = map.get(new Text("mauddib"));
assertEquals(1, value.int1);
assertEquals("mauddib", value.string1.toString());
// handle the close up
assertFalse(rows.nextBatch(batch));
rows.close();
}
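  /**
   * Assert that every column statistic is still empty: no values counted and
   * no nulls recorded.
   */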
static void assertEmptyStats(ColumnStatistics[] writerStatistics) {
for (ColumnStatistics columnStatistics : writerStatistics){
assertEquals(0, columnStatistics.getNumberOfValues());
assertFalse(columnStatistics.hasNull());
}
}
@ParameterizedTest
@MethodSource("data")
public void testColumnProjection(Version fileFormat) throws Exception {
TypeDescription schema = createInnerSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(1000)
.compress(CompressionKind.NONE)
.bufferSize(100)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
Random r1 = new Random(1);
Random r2 = new Random(2);
int x;
int minInt=0, maxInt=0;
String y;
String minStr = null, maxStr = null;
batch.size = 1000;
boolean first = true;
for(int b=0; b < 21; ++b) {
for(int r=0; r < 1000; ++r) {
x = r1.nextInt();
y = Long.toHexString(r2.nextLong());
if (first || x < minInt) {
minInt = x;
}
if (first || x > maxInt) {
maxInt = x;
}
if (first || y.compareTo(minStr) < 0) {
minStr = y;
}
if (first || y.compareTo(maxStr) > 0) {
maxStr = y;
}
first = false;
((LongColumnVector) batch.cols[0]).vector[r] = x;
((BytesColumnVector) batch.cols[1]).setVal(r, y.getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check out the statistics
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(3, stats.length);
for(ColumnStatistics s: stats) {
assertEquals(21000, s.getNumberOfValues());
if (s instanceof IntegerColumnStatistics) {
assertEquals(minInt, ((IntegerColumnStatistics) s).getMinimum());
assertEquals(maxInt, ((IntegerColumnStatistics) s).getMaximum());
} else if (s instanceof StringColumnStatistics) {
assertEquals(maxStr, ((StringColumnStatistics) s).getMaximum());
assertEquals(minStr, ((StringColumnStatistics) s).getMinimum());
}
}
// check out the types
TypeDescription type = reader.getSchema();
assertEquals(TypeDescription.Category.STRUCT, type.getCategory());
assertEquals(2, type.getChildren().size());
TypeDescription type1 = type.getChildren().get(0);
TypeDescription type2 = type.getChildren().get(1);
assertEquals(TypeDescription.Category.INT, type1.getCategory());
assertEquals(TypeDescription.Category.STRING, type2.getCategory());
assertEquals("struct<int1:int,string1:string>", type.toString());
// read the contents and make sure they match
RecordReader rows1 = reader.rows(
reader.options().include(new boolean[]{true, true, false}));
RecordReader rows2 = reader.rows(
reader.options().include(new boolean[]{true, false, true}));
r1 = new Random(1);
r2 = new Random(2);
VectorizedRowBatch batch1 = reader.getSchema().createRowBatch(1000);
VectorizedRowBatch batch2 = reader.getSchema().createRowBatch(1000);
for(int i = 0; i < 21000; i += 1000) {
assertTrue(rows1.nextBatch(batch1));
assertTrue(rows2.nextBatch(batch2));
assertEquals(1000, batch1.size);
assertEquals(1000, batch2.size);
for(int j=0; j < 1000; ++j) {
assertEquals(r1.nextInt(),
((LongColumnVector) batch1.cols[0]).vector[j]);
assertEquals(Long.toHexString(r2.nextLong()),
((BytesColumnVector) batch2.cols[1]).toString(j));
}
}
assertFalse(rows1.nextBatch(batch1));
assertFalse(rows2.nextBatch(batch2));
rows1.close();
rows2.close();
}
@ParameterizedTest
@MethodSource("data")
public void testEmptyFile(Version fileFormat) throws Exception {
TypeDescription schema = createBigRowSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(1000)
.compress(CompressionKind.NONE)
.bufferSize(100)
.version(fileFormat));
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
VectorizedRowBatch batch = reader.getSchema().createRowBatch();
assertFalse(reader.rows().nextBatch(batch));
assertEquals(CompressionKind.NONE, reader.getCompressionKind());
assertEquals(0, reader.getNumberOfRows());
assertEquals(DEFAULT_COMPRESSION_BLOCK_SIZE, reader.getCompressionSize());
assertFalse(reader.getMetadataKeys().iterator().hasNext());
assertEquals(3, reader.getContentLength());
assertFalse(reader.getStripes().iterator().hasNext());
}
@ParameterizedTest
@MethodSource("data")
public void metaData(Version fileFormat) throws Exception {
TypeDescription schema = createBigRowSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(1000)
.compress(CompressionKind.NONE)
.bufferSize(100)
.version(fileFormat));
writer.addUserMetadata("my.meta", byteBuf(1, 2, 3, 4, 5, 6, 7, -1, -2, 127,
-128));
writer.addUserMetadata("clobber", byteBuf(1, 2, 3));
writer.addUserMetadata("clobber", byteBuf(4, 3, 2, 1));
ByteBuffer bigBuf = ByteBuffer.allocate(40000);
Random random = new Random(0);
random.nextBytes(bigBuf.array());
writer.addUserMetadata("big", bigBuf);
bigBuf.position(0);
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1;
setBigRow(batch, 0, true, (byte) 127, (short) 1024, 42,
42L * 1024 * 1024 * 1024, (float) 3.1415, -2.713, null,
null, null, null, null);
writer.addRowBatch(batch);
writer.addUserMetadata("clobber", byteBuf(5,7,11,13,17,19));
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(byteBuf(5, 7, 11, 13, 17, 19), reader.getMetadataValue("clobber"));
assertEquals(byteBuf(1, 2, 3, 4, 5, 6, 7, -1, -2, 127, -128),
reader.getMetadataValue("my.meta"));
assertEquals(bigBuf, reader.getMetadataValue("big"));
try {
reader.getMetadataValue("unknown");
fail();
} catch (IllegalArgumentException iae) {
// PASS
}
int i = 0;
for(String key: reader.getMetadataKeys()) {
if ("my.meta".equals(key) ||
"clobber".equals(key) ||
"big".equals(key)) {
i += 1;
} else {
throw new IllegalArgumentException("unknown key " + key);
}
}
assertEquals(3, i);
int numStripes = reader.getStripeStatistics().size();
assertEquals(1, numStripes);
}
  /**
   * Generate an ORC file with a range of dates and times: 1000 rows for each
   * year from minYear (inclusive) to maxYear (exclusive), plus one trailing
   * row that exercises the sub-millisecond timestamp statistics.
   */
public void createOrcDateFile(Path file, int minYear, int maxYear, Version fileFormat
) throws IOException {
TypeDescription schema = TypeDescription.createStruct()
.addField("time", TypeDescription.createTimestamp())
.addField("date", TypeDescription.createDate());
Writer writer = OrcFile.createWriter(file,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(100000)
.bufferSize(10000)
.blockPadding(false)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1000;
TimestampColumnVector timestampColVector = (TimestampColumnVector) batch.cols[0];
for (int year = minYear; year < maxYear; ++year) {
for (int row = 0; row < 1000; ++row) {
String timeStr = String.format("%04d-05-05 12:34:56.%04d", year, 2*row);
timestampColVector.set(row, Timestamp.valueOf(timeStr));
}
((LongColumnVector) batch.cols[1]).vector[0] =
new DateWritable(new Date(year - 1900, 11, 25)).getDays();
batch.cols[1].isRepeating = true;
writer.addRowBatch(batch);
}
// add one more row to check the statistics for the jvm bug case
batch.size = 1;
String timeStr = String.format("%04d-12-12 12:34:56.0001", maxYear-1);
timestampColVector.set(0, Timestamp.valueOf(timeStr));
writer.addRowBatch(batch);
writer.close();
    // check the stats; since ORC-611 timestamp statistics keep nanosecond
    // precision, so the sub-millisecond values should round-trip exactly
ColumnStatistics[] stats = writer.getStatistics();
TimestampColumnStatistics tsStat = (TimestampColumnStatistics) stats[1];
assertEquals(String.format("%04d-12-12 12:34:56.0001", maxYear - 1),
tsStat.getMaximum().toString());
assertEquals(String.format("%04d-05-05 12:34:56.0", minYear),
tsStat.getMinimum().toString());
// read back the rows
Reader reader = OrcFile.createReader(file,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1000);
TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
LongColumnVector dates = (LongColumnVector) batch.cols[1];
for (int year = minYear; year < maxYear; ++year) {
rows.nextBatch(batch);
assertEquals(1000, batch.size);
for(int row = 0; row < 1000; ++row) {
Timestamp expected = Timestamp.valueOf(
String.format("%04d-05-05 12:34:56.%04d", year, 2*row));
assertEquals(expected.getTime(), times.time[row],
"ms row " + row + " " + expected);
assertEquals(expected.getNanos(), times.nanos[row],
"nanos row " + row + " " + expected);
assertEquals(
Integer.toString(year) + "-12-25",
new DateWritable((int) dates.vector[row]).toString(),
"year " + year + " row " + row);
}
}
rows.nextBatch(batch);
assertEquals(1, batch.size);
}
@ParameterizedTest
@MethodSource("data")
public void testDate1900(Version fileFormat) throws Exception {
createOrcDateFile(testFilePath, 1900, 1970, fileFormat);
}
@ParameterizedTest
@MethodSource("data")
public void testDate2038(Version fileFormat) throws Exception {
createOrcDateFile(testFilePath, 2038, 2250, fileFormat);
}
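  /**
   * Fill one row of the time/union/decimal/instant batch. The tag selects the
   * union branch (0 = int, 1 = string); any null argument marks the
   * corresponding column or union field as null.
   */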
private static void setUnion(VectorizedRowBatch batch, int rowId,
Timestamp ts, Integer tag, Integer i, String s,
HiveDecimalWritable dec, Timestamp instant) {
UnionColumnVector union = (UnionColumnVector) batch.cols[1];
if (ts != null) {
TimestampColumnVector timestampColVector = (TimestampColumnVector) batch.cols[0];
timestampColVector.set(rowId, ts);
} else {
batch.cols[0].isNull[rowId] = true;
batch.cols[0].noNulls = false;
}
if (tag != null) {
union.tags[rowId] = tag;
if (tag == 0) {
if (i != null) {
((LongColumnVector) union.fields[tag]).vector[rowId] = i;
} else {
union.fields[tag].isNull[rowId] = true;
union.fields[tag].noNulls = false;
}
} else if (tag == 1) {
if (s != null) {
((BytesColumnVector) union.fields[tag]).setVal(rowId, s.getBytes(StandardCharsets.UTF_8));
} else {
union.fields[tag].isNull[rowId] = true;
union.fields[tag].noNulls = false;
}
} else {
throw new IllegalArgumentException("Bad tag " + tag);
}
} else {
batch.cols[1].isNull[rowId] = true;
batch.cols[1].noNulls = false;
}
if (dec != null) {
((DecimalColumnVector) batch.cols[2]).vector[rowId] = dec;
} else {
batch.cols[2].isNull[rowId] = true;
batch.cols[2].noNulls = false;
}
if (instant == null) {
batch.cols[3].isNull[rowId] = true;
batch.cols[3].noNulls = false;
} else {
((TimestampColumnVector) batch.cols[3]).set(rowId, instant);
}
}
  /**
   * Test writing with the new decimal representation (Decimal64ColumnVector)
   * and reading it back with both the new and the old column vectors.
   */
@ParameterizedTest
@MethodSource("data")
public void testDecimal64Writing(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<d:decimal(18,3)>");
VectorizedRowBatch batch = schema.createRowBatchV2();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.NONE)
.version(fileFormat));
Decimal64ColumnVector cv = (Decimal64ColumnVector) batch.cols[0];
cv.precision = 18;
cv.scale = 3;
cv.vector[0] = 1;
for(int r=1; r < 18; r++) {
cv.vector[r] = cv.vector[r-1] * 10;
}
cv.vector[18] = -2000;
batch.size = 19;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals("count: 19 hasNull: false", reader.getStatistics()[0].toString());
// the size of the column in the different formats
int size = (fileFormat == OrcFile.Version.V_0_11 ? 89 :
fileFormat == OrcFile.Version.V_0_12 ? 90 : 154);
assertEquals("count: 19 hasNull: false bytesOnDisk: " + size +
" min: -2 max: 100000000000000 sum: 111111111111109.111",
reader.getStatistics()[1].toString());
RecordReader rows = reader.rows();
batch = schema.createRowBatchV2();
cv = (Decimal64ColumnVector) batch.cols[0];
assertTrue(rows.nextBatch(batch));
assertEquals(19, batch.size);
assertEquals(18, cv.precision);
assertEquals(3, cv.scale);
assertEquals(1, cv.vector[0], "row 0");
for(int r=1; r < 18; ++r) {
assertEquals(10 * cv.vector[r-1], cv.vector[r], "row " + r);
}
assertEquals(-2000, cv.vector[18]);
assertFalse(rows.nextBatch(batch));
// test with old batch
rows = reader.rows();
batch = schema.createRowBatch();
DecimalColumnVector oldCv = (DecimalColumnVector) batch.cols[0];
assertTrue(rows.nextBatch(batch));
assertEquals(19, batch.size);
assertEquals(18, oldCv.precision);
assertEquals(3, oldCv.scale);
assertEquals("0.001", oldCv.vector[0].toString());
assertEquals("0.01", oldCv.vector[1].toString());
assertEquals("0.1", oldCv.vector[2].toString());
assertEquals("1", oldCv.vector[3].toString());
assertEquals("10", oldCv.vector[4].toString());
assertEquals("100", oldCv.vector[5].toString());
assertEquals("1000", oldCv.vector[6].toString());
assertEquals("10000", oldCv.vector[7].toString());
assertEquals("100000", oldCv.vector[8].toString());
assertEquals("1000000", oldCv.vector[9].toString());
assertEquals("10000000", oldCv.vector[10].toString());
assertEquals("100000000", oldCv.vector[11].toString());
assertEquals("1000000000", oldCv.vector[12].toString());
assertEquals("10000000000", oldCv.vector[13].toString());
assertEquals("100000000000", oldCv.vector[14].toString());
assertEquals("1000000000000", oldCv.vector[15].toString());
assertEquals("10000000000000", oldCv.vector[16].toString());
assertEquals("100000000000000", oldCv.vector[17].toString());
assertEquals("-2", oldCv.vector[18].toString());
assertFalse(rows.nextBatch(batch));
}
  /**
   * Test writing with the old decimal representation (DecimalColumnVector)
   * and reading it back with both the new and the old column vectors.
   */
@ParameterizedTest
@MethodSource("data")
public void testDecimal64Reading(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<d:decimal(18,4)>");
VectorizedRowBatch batch = schema.createRowBatch();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.NONE)
.version(fileFormat));
DecimalColumnVector cv = (DecimalColumnVector) batch.cols[0];
cv.precision = 18;
cv.scale = 3;
long base = 1;
for(int r=0; r < 18; r++) {
cv.vector[r].setFromLongAndScale(base, 4);
base *= 10;
}
cv.vector[18].setFromLong(-2);
batch.size = 19;
writer.addRowBatch(batch);
writer.close();
// test with new batch
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals("count: 19 hasNull: false", reader.getStatistics()[0].toString());
// the size of the column in the different formats
int size = (fileFormat == OrcFile.Version.V_0_11 ? 63 :
fileFormat == OrcFile.Version.V_0_12 ? 65 : 154);
assertEquals("count: 19 hasNull: false bytesOnDisk: " + size +
" min: -2 max: 10000000000000 sum: 11111111111109.1111",
reader.getStatistics()[1].toString());
RecordReader rows = reader.rows();
batch = schema.createRowBatchV2();
Decimal64ColumnVector newCv = (Decimal64ColumnVector) batch.cols[0];
assertTrue(rows.nextBatch(batch));
assertEquals(19, batch.size);
assertEquals(18, newCv.precision);
assertEquals(4, newCv.scale);
assertEquals(1, newCv.vector[0], "row 0");
for(int r=1; r < 18; ++r) {
assertEquals(10 * newCv.vector[r-1], newCv.vector[r], "row " + r);
}
assertEquals(-20000, newCv.vector[18]);
assertFalse(rows.nextBatch(batch));
// test with old batch
rows = reader.rows();
batch = schema.createRowBatch();
cv = (DecimalColumnVector) batch.cols[0];
assertTrue(rows.nextBatch(batch));
assertEquals(19, batch.size);
assertEquals(18, cv.precision);
assertEquals(4, cv.scale);
assertEquals("0.0001", cv.vector[0].toString());
assertEquals("0.001", cv.vector[1].toString());
assertEquals("0.01", cv.vector[2].toString());
assertEquals("0.1", cv.vector[3].toString());
assertEquals("1", cv.vector[4].toString());
assertEquals("10", cv.vector[5].toString());
assertEquals("100", cv.vector[6].toString());
assertEquals("1000", cv.vector[7].toString());
assertEquals("10000", cv.vector[8].toString());
assertEquals("100000", cv.vector[9].toString());
assertEquals("1000000", cv.vector[10].toString());
assertEquals("10000000", cv.vector[11].toString());
assertEquals("100000000", cv.vector[12].toString());
assertEquals("1000000000", cv.vector[13].toString());
assertEquals("10000000000", cv.vector[14].toString());
assertEquals("100000000000", cv.vector[15].toString());
assertEquals("1000000000000", cv.vector[16].toString());
assertEquals("10000000000000", cv.vector[17].toString());
assertEquals("-2", cv.vector[18].toString());
assertFalse(rows.nextBatch(batch));
}
  /**
   * We test union, timestamp, and decimal separately since we need to make the
   * object inspector manually. (The Hive reflection-based object inspector
   * doesn't handle them properly.)
   */
@ParameterizedTest
@MethodSource("data")
public void testUnionAndTimestamp(Version fileFormat) throws Exception {
final TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
TypeDescription schema = TypeDescription.fromString(
"struct<time:timestamp," +
"union:uniontype<int,string>," +
"decimal:decimal(38,18)," +
"instant:timestamp with local time zone>"
);
HiveDecimal maxValue = HiveDecimal.create("10000000000000000000");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(1000)
.compress(CompressionKind.NONE)
.bufferSize(100)
.blockPadding(false)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 6;
setUnion(batch, 0, Timestamp.valueOf("2000-03-12 15:00:00"), 0, 42, null,
new HiveDecimalWritable("12345678.6547456"),
Timestamp.valueOf("2014-12-12 6:00:00"));
setUnion(batch, 1, Timestamp.valueOf("2000-03-20 12:00:00.123456789"),
1, null, "hello", new HiveDecimalWritable("-5643.234"),
Timestamp.valueOf("1996-12-11 11:00:00"));
setUnion(batch, 2, null, null, null, null, null, null);
setUnion(batch, 3, null, 0, null, null, null, null);
setUnion(batch, 4, null, 1, null, null, null, null);
setUnion(batch, 5, Timestamp.valueOf("1970-01-01 00:00:00"), 0, 200000,
null, new HiveDecimalWritable("10000000000000000000"),
Timestamp.valueOf("2011-07-01 09:00:00"));
writer.addRowBatch(batch);
batch.reset();
Random rand = new Random(42);
for(int i=1970; i < 2038; ++i) {
Timestamp ts = Timestamp.valueOf(i + "-05-05 12:34:56." + i);
HiveDecimal dec =
HiveDecimal.create(new BigInteger(64, rand), rand.nextInt(18));
if ((i & 1) == 0) {
setUnion(batch, batch.size++, ts, 0, i*i, null,
new HiveDecimalWritable(dec), null);
} else {
setUnion(batch, batch.size++, ts, 1, null, Integer.toString(i*i),
new HiveDecimalWritable(dec), null);
}
if (maxValue.compareTo(dec) < 0) {
maxValue = dec;
}
}
writer.addRowBatch(batch);
batch.reset();
// let's add a lot of constant rows to test the rle
batch.size = 1000;
for(int c=0; c < batch.cols.length; ++c) {
batch.cols[c].setRepeating(true);
}
((UnionColumnVector) batch.cols[1]).fields[0].isRepeating = true;
setUnion(batch, 0, null, 0, 1732050807, null, null, null);
for(int i=0; i < 5; ++i) {
writer.addRowBatch(batch);
}
batch.reset();
batch.size = 3;
setUnion(batch, 0, null, 0, 0, null, null, null);
setUnion(batch, 1, null, 0, 10, null, null, null);
setUnion(batch, 2, null, 0, 138, null, null, null);
writer.addRowBatch(batch);
// check the stats on the writer side
ColumnStatistics[] stats = writer.getStatistics();
assertEquals("1996-12-11 11:00:00.0",
((TimestampColumnStatistics) stats[6]).getMinimum().toString());
assertEquals("1996-12-11 11:00:00.0",
((TimestampColumnStatistics) stats[6]).getMinimumUTC().toString());
assertEquals("2014-12-12 06:00:00.0",
((TimestampColumnStatistics) stats[6]).getMaximum().toString());
assertEquals("2014-12-12 06:00:00.0",
((TimestampColumnStatistics) stats[6]).getMaximumUTC().toString());
writer.close();
TimeZone.setDefault(TimeZone.getTimeZone("America/New_York"));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
stats = reader.getStatistics();
// check the timestamp statistics
assertEquals("1970-01-01 00:00:00.0",
((TimestampColumnStatistics) stats[1]).getMinimum().toString());
assertEquals("1969-12-31 19:00:00.0",
((TimestampColumnStatistics) stats[1]).getMinimumUTC().toString());
assertEquals("2037-05-05 12:34:56.2037",
((TimestampColumnStatistics) stats[1]).getMaximum().toString());
assertEquals("2037-05-05 08:34:56.2037",
((TimestampColumnStatistics) stats[1]).getMaximumUTC().toString());
// check the instant statistics
assertEquals("1996-12-11 14:00:00.0",
((TimestampColumnStatistics) stats[6]).getMinimum().toString());
assertEquals("1996-12-11 14:00:00.0",
((TimestampColumnStatistics) stats[6]).getMinimumUTC().toString());
assertEquals("2014-12-12 09:00:00.0",
((TimestampColumnStatistics) stats[6]).getMaximum().toString());
assertEquals("2014-12-12 09:00:00.0",
((TimestampColumnStatistics) stats[6]).getMaximumUTC().toString());
schema = writer.getSchema();
assertEquals(6, schema.getMaximumId());
boolean[] expected = new boolean[] {false, false, false, false, false, false, false};
boolean[] included = OrcUtils.includeColumns("", schema);
assertTrue(Arrays.equals(expected, included));
expected = new boolean[] {false, true, false, false, false, true, false};
included = OrcUtils.includeColumns("time,decimal", schema);
assertTrue(Arrays.equals(expected, included));
expected = new boolean[] {false, false, true, true, true, false, false};
included = OrcUtils.includeColumns("union", schema);
assertTrue(Arrays.equals(expected, included));
assertFalse(reader.getMetadataKeys().iterator().hasNext());
assertEquals(5077, reader.getNumberOfRows());
DecimalColumnStatistics decStats =
(DecimalColumnStatistics) reader.getStatistics()[5];
assertEquals(71, decStats.getNumberOfValues());
assertEquals(HiveDecimal.create("-5643.234"), decStats.getMinimum());
assertEquals(maxValue, decStats.getMaximum());
// TODO: fix this
// assertEquals(null,stats.getSum());
int stripeCount = 0;
int rowCount = 0;
long currentOffset = -1;
for(StripeInformation stripe: reader.getStripes()) {
stripeCount += 1;
rowCount += stripe.getNumberOfRows();
if (currentOffset < 0) {
currentOffset = stripe.getOffset() + stripe.getLength();
} else {
assertEquals(currentOffset, stripe.getOffset());
currentOffset += stripe.getLength();
}
}
assertEquals(reader.getNumberOfRows(), rowCount);
assertEquals(2, stripeCount);
assertEquals(reader.getContentLength(), currentOffset);
RecordReader rows = reader.rows();
assertEquals(0, rows.getRowNumber());
assertEquals(0.0, rows.getProgress(), 0.000001);
schema = reader.getSchema();
batch = schema.createRowBatch(74);
assertEquals(0, rows.getRowNumber());
rows.nextBatch(batch);
assertEquals(74, batch.size);
assertEquals(74, rows.getRowNumber());
TimestampColumnVector ts = (TimestampColumnVector) batch.cols[0];
UnionColumnVector union = (UnionColumnVector) batch.cols[1];
LongColumnVector longs = (LongColumnVector) union.fields[0];
BytesColumnVector strs = (BytesColumnVector) union.fields[1];
DecimalColumnVector decs = (DecimalColumnVector) batch.cols[2];
TimestampColumnVector instant = (TimestampColumnVector) batch.cols[3];
assertEquals("struct<time:timestamp,union:uniontype<int,string>,decimal:decimal(38,18)," +
"instant:timestamp with local time zone>",
schema.toString());
assertEquals("2000-03-12 15:00:00.0", ts.asScratchTimestamp(0).toString());
assertEquals(0, union.tags[0]);
assertEquals(42, longs.vector[0]);
assertEquals("12345678.6547456", decs.vector[0].toString());
assertEquals("2014-12-12 09:00:00.0", instant.asScratchTimestamp(0).toString());
assertEquals("2000-03-20 12:00:00.123456789", ts.asScratchTimestamp(1).toString());
assertEquals(1, union.tags[1]);
assertEquals("hello", strs.toString(1));
assertEquals("-5643.234", decs.vector[1].toString());
assertEquals("1996-12-11 14:00:00.0", instant.asScratchTimestamp(1).toString());
assertFalse(ts.noNulls);
assertFalse(union.noNulls);
assertFalse(decs.noNulls);
assertTrue(ts.isNull[2]);
assertTrue(union.isNull[2]);
assertTrue(decs.isNull[2]);
assertTrue(ts.isNull[3]);
assertFalse(union.isNull[3]);
assertEquals(0, union.tags[3]);
assertTrue(longs.isNull[3]);
assertTrue(decs.isNull[3]);
assertTrue(ts.isNull[4]);
assertFalse(union.isNull[4]);
assertEquals(1, union.tags[4]);
assertTrue(strs.isNull[4]);
assertTrue(decs.isNull[4]);
assertFalse(ts.isNull[5]);
assertEquals("1970-01-01 00:00:00.0", ts.asScratchTimestamp(5).toString());
assertFalse(union.isNull[5]);
assertEquals(0, union.tags[5]);
assertFalse(longs.isNull[5]);
assertEquals(200000, longs.vector[5]);
assertFalse(decs.isNull[5]);
assertEquals("10000000000000000000", decs.vector[5].toString());
assertEquals("2011-07-01 12:00:00.0", instant.asScratchTimestamp(5).toString());
rand = new Random(42);
for(int i=1970; i < 2038; ++i) {
int row = 6 + i - 1970;
assertEquals(Timestamp.valueOf(i + "-05-05 12:34:56." + i),
ts.asScratchTimestamp(row));
if ((i & 1) == 0) {
assertEquals(0, union.tags[row]);
assertEquals(i*i, longs.vector[row]);
} else {
assertEquals(1, union.tags[row]);
assertEquals(Integer.toString(i * i), strs.toString(row));
}
assertEquals(new HiveDecimalWritable(HiveDecimal.create(new BigInteger(64, rand),
rand.nextInt(18))), decs.vector[row]);
}
// rebuild the row batch, so that we can read by 1000 rows
batch = schema.createRowBatch(1000);
ts = (TimestampColumnVector) batch.cols[0];
union = (UnionColumnVector) batch.cols[1];
longs = (LongColumnVector) union.fields[0];
strs = (BytesColumnVector) union.fields[1];
decs = (DecimalColumnVector) batch.cols[2];
for(int i=0; i < 5; ++i) {
rows.nextBatch(batch);
String msg = "batch " + i;
assertEquals(1000, batch.size, msg);
assertFalse(union.isRepeating, msg);
assertTrue(union.noNulls, msg);
for(int r=0; r < batch.size; ++r) {
assertEquals(0, union.tags[r], "bad tag at " + i + "." + r);
}
assertTrue(longs.isRepeating, msg);
assertEquals(1732050807, longs.vector[0], msg);
}
rows.nextBatch(batch);
assertEquals(3, batch.size);
assertEquals(0, union.tags[0]);
assertEquals(0, longs.vector[0]);
assertEquals(0, union.tags[1]);
assertEquals(10, longs.vector[1]);
assertEquals(0, union.tags[2]);
assertEquals(138, longs.vector[2]);
rows.nextBatch(batch);
assertEquals(0, batch.size);
assertEquals(1.0, rows.getProgress(), 0.00001);
assertEquals(reader.getNumberOfRows(), rows.getRowNumber());
rows.seekToRow(1);
rows.nextBatch(batch);
assertEquals(1000, batch.size);
assertEquals(Timestamp.valueOf("2000-03-20 12:00:00.123456789"), ts.asScratchTimestamp(0));
assertEquals(1, union.tags[0]);
assertEquals("hello", strs.toString(0));
assertEquals(new HiveDecimalWritable(HiveDecimal.create("-5643.234")), decs.vector[0]);
rows.close();
TimeZone.setDefault(original);
}
  /**
   * Read and write a randomly generated Snappy-compressed file.
   * @throws Exception if the file cannot be written or read back
   */
@ParameterizedTest
@MethodSource("data")
public void testSnappy(Version fileFormat) throws Exception {
TypeDescription schema = createInnerSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(1000)
.compress(CompressionKind.SNAPPY)
.bufferSize(100)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
Random rand;
writeRandomIntBytesBatches(writer, batch, 10, 1000);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(CompressionKind.SNAPPY, reader.getCompressionKind());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1000);
rand = new Random(12);
LongColumnVector longs = (LongColumnVector) batch.cols[0];
BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
for(int b=0; b < 10; ++b) {
rows.nextBatch(batch);
assertEquals(1000, batch.size);
for(int r=0; r < batch.size; ++r) {
assertEquals(rand.nextInt(), longs.vector[r]);
assertEquals(Integer.toHexString(rand.nextInt()), strs.toString(r));
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
rows.close();
}
  /**
   * Read and write a randomly generated LZO-compressed file.
   * @throws Exception if the file cannot be written or read back
   */
@ParameterizedTest
@MethodSource("data")
public void testLzo(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<x:bigint,y:int,z:bigint>");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(10000)
.compress(CompressionKind.LZO)
.bufferSize(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
Random rand = new Random(69);
batch.size = 1000;
for(int b=0; b < 10; ++b) {
for (int r=0; r < 1000; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = rand.nextInt();
((LongColumnVector) batch.cols[1]).vector[r] = b * 1000 + r;
((LongColumnVector) batch.cols[2]).vector[r] = rand.nextLong();
}
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(CompressionKind.LZO, reader.getCompressionKind());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1000);
rand = new Random(69);
for(int b=0; b < 10; ++b) {
rows.nextBatch(batch);
assertEquals(1000, batch.size);
for(int r=0; r < batch.size; ++r) {
assertEquals(rand.nextInt(),
((LongColumnVector) batch.cols[0]).vector[r]);
assertEquals(b * 1000 + r,
((LongColumnVector) batch.cols[1]).vector[r]);
assertEquals(rand.nextLong(),
((LongColumnVector) batch.cols[2]).vector[r]);
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
rows.close();
}
/**
* Read and write a randomly generated lz4 file.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testLz4(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<x:bigint,y:int,z:bigint>");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(10000)
.compress(CompressionKind.LZ4)
.bufferSize(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
Random rand = new Random(3);
batch.size = 1000;
for(int b=0; b < 10; ++b) {
for (int r=0; r < 1000; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = rand.nextInt();
((LongColumnVector) batch.cols[1]).vector[r] = b * 1000 + r;
((LongColumnVector) batch.cols[2]).vector[r] = rand.nextLong();
}
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(CompressionKind.LZ4, reader.getCompressionKind());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1000);
rand = new Random(3);
for(int b=0; b < 10; ++b) {
rows.nextBatch(batch);
assertEquals(1000, batch.size);
for(int r=0; r < batch.size; ++r) {
assertEquals(rand.nextInt(),
((LongColumnVector) batch.cols[0]).vector[r]);
assertEquals(b * 1000 + r,
((LongColumnVector) batch.cols[1]).vector[r]);
assertEquals(rand.nextLong(),
((LongColumnVector) batch.cols[2]).vector[r]);
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
rows.close();
}
/**
* Read and write a randomly generated zstd file.
*/
@ParameterizedTest
@MethodSource("data")
public void testZstd(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<x:bigint,y:int,z:bigint>");
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.ZSTD)
.bufferSize(1000)
.version(fileFormat))) {
VectorizedRowBatch batch = schema.createRowBatch();
Random rand = new Random(3);
batch.size = 1000;
for (int b = 0; b < 10; ++b) {
for (int r = 0; r < 1000; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = rand.nextInt();
((LongColumnVector) batch.cols[1]).vector[r] = b * 1000 + r;
((LongColumnVector) batch.cols[2]).vector[r] = rand.nextLong();
}
writer.addRowBatch(batch);
}
}
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows()) {
assertEquals(CompressionKind.ZSTD, reader.getCompressionKind());
VectorizedRowBatch batch = reader.getSchema().createRowBatch(1000);
Random rand = new Random(3);
for (int b = 0; b < 10; ++b) {
rows.nextBatch(batch);
assertEquals(1000, batch.size);
for (int r = 0; r < batch.size; ++r) {
assertEquals(rand.nextInt(),
((LongColumnVector) batch.cols[0]).vector[r]);
assertEquals(b * 1000 + r,
((LongColumnVector) batch.cols[1]).vector[r]);
assertEquals(rand.nextLong(),
((LongColumnVector) batch.cols[2]).vector[r]);
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
}
}
/**
* Read and write a file; verify codec usage.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testCodecPool(Version fileFormat) throws Exception {
OrcCodecPool.clear();
TypeDescription schema = createInnerSchema();
VectorizedRowBatch batch = schema.createRowBatch();
WriterOptions opts = OrcFile.writerOptions(conf)
.setSchema(schema).stripeSize(1000).bufferSize(100).version(fileFormat);
CompressionCodec snappyCodec, zlibCodec;
snappyCodec = writeBatchesAndGetCodec(10, 1000, opts.compress(CompressionKind.SNAPPY), batch);
assertEquals(1, OrcCodecPool.getPoolSize(CompressionKind.SNAPPY));
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(CompressionKind.SNAPPY, reader.getCompressionKind());
CompressionCodec codec = readBatchesAndGetCodec(reader, 10, 1000);
assertEquals(1, OrcCodecPool.getPoolSize(CompressionKind.SNAPPY));
assertSame(snappyCodec, codec);
reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(CompressionKind.SNAPPY, reader.getCompressionKind());
codec = readBatchesAndGetCodec(reader, 10, 1000);
assertSame(snappyCodec, codec);
assertEquals(1, OrcCodecPool.getPoolSize(CompressionKind.SNAPPY));
zlibCodec = writeBatchesAndGetCodec(10, 1000, opts.compress(CompressionKind.ZLIB), batch);
assertNotSame(snappyCodec, zlibCodec);
assertEquals(1, OrcCodecPool.getPoolSize(CompressionKind.ZLIB));
codec = writeBatchesAndGetCodec(10, 1000, opts.compress(CompressionKind.ZLIB), batch);
assertEquals(1, OrcCodecPool.getPoolSize(CompressionKind.ZLIB));
assertSame(zlibCodec, codec);
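    // Check the snappy codec out of the pool; the next writer has to create a
    // fresh instance, so the two snappy codecs are distinct objects.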
assertSame(snappyCodec, OrcCodecPool.getCodec(CompressionKind.SNAPPY));
CompressionCodec snappyCodec2 = writeBatchesAndGetCodec(
10, 1000, opts.compress(CompressionKind.SNAPPY), batch);
assertNotSame(snappyCodec, snappyCodec2);
OrcCodecPool.returnCodec(CompressionKind.SNAPPY, snappyCodec);
reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(CompressionKind.SNAPPY, reader.getCompressionKind());
codec = readBatchesAndGetCodec(reader, 10, 1000);
assertEquals(2, OrcCodecPool.getPoolSize(CompressionKind.SNAPPY));
assertTrue(snappyCodec == codec || snappyCodec2 == codec);
}
private CompressionCodec writeBatchesAndGetCodec(int count,
int size,
WriterOptions opts,
VectorizedRowBatch batch
) throws IOException {
fs.delete(testFilePath, false);
Writer writer = OrcFile.createWriter(testFilePath, opts);
CompressionCodec codec = ((WriterImpl) writer).getCompressionCodec();
writeRandomIntBytesBatches(writer, batch, count, size);
writer.close();
return codec;
}
private CompressionCodec readBatchesAndGetCodec(
Reader reader, int count, int size) throws IOException {
RecordReader rows = reader.rows();
VectorizedRowBatch batch = reader.getSchema().createRowBatch(size);
for (int b = 0; b < count; ++b) {
rows.nextBatch(batch);
}
CompressionCodec codec = ((RecordReaderImpl)rows).getCompressionCodec();
rows.close();
return codec;
}
private void readRandomBatches(
Reader reader, RecordReader rows, int count, int size) throws IOException {
}
private void writeRandomIntBytesBatches(
Writer writer, VectorizedRowBatch batch, int count, int size) throws IOException {
Random rand = new Random(12);
batch.size = size;
for(int b=0; b < count; ++b) {
for (int r=0; r < size; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = rand.nextInt();
((BytesColumnVector) batch.cols[1]).setVal(r,
Integer.toHexString(rand.nextInt()).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
}
}
/**
   * Write a randomly generated snappy file without a row index and verify
   * that it reads back correctly.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testWithoutIndex(Version fileFormat) throws Exception {
TypeDescription schema = createInnerSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(5000)
.compress(CompressionKind.SNAPPY)
.bufferSize(1000)
.rowIndexStride(0)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
Random rand = new Random(24);
batch.size = 5;
for(int c=0; c < batch.cols.length; ++c) {
batch.cols[c].setRepeating(true);
}
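    // Each addRowBatch call writes 5 copies of a single repeated row, so the
    // 10,000 batches below produce the 50,000 rows verified after the close.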
for(int i=0; i < 10000; ++i) {
((LongColumnVector) batch.cols[0]).vector[0] = rand.nextInt();
((BytesColumnVector) batch.cols[1])
.setVal(0, Integer.toBinaryString(rand.nextInt()).getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(50000, reader.getNumberOfRows());
assertEquals(0, reader.getRowIndexStride());
StripeInformation stripe = reader.getStripes().iterator().next();
assertTrue(stripe.getDataLength() != 0);
assertEquals(0, stripe.getIndexLength());
RecordReader rows = reader.rows();
rand = new Random(24);
batch = reader.getSchema().createRowBatch(1000);
LongColumnVector longs = (LongColumnVector) batch.cols[0];
BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
for(int i=0; i < 50; ++i) {
rows.nextBatch(batch);
assertEquals(1000, batch.size, "batch " + i);
for(int j=0; j < 200; ++j) {
int intVal = rand.nextInt();
String strVal = Integer.toBinaryString(rand.nextInt());
for (int k = 0; k < 5; ++k) {
assertEquals(intVal, longs.vector[j * 5 + k]);
assertEquals(strVal, strs.toString(j * 5 + k));
}
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
rows.close();
}
@ParameterizedTest
@MethodSource("data")
public void testSeek(Version fileFormat) throws Exception {
TypeDescription schema = createBigRowSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(200000)
.bufferSize(65536)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
Random rand = new Random(42);
final int COUNT=32768;
long[] intValues= new long[COUNT];
double[] doubleValues = new double[COUNT];
String[] stringValues = new String[COUNT];
BytesWritable[] byteValues = new BytesWritable[COUNT];
String[] words = new String[128];
for(int i=0; i < words.length; ++i) {
words[i] = Integer.toHexString(rand.nextInt());
}
for(int i=0; i < COUNT/2; ++i) {
intValues[2*i] = rand.nextLong();
intValues[2*i+1] = intValues[2*i];
stringValues[2*i] = words[rand.nextInt(words.length)];
stringValues[2*i+1] = stringValues[2*i];
}
for(int i=0; i < COUNT; ++i) {
doubleValues[i] = rand.nextDouble();
byte[] buf = new byte[20];
rand.nextBytes(buf);
byteValues[i] = new BytesWritable(buf);
}
for(int i=0; i < COUNT; ++i) {
appendRandomRow(batch, intValues, doubleValues, stringValues,
byteValues, words, i);
if (batch.size == 1024) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(COUNT, reader.getNumberOfRows());
RecordReader rows = reader.rows();
// get the row index
InStream.StreamOptions options = InStream.options();
if (reader.getCompressionKind() != CompressionKind.NONE) {
options.withCodec(OrcCodecPool.getCodec(reader.getCompressionKind()))
.withBufferSize(reader.getCompressionSize());
}
DataReader meta = RecordReaderUtils.createDefaultDataReader(
DataReaderProperties.builder()
.withFileSystem(fs)
.withPath(testFilePath)
.withCompression(options)
.withZeroCopy(false)
.build());
StripePlanner planner = new StripePlanner(schema, new ReaderEncryption(),
meta, reader.getWriterVersion(), true, Integer.MAX_VALUE);
boolean[] columns = new boolean[schema.getMaximumId() + 1];
Arrays.fill(columns, true);
OrcIndex index = planner.parseStripe(reader.getStripes().get(0), columns)
.readRowIndex(null, null);
// check the primitive columns to make sure they have the right number of
// items in the first row group
for(int c=1; c < 9; ++c) {
OrcProto.RowIndex colIndex = index.getRowGroupIndex()[c];
assertEquals(1000,
colIndex.getEntry(0).getStatistics().getNumberOfValues());
}
batch = reader.getSchema().createRowBatch();
int nextRowInBatch = -1;
for(int i=COUNT-1; i >= 0; --i, --nextRowInBatch) {
// if we have consumed the previous batch read a new one
if (nextRowInBatch < 0) {
long base = Math.max(i - 1023, 0);
rows.seekToRow(base);
assertTrue(rows.nextBatch(batch), "row " + i);
nextRowInBatch = batch.size - 1;
}
checkRandomRow(batch, intValues, doubleValues,
stringValues, byteValues, words, i, nextRowInBatch);
}
rows.close();
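    // Walk the stripes to record the offsets of stripes 2 and 4 (0-based) and
    // the index of the last row in stripe 2, so that a narrow range read plus
    // seekToRow can be exercised below.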
Iterator<StripeInformation> stripeIterator =
reader.getStripes().iterator();
long offsetOfStripe2 = 0;
long offsetOfStripe4 = 0;
long lastRowOfStripe2 = 0;
for(int i = 0; i < 5; ++i) {
StripeInformation stripe = stripeIterator.next();
if (i < 2) {
lastRowOfStripe2 += stripe.getNumberOfRows();
} else if (i == 2) {
offsetOfStripe2 = stripe.getOffset();
lastRowOfStripe2 += stripe.getNumberOfRows() - 1;
} else if (i == 4) {
offsetOfStripe4 = stripe.getOffset();
}
}
Arrays.fill(columns, false);
    columns[5] = true; // long column
columns[9] = true; // text column
rows = reader.rows(reader.options()
.range(offsetOfStripe2, offsetOfStripe4 - offsetOfStripe2)
.include(columns));
rows.seekToRow(lastRowOfStripe2);
// we only want two rows
batch = reader.getSchema().createRowBatch(2);
assertTrue(rows.nextBatch(batch));
assertEquals(1, batch.size);
assertEquals(intValues[(int) lastRowOfStripe2], getLong(batch, 0));
assertEquals(stringValues[(int) lastRowOfStripe2],
getText(batch, 0).toString());
assertTrue(rows.nextBatch(batch));
assertEquals(intValues[(int) lastRowOfStripe2 + 1], getLong(batch, 0));
assertEquals(stringValues[(int) lastRowOfStripe2 + 1],
getText(batch, 0).toString());
rows.close();
}
private void appendRandomRow(VectorizedRowBatch batch,
long[] intValues, double[] doubleValues,
String[] stringValues,
BytesWritable[] byteValues,
String[] words, int i) {
InnerStruct inner = new InnerStruct((int) intValues[i], stringValues[i]);
InnerStruct inner2 = new InnerStruct((int) (intValues[i] >> 32),
words[i % words.length] + "-x");
setBigRow(batch, batch.size++, (intValues[i] & 1) == 0, (byte) intValues[i],
(short) intValues[i], (int) intValues[i], intValues[i],
(float) doubleValues[i], doubleValues[i], byteValues[i], stringValues[i],
new MiddleStruct(inner, inner2), list(), map(inner, inner2));
}
private void checkRandomRow(VectorizedRowBatch batch,
long[] intValues, double[] doubleValues,
String[] stringValues,
BytesWritable[] byteValues,
String[] words, int i, int rowInBatch) {
InnerStruct inner = new InnerStruct((int) intValues[i], stringValues[i]);
InnerStruct inner2 = new InnerStruct((int) (intValues[i] >> 32),
words[i % words.length] + "-x");
checkBigRow(batch, rowInBatch, i, (intValues[i] & 1) == 0, (byte) intValues[i],
(short) intValues[i], (int) intValues[i], intValues[i],
(float) doubleValues[i], doubleValues[i], byteValues[i], stringValues[i],
new MiddleStruct(inner, inner2), list(), map(inner, inner2));
}
@ParameterizedTest
@MethodSource("data")
public void testMemoryManagement(Version fileFormat) throws Exception {
OrcConf.ROWS_BETWEEN_CHECKS.setLong(conf, 100);
final long POOL_SIZE = 50_000;
TypeDescription schema = createInnerSchema();
MemoryManagerImpl memoryMgr = new MemoryManagerImpl(POOL_SIZE);
// set up 10 files that all request the full size.
MemoryManager.Callback ignore = newScale -> false;
for(int f=0; f < 9; ++f) {
memoryMgr.addWriter(new Path("file-" + f), POOL_SIZE, ignore);
}
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.NONE)
.stripeSize(POOL_SIZE)
.bufferSize(100)
.rowIndexStride(0)
.memory(memoryMgr)
.version(fileFormat));
// check to make sure it is 10%
assertEquals(0.1, memoryMgr.getAllocationScale(), 0.001);
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1;
for(int i=0; i < 2500; ++i) {
((LongColumnVector) batch.cols[0]).vector[0] = i * 300;
((BytesColumnVector) batch.cols[1]).setVal(0,
Integer.toHexString(10*i).getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
}
writer.close();
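    // Closing the writer removes it from the memory manager, leaving 9 writers
    // to split the pool: the allocation scale rises to 1/9, roughly 0.111.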
assertEquals(0.111, memoryMgr.getAllocationScale(), 0.001);
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
int i = 0;
for(StripeInformation stripe: reader.getStripes()) {
i += 1;
assertTrue(stripe.getDataLength() < POOL_SIZE,
"stripe " + i + " is too long at " + stripe.getDataLength());
}
// 0.11 always uses the dictionary, so ends up with a lot more stripes
assertEquals(fileFormat == OrcFile.Version.V_0_11 ? 25 : 3, i);
assertEquals(2500, reader.getNumberOfRows());
}
@ParameterizedTest
@MethodSource("data")
public void testPredicatePushdown(Version fileFormat) throws Exception {
TypeDescription schema = createInnerSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(400000L)
.compress(CompressionKind.NONE)
.bufferSize(500)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.ensureSize(3500);
batch.size = 3500;
for(int i=0; i < 3500; ++i) {
((LongColumnVector) batch.cols[0]).vector[i] = i * 300;
((BytesColumnVector) batch.cols[1]).setVal(i,
Integer.toHexString(10*i).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(3500, reader.getNumberOfRows());
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.startNot()
.lessThan("int1", PredicateLeaf.Type.LONG, 300000L)
.end()
.lessThan("int1", PredicateLeaf.Type.LONG, 600000L)
.end()
.build();
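    // int1 was written as i * 300, so this sarg keeps rows with
    // 300,000 <= int1 < 600,000, i.e. rows 1000-1999; the reader should skip
    // directly to the second row group.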
RecordReader rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true})
.searchArgument(sarg, new String[]{null, "int1", "string1"}));
batch = reader.getSchema().createRowBatch(2000);
LongColumnVector ints = (LongColumnVector) batch.cols[0];
BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
assertEquals(1000L, rows.getRowNumber());
assertTrue(rows.nextBatch(batch));
assertEquals(1000, batch.size);
for(int i=1000; i < 2000; ++i) {
assertEquals(300 * i, ints.vector[i - 1000]);
assertEquals(Integer.toHexString(10*i), strs.toString(i - 1000));
}
assertFalse(rows.nextBatch(batch));
assertEquals(3500, rows.getRowNumber());
// look through the file with no rows selected
sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThan("int1", PredicateLeaf.Type.LONG, 0L)
.end()
.build();
rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true})
.searchArgument(sarg, new String[]{null, "int1", "string1"}));
assertEquals(3500L, rows.getRowNumber());
assertFalse(rows.nextBatch(batch));
// select first 100 and last 100 rows
sarg = SearchArgumentFactory.newBuilder()
.startOr()
.lessThan("int1", PredicateLeaf.Type.LONG, 300L * 100)
.startNot()
.lessThan("int1", PredicateLeaf.Type.LONG, 300L * 3400)
.end()
.end()
.build();
rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true})
.searchArgument(sarg, new String[]{null, "int1", "string1"})
.allowSARGToFilter(false));
assertEquals(0, rows.getRowNumber());
assertTrue(rows.nextBatch(batch));
assertEquals(1000, batch.size);
assertEquals(3000, rows.getRowNumber());
for(int i=0; i < 1000; ++i) {
assertEquals(300 * i, ints.vector[i]);
assertEquals(Integer.toHexString(10*i), strs.toString(i));
}
assertTrue(rows.nextBatch(batch));
assertEquals(500, batch.size);
assertEquals(3500, rows.getRowNumber());
for(int i=3000; i < 3500; ++i) {
assertEquals(300 * i, ints.vector[i - 3000]);
assertEquals(Integer.toHexString(10*i), strs.toString(i - 3000));
}
assertFalse(rows.nextBatch(batch));
assertEquals(3500, rows.getRowNumber());
}
@ParameterizedTest
@MethodSource("data")
public void testQuotedPredicatePushdown(Version fileFormat) throws Exception {
TypeDescription schema = createQuotedSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(400000L)
.compress(CompressionKind.NONE)
.bufferSize(500)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.ensureSize(3500);
batch.size = 3500;
for(int i=0; i < 3500; ++i) {
((LongColumnVector) batch.cols[0]).vector[i] = i * 300;
((BytesColumnVector) batch.cols[1]).setVal(i,
Integer.toHexString(10*i).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(3500, reader.getNumberOfRows());
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.startNot()
.lessThan("`int1`", PredicateLeaf.Type.LONG, 300000L)
.end()
.lessThan("`int1`", PredicateLeaf.Type.LONG, 600000L)
.end()
.build();
RecordReader rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true})
.searchArgument(sarg, new String[]{null, "`int1`", "string1"}));
batch = reader.getSchema().createRowBatch(2000);
assertEquals(1000L, rows.getRowNumber());
assertTrue(rows.nextBatch(batch));
assertEquals(1000, batch.size);
// Validate the same behaviour with schemaFromString
fs.delete(testFilePath, false);
TypeDescription qSchema = createQuotedSchemaFromString();
// [`int1`, `string1`]
assertEquals(schema.getFieldNames(), qSchema.getFieldNames());
Writer writerSchemaFromStr = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(qSchema)
.stripeSize(400000L)
.compress(CompressionKind.NONE)
.bufferSize(500)
.rowIndexStride(1000)
.version(fileFormat));
batch = qSchema.createRowBatch();
batch.ensureSize(3500);
batch.size = 3500;
for(int i=0; i < 3500; ++i) {
((LongColumnVector) batch.cols[0]).vector[i] = i * 300;
((BytesColumnVector) batch.cols[1]).setVal(i,
Integer.toHexString(10*i).getBytes(StandardCharsets.UTF_8));
}
writerSchemaFromStr.addRowBatch(batch);
writerSchemaFromStr.close();
Reader readerSchemaFromStr = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(3500, readerSchemaFromStr.getNumberOfRows());
sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.startNot()
.lessThan("`int1`", PredicateLeaf.Type.LONG, 300000L)
.end()
.lessThan("`int1`", PredicateLeaf.Type.LONG, 600000L)
.end()
.build();
rows = readerSchemaFromStr.rows(readerSchemaFromStr.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true})
.searchArgument(sarg, new String[]{null, "`int1`", "string1"}));
batch = readerSchemaFromStr.getSchema().createRowBatch(2000);
assertEquals(1000L, rows.getRowNumber());
assertTrue(rows.nextBatch(batch));
assertEquals(1000, batch.size);
assertEquals(reader.getSchema(), readerSchemaFromStr.getSchema());
assertEquals(writer.getSchema(), writerSchemaFromStr.getSchema());
}
/**
* Test all of the types that have distinct ORC writers using the vectorized
* writer with different combinations of repeating and null values.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testRepeating(Version fileFormat) throws Exception {
// create a row type with each type that has a unique writer
// really just folds short, int, and long together
TypeDescription schema = TypeDescription.createStruct()
.addField("bin", TypeDescription.createBinary())
.addField("bool", TypeDescription.createBoolean())
.addField("byte", TypeDescription.createByte())
.addField("long", TypeDescription.createLong())
.addField("float", TypeDescription.createFloat())
.addField("double", TypeDescription.createDouble())
.addField("date", TypeDescription.createDate())
.addField("time", TypeDescription.createTimestamp())
.addField("dec", TypeDescription.createDecimal()
.withPrecision(20).withScale(6))
.addField("string", TypeDescription.createString())
.addField("char", TypeDescription.createChar().withMaxLength(10))
.addField("vc", TypeDescription.createVarchar().withMaxLength(10))
.addField("struct", TypeDescription.createStruct()
.addField("sub1", TypeDescription.createInt()))
.addField("union", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createString())
.addUnionChild(TypeDescription.createInt()))
.addField("list", TypeDescription
.createList(TypeDescription.createInt()))
.addField("map",
TypeDescription.createMap(TypeDescription.createString(),
TypeDescription.createString()));
VectorizedRowBatch batch = schema.createRowBatch();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(1000)
.version(fileFormat));
// write 1024 repeating nulls
batch.size = 1024;
for(int c = 0; c < batch.cols.length; ++c) {
batch.cols[c].setRepeating(true);
batch.cols[c].noNulls = false;
batch.cols[c].isNull[0] = true;
}
writer.addRowBatch(batch);
// write 1024 repeating non-null
for(int c =0; c < batch.cols.length; ++c) {
batch.cols[c].isNull[0] = false;
}
((BytesColumnVector) batch.cols[0]).setVal(0, "Horton".getBytes(StandardCharsets.UTF_8));
((LongColumnVector) batch.cols[1]).vector[0] = 1;
((LongColumnVector) batch.cols[2]).vector[0] = 130;
((LongColumnVector) batch.cols[3]).vector[0] = 0x123456789abcdef0L;
((DoubleColumnVector) batch.cols[4]).vector[0] = 1.125;
((DoubleColumnVector) batch.cols[5]).vector[0] = 0.0009765625;
((LongColumnVector) batch.cols[6]).vector[0] =
new DateWritable(new Date(111, 6, 1)).getDays();
((TimestampColumnVector) batch.cols[7]).set(0,
new Timestamp(115, 9, 23, 10, 11, 59,
999999999));
((DecimalColumnVector) batch.cols[8]).vector[0] =
new HiveDecimalWritable("1.234567");
((BytesColumnVector) batch.cols[9]).setVal(0, "Echelon".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[10]).setVal(0, "Juggernaut".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[11]).setVal(0, "Dreadnaught".getBytes(StandardCharsets.UTF_8));
((LongColumnVector) ((StructColumnVector) batch.cols[12]).fields[0])
.vector[0] = 123;
((UnionColumnVector) batch.cols[13]).tags[0] = 1;
((LongColumnVector) ((UnionColumnVector) batch.cols[13]).fields[1])
.vector[0] = 1234;
((ListColumnVector) batch.cols[14]).offsets[0] = 0;
((ListColumnVector) batch.cols[14]).lengths[0] = 3;
((ListColumnVector) batch.cols[14]).child.isRepeating = true;
((LongColumnVector) ((ListColumnVector) batch.cols[14]).child).vector[0]
= 31415;
((MapColumnVector) batch.cols[15]).offsets[0] = 0;
((MapColumnVector) batch.cols[15]).lengths[0] = 3;
((MapColumnVector) batch.cols[15]).values.isRepeating = true;
((BytesColumnVector) ((MapColumnVector) batch.cols[15]).keys)
.setVal(0, "ORC".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) ((MapColumnVector) batch.cols[15]).keys)
.setVal(1, "Hive".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) ((MapColumnVector) batch.cols[15]).keys)
.setVal(2, "LLAP".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) ((MapColumnVector) batch.cols[15]).values)
.setVal(0, "fast".getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
    // write 1024 non-repeating nulls
for(int c = 0; c < batch.cols.length; ++c) {
batch.cols[c].setRepeating(false);
batch.cols[c].noNulls = false;
Arrays.fill(batch.cols[c].isNull, true);
}
writer.addRowBatch(batch);
// add 1024 rows of non-null, non-repeating
batch.reset();
batch.size = 1024;
((ListColumnVector) batch.cols[14]).child.ensureSize(3 * 1024, false);
((MapColumnVector) batch.cols[15]).keys.ensureSize(3 * 1024, false);
((MapColumnVector) batch.cols[15]).values.ensureSize(3 * 1024, false);
for(int r=0; r < 1024; ++r) {
((BytesColumnVector) batch.cols[0]).setVal(r,
Integer.toHexString(r).getBytes(StandardCharsets.UTF_8));
((LongColumnVector) batch.cols[1]).vector[r] = r % 2;
((LongColumnVector) batch.cols[2]).vector[r] = (r % 255);
((LongColumnVector) batch.cols[3]).vector[r] = 31415L * r;
((DoubleColumnVector) batch.cols[4]).vector[r] = 1.125 * r;
((DoubleColumnVector) batch.cols[5]).vector[r] = 0.0009765625 * r;
((LongColumnVector) batch.cols[6]).vector[r] =
new DateWritable(new Date(111, 6, 1)).getDays() + r;
Timestamp ts = new Timestamp(115, 9, 25, 10, 11, 59 + r, 999999999);
((TimestampColumnVector) batch.cols[7]).set(r, ts);
((DecimalColumnVector) batch.cols[8]).vector[r] =
new HiveDecimalWritable("1.234567");
((BytesColumnVector) batch.cols[9]).setVal(r,
Integer.toString(r).getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[10]).setVal(r,
Integer.toHexString(r).getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[11]).setVal(r,
Integer.toHexString(r * 128).getBytes(StandardCharsets.UTF_8));
((LongColumnVector) ((StructColumnVector) batch.cols[12]).fields[0])
.vector[r] = r + 13;
((UnionColumnVector) batch.cols[13]).tags[r] = 1;
((LongColumnVector) ((UnionColumnVector) batch.cols[13]).fields[1])
.vector[r] = r + 42;
((ListColumnVector) batch.cols[14]).offsets[r] = 3 * r;
((ListColumnVector) batch.cols[14]).lengths[r] = 3;
for(int i=0; i < 3; ++i) {
((LongColumnVector) ((ListColumnVector) batch.cols[14]).child)
.vector[3 * r + i] = 31415 + i;
}
((MapColumnVector) batch.cols[15]).offsets[r] = 3 * r;
((MapColumnVector) batch.cols[15]).lengths[r] = 3;
for(int i=0; i < 3; ++i) {
((BytesColumnVector) ((MapColumnVector) batch.cols[15]).keys)
.setVal(3 * r + i, Integer.toHexString(3 * r + i).getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) ((MapColumnVector) batch.cols[15]).values)
.setVal(3 * r + i, Integer.toString(3 * r + i).getBytes(StandardCharsets.UTF_8));
}
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check the stats
ColumnStatistics[] stats = reader.getStatistics();
assertArrayEquals(stats, writer.getStatistics());
assertEquals(4096, stats[0].getNumberOfValues());
assertFalse(stats[0].hasNull());
for(TypeDescription colType: schema.getChildren()) {
assertEquals(2048, stats[colType.getId()].getNumberOfValues(),
"count on " + colType.getId());
assertTrue(stats[colType.getId()].hasNull(), "hasNull on " + colType.getId());
}
assertEquals(8944, ((BinaryColumnStatistics) stats[1]).getSum());
assertEquals(1536, ((BooleanColumnStatistics) stats[2]).getTrueCount());
assertEquals(512, ((BooleanColumnStatistics) stats[2]).getFalseCount());
assertFalse(((IntegerColumnStatistics) stats[4]).isSumDefined());
assertEquals(0, ((IntegerColumnStatistics) stats[4]).getMinimum());
assertEquals(0x123456789abcdef0L,
((IntegerColumnStatistics) stats[4]).getMaximum());
assertEquals("0", ((StringColumnStatistics) stats[10]).getMinimum());
assertEquals("Echelon", ((StringColumnStatistics) stats[10]).getMaximum());
assertEquals(10154, ((StringColumnStatistics) stats[10]).getSum());
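    // char(10) values are space-padded to their full length, so each of the
    // 2048 non-null values contributes 10 characters to the sum.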
assertEquals("0 ",
((StringColumnStatistics) stats[11]).getMinimum());
assertEquals("ff ",
((StringColumnStatistics) stats[11]).getMaximum());
assertEquals(20480, ((StringColumnStatistics) stats[11]).getSum());
assertEquals("0",
((StringColumnStatistics) stats[12]).getMinimum());
assertEquals("ff80",
((StringColumnStatistics) stats[12]).getMaximum());
assertEquals(14813, ((StringColumnStatistics) stats[12]).getSum());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1024);
BytesColumnVector bins = (BytesColumnVector) batch.cols[0];
LongColumnVector bools = (LongColumnVector) batch.cols[1];
LongColumnVector bytes = (LongColumnVector) batch.cols[2];
LongColumnVector longs = (LongColumnVector) batch.cols[3];
DoubleColumnVector floats = (DoubleColumnVector) batch.cols[4];
DoubleColumnVector doubles = (DoubleColumnVector) batch.cols[5];
LongColumnVector dates = (LongColumnVector) batch.cols[6];
TimestampColumnVector times = (TimestampColumnVector) batch.cols[7];
DecimalColumnVector decs = (DecimalColumnVector) batch.cols[8];
BytesColumnVector strs = (BytesColumnVector) batch.cols[9];
BytesColumnVector chars = (BytesColumnVector) batch.cols[10];
BytesColumnVector vcs = (BytesColumnVector) batch.cols[11];
StructColumnVector structs = (StructColumnVector) batch.cols[12];
UnionColumnVector unions = (UnionColumnVector) batch.cols[13];
ListColumnVector lists = (ListColumnVector) batch.cols[14];
MapColumnVector maps = (MapColumnVector) batch.cols[15];
LongColumnVector structInts = (LongColumnVector) structs.fields[0];
LongColumnVector unionInts = (LongColumnVector) unions.fields[1];
LongColumnVector listInts = (LongColumnVector) lists.child;
BytesColumnVector mapKeys = (BytesColumnVector) maps.keys;
BytesColumnVector mapValues = (BytesColumnVector) maps.values;
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
// read the 1024 nulls
for(int f=0; f < batch.cols.length; ++f) {
assertTrue(batch.cols[f].isRepeating, "field " + f);
assertFalse(batch.cols[f].noNulls, "field " + f);
assertTrue(batch.cols[f].isNull[0], "field " + f);
}
// read the 1024 repeat values
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
String msg = "row " + r;
assertEquals("Horton", bins.toString(r), msg);
assertEquals(1, bools.vector[r], msg);
assertEquals(-126, bytes.vector[r], msg);
assertEquals(1311768467463790320L, longs.vector[r], msg);
assertEquals(1.125, floats.vector[r], 0.00001, msg);
assertEquals(9.765625E-4, doubles.vector[r], 0.000001, msg);
assertEquals("2011-07-01", new DateWritable((int) dates.vector[r]).toString(), msg);
assertEquals("2015-10-23 10:11:59.999999999", times.asScratchTimestamp(r).toString(), msg);
assertEquals("1.234567", decs.vector[r].toString(), msg);
assertEquals("Echelon", strs.toString(r), msg);
assertEquals("Juggernaut", chars.toString(r), msg);
assertEquals("Dreadnaugh", vcs.toString(r), msg);
assertEquals(123, structInts.vector[r], msg);
assertEquals(1, unions.tags[r], msg);
assertEquals(1234, unionInts.vector[r], msg);
assertEquals(3, lists.lengths[r], msg);
assertTrue(listInts.isRepeating, msg);
assertEquals(31415, listInts.vector[0], msg);
assertEquals(3, maps.lengths[r], msg);
assertEquals("ORC", mapKeys.toString((int) maps.offsets[r]), msg);
assertEquals("Hive", mapKeys.toString((int) maps.offsets[r] + 1), msg);
assertEquals("LLAP", mapKeys.toString((int) maps.offsets[r] + 2), msg);
assertEquals("fast", mapValues.toString((int) maps.offsets[r]), msg);
assertEquals("fast", mapValues.toString((int) maps.offsets[r] + 1), msg);
assertEquals("fast", mapValues.toString((int) maps.offsets[r] + 2), msg);
}
// read the second set of 1024 nulls
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int f=0; f < batch.cols.length; ++f) {
assertTrue(batch.cols[f].isRepeating, "field " + f);
assertFalse(batch.cols[f].noNulls, "field " + f);
assertTrue(batch.cols[f].isNull[0], "field " + f);
}
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
String hex = Integer.toHexString(r);
String msg = "row " + r;
assertEquals(hex, bins.toString(r), msg);
assertEquals(r % 2 == 1 ? 1 : 0, bools.vector[r], msg);
assertEquals((byte) (r % 255), bytes.vector[r], msg);
assertEquals(31415L * r, longs.vector[r], msg);
assertEquals(1.125F * r, floats.vector[r], 0.0001, msg);
assertEquals(0.0009765625 * r, doubles.vector[r], 0.000001, msg);
assertEquals(new DateWritable(new Date(111, 6, 1 + r)),
new DateWritable((int) dates.vector[r]), msg);
assertEquals(
new Timestamp(115, 9, 25, 10, 11, 59 + r, 999999999),
times.asScratchTimestamp(r), msg);
assertEquals("1.234567", decs.vector[r].toString(), msg);
assertEquals(Integer.toString(r), strs.toString(r), msg);
assertEquals(Integer.toHexString(r), chars.toString(r), msg);
assertEquals(Integer.toHexString(r * 128), vcs.toString(r), msg);
assertEquals(r + 13, structInts.vector[r], msg);
assertEquals(1, unions.tags[r], msg);
assertEquals(r + 42, unionInts.vector[r], msg);
assertEquals(3, lists.lengths[r], msg);
assertEquals(31415, listInts.vector[(int) lists.offsets[r]], msg);
assertEquals(31416, listInts.vector[(int) lists.offsets[r] + 1], msg);
assertEquals(31417, listInts.vector[(int) lists.offsets[r] + 2], msg);
      assertEquals(3, maps.lengths[r], msg);
assertEquals(Integer.toHexString(3 * r), mapKeys.toString((int) maps.offsets[r]), msg);
assertEquals(Integer.toString(3 * r), mapValues.toString((int) maps.offsets[r]), msg);
assertEquals(Integer.toHexString(3 * r + 1), mapKeys.toString((int) maps.offsets[r] + 1), msg);
assertEquals(Integer.toString(3 * r + 1), mapValues.toString((int) maps.offsets[r] + 1), msg);
assertEquals(Integer.toHexString(3 * r + 2), mapKeys.toString((int) maps.offsets[r] + 2), msg);
assertEquals(Integer.toString(3 * r + 2), mapValues.toString((int) maps.offsets[r] + 2), msg);
}
// should have no more rows
assertFalse(rows.nextBatch(batch));
}
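  /**
   * Convert the value at the given row of a BytesColumnVector to a String,
   * honoring the repeating and null flags; returns null for a null value.
   */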
private static String makeString(BytesColumnVector vector, int row) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
return new String(vector.vector[row], vector.start[row],
vector.length[row], StandardCharsets.UTF_8);
} else {
return null;
}
}
/**
* Test the char and varchar padding and truncation.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testStringPadding(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("char", TypeDescription.createChar().withMaxLength(10))
.addField("varchar", TypeDescription.createVarchar().withMaxLength(10));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 4;
for(int c=0; c < batch.cols.length; ++c) {
((BytesColumnVector) batch.cols[c]).setVal(0, "".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[c]).setVal(1, "xyz".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[c]).setVal(2, "0123456789".getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[c]).setVal(3,
"0123456789abcdef".getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(4, batch.size);
// ORC currently trims the output strings. See HIVE-12286
assertEquals("",
makeString((BytesColumnVector) batch.cols[0], 0));
assertEquals("xyz",
makeString((BytesColumnVector) batch.cols[0], 1));
assertEquals("0123456789",
makeString((BytesColumnVector) batch.cols[0], 2));
assertEquals("0123456789",
makeString((BytesColumnVector) batch.cols[0], 3));
assertEquals("",
makeString((BytesColumnVector) batch.cols[1], 0));
assertEquals("xyz",
makeString((BytesColumnVector) batch.cols[1], 1));
assertEquals("0123456789",
makeString((BytesColumnVector) batch.cols[1], 2));
assertEquals("0123456789",
makeString((BytesColumnVector) batch.cols[1], 3));
}
/**
   * Test adding a repeating batch to a column that isn't using dictionary
   * encoding.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testNonDictionaryRepeatingString(Version fileFormat) throws Exception {
assumeTrue(fileFormat != OrcFile.Version.V_0_11);
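    // Skip 0.11 files: that version always uses dictionary encoding for
    // strings (see testMemoryManagement), so the non-dictionary path under
    // test would never be exercised.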
TypeDescription schema = TypeDescription.createStruct()
.addField("str", TypeDescription.createString());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
for(int r=0; r < batch.size; ++r) {
((BytesColumnVector) batch.cols[0]).setVal(r,
Integer.toString(r * 10001).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
batch.cols[0].isRepeating = true;
((BytesColumnVector) batch.cols[0]).setVal(0, "Halloween".getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
assertEquals(Integer.toString(r * 10001),
makeString((BytesColumnVector) batch.cols[0], r));
}
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
assertEquals("Halloween",
makeString((BytesColumnVector) batch.cols[0], r));
}
assertFalse(rows.nextBatch(batch));
}
@ParameterizedTest
@MethodSource("data")
public void testStructs(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("struct", TypeDescription.createStruct()
.addField("inner", TypeDescription.createLong()));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
StructColumnVector outer = (StructColumnVector) batch.cols[0];
outer.noNulls = false;
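    // Rows 0-199, 400-599, and 800-1023 are null at the struct level; the
    // child values are still populated but should be ignored by the reader.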
for(int r=0; r < 1024; ++r) {
if (r < 200 || (r >= 400 && r < 600) || r >= 800) {
outer.isNull[r] = true;
}
((LongColumnVector) outer.fields[0]).vector[r] = r;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
rows.nextBatch(batch);
assertEquals(1024, batch.size);
StructColumnVector inner = (StructColumnVector) batch.cols[0];
LongColumnVector vec = (LongColumnVector) inner.fields[0];
for(int r=0; r < 1024; ++r) {
if (r < 200 || (r >= 400 && r < 600) || r >= 800) {
assertTrue(inner.isNull[r], "row " + r);
} else {
assertFalse(inner.isNull[r], "row " + r);
assertEquals(r, vec.vector[r], "row " + r);
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
}
/**
* Test Unions.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testUnions(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("outer", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createInt())
.addUnionChild(TypeDescription.createLong()));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
UnionColumnVector outer = (UnionColumnVector) batch.cols[0];
batch.cols[0].noNulls = false;
for(int r=0; r < 1024; ++r) {
if (r < 200) {
outer.isNull[r] = true;
} else if (r < 300) {
outer.tags[r] = 0;
} else if (r < 400) {
outer.tags[r] = 1;
} else if (r < 600) {
outer.isNull[r] = true;
} else if (r < 800) {
outer.tags[r] = 1;
} else if (r < 1000) {
outer.isNull[r] = true;
} else {
outer.tags[r] = 1;
}
((LongColumnVector) outer.fields[0]).vector[r] = r;
((LongColumnVector) outer.fields[1]).vector[r] = -r;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1024);
UnionColumnVector union = (UnionColumnVector) batch.cols[0];
LongColumnVector ints = (LongColumnVector) union.fields[0];
LongColumnVector longs = (LongColumnVector) union.fields[1];
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
String msg = "row " + r;
if (r < 200) {
assertTrue(union.isNull[r], msg);
} else if (r < 300) {
assertFalse(union.isNull[r], msg);
assertEquals(0, union.tags[r], msg);
assertEquals(r, ints.vector[r], msg);
} else if (r < 400) {
assertFalse(union.isNull[r], msg);
assertEquals(1, union.tags[r], msg);
assertEquals(-r, longs.vector[r], msg);
} else if (r < 600) {
assertTrue(union.isNull[r], msg);
} else if (r < 800) {
assertFalse(union.isNull[r], msg);
assertEquals(1, union.tags[r], msg);
assertEquals(-r, longs.vector[r], msg);
} else if (r < 1000) {
assertTrue(union.isNull[r], msg);
} else {
assertFalse(union.isNull[r], msg);
assertEquals(1, union.tags[r], msg);
assertEquals(-r, longs.vector[r], msg);
}
}
assertFalse(rows.nextBatch(batch));
}
/**
* Test lists and how they interact with the child column. In particular,
   * put nulls between back-to-back lists and then make some lists that
   * overlap.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testLists(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("list",
TypeDescription.createList(TypeDescription.createLong()));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
ListColumnVector list = (ListColumnVector) batch.cols[0];
list.noNulls = false;
for(int r=0; r < 1024; ++r) {
if (r < 200) {
list.isNull[r] = true;
} else if (r < 300) {
list.offsets[r] = r - 200;
list.lengths[r] = 1;
} else if (r < 400) {
list.isNull[r] = true;
} else if (r < 500) {
list.offsets[r] = r - 300;
list.lengths[r] = 1;
} else if (r < 600) {
list.isNull[r] = true;
} else if (r < 700) {
list.offsets[r] = r;
list.lengths[r] = 2;
} else {
list.isNull[r] = true;
}
((LongColumnVector) list.child).vector[r] = r * 10;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch(1024);
list = (ListColumnVector) batch.cols[0];
rows.nextBatch(batch);
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
StringBuilder actual = new StringBuilder();
list.stringifyValue(actual, r);
String msg = "row " + r;
if (r < 200) {
assertEquals("null", actual.toString(), msg);
} else if (r < 300) {
assertEquals("[" + ((r - 200) * 10) + "]", actual.toString(), msg);
} else if (r < 400) {
assertEquals("null", actual.toString(), msg);
} else if (r < 500) {
assertEquals("[" + ((r - 300) * 10) + "]", actual.toString(), msg);
} else if (r < 600) {
assertEquals("null", actual.toString(), msg);
} else if (r < 700) {
assertEquals("[" + (10 * r) + ", " + (10 * (r + 1)) + "]",
actual.toString(), msg);
} else {
assertEquals("null", actual.toString(), msg);
}
}
assertFalse(rows.nextBatch(batch));
}
/**
   * Test maps and how they interact with the child columns. In particular,
   * put nulls between back-to-back maps and then make some maps that
   * overlap.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testMaps(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("map",
TypeDescription.createMap(TypeDescription.createLong(),
TypeDescription.createLong()));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
MapColumnVector map = (MapColumnVector) batch.cols[0];
map.noNulls = false;
for(int r=0; r < 1024; ++r) {
if (r < 200) {
map.isNull[r] = true;
} else if (r < 300) {
map.offsets[r] = r - 200;
map.lengths[r] = 1;
} else if (r < 400) {
map.isNull[r] = true;
} else if (r < 500) {
map.offsets[r] = r - 300;
map.lengths[r] = 1;
} else if (r < 600) {
map.isNull[r] = true;
} else if (r < 700) {
map.offsets[r] = r;
map.lengths[r] = 2;
} else {
map.isNull[r] = true;
}
((LongColumnVector) map.keys).vector[r] = r;
((LongColumnVector) map.values).vector[r] = r * 10;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
map = (MapColumnVector) batch.cols[0];
rows.nextBatch(batch);
assertEquals(1024, batch.size);
for(int r=0; r < 1024; ++r) {
StringBuilder buffer = new StringBuilder();
map.stringifyValue(buffer, r);
String msg = "row " + r;
String actual = buffer.toString();
if (r < 200) {
assertEquals("null", actual, msg);
} else if (r < 300) {
assertEquals("[{\"key\": " + (r - 200) +
", \"value\": " + ((r - 200) * 10) + "}]",
actual, msg);
} else if (r < 400) {
assertEquals("null", actual, msg);
} else if (r < 500) {
assertEquals("[{\"key\": " + (r - 300) +
", \"value\": " + ((r - 300) * 10) + "}]", actual, msg);
} else if (r < 600) {
assertEquals("null", actual, msg);
} else if (r < 700) {
assertEquals("[{\"key\": " + r + ", \"value\": " + (r * 10)
+ "}, {\"key\": " + (r + 1) + ", \"value\": " + (10 * (r + 1))
+ "}]", actual, msg);
} else {
assertEquals("null", actual, msg);
}
}
rows.nextBatch(batch);
assertEquals(0, batch.size);
}
@ParameterizedTest
@MethodSource("data")
public void testExpansion(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString(
"struct<list1:array<string>," +
"list2:array<binary>>");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 2;
ListColumnVector list1 = (ListColumnVector) batch.cols[0];
BytesColumnVector str = (BytesColumnVector) list1.child;
str.ensureSize(6000, false);
ListColumnVector list2 = (ListColumnVector) batch.cols[1];
BytesColumnVector bin = (BytesColumnVector) list2.child;
bin.ensureSize(6000, false);
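    // Row 0 of list1 references 2000 strings and row 1 of list2 references
    // 3000 binary values, so both child vectors must be grown well beyond
    // their default capacity before the values are written.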
list1.offsets[0] = 0;
list1.lengths[0] = 2000;
list2.offsets[1] = 2000;
list2.lengths[1] = 3000;
for(int v=0; v < 5000; ++v) {
byte[] bytes = Long.toHexString(v).getBytes(StandardCharsets.UTF_8);
str.setVal(v, bytes);
bin.setVal(v, bytes);
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(2, batch.size);
assertFalse(rows.nextBatch(batch));
rows.close();
}
@ParameterizedTest
@MethodSource("data")
public void testWriterVersion(Version fileFormat) throws Exception {
assumeTrue(fileFormat == OrcFile.Version.V_0_11);
// test writer implementation serialization
assertEquals(OrcFile.WriterImplementation.ORC_JAVA,
OrcFile.WriterImplementation.from(0));
assertEquals(OrcFile.WriterImplementation.ORC_CPP,
OrcFile.WriterImplementation.from(1));
assertEquals(OrcFile.WriterImplementation.PRESTO,
OrcFile.WriterImplementation.from(2));
assertEquals(OrcFile.WriterImplementation.TRINO,
OrcFile.WriterImplementation.from(4));
assertEquals(OrcFile.WriterImplementation.UNKNOWN,
OrcFile.WriterImplementation.from(99));
// test writer version serialization
assertEquals(OrcFile.WriterVersion.FUTURE,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.ORC_JAVA, 99));
assertEquals(OrcFile.WriterVersion.ORIGINAL,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.ORC_JAVA, 0));
assertEquals(OrcFile.WriterVersion.HIVE_4243,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.ORC_JAVA, 2));
assertEquals(OrcFile.WriterVersion.FUTURE,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.ORC_CPP, 99));
assertEquals(OrcFile.WriterVersion.ORC_CPP_ORIGINAL,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.ORC_CPP, 6));
assertEquals(OrcFile.WriterVersion.PRESTO_ORIGINAL,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.PRESTO, 6));
assertEquals(OrcFile.WriterVersion.TRINO_ORIGINAL,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.TRINO, 6));
assertEquals(OrcFile.WriterVersion.FUTURE,
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.UNKNOWN, 0));
// test compatibility
assertTrue(OrcFile.WriterVersion.FUTURE.includes(
OrcFile.WriterVersion.ORC_CPP_ORIGINAL));
assertTrue(OrcFile.WriterVersion.FUTURE.includes(
OrcFile.WriterVersion.HIVE_8732));
assertTrue(OrcFile.WriterVersion.HIVE_12055.includes(
OrcFile.WriterVersion.HIVE_4243));
assertTrue(OrcFile.WriterVersion.HIVE_12055.includes(
OrcFile.WriterVersion.HIVE_12055));
assertFalse(OrcFile.WriterVersion.HIVE_4243.includes(
OrcFile.WriterVersion.HIVE_12055));
assertTrue(OrcFile.WriterVersion.HIVE_12055.includes(
OrcFile.WriterVersion.PRESTO_ORIGINAL));
assertTrue(OrcFile.WriterVersion.HIVE_12055.includes(
OrcFile.WriterVersion.TRINO_ORIGINAL));
}
@ParameterizedTest
@MethodSource("data")
public void testBadPrestoVersion(Version fileFormat) {
assumeTrue(fileFormat == OrcFile.Version.V_0_11);
assertThrows(IllegalArgumentException.class, () -> {
OrcFile.WriterVersion.from(OrcFile.WriterImplementation.PRESTO, 0);
});
}
/**
   * Test whether the file versions are translated correctly.
* @throws Exception
*/
@ParameterizedTest
@MethodSource("data")
public void testFileVersion(Version fileFormat) throws Exception {
assumeTrue(fileFormat == OrcFile.Version.V_0_11);
assertEquals(OrcFile.Version.V_0_11, ReaderImpl.getFileVersion(null));
assertEquals(OrcFile.Version.V_0_11, ReaderImpl.getFileVersion(new ArrayList<Integer>()));
assertEquals(OrcFile.Version.V_0_11,
ReaderImpl.getFileVersion(Arrays.asList(new Integer[]{0, 11})));
assertEquals(OrcFile.Version.V_0_12,
ReaderImpl.getFileVersion(Arrays.asList(new Integer[]{0, 12})));
assertEquals(OrcFile.Version.FUTURE,
ReaderImpl.getFileVersion(Arrays.asList(new Integer[]{9999, 0})));
}
@ParameterizedTest
@MethodSource("data")
public void testMergeUnderstood(Version fileFormat) throws Exception {
assumeTrue(fileFormat == OrcFile.Version.V_0_11);
Path p = new Path("test.orc");
Reader futureVersion = Mockito.mock(Reader.class);
Mockito.when(futureVersion.getFileVersion()).thenReturn(OrcFile.Version.FUTURE);
Mockito.when(futureVersion.getWriterVersion()).thenReturn(OrcFile.WriterVersion.HIVE_4243);
assertFalse(OrcFile.understandFormat(p, futureVersion));
Reader futureWriter = Mockito.mock(Reader.class);
Mockito.when(futureWriter.getFileVersion()).thenReturn(OrcFile.Version.V_0_11);
Mockito.when(futureWriter.getWriterVersion()).thenReturn(OrcFile.WriterVersion.FUTURE);
assertFalse(OrcFile.understandFormat(p, futureWriter));
Reader current = Mockito.mock(Reader.class);
Mockito.when(current.getFileVersion()).thenReturn(OrcFile.Version.CURRENT);
Mockito.when(current.getWriterVersion()).thenReturn(OrcFile.CURRENT_WRITER);
assertTrue(OrcFile.understandFormat(p, current));
}
static ByteBuffer fromString(String s) {
return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
}
static byte[] fromLong(long x) {
return Long.toHexString(x).getBytes(StandardCharsets.UTF_8);
}
@ParameterizedTest
@MethodSource("data")
public void testMerge(Version fileFormat) throws Exception {
Path input1 = new Path(workDir, "TestVectorOrcFile.testMerge1-" +
fileFormat.getName() + ".orc");
fs.delete(input1, false);
Path input2 = new Path(workDir, "TestVectorOrcFile.testMerge2-" +
fileFormat.getName() + ".orc");
fs.delete(input2, false);
Path input3 = new Path(workDir, "TestVectorOrcFile.testMerge3-" +
fileFormat.getName() + ".orc");
fs.delete(input3, false);
TypeDescription schema = TypeDescription.fromString("struct<a:int,b:string>");
// change all of the options away from default to find anything we
// don't copy to the merged file
OrcFile.WriterOptions opts = OrcFile.writerOptions(conf)
.setSchema(schema)
.compress(CompressionKind.LZO)
.enforceBufferSize()
.bufferSize(20*1024)
.rowIndexStride(1000)
.version(fileFormat)
.writerVersion(OrcFile.WriterVersion.HIVE_8732);
Writer writer = OrcFile.createWriter(input1, opts);
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
for(int r=0; r < 1024; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = r;
((BytesColumnVector) batch.cols[1]).setVal(r, fromLong(r));
}
writer.addRowBatch(batch);
writer.addUserMetadata("a", fromString("foo"));
writer.addUserMetadata("b", fromString("bar"));
writer.close();
// increase the buffer size to 30k
opts.bufferSize(30*1024);
writer = OrcFile.createWriter(input2, opts);
batch.size = 1024;
for(int r=0; r < 1024; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = 2 * r;
((BytesColumnVector) batch.cols[1]).setVal(r, fromLong(2 * r));
}
writer.addRowBatch(batch);
writer.addUserMetadata("a", fromString("foo"));
writer.addUserMetadata("c", fromString("baz"));
writer.close();
// decrease the buffer size to 10k
opts.bufferSize(10*1024);
writer = OrcFile.createWriter(input3, opts);
batch.size = 1024;
for(int r=0; r < 1024; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = 3 * r;
((BytesColumnVector) batch.cols[1]).setVal(r, fromLong(3 * r));
}
writer.addRowBatch(batch);
writer.addUserMetadata("c", fromString("baz"));
writer.addUserMetadata("d", fromString("bat"));
writer.close();
Path output1 = new Path(workDir, "TestVectorOrcFile.testMerge.out1-" +
fileFormat.getName() + ".orc");
fs.delete(output1, false);
List<Path> paths = OrcFile.mergeFiles(output1,
OrcFile.writerOptions(conf), Arrays.asList(input1, input2, input3));
assertEquals(3, paths.size());
Reader reader = OrcFile.createReader(output1, OrcFile.readerOptions(conf));
assertEquals(3 * 1024, reader.getNumberOfRows());
assertEquals(CompressionKind.LZO, reader.getCompressionKind());
assertEquals(30 * 1024, reader.getCompressionSize());
assertEquals(1000, reader.getRowIndexStride());
assertEquals(fileFormat, reader.getFileVersion());
assertEquals(OrcFile.WriterVersion.HIVE_8732, reader.getWriterVersion());
assertEquals(3, reader.getStripes().size());
assertEquals(4, reader.getMetadataKeys().size());
assertEquals(fromString("foo"), reader.getMetadataValue("a"));
assertEquals(fromString("bar"), reader.getMetadataValue("b"));
assertEquals(fromString("baz"), reader.getMetadataValue("c"));
assertEquals(fromString("bat"), reader.getMetadataValue("d"));
TypeDescription schema4 = TypeDescription.fromString("struct<a:int>");
Path input4 = new Path(workDir, "TestVectorOrcFile.testMerge4-" +
fileFormat.getName() + ".orc");
fs.delete(input4, false);
opts.setSchema(schema4);
writer = OrcFile.createWriter(input4, opts);
batch = schema4.createRowBatch();
batch.size = 1024;
for(int r=0; r < 1024; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = 4 * r;
}
writer.addRowBatch(batch);
writer.close();
Path input5 = new Path(workDir, "TestVectorOrcFile.testMerge5-" +
fileFormat.getName() + ".orc");
fs.delete(input5, false);
opts.setSchema(schema)
.compress(CompressionKind.NONE)
.bufferSize(100*1024);
writer = OrcFile.createWriter(input5, opts);
batch = schema.createRowBatch();
batch.size = 1024;
for(int r=0; r < 1024; ++r) {
((LongColumnVector) batch.cols[0]).vector[r] = 4 * r;
((BytesColumnVector) batch.cols[1]).setVal(r, fromLong(5 * r));
}
writer.addRowBatch(batch);
writer.close();
Path output2 = new Path(workDir, "TestVectorOrcFile.testMerge.out2-" +
fileFormat.getName() + ".orc");
fs.delete(output2, false);
paths = OrcFile.mergeFiles(output2, OrcFile.writerOptions(conf),
Arrays.asList(input3, input4, input1, input5));
assertEquals(2, paths.size());
reader = OrcFile.createReader(output2, OrcFile.readerOptions(conf));
assertEquals(2 * 1024, reader.getNumberOfRows());
assertEquals(CompressionKind.LZO, reader.getCompressionKind());
assertEquals(20 * 1024, reader.getCompressionSize());
assertEquals(1000, reader.getRowIndexStride());
assertEquals(fileFormat, reader.getFileVersion());
assertEquals(OrcFile.WriterVersion.HIVE_8732, reader.getWriterVersion());
assertEquals(2, reader.getStripes().size());
assertEquals(4, reader.getMetadataKeys().size());
assertEquals(fromString("foo"), reader.getMetadataValue("a"));
assertEquals(fromString("bar"), reader.getMetadataValue("b"));
assertEquals(fromString("baz"), reader.getMetadataValue("c"));
assertEquals(fromString("bat"), reader.getMetadataValue("d"));
}
  /**
   * Write a mergeable file to test merging files with column encryption.
   * @param path the path to write to
   * @param provider the key provider
   * @param startValue the base value for the columns
   * @param stripes the number of stripes to write
   * @param bufferSize the buffer size to use for compression
   * @param encrypt the encryption string (e.g. "pii:a;top_secret:b"), or null for none
   * @param mask the mask string, or null for none
   * @param fileFormat the file format version to write
   * @return the file offsets recorded after each intermediate footer
   * @throws IOException if the file can't be written
   */
private long[] writeMergeableFile(Path path,
KeyProvider provider,
long startValue,
int stripes,
int bufferSize,
String encrypt,
String mask,
Version fileFormat) throws IOException {
fs.delete(path, false);
TypeDescription schema = TypeDescription.fromString(
"struct<a:int,b:struct<c:string,d:string>>");
// change all of the options away from default to find anything we
// don't copy to the merged file
OrcFile.WriterOptions opts = OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(1000)
.version(fileFormat)
.bufferSize(bufferSize)
.enforceBufferSize()
.setKeyProvider(provider)
.encrypt(encrypt)
.masks(mask);
long[] intermediateFooters = new long[stripes];
Writer writer = OrcFile.createWriter(path, opts);
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector a = (LongColumnVector) batch.cols[0];
StructColumnVector b = (StructColumnVector) batch.cols[1];
BytesColumnVector c = (BytesColumnVector) b.fields[0];
BytesColumnVector d = (BytesColumnVector) b.fields[1];
batch.size = 1024;
    for(int btch=0; btch < stripes; ++btch) {
for (int r = 0; r < 1024; ++r) {
long value = startValue + btch * 1024 + r;
a.vector[r] = value;
c.setVal(r, fromLong(value));
d.setVal(r, String.format("%010x", value * 1_000_001)
.getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
// write an intermediate footer to force a stripe
intermediateFooters[btch] = writer.writeIntermediateFooter();
}
writer.close();
return intermediateFooters;
}
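  /**
   * SHA-256 digest of a value's UTF-8 bytes as upper-case hex; this is what a
   * reader without the key should see through the sha256 data mask.
   */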
static String computeSha(String value) {
try {
MessageDigest md = MessageDigest.getInstance("SHA-256");
byte[] digest = md.digest(value.getBytes(StandardCharsets.UTF_8));
return printHexBinary(digest);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
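  /**
   * Merge files written with column encryption: the merged file must be
   * readable both without the keys (masked values, keys reported unavailable)
   * and with the keys (original values and statistics), and files written
   * with a different mask or without encryption must not be merged in.
   */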
@ParameterizedTest
@MethodSource("data")
public void testEncryptMerge(Version fileFormat) throws Exception {
assumeTrue(fileFormat != OrcFile.Version.V_0_11);
Path input1 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge1-" +
fileFormat.getName() + ".orc");
Path input2 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge2-" +
fileFormat.getName() + ".orc");
Path input3 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge3-" +
fileFormat.getName() + ".orc");
Path input4 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge4-" +
fileFormat.getName() + ".orc");
Path input5 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge5-" +
fileFormat.getName() + ".orc");
Random random = new Random(169);
InMemoryKeystore keystore = new InMemoryKeystore(random);
EncryptionAlgorithm algorithm = EncryptionAlgorithm.AES_CTR_128;
byte[] piiKey = new byte[algorithm.keyLength()];
byte[] topSecretKey = new byte[algorithm.keyLength()];
random.nextBytes(piiKey);
random.nextBytes(topSecretKey);
keystore.addKey("pii", algorithm, piiKey)
.addKey("top_secret", algorithm, topSecretKey);
String encryption = "pii:a;top_secret:b";
String mask = "sha256,`don't worry`:b";
// write three files that should merge, each with 3 stripes of 1024 rows.
long[] cuts = writeMergeableFile(input1, keystore, 0, 3, 0x400, encryption, mask, fileFormat);
writeMergeableFile(input2, keystore, 3 * 1024, 3, 0x800, encryption, mask, fileFormat);
writeMergeableFile(input3, keystore, 6 * 1024, 3, 0xc00, encryption, mask, fileFormat);
// two files that aren't mergeable
writeMergeableFile(input4, keystore, 9 * 1024, 3, 0x400, encryption, null, fileFormat);
writeMergeableFile(input5, keystore, 12 * 1024, 3, 0x400, null, null, fileFormat);
// make sure that we can read up to the intermediate footers
try (Reader reader = OrcFile.createReader(input1, OrcFile.readerOptions(conf)
.maxLength(cuts[0]))) {
assertEquals(1024, reader.getNumberOfRows());
}
try (Reader reader = OrcFile.createReader(input1, OrcFile.readerOptions(conf)
.maxLength(cuts[1]))) {
assertEquals(2 * 1024, reader.getNumberOfRows());
}
try (Reader reader = OrcFile.createReader(input1, OrcFile.readerOptions(conf)
.maxLength(cuts[2]))) {
assertEquals(3 * 1024, reader.getNumberOfRows());
}
// make a new version of the pii key
keystore.addKey("pii", 1, algorithm, new byte[algorithm.keyLength()]);
Path merge1 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge.merge1-" +
fileFormat.getName() + ".orc");
// merge all three files together
fs.delete(merge1, false);
List<Path> paths = OrcFile.mergeFiles(merge1,
OrcFile.writerOptions(conf).setKeyProvider(keystore),
Arrays.asList(input1, input2, input3));
assertEquals(3, paths.size());
// test reading with no keys
Reader reader = OrcFile.createReader(merge1, OrcFile.readerOptions(conf));
assertEquals(9 * 1024, reader.getNumberOfRows());
assertEquals(CompressionKind.ZLIB, reader.getCompressionKind());
assertEquals(1000, reader.getRowIndexStride());
assertEquals(0xc00, reader.getCompressionSize());
assertEquals(fileFormat, reader.getFileVersion());
assertEquals(9, reader.getStripes().size());
EncryptionKey[] keys = reader.getColumnEncryptionKeys();
assertEquals(2, keys.length);
assertEquals("pii", keys[0].getKeyName());
assertEquals(0, keys[0].getKeyVersion());
assertFalse(keys[0].isAvailable());
assertEquals("top_secret", keys[1].getKeyName());
assertEquals(0, keys[1].getKeyVersion());
assertFalse(keys[1].isAvailable());
// check the file stats
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(9 * 1024, stats[0].getNumberOfValues());
assertEquals(0, stats[1].getNumberOfValues());
assertEquals(9 * 1024, stats[2].getNumberOfValues());
assertEquals(9 * 1024, stats[3].getNumberOfValues());
assertEquals("00037F39CF870A1F49129F9C82D935665D352FFD25EA3296208F6F7B16FD654F",
((StringColumnStatistics) stats[3]).getMinimum());
assertEquals("FFF60CF25C8E227396BC77DD808773DA69D767D6B0417ADB1A0CAC51CC168797",
((StringColumnStatistics) stats[3]).getMaximum());
assertEquals(9 * 1024, stats[4].getNumberOfValues());
assertEquals("001277C7986C02D9CDA490756055C6A81F3838D3394F18806DD3359AAD59862A",
((StringColumnStatistics) stats[4]).getMinimum());
assertEquals("FFFF1E62E46263E623F704AC22C2F27E5BBDED8693546A2A11F011251A53D23D",
((StringColumnStatistics) stats[4]).getMaximum());
// check the stripe stats
List<StripeStatistics> stripeStats = reader.getStripeStatistics();
for(int s=0; s < stripeStats.size(); ++s) {
ColumnStatistics[] cs = stripeStats.get(s).getColumnStatistics();
String msg = "stripe " + s;
assertEquals(1024, cs[0].getNumberOfValues(), msg);
assertEquals(0, cs[1].getNumberOfValues(), msg);
assertEquals(1024, cs[2].getNumberOfValues(), msg);
assertEquals(1024, cs[3].getNumberOfValues(), msg);
assertEquals(64, ((StringColumnStatistics) cs[3]).getMinimum().length(), msg);
assertEquals(64, ((StringColumnStatistics) cs[3]).getMaximum().length(), msg);
assertEquals(1024, cs[4].getNumberOfValues(), msg);
assertEquals(64, ((StringColumnStatistics) cs[4]).getMinimum().length(), msg);
assertEquals(64, ((StringColumnStatistics) cs[4]).getMaximum().length(), msg);
}
// check the file contents
RecordReader rows = reader.rows();
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
LongColumnVector a = (LongColumnVector) batch.cols[0];
StructColumnVector b = (StructColumnVector) batch.cols[1];
BytesColumnVector c = (BytesColumnVector) b.fields[0];
BytesColumnVector d = (BytesColumnVector) b.fields[1];
for(int btch=0; btch < 9; ++btch) {
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < batch.size; ++r) {
long value = btch * 1024 + r;
String msg = "batch " + btch + " row " + r;
assertTrue(a.isNull[r], msg);
assertEquals(computeSha(Long.toHexString(value)), c.toString(r), msg);
assertEquals(
computeSha(String.format("%010x", value * 1_000_001)),
d.toString(r), msg);
}
}
assertFalse(rows.nextBatch(batch));
rows.close();
reader.close();
// test reading with keys
reader = OrcFile.createReader(merge1,
OrcFile.readerOptions(conf).setKeyProvider(keystore));
assertEquals(9 * 1024, reader.getNumberOfRows());
keys = reader.getColumnEncryptionKeys();
assertEquals(2, keys.length);
assertEquals("pii", keys[0].getKeyName());
assertEquals(0, keys[0].getKeyVersion());
assertTrue(keys[0].isAvailable());
assertEquals("top_secret", keys[1].getKeyName());
assertEquals(0, keys[1].getKeyVersion());
assertTrue(keys[1].isAvailable());
// check the file stats
stats = reader.getStatistics();
assertEquals(9 * 1024, stats[0].getNumberOfValues());
assertEquals(9 * 1024, stats[1].getNumberOfValues());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getMinimum());
assertEquals(9 * 1024 - 1, ((IntegerColumnStatistics) stats[1]).getMaximum());
assertEquals(9 * 1024, stats[2].getNumberOfValues());
assertEquals(9 * 1024, stats[3].getNumberOfValues());
assertEquals("0", ((StringColumnStatistics) stats[3]).getMinimum());
assertEquals("fff", ((StringColumnStatistics) stats[3]).getMaximum());
assertEquals(9 * 1024, stats[4].getNumberOfValues());
assertEquals("0000000000", ((StringColumnStatistics) stats[4]).getMinimum());
assertEquals("022541e1bf", ((StringColumnStatistics) stats[4]).getMaximum());
// check the stripe stats
stripeStats = reader.getStripeStatistics();
for(int s=0; s < stripeStats.size(); ++s) {
long low = s * 1024;
long high = s * 1024 + 1023;
ColumnStatistics[] cs = stripeStats.get(s).getColumnStatistics();
String msg = "stripe " + s;
assertEquals(1024, cs[0].getNumberOfValues(), msg);
assertEquals(1024, cs[1].getNumberOfValues(), msg);
assertEquals(low, ((IntegerColumnStatistics) cs[1]).getMinimum(), msg);
assertEquals(high, ((IntegerColumnStatistics) cs[1]).getMaximum(), msg);
assertEquals(1024, cs[2].getNumberOfValues(), msg);
assertEquals(1024, cs[3].getNumberOfValues(), msg);
assertEquals(Long.toHexString(low),
((StringColumnStatistics) cs[3]).getMinimum(), msg);
assertEquals(s == 0 ? "ff" : Long.toHexString(high),
((StringColumnStatistics) cs[3]).getMaximum(), msg);
assertEquals(1024, cs[4].getNumberOfValues(), msg);
assertEquals(String.format("%010x", 1_000_001 * low),
((StringColumnStatistics) cs[4]).getMinimum(), msg);
assertEquals(String.format("%010x", 1_000_001 * high),
((StringColumnStatistics) cs[4]).getMaximum(), msg);
}
// check the file contents
rows = reader.rows();
for(int btch=0; btch < 9; ++btch) {
assertTrue(rows.nextBatch(batch));
assertEquals(1024, batch.size);
for(int r=0; r < batch.size; ++r) {
long value = btch * 1024 + r;
String msg = "batch " + btch + " row " + r;
assertEquals(value, a.vector[r], msg);
assertEquals(Long.toHexString(value), c.toString(r), msg);
assertEquals(String.format("%010x", value * 1_000_001), d.toString(r), msg);
}
}
assertFalse(rows.nextBatch(batch));
rows.close();
reader.close();
Path merge2 = new Path(workDir, "TestVectorOrcFile.testEncryptMerge.merge2-" +
fileFormat.getName() + ".orc");
fs.delete(merge2, false);
paths = OrcFile.mergeFiles(merge2,
OrcFile.writerOptions(conf).setKeyProvider(keystore),
Arrays.asList(input2, input4, input1, input5));
// make sure only input1 & input2 were merged
assertEquals(2, paths.size());
assertTrue(paths.contains(input1));
assertTrue(paths.contains(input2));
reader = OrcFile.createReader(merge2, OrcFile.readerOptions(conf));
assertEquals(2 * 3 * 1024, reader.getNumberOfRows());
assertEquals(CompressionKind.ZLIB, reader.getCompressionKind());
assertEquals(0x800, reader.getCompressionSize());
assertEquals(1000, reader.getRowIndexStride());
assertEquals(fileFormat, reader.getFileVersion());
assertEquals(6, reader.getStripes().size());
assertEquals(2, reader.getColumnEncryptionKeys().length);
assertEquals(2, reader.getDataMasks().length);
assertEquals(2, reader.getEncryptionVariants().length);
reader.close();
}
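  // Directory of pre-generated example files (zero.orc, version1999.orc)
  // used by the read-only tests below.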
Path exampleDir = new Path(System.getProperty("example.dir",
"../../examples/"));
@ParameterizedTest
@MethodSource("data")
public void testZeroByteOrcFile(Version fileFormat) throws Exception {
// we only have to run this test once, since it is a 0 byte file.
assumeTrue(fileFormat == OrcFile.Version.V_0_11);
Path zeroFile = new Path(exampleDir, "zero.orc");
Reader reader = OrcFile.createReader(zeroFile, OrcFile.readerOptions(conf));
assertEquals(0, reader.getNumberOfRows());
assertEquals("struct<>", reader.getSchema().toString());
assertEquals(CompressionKind.NONE, reader.getCompressionKind());
assertEquals(0, reader.getRawDataSize());
assertEquals(0, reader.getRowIndexStride());
assertEquals(DEFAULT_COMPRESSION_BLOCK_SIZE, reader.getCompressionSize());
assertEquals(0, reader.getMetadataSize());
assertEquals(OrcFile.Version.CURRENT, reader.getFileVersion());
assertEquals(0, reader.getStripes().size());
assertEquals(0, reader.getStatistics().length);
assertEquals(0, reader.getMetadataKeys().size());
assertEquals(OrcFile.CURRENT_WRITER, reader.getWriterVersion());
VectorizedRowBatch batch =
TypeDescription.fromString("struct<>").createRowBatch();
assertFalse(reader.rows().nextBatch(batch));
}
@ParameterizedTest
@MethodSource("data")
public void testFutureOrcFile(Version fileFormat) throws Exception {
assumeTrue(fileFormat == OrcFile.Version.V_0_11);
Path zeroFile = new Path(exampleDir, "version1999.orc");
try {
OrcFile.createReader(zeroFile, OrcFile.readerOptions(conf));
fail("no exception for bad version");
} catch (IOException e) {
String m = e.getMessage();
assertTrue(m.contains("version1999.orc was written by a future ORC version 19.99."));
assertTrue(m.contains("This file is not readable by this version of ORC."));
assertTrue(m.contains("Postscript: footerLength: 19 compression: NONE " +
"compressionBlockSize: 65536 version: 19 version: 99 metadataLength: 0 " +
"writerVersion: 1"));
}
}
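  /**
   * Write rows whose list columns are all empty, so the double and float DATA
   * streams contain no values, and check that the file reads back with the
   * right (zero) list lengths.
   */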
@ParameterizedTest
@MethodSource("data")
public void testEmptyDoubleStream(Version fileFormat) throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<list1:array<double>," +
"list2:array<float>>");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 2;
ListColumnVector list1 = (ListColumnVector) batch.cols[0];
ListColumnVector list2 = (ListColumnVector) batch.cols[1];
for(int r=0; r < batch.size; ++r) {
list1.offsets[r] = 0;
list1.lengths[r] = 0;
list2.offsets[r] = 0;
list2.lengths[r] = 0;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(2, batch.size);
list1 = (ListColumnVector) batch.cols[0];
list2 = (ListColumnVector) batch.cols[1];
for(int r=0; r < batch.size; ++r) {
assertEquals(0, list1.lengths[r]);
assertEquals(0, list2.lengths[r]);
}
assertFalse(rows.nextBatch(batch));
rows.close();
}
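  /**
   * Predicate pushdown on a field nested inside a struct ("complex.int2"):
   * only the row groups whose statistics can satisfy the search argument
   * should be read, including the no-rows and first-plus-last-group cases.
   */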
@ParameterizedTest
@MethodSource("data")
public void testPredicatePushdownForComplex(Version fileFormat) throws Exception {
TypeDescription schema = createComplexInnerSchema();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(400000L)
.compress(CompressionKind.NONE)
.bufferSize(500)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.ensureSize(3500);
batch.size = 3500;
for(int i=0; i < 3500; ++i) {
((LongColumnVector) batch.cols[0]).vector[i] = i;
((LongColumnVector)((StructColumnVector) batch.cols[1]).fields[0]).vector[i] = i * 300;
((BytesColumnVector)((StructColumnVector) batch.cols[1]).fields[1]).setVal(i,
Integer.toHexString(10*i).getBytes(StandardCharsets.UTF_8));
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(3500, reader.getNumberOfRows());
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.startNot()
.lessThan("complex.int2", PredicateLeaf.Type.LONG, 300000L)
.end()
.lessThan("complex.int2", PredicateLeaf.Type.LONG, 600000L)
.end()
.build();
RecordReader rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true, true, true})
.searchArgument(sarg, new String[]{null, "int1", "complex","int2","string1"}));
batch = reader.getSchema().createRowBatch(2000);
LongColumnVector ints1 = (LongColumnVector) batch.cols[0];
StructColumnVector struct1 = (StructColumnVector) batch.cols[1];
LongColumnVector ints2 = (LongColumnVector) struct1.fields[0];
BytesColumnVector strs = (BytesColumnVector) struct1.fields[1];
assertEquals(1000L, rows.getRowNumber());
assertTrue(rows.nextBatch(batch));
assertEquals(1000, batch.size);
for(int i=1000; i < 2000; ++i) {
assertEquals(i,ints1.vector[i-1000]);
assertEquals(300 * i, ints2.vector[i - 1000]);
assertEquals(Integer.toHexString(10*i), strs.toString(i - 1000));
}
assertFalse(rows.nextBatch(batch));
assertEquals(3500, rows.getRowNumber());
// look through the file with no rows selected
sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThan("complex.int2", PredicateLeaf.Type.LONG, 0L)
.end()
.build();
rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true, true, true, true})
.searchArgument(sarg, new String[]{null, "int1",null,"int2","string1"}));
assertEquals(3500L, rows.getRowNumber());
assertFalse(rows.nextBatch(batch));
// select first 100 and last 100 rows
sarg = SearchArgumentFactory.newBuilder()
.startOr()
.lessThan("complex.int2", PredicateLeaf.Type.LONG, 300L * 100)
.startNot()
.lessThan("complex.int2", PredicateLeaf.Type.LONG, 300L * 3400)
.end()
.end()
.build();
rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.include(new boolean[]{true, true,true,true, true})
.searchArgument(sarg, new String[]{null, "int1",null, "int2","string1"})
.allowSARGToFilter(false));
assertEquals(0, rows.getRowNumber());
assertTrue(rows.nextBatch(batch));
assertEquals(1000, batch.size);
assertEquals(3000, rows.getRowNumber());
for(int i=0; i < 1000; ++i) {
assertEquals(300 * i, ints2.vector[i]);
assertEquals(Integer.toHexString(10*i), strs.toString(i));
}
assertTrue(rows.nextBatch(batch));
assertEquals(500, batch.size);
assertEquals(3500, rows.getRowNumber());
for(int i=3000; i < 3500; ++i) {
assertEquals(300 * i, ints2.vector[i - 3000]);
assertEquals(Integer.toHexString(10*i), strs.toString(i - 3000));
}
assertFalse(rows.nextBatch(batch));
assertEquals(3500, rows.getRowNumber());
}
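  /**
   * Predicate pushdown when some values are NaN: strides whose double/float
   * statistics contain NaN must still be read rather than skipped.
   */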
@ParameterizedTest
@MethodSource("data")
public void testPredicatePushdownWithNan(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("double1", TypeDescription.createDouble())
.addField("float1", TypeDescription.createFloat());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(400000L)
.compress(CompressionKind.NONE)
.bufferSize(500)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.ensureSize(3500);
batch.size = 3500;
batch.cols[0].noNulls = true;
batch.cols[1].noNulls = true;
DoubleColumnVector dbcol = ((DoubleColumnVector) batch.cols[0]);
DoubleColumnVector fcol = ((DoubleColumnVector) batch.cols[1]);
    // The first row is NaN, so the min/max and sum column stats of the first stride are NaN.
    // A NaN in the middle of the last stride makes that stride's sum NaN as well.
dbcol.vector[0] = Double.NaN;
fcol.vector[0] = Double.NaN;
for (int i=1; i < 3500; ++i) {
dbcol.vector[i] = i == 3200 ? Double.NaN : i;
fcol.vector[i] = i == 3200 ? Double.NaN : i;
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(3500, reader.getNumberOfRows());
// Only the first stride matches the predicate, just need to make sure NaN stats are ignored
// Test double category push down
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThan("double1", PredicateLeaf.Type.FLOAT, 100d)
.end()
.build();
RecordReader rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.searchArgument(sarg, new String[]{"double1"})
.allowSARGToFilter(false));
batch = reader.getSchema().createRowBatch(3500);
rows.nextBatch(batch);
// First stride should be read as NaN sum is ignored
assertEquals(1000, batch.size);
rows.nextBatch(batch);
// Last stride should be read as NaN sum is ignored
assertEquals(500, batch.size);
rows.nextBatch(batch);
assertEquals(0, batch.size);
// Test float category push down
sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThan("float1", PredicateLeaf.Type.FLOAT, 100d)
.end()
.build();
rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.searchArgument(sarg, new String[]{"float1"})
.allowSARGToFilter(false));
batch = reader.getSchema().createRowBatch(3500);
rows.nextBatch(batch);
// First stride should be read as NaN sum is ignored
assertEquals(1000, batch.size);
rows.nextBatch(batch);
// Last stride should be read as NaN sum is ignored
assertEquals(500, batch.size);
rows.nextBatch(batch);
assertEquals(0, batch.size);
}
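  /**
   * Predicate pushdown when the statistics sum overflows to a non-finite
   * value: min/max based row-group elimination should still work.
   */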
@ParameterizedTest
@MethodSource("data")
public void testPredicatePushdownWithSumOverflow(Version fileFormat) throws Exception {
TypeDescription schema = TypeDescription.createStruct()
.addField("double1", TypeDescription.createDouble())
.addField("float1", TypeDescription.createFloat());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.stripeSize(400000L)
.compress(CompressionKind.NONE)
.bufferSize(500)
.rowIndexStride(1000)
.version(fileFormat));
VectorizedRowBatch batch = schema.createRowBatch();
batch.ensureSize(3500);
batch.size = 3500;
batch.cols[0].noNulls = true;
batch.cols[1].noNulls = true;
DoubleColumnVector dbcol = ((DoubleColumnVector) batch.cols[0]);
DoubleColumnVector fcol = ((DoubleColumnVector) batch.cols[1]);
double largeNumber = Double.MAX_VALUE / 2 + Double.MAX_VALUE / 4;
// Here we are writing 3500 rows of data, with stripeSize set to 400000
// and rowIndexStride set to 1000, so 1 stripe will be written,
// indexed in 4 strides.
// Two large values are written in the first and fourth strides,
    // causing the statistical sum to overflow to a non-finite value,
    // but this does not prevent pushdown (the min/max range comparisons still work).
fcol.vector[0] = dbcol.vector[0] = largeNumber;
fcol.vector[1] = dbcol.vector[1] = largeNumber;
for (int i=2; i < 3500; ++i) {
if (i >= 3200 && i<= 3201) {
fcol.vector[i] = dbcol.vector[i] = largeNumber;
} else {
dbcol.vector[i] = i;
fcol.vector[i] = i;
}
}
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(3500, reader.getNumberOfRows());
// Test double category push down
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThan("double1", PredicateLeaf.Type.FLOAT, 100d)
.end()
.build();
RecordReader rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.searchArgument(sarg, new String[]{"double1"}));
batch = reader.getSchema().createRowBatch(3500);
rows.nextBatch(batch);
// First stride should be read
assertEquals(1000, batch.size);
rows.nextBatch(batch);
// Last stride should not be read, even if sum is not finite
assertEquals(0, batch.size);
// Test float category push down
sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThan("float1", PredicateLeaf.Type.FLOAT, 100d)
.end()
.build();
rows = reader.rows(reader.options()
.range(0L, Long.MAX_VALUE)
.searchArgument(sarg, new String[]{"float1"}));
batch = reader.getSchema().createRowBatch(3500);
rows.nextBatch(batch);
// First stride should be read
assertEquals(1000, batch.size);
rows.nextBatch(batch);
// Last stride should not be read, even if sum is not finite
assertEquals(0, batch.size);
}
/**
* Test predicate pushdown on nulls, with different combinations of
* values and nulls.
*/
@ParameterizedTest
@MethodSource("data")
public void testPredicatePushdownAllNulls(Version fileFormat) throws Exception {
TypeDescription schema = createInnerSchema();
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(1024).version(fileFormat))) {
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = 1024;
// write 1024 rows of (null, "val")
batch.cols[0].noNulls = false;
batch.cols[0].isNull[0] = true;
batch.cols[0].isRepeating = true;
batch.cols[1].isRepeating = true;
((BytesColumnVector) batch.cols[1]).setVal(0, "val".getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
// write 1024 rows of (123, null)
batch.cols[0].isNull[0] = false;
((LongColumnVector) batch.cols[0]).vector[0] = 123;
batch.cols[1].noNulls = false;
batch.cols[1].isNull[0] = true;
writer.addRowBatch(batch);
// write 1024 rows of (null, null)
batch.cols[0].isNull[0] = true;
writer.addRowBatch(batch);
}
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs))) {
assertEquals(3072, reader.getNumberOfRows());
VectorizedRowBatch batch = reader.getSchema().createRowBatch();
// int1 is not null
SearchArgument sarg =
SearchArgumentFactory.newBuilder()
.startNot()
.isNull("int1", PredicateLeaf.Type.LONG)
.end()
.build();
// should find one row group
try (RecordReader rows = reader.rows(reader.options().searchArgument(sarg, new String[]{}))) {
rows.nextBatch(batch);
assertEquals(1024, batch.size);
assertTrue(batch.cols[0].isRepeating);
assertEquals(123, ((LongColumnVector) batch.cols[0]).vector[0]);
assertFalse(rows.nextBatch(batch));
}
// string1 is not null
sarg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("string1", PredicateLeaf.Type.STRING)
.end()
.build();
// should find one row group
try (RecordReader rows = reader.rows(reader.options().searchArgument(sarg, new String[]{}))) {
rows.nextBatch(batch);
assertEquals(1024, batch.size);
assertTrue(batch.cols[1].isRepeating);
assertEquals("val", ((BytesColumnVector) batch.cols[1]).toString(0));
assertFalse(rows.nextBatch(batch));
}
}
}
/**
* Write three row groups, one with (null, null), one with (1, "val"), and one with
* alternating rows.
*/
@ParameterizedTest
@MethodSource("data")
public void testPredicatePushdownMixedNulls(Version fileFormat) throws Exception {
TypeDescription schema = createInnerSchema();
try (Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(1024)
.version(fileFormat))) {
VectorizedRowBatch batch = schema.createRowBatch();
batch.cols[0].noNulls = false;
batch.cols[1].noNulls = false;
batch.size = 1024;
for (int b = 0; b < 3; ++b) {
for (int i = 0; i < batch.size; ++i) {
if (b == 0 || (b == 2 && i % 2 == 0)) {
batch.cols[0].isNull[i] = true; // every other value is null or 1
batch.cols[1].isNull[i] = true; // every other value is null or "val"
} else {
batch.cols[0].isNull[i] = false;
((LongColumnVector) batch.cols[0]).vector[i] = 1;
batch.cols[1].isNull[i] = false;
((BytesColumnVector) batch.cols[1]).setVal(i, "val".getBytes(StandardCharsets.UTF_8));
}
}
writer.addRowBatch(batch);
}
}
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs))) {
assertEquals(3*1024, reader.getNumberOfRows());
VectorizedRowBatch batch = reader.getSchema().createRowBatch();
// int1 not in (1) -- should select 0 of the row groups
SearchArgument sarg =
SearchArgumentFactory.newBuilder()
.startNot()
.in("int1", PredicateLeaf.Type.LONG, 1L)
.end().build();
try (RecordReader rows = reader.rows(reader.options().searchArgument(sarg, new String[]{}))) {
assertFalse(rows.nextBatch(batch));
}
// string1 not in ("val") -- should select 0 of the row groups
sarg = SearchArgumentFactory.newBuilder()
.startNot()
.in("string1", PredicateLeaf.Type.STRING, "val")
.end().build();
try (RecordReader rows = reader.rows(reader.options().searchArgument(sarg, new String[]{}))) {
assertFalse(rows.nextBatch(batch));
}
}
}
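  /**
   * Write a file with columns i and j encrypted with the "pii" key, x with
   * the "credit" key, and norm unencrypted. Without the keys the encrypted
   * columns read back as nulls and their statistics are empty; with the keys
   * the original values and statistics are visible.
   */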
@ParameterizedTest
@MethodSource("data")
public void testColumnEncryption(Version fileFormat) throws Exception {
assumeTrue(fileFormat != OrcFile.Version.V_0_11);
final int ROWS = 1000;
final int SEED = 2;
final Random random = new Random(SEED);
TypeDescription schema =
TypeDescription.fromString("struct<i:int,norm:int,x:array<string>,j:int>");
byte[] piiKey = new byte[16];
random.nextBytes(piiKey);
byte[] creditKey = new byte[32];
random.nextBytes(creditKey);
InMemoryKeystore keys = new InMemoryKeystore(random)
.addKey("pii", EncryptionAlgorithm.AES_CTR_128, piiKey)
.addKey("credit", EncryptionAlgorithm.AES_CTR_256, creditKey);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.version(fileFormat)
.setKeyProvider(keys)
.encrypt("pii:i,j;credit:x")
.masks((String)OrcConf.DATA_MASK.getDefaultValue()));
VectorizedRowBatch batch = schema.createRowBatch();
batch.size = ROWS;
LongColumnVector i = (LongColumnVector) batch.cols[0];
LongColumnVector norm = (LongColumnVector) batch.cols[1];
ListColumnVector x = (ListColumnVector) batch.cols[2];
BytesColumnVector xElem = (BytesColumnVector) x.child;
xElem.ensureSize(3 * ROWS, false);
LongColumnVector j = (LongColumnVector) batch.cols[3];
for(int r=0; r < ROWS; ++r) {
i.vector[r] = r * 3;
j.vector[r] = r * 7;
norm.vector[r] = r * 5;
int start = x.childCount;
x.offsets[r] = start;
x.lengths[r] = 3;
x.childCount += x.lengths[r];
for(int child=0; child < x.lengths[r]; ++child) {
xElem.setVal(start + child,
String.format("%d.%d", r, child).getBytes(StandardCharsets.UTF_8));
}
}
writer.addRowBatch(batch);
writer.close();
// Read without any keys
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.setKeyProvider(new InMemoryKeystore()));
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(ROWS, stats[0].getNumberOfValues());
assertEquals(0, stats[1].getNumberOfValues());
assertTrue(stats[1].hasNull());
assertEquals(ROWS, stats[2].getNumberOfValues());
assertEquals(0, ((IntegerColumnStatistics) stats[2]).getMinimum());
assertEquals(ROWS * 5 - 5, ((IntegerColumnStatistics) stats[2]).getMaximum());
assertEquals(0, stats[3].getNumberOfValues());
assertEquals(0, stats[4].getNumberOfValues());
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
i = (LongColumnVector) batch.cols[0];
norm = (LongColumnVector) batch.cols[1];
x = (ListColumnVector) batch.cols[2];
j = (LongColumnVector) batch.cols[3];
// ensure that we get the right number of rows with all nulls
assertTrue(rows.nextBatch(batch));
assertEquals(ROWS, batch.size);
assertTrue(i.isRepeating);
assertFalse(i.noNulls);
assertTrue(i.isNull[0]);
assertTrue(j.isRepeating);
assertFalse(j.noNulls);
assertTrue(j.isNull[0]);
assertTrue(x.isRepeating);
assertFalse(x.noNulls);
assertTrue(x.isNull[0]);
for(int r=0; r < ROWS; ++r) {
assertEquals(r * 5, norm.vector[r], "row " + r);
}
assertFalse(rows.nextBatch(batch));
rows.close();
// Add a new version of the pii key
random.nextBytes(piiKey);
keys.addKey("pii", 1, EncryptionAlgorithm.AES_CTR_128, piiKey);
// Read with the keys
reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.setKeyProvider(keys));
stats = reader.getStatistics();
assertEquals(ROWS, stats[0].getNumberOfValues());
assertEquals(ROWS, stats[1].getNumberOfValues());
assertEquals(0, ((IntegerColumnStatistics) stats[1]).getMinimum());
assertEquals(3 * (ROWS - 1), ((IntegerColumnStatistics) stats[1]).getMaximum());
assertEquals(0, ((IntegerColumnStatistics) stats[2]).getMinimum());
assertEquals(5 * (ROWS - 1), ((IntegerColumnStatistics) stats[2]).getMaximum());
assertEquals(ROWS, stats[3].getNumberOfValues());
assertEquals(3 * ROWS, stats[4].getNumberOfValues());
assertEquals("0.0", ((StringColumnStatistics)stats[4]).getMinimum());
assertEquals("999.2", ((StringColumnStatistics)stats[4]).getMaximum());
assertEquals(ROWS, stats[5].getNumberOfValues());
assertEquals(0, ((IntegerColumnStatistics) stats[5]).getMinimum());
assertEquals(7 * (ROWS - 1), ((IntegerColumnStatistics) stats[5]).getMaximum());
rows = reader.rows();
batch = reader.getSchema().createRowBatch();
i = (LongColumnVector) batch.cols[0];
norm = (LongColumnVector) batch.cols[1];
x = (ListColumnVector) batch.cols[2];
j = (LongColumnVector) batch.cols[3];
xElem = (BytesColumnVector) x.child;
assertTrue(rows.nextBatch(batch));
assertEquals(ROWS, batch.size);
assertFalse(i.isRepeating);
assertFalse(x.isRepeating);
assertFalse(xElem.isRepeating);
assertFalse(j.isRepeating);
assertTrue(i.noNulls);
assertTrue(x.noNulls);
assertTrue(xElem.noNulls);
assertTrue(j.noNulls);
for(int r=0; r < ROWS; ++r) {
String msg = "row " + r;
assertEquals(r * 3, i.vector[r], msg);
assertEquals(r * 5, norm.vector[r], msg);
assertEquals(r * 3, x.offsets[r], msg);
assertEquals(3, x.lengths[r], msg);
for(int child=0; child < x.lengths[r]; ++child) {
assertEquals(String.format("%d.%d", r, child),
xElem.toString((int) x.offsets[r] + child), msg);
}
assertEquals(r * 7, j.vector[r], msg);
}
assertFalse(rows.nextBatch(batch));
rows.close();
}
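  /**
   * Encrypt each of six differently-typed columns with its own key across
   * multiple stripes, then verify that reading with no keys, with all keys,
   * or with one key at a time exposes exactly the expected columns.
   */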
@ParameterizedTest
@MethodSource("data")
public void testMultiStripeColumnEncryption(Version fileFormat) throws Exception {
assumeTrue(fileFormat != OrcFile.Version.V_0_11);
final EncryptionAlgorithm algorithm = EncryptionAlgorithm.AES_CTR_128;
final int BATCHES = 100;
final int SEED = 3;
final Random random = new Random(SEED);
TypeDescription schema = TypeDescription.fromString(
"struct<dec:decimal(20,4)," +
"dt:date," +
"time:timestamp," +
"dbl:double," +
"bool:boolean," +
"bin:binary>");
InMemoryKeystore allKeys = new InMemoryKeystore();
byte[][] keys = new byte[6][];
for(int k=0; k < keys.length; ++k) {
keys[k] = new byte[algorithm.keyLength()];
random.nextBytes(keys[k]);
allKeys.addKey("key_" + k, algorithm, keys[k]);
}
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.version(fileFormat)
.stripeSize(10000)
.setKeyProvider(allKeys)
.encrypt("key_0:dec;key_1:dt;key_2:time;key_3:dbl;key_4:bool;key_5:bin"));
// Set size to 1000 precisely so that stripes are exactly 5000 rows long.
VectorizedRowBatch batch = schema.createRowBatch(1000);
DecimalColumnVector dec = (DecimalColumnVector) batch.cols[0];
LongColumnVector dt = (LongColumnVector) batch.cols[1];
TimestampColumnVector time = (TimestampColumnVector) batch.cols[2];
DoubleColumnVector dbl = (DoubleColumnVector) batch.cols[3];
LongColumnVector bool = (LongColumnVector) batch.cols[4];
BytesColumnVector bin = (BytesColumnVector) batch.cols[5];
// Generate 100 batches of 1,000 rows each
batch.size = 1000;
dec.isRepeating = true;
dt.isRepeating = true;
time.isRepeating = true;
dbl.isRepeating = true;
bool.isRepeating = true;
bin.isRepeating = true;
for(int b=0; b < BATCHES; ++b) {
dec.set(0, new HiveDecimalWritable(String.format("%d.%03d", b, b)));
dt.vector[0] = new DateWritable(new Date(96 + b, 12, 11)).getDays();
time.set(0, Timestamp.valueOf(String.format("2014-12-14 12:00:00.%04d", b)));
dbl.vector[0] = b + 0.5;
bool.vector[0] = b % 2;
bin.setVal(0, Integer.toString(b).getBytes(StandardCharsets.UTF_8));
writer.addRowBatch(batch);
}
writer.close();
// Read without any keys
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.setKeyProvider(new InMemoryKeystore()));
checkHasData(reader.rows(), batch, BATCHES,
false, false, false, false, false, false);
// read with all of the keys
reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf)
.setKeyProvider(allKeys));
checkHasData(reader.rows(), batch, BATCHES,
true, true, true, true, true, true);
// try enabling each key by itself
for(int c=0; c < 6; ++c) {
InMemoryKeystore single = new InMemoryKeystore();
single.addKey("key_" + c, algorithm, keys[c]);
reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).setKeyProvider(single));
boolean[] hasData = new boolean[6];
hasData[c] = true;
checkHasData(reader.rows(), batch, BATCHES, hasData);
}
}
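  /**
   * Assert that each column either holds the generated values for the batch
   * (hasData[c] is true) or is entirely null because its key was unavailable.
   */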
private void checkHasData(RecordReader reader, VectorizedRowBatch batch,
int BATCHES, boolean... hasData) throws IOException {
for(int b=0; b < BATCHES; ++b) {
assertTrue(reader.nextBatch(batch), "batch " + b);
for(int c=0; c < hasData.length; c++) {
if (hasData[c]) {
// the expected value
String expected = null;
// a function from the row to the value as a string
IntFunction<String> actual = row -> null;
switch (c) {
case 0:
expected = new HiveDecimalWritable(String.format("%d.%03d", b, b)).toString();
actual = row -> ((DecimalColumnVector) batch.cols[0]).vector[row].toString();
break;
case 1:
expected = Long.toString(new DateWritable(new Date(96 + b, 12, 11)).getDays());
actual = row -> Long.toString(((LongColumnVector) batch.cols[1]).vector[row]);
break;
case 2:
expected = Timestamp.valueOf(String.format("2014-12-14 12:00:00.%04d", b)).toString();
actual = row -> ((TimestampColumnVector) batch.cols[2]).asScratchTimestamp(row).toString();
break;
case 3:
expected = Double.toString(b + 0.5);
actual = row -> Double.toString(((DoubleColumnVector) batch.cols[3]).vector[row]);
break;
case 4:
expected = Long.toString(b % 2);
actual = row -> Long.toString(((LongColumnVector) batch.cols[4]).vector[row]);
break;
default:
expected = Integer.toString(b);
actual = row -> ((BytesColumnVector) batch.cols[5]).toString(row);
break;
}
assertTrue(batch.cols[c].noNulls, "batch " + b + " column " + c);
assertEquals(expected, actual.apply(0), "batch " + b + " column " + c + " row 0");
// Not all of the readers set isRepeating, so if it isn't set, check the values.
if (!batch.cols[c].isRepeating) {
for(int r=1; r < batch.size; ++r) {
assertEquals(expected, actual.apply(r), "batch " + b + " column " + c + " row " + r);
}
}
} else {
assertTrue(batch.cols[c].isRepeating, "batch " + b + " column " + c);
assertTrue(batch.cols[c].isNull[0], "batch " + b + " column " + c);
}
}
}
assertFalse(reader.nextBatch(batch), "end");
reader.close();
}
}
| 205,783 | 41.005307 | 108 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/MockDataReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.DataReader;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Set;
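/**
 * An in-memory DataReader for tests: streams are registered per column with
 * addStream and grouped into stripes with addStripeFooter, and the per-stream
 * read counts let tests verify exactly which byte ranges were read.
 */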
public class MockDataReader implements DataReader {
private final List<MockStripe> stripes = new ArrayList<>();
private final int numColumns;
private long offset = 3;
private MockStripe next;
private Set<ByteBuffer> outBuffers =
Collections.newSetFromMap(new IdentityHashMap<>());
private final InStream.StreamOptions options;
public MockDataReader(TypeDescription schema) {
this(schema, new InStream.StreamOptions());
}
public MockDataReader(TypeDescription schema,
InStream.StreamOptions options) {
numColumns = schema.getMaximumId() + 1;
next = new MockStripe(numColumns, stripes.size(), offset);
this.options = options;
}
public MockDataReader addStream(int column, OrcProto.Stream.Kind kind,
ByteBuffer bytes) {
next.addStream(column, kind, offset, bytes);
offset += bytes.remaining();
return this;
}
public MockDataReader addEncoding(OrcProto.ColumnEncoding.Kind kind) {
next.addEncoding(kind);
return this;
}
public MockDataReader addStripeFooter(int rows, String timezone) {
next.close(rows, timezone);
stripes.add(next);
offset += next.getFooterLength();
next = new MockStripe(numColumns, stripes.size(), offset);
return this;
}
@Override
public void open() { }
@Override
public OrcProto.StripeFooter readStripeFooter(StripeInformation stripe) {
return stripes.get((int) stripe.getStripeId()).getFooter();
}
@Override
public BufferChunkList readFileData(BufferChunkList list,
boolean doForceDirect) {
for(BufferChunk buffer = list.get(); buffer != null;
buffer = (BufferChunk) buffer.next) {
if (!buffer.hasData()) {
MockStripe stripe = getStripeByOffset(buffer.getOffset());
ByteBuffer data = stripe.getData(buffer.getOffset(), buffer.getLength());
outBuffers.add(data);
buffer.setChunk(data);
}
}
return list;
}
@Override
public boolean isTrackingDiskRanges() {
return true;
}
@Override
public void releaseBuffer(ByteBuffer toRelease) {
outBuffers.remove(toRelease);
}
@Override
public DataReader clone() {
throw new UnsupportedOperationException("Clone not supported.");
}
@Override
public void close() { }
@Override
public InStream.StreamOptions getCompressionOptions() {
return options;
}
public MockStripe getStripe(int id) {
return stripes.get(id);
}
private MockStripe getStripeByOffset(long offset) {
for(MockStripe stripe: stripes) {
if (stripe.getOffset() <= offset &&
(offset - stripe.getOffset() < stripe.getLength())) {
return stripe;
}
}
throw new IllegalArgumentException("Can't find stripe at " + offset);
}
void resetCounts() {
for(MockStripe stripe: stripes) {
stripe.resetCounts();
}
}
}
| 4,077 | 28.338129 | 81 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/MockStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.OrcProto;
import java.nio.ByteBuffer;
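/**
 * One in-memory stream (column, kind, offset, length) used by MockStripe;
 * getData returns a slice of the backing buffer after validating the range.
 */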
class MockStream {
final int column;
final OrcProto.Stream.Kind kind;
private final ByteBuffer bytes;
final long offset;
final int length;
int readCount = 0;
MockStream(int column, OrcProto.Stream.Kind kind, long offset,
ByteBuffer bytes) {
this.column = column;
this.kind = kind;
this.bytes = bytes;
this.offset = offset;
this.length = bytes.remaining();
}
ByteBuffer getData(long offset, int length) {
if (offset < this.offset ||
offset + length > this.offset + this.length) {
throw new IllegalArgumentException("Bad getData [" + offset + ", " +
(offset + length) + ") from [" +
this.offset + ", " +
(this.offset + this.length) + ")");
}
ByteBuffer copy = bytes.duplicate();
int posn = (int) (offset - this.offset);
copy.position(posn);
copy.limit(posn + length);
return copy;
}
}
| 1,913 | 32.578947 | 80 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/MockStripe.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
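/**
 * A StripeInformation assembled from MockStream instances: it tracks index
 * and data lengths, builds the stripe footer on close(), and counts how many
 * times each stream is read so tests can check the planned I/O.
 */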
public class MockStripe implements StripeInformation {
private final int numColumns;
private final List<MockStream> streams = new ArrayList<>();
private final List<OrcProto.ColumnEncoding> encodings = new ArrayList<>();
private final long startOffset;
private final int stripeId;
private int indexLength = 0;
private int dataLength = 0;
private int footerLength = 0;
private OrcProto.StripeFooter footer;
private int rows = 0;
public MockStripe(int numColumns, int stripeId, long offset) {
this.numColumns = numColumns;
this.startOffset = offset;
this.stripeId = stripeId;
}
public MockStream addStream(int column, OrcProto.Stream.Kind kind,
long offset, ByteBuffer bytes) {
MockStream result = new MockStream(column, kind, offset, bytes);
streams.add(result);
int length = bytes.remaining();
if (StreamName.getArea(kind) == StreamName.Area.INDEX) {
indexLength += length;
} else {
dataLength += length;
}
return result;
}
public void addEncoding(OrcProto.ColumnEncoding.Kind kind) {
OrcProto.ColumnEncoding.Builder result =
OrcProto.ColumnEncoding.newBuilder();
result.setKind(kind);
encodings.add(result.build());
}
public void close(int rows, String timezone) {
this.rows = rows;
// make sure we have enough encodings
while (encodings.size() < numColumns) {
addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT);
}
OrcProto.StripeFooter.Builder foot = OrcProto.StripeFooter.newBuilder();
for(MockStream stream: streams) {
foot.addStreams(OrcProto.Stream.newBuilder()
.setKind(stream.kind)
.setLength(stream.length)
.setColumn(stream.column).build());
}
for(OrcProto.ColumnEncoding encoding: encodings) {
foot.addColumns(encoding);
}
if (timezone != null) {
foot.setWriterTimezone(timezone);
}
footer = foot.build();
footerLength = footer.getSerializedSize();
}
@Override
public long getOffset() {
return startOffset;
}
@Override
public long getLength() {
return indexLength + dataLength + footerLength;
}
@Override
public long getIndexLength() {
return indexLength;
}
@Override
public long getDataLength() {
return dataLength;
}
@Override
public long getFooterLength() {
return footerLength;
}
@Override
public long getNumberOfRows() {
return rows;
}
@Override
public long getStripeId() {
return stripeId;
}
@Override
public boolean hasEncryptionStripeId() {
return false;
}
@Override
public long getEncryptionStripeId() {
return stripeId;
}
@Override
public byte[][] getEncryptedLocalKeys() {
return new byte[0][];
}
public MockStream getStream(int column, OrcProto.Stream.Kind kind) {
for (MockStream stream: streams) {
if (stream.column == column && stream.kind == kind) {
return stream;
}
}
throw new IllegalArgumentException("Can't find stream column " + column
+ " kind " + kind);
}
private MockStream getStream(long offset) {
for (MockStream stream: streams) {
if (stream.offset <= offset && offset < stream.offset + stream.length) {
return stream;
}
}
throw new IllegalArgumentException("Can't find stream at offset " + offset);
}
public ByteBuffer getData(long offset, int length) {
MockStream stream = getStream(offset);
stream.readCount += 1;
return stream.getData(offset, length);
}
public void resetCounts() {
for(MockStream stream: streams) {
stream.readCount = 0;
}
}
public OrcProto.StripeFooter getFooter() {
return footer;
}
}
| 4,802 | 26.603448 | 80 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestBitFieldReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestBitFieldReader {
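  /**
   * Write 16k bits (optionally through a compression codec), recording the
   * stream position before each value, then read them back sequentially and
   * again by seeking to every recorded position in reverse order.
   */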
public void runSeekTest(CompressionCodec codec) throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
final int COUNT = 16384;
StreamOptions options = new StreamOptions(500);
if (codec != null) {
options.withCodec(codec, codec.getDefaultOptions());
}
BitFieldWriter out = new BitFieldWriter(
new OutStream("test", options, collect), 1);
TestInStream.PositionCollector[] positions =
new TestInStream.PositionCollector[COUNT];
for(int i=0; i < COUNT; ++i) {
positions[i] = new TestInStream.PositionCollector();
out.getPosition(positions[i]);
      // exercise both alternating values (non-runs) and runs of repeated values
if (i < COUNT / 2) {
out.write(i & 1);
} else {
out.write((i/3) & 1);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
BitFieldReader in = new BitFieldReader(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining(),
InStream.options().withCodec(codec).withBufferSize(500)));
for(int i=0; i < COUNT; ++i) {
int x = in.next();
if (i < COUNT / 2) {
assertEquals(i & 1, x);
} else {
assertEquals((i/3) & 1, x);
}
}
for(int i=COUNT-1; i >= 0; --i) {
in.seek(positions[i]);
int x = in.next();
if (i < COUNT / 2) {
assertEquals(i & 1, x);
} else {
assertEquals((i/3) & 1, x);
}
}
}
@Test
public void testUncompressedSeek() throws Exception {
runSeekTest(null);
}
@Test
public void testCompressedSeek() throws Exception {
runSeekTest(new ZlibCodec());
}
@Test
public void testSkips() throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
BitFieldWriter out = new BitFieldWriter(
new OutStream("test", new StreamOptions(100), collect), 1);
final int COUNT = 16384;
for(int i=0; i < COUNT; ++i) {
if (i < COUNT/2) {
out.write(i & 1);
} else {
out.write((i/3) & 1);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
BitFieldReader in = new BitFieldReader(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining()));
for(int i=0; i < COUNT; i += 5) {
int x = in.next();
if (i < COUNT/2) {
assertEquals(i & 1, x);
} else {
assertEquals((i/3) & 1, x);
}
if (i < COUNT - 5) {
in.skip(4);
}
in.skip(0);
}
}
@Test
public void testSeekSkip() throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
BitFieldWriter out = new BitFieldWriter(
new OutStream("test", new StreamOptions(100), collect), 1);
final int COUNT = 256;
TestInStream.PositionCollector posn = new TestInStream.PositionCollector();
for(int i=0; i < COUNT; ++i) {
if (i == 200) {
out.getPosition(posn);
}
if (i < COUNT/2) {
out.write(i & 1);
} else {
out.write((i/3) & 1);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
BitFieldReader in = new BitFieldReader(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining()));
in.seek(posn);
in.skip(10);
for(int r = 210; r < COUNT; ++r) {
int x = in.next();
if (r < COUNT/2) {
assertEquals(r & 1, x);
} else {
assertEquals((r/3) & 1, x);
}
}
}
}
| 4,895 | 30.587097 | 79 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestBitPack.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestBitPack {
private static final int SIZE = 100;
private static Random rand = new Random(100);
Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
private long[] deltaEncode(long[] inp) {
long[] output = new long[inp.length];
SerializationUtils utils = new SerializationUtils();
for (int i = 0; i < inp.length; i++) {
output[i] = utils.zigzagEncode(inp[i]);
}
return output;
}
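  // Generates a uniformly distributed value in [0, n) from the given Random,
  // rejecting samples that would otherwise bias the modulo at the top of the range.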
private long nextLong(Random rng, long n) {
long bits, val;
do {
bits = (rng.nextLong() << 1) >>> 1;
val = bits % n;
} while (bits - val + (n - 1) < 0L);
return val;
}
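  // Round-trips SIZE random values: zigzag-encode them, bit-pack at the detected
  // fixed width, read them back and decode, then check that the detected width
  // equals numBits and that every value survives the round trip.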
private void runTest(int numBits) throws IOException {
long[] inp = new long[SIZE];
for (int i = 0; i < SIZE; i++) {
long val = 0;
if (numBits <= 32) {
if (numBits == 1) {
val = -1 * rand.nextInt(2);
} else {
val = rand.nextInt((int) Math.pow(2, numBits - 1));
}
} else {
val = nextLong(rand, (long) Math.pow(2, numBits - 2));
}
if (val % 2 == 0) {
val = -val;
}
inp[i] = val;
}
long[] deltaEncoded = deltaEncode(inp);
long minInput = Collections.min(Longs.asList(deltaEncoded));
long maxInput = Collections.max(Longs.asList(deltaEncoded));
long rangeInput = maxInput - minInput;
SerializationUtils utils = new SerializationUtils();
int fixedWidth = utils.findClosestNumBits(rangeInput);
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
OutStream output = new OutStream("test", new StreamOptions(SIZE), collect);
utils.writeInts(deltaEncoded, 0, deltaEncoded.length, fixedWidth, output);
output.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
long[] buff = new long[SIZE];
utils.readInts(buff, 0, SIZE, fixedWidth,
InStream.create("test", new BufferChunk(inBuf,0), 0,
inBuf.remaining()));
for (int i = 0; i < SIZE; i++) {
buff[i] = utils.zigzagDecode(buff[i]);
}
assertEquals(numBits, fixedWidth);
assertArrayEquals(inp, buff);
}
@Test
public void test01BitPacking1Bit() throws IOException {
runTest(1);
}
@Test
public void test02BitPacking2Bit() throws IOException {
runTest(2);
}
@Test
public void test03BitPacking3Bit() throws IOException {
runTest(3);
}
@Test
public void test04BitPacking4Bit() throws IOException {
runTest(4);
}
@Test
public void test05BitPacking5Bit() throws IOException {
runTest(5);
}
@Test
public void test06BitPacking6Bit() throws IOException {
runTest(6);
}
@Test
public void test07BitPacking7Bit() throws IOException {
runTest(7);
}
@Test
public void test08BitPacking8Bit() throws IOException {
runTest(8);
}
@Test
public void test09BitPacking9Bit() throws IOException {
runTest(9);
}
@Test
public void test10BitPacking10Bit() throws IOException {
runTest(10);
}
@Test
public void test11BitPacking11Bit() throws IOException {
runTest(11);
}
@Test
public void test12BitPacking12Bit() throws IOException {
runTest(12);
}
@Test
public void test13BitPacking13Bit() throws IOException {
runTest(13);
}
@Test
public void test14BitPacking14Bit() throws IOException {
runTest(14);
}
@Test
public void test15BitPacking15Bit() throws IOException {
runTest(15);
}
@Test
public void test16BitPacking16Bit() throws IOException {
runTest(16);
}
@Test
public void test17BitPacking17Bit() throws IOException {
runTest(17);
}
@Test
public void test18BitPacking18Bit() throws IOException {
runTest(18);
}
@Test
public void test19BitPacking19Bit() throws IOException {
runTest(19);
}
@Test
public void test20BitPacking20Bit() throws IOException {
runTest(20);
}
@Test
public void test21BitPacking21Bit() throws IOException {
runTest(21);
}
@Test
public void test22BitPacking22Bit() throws IOException {
runTest(22);
}
@Test
public void test23BitPacking23Bit() throws IOException {
runTest(23);
}
@Test
public void test24BitPacking24Bit() throws IOException {
runTest(24);
}
@Test
public void test26BitPacking26Bit() throws IOException {
runTest(26);
}
@Test
public void test28BitPacking28Bit() throws IOException {
runTest(28);
}
@Test
public void test30BitPacking30Bit() throws IOException {
runTest(30);
}
@Test
public void test32BitPacking32Bit() throws IOException {
runTest(32);
}
@Test
public void test40BitPacking40Bit() throws IOException {
runTest(40);
}
@Test
public void test48BitPacking48Bit() throws IOException {
runTest(48);
}
@Test
public void test56BitPacking56Bit() throws IOException {
runTest(56);
}
@Test
public void test64BitPacking64Bit() throws IOException {
runTest(64);
}
}
| 6,902 | 23.830935 | 95 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestColumnStatisticsImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.orc.DecimalColumnStatistics;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.Reader;
import org.apache.orc.TimestampColumnStatistics;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestColumnStatisticsImpl {
@Test
public void testUpdateDate() {
ColumnStatisticsImpl stat = ColumnStatisticsImpl.create(TypeDescription.createDate());
DateWritable date = new DateWritable(16400);
stat.increment();
stat.updateDate(date);
assertDateStatistics(stat, 1, 16400, 16400);
date.set(16410);
stat.increment();
stat.updateDate(date);
assertDateStatistics(stat, 2, 16400, 16410);
date.set(16420);
stat.increment();
stat.updateDate(date);
assertDateStatistics(stat, 3, 16400, 16420);
}
private void assertDateStatistics(ColumnStatisticsImpl stat, int count, int minimum, int maximum) {
OrcProto.ColumnStatistics.Builder builder = stat.serialize();
assertEquals(count, builder.getNumberOfValues());
assertTrue(builder.hasDateStatistics());
assertFalse(builder.hasStringStatistics());
OrcProto.DateStatistics protoStat = builder.getDateStatistics();
assertTrue(protoStat.hasMinimum());
assertEquals(minimum, protoStat.getMinimum());
assertTrue(protoStat.hasMaximum());
assertEquals(maximum, protoStat.getMaximum());
}
@Test
public void testOldTimestamps() throws IOException {
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
Path exampleDir = new Path(System.getProperty("example.dir"));
Path file = new Path(exampleDir, "TestOrcFile.testTimestamp.orc");
Configuration conf = new Configuration();
Reader reader = OrcFile.createReader(file, OrcFile.readerOptions(conf));
TimestampColumnStatistics stats =
(TimestampColumnStatistics) reader.getStatistics()[0];
assertEquals("1995-01-01 00:00:00.688", stats.getMinimum().toString());
    // ORC-611: add TS stats nanosecond support for older files by using (max TS + 999,999 ns)
assertEquals("2037-01-01 00:00:00.000999999", stats.getMaximum().toString());
TimeZone.setDefault(original);
}
@Test
public void testTimestamps() {
TimeZone original = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
TypeDescription instant = TypeDescription.createTimestampInstant();
ColumnStatisticsImpl stats = ColumnStatisticsImpl.create(instant);
TimestampColumnStatistics dstats = (TimestampColumnStatistics) stats;
assertNull(dstats.getMinimumUTC());
assertNull(dstats.getMaximumUTC());
stats.updateTimestamp(123, 456789);
stats.updateTimestamp(1234, 567890);
stats.increment(2);
assertEquals("1970-01-01 00:00:00.123456789", dstats.getMinimum().toString());
assertEquals("1970-01-01 00:00:01.23456789", dstats.getMaximum().toString());
stats.updateTimestamp(123, 400000);
stats.updateTimestamp(1234, 600000);
assertEquals("1970-01-01 00:00:00.1234", dstats.getMinimum().toString());
assertEquals("1970-01-01 00:00:01.2346", dstats.getMaximum().toString());
stats.updateTimestamp(122, 300000);
stats.updateTimestamp(1235, 400000);
assertEquals("1970-01-01 00:00:00.1223", dstats.getMinimum().toString());
assertEquals("1970-01-01 00:00:01.2354", dstats.getMaximum().toString());
stats.merge(stats);
assertEquals("1970-01-01 00:00:00.1223", dstats.getMinimum().toString());
assertEquals("1970-01-01 00:00:01.2354", dstats.getMaximum().toString());
ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(instant);
stats2.updateTimestamp(100, 1);
stats2.increment(1);
TimestampColumnStatistics dstats2 = (TimestampColumnStatistics) stats2;
assertEquals("1970-01-01 00:00:00.100000001", dstats2.getMinimum().toString());
assertEquals("1970-01-01 00:00:00.100000001", dstats2.getMaximum().toString());
stats.merge(stats2);
assertEquals("1970-01-01 00:00:00.100000001", dstats.getMinimum().toString());
assertEquals("1970-01-01 00:00:01.2354", dstats.getMaximum().toString());
stats2.updateTimestamp(2000, 123456);
assertEquals("1970-01-01 00:00:00.100000001", dstats2.getMinimum().toString());
assertEquals("1970-01-01 00:00:02.000123456", dstats2.getMaximum().toString());
stats.merge(stats2);
assertEquals("1970-01-01 00:00:00.100000001", dstats.getMinimum().toString());
assertEquals("1970-01-01 00:00:02.000123456", dstats.getMaximum().toString());
TimeZone.setDefault(original);
}
@Test
public void testDecimal64Overflow() {
TypeDescription schema = TypeDescription.fromString("decimal(18,6)");
OrcProto.ColumnStatistics.Builder pb =
OrcProto.ColumnStatistics.newBuilder();
OrcProto.DecimalStatistics.Builder decimalBuilder =
OrcProto.DecimalStatistics.newBuilder();
decimalBuilder.setMaximum("1000.0");
decimalBuilder.setMinimum("1.010");
decimalBuilder.setSum("123456789.123456");
pb.setDecimalStatistics(decimalBuilder);
pb.setHasNull(false);
pb.setNumberOfValues(3);
// the base case doesn't overflow
DecimalColumnStatistics stats1 = (DecimalColumnStatistics)
ColumnStatisticsImpl.deserialize(schema, pb.build());
ColumnStatisticsImpl updateStats1 = (ColumnStatisticsImpl) stats1;
assertEquals("1.01", stats1.getMinimum().toString());
assertEquals("1000", stats1.getMaximum().toString());
assertEquals("123456789.123456", stats1.getSum().toString());
assertEquals(3, stats1.getNumberOfValues());
// Now set the sum to something that overflows Decimal64.
decimalBuilder.setSum("1234567890123.45");
pb.setDecimalStatistics(decimalBuilder);
DecimalColumnStatistics stats2 = (DecimalColumnStatistics)
ColumnStatisticsImpl.deserialize(schema, pb.build());
assertNull(stats2.getSum());
// merge them together
updateStats1.merge((ColumnStatisticsImpl) stats2);
assertNull(stats1.getSum());
updateStats1.reset();
assertEquals("0", stats1.getSum().toString());
updateStats1.increment();
updateStats1.updateDecimal64(10000, 6);
assertEquals("0.01", stats1.getSum().toString());
updateStats1.updateDecimal64(1, 4);
assertEquals("0.0101", stats1.getSum().toString());
updateStats1.updateDecimal64(TypeDescription.MAX_DECIMAL64, 6);
assertNull(stats1.getSum());
updateStats1.reset();
updateStats1.updateDecimal64(TypeDescription.MAX_DECIMAL64, 6);
assertEquals("999999999999.999999", stats1.getSum().toString());
updateStats1.updateDecimal64(1, 6);
assertNull(stats1.getSum());
updateStats1.reset();
ColumnStatisticsImpl updateStats2 = (ColumnStatisticsImpl) stats2;
updateStats2.reset();
updateStats1.increment();
updateStats2.increment();
updateStats1.updateDecimal64(TypeDescription.MAX_DECIMAL64, 6);
updateStats2.updateDecimal64(TypeDescription.MAX_DECIMAL64, 6);
assertEquals("999999999999.999999", stats1.getSum().toString());
assertEquals("999999999999.999999", stats2.getSum().toString());
updateStats1.merge(updateStats2);
assertNull(stats1.getSum());
}
@Test
public void testCollectionColumnStats() {
/* test List */
final ColumnStatisticsImpl statList = ColumnStatisticsImpl.create(TypeDescription.createList(TypeDescription.createInt()));
statList.increment();
statList.updateCollectionLength(10);
statList.increment();
statList.updateCollectionLength(20);
statList.increment();
statList.updateCollectionLength(30);
statList.increment();
statList.updateCollectionLength(40);
final OrcProto.ColumnStatistics.Builder builder = statList.serialize();
final OrcProto.CollectionStatistics collectionStatistics = builder.getCollectionStatistics();
assertEquals(10, collectionStatistics.getMinChildren());
assertEquals(40, collectionStatistics.getMaxChildren());
assertEquals(100, collectionStatistics.getTotalChildren());
}
}
| 9,323 | 41 | 127 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestConvertTreeReaderFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFile.WriterOptions;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TestProlepticConversions;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.GregorianCalendar;
import java.util.concurrent.TimeUnit;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
public class TestConvertTreeReaderFactory {
private Path workDir =
new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test" + File.separator + "tmp"));
private Configuration conf;
private FileSystem fs;
private Path testFilePath;
private int LARGE_BATCH_SIZE;
private static final int INCREASING_BATCH_SIZE_FIRST = 30;
private static final int INCREASING_BATCH_SIZE_SECOND = 50;
@BeforeEach
public void setupPath(TestInfo testInfo) throws Exception {
// Default CV length is 1024
this.LARGE_BATCH_SIZE = 1030;
this.conf = new Configuration();
this.fs = FileSystem.getLocal(conf);
this.testFilePath = new Path(workDir, TestWriterImpl.class.getSimpleName() +
testInfo.getTestMethod().get().getName().replaceFirst("\\[[0-9]+]", "") +
".orc");
fs.delete(testFilePath, false);
}
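  // Writes a single row whose array column holds LARGE_BATCH_SIZE elements (more than
  // the default vector size of 1024), so reading it back forces the child vector to grow.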
public <TExpectedColumnVector extends ColumnVector> TExpectedColumnVector createORCFileWithLargeArray(
TypeDescription schema, Class<TExpectedColumnVector> expectedColumnType, boolean useDecimal64)
throws IOException, ParseException {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
SimpleDateFormat dateFormat = TestProlepticConversions.createParser("yyyy-MM-dd", new GregorianCalendar());
VectorizedRowBatch batch = schema.createRowBatch(
useDecimal64 ? TypeDescription.RowBatchVersion.USE_DECIMAL64 : TypeDescription.RowBatchVersion.ORIGINAL,
LARGE_BATCH_SIZE);
ListColumnVector listCol = (ListColumnVector) batch.cols[0];
TExpectedColumnVector dcv = (TExpectedColumnVector) (listCol).child;
batch.size = 1;
for (int row = 0; row < LARGE_BATCH_SIZE; ++row) {
setElementInVector(expectedColumnType, dateFormat, dcv, row);
}
listCol.childCount = 1;
listCol.lengths[0] = LARGE_BATCH_SIZE;
listCol.offsets[0] = 0;
w.addRowBatch(batch);
w.close();
assertEquals(((ListColumnVector) batch.cols[0]).child.getClass(), expectedColumnType);
return (TExpectedColumnVector) ((ListColumnVector) batch.cols[0]).child;
}
public <TExpectedColumnVector extends ColumnVector> TExpectedColumnVector createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(
TypeDescription schema, Class<TExpectedColumnVector> typeClass, boolean useDecimal64)
throws IOException, ParseException {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
WriterOptions options = OrcFile.writerOptions(conf);
Writer w = OrcFile.createWriter(testFilePath, options.setSchema(schema));
SimpleDateFormat dateFormat = TestProlepticConversions.createParser("yyyy-MM-dd", new GregorianCalendar());
VectorizedRowBatch batch = schema.createRowBatch(
useDecimal64 ? TypeDescription.RowBatchVersion.USE_DECIMAL64 : TypeDescription.RowBatchVersion.ORIGINAL,
INCREASING_BATCH_SIZE_FIRST);
TExpectedColumnVector columnVector = (TExpectedColumnVector) batch.cols[0];
batch.size = INCREASING_BATCH_SIZE_FIRST;
for (int row = 0; row < INCREASING_BATCH_SIZE_FIRST; ++row) {
setElementInVector(typeClass, dateFormat, columnVector, row);
}
w.addRowBatch(batch);
w.writeIntermediateFooter(); //forcing a new stripe
batch = schema.createRowBatch(
useDecimal64 ? TypeDescription.RowBatchVersion.USE_DECIMAL64 : TypeDescription.RowBatchVersion.ORIGINAL,
INCREASING_BATCH_SIZE_SECOND);
columnVector = (TExpectedColumnVector) batch.cols[0];
batch.size = INCREASING_BATCH_SIZE_SECOND;
for (int row = 0; row < INCREASING_BATCH_SIZE_SECOND; ++row) {
setElementInVector(typeClass, dateFormat, columnVector, row);
}
w.addRowBatch(batch);
w.close();
return (TExpectedColumnVector) batch.cols[0];
}
private void setElementInVector(
Class<?> expectedColumnType, SimpleDateFormat dateFormat, ColumnVector dcv, int row)
throws ParseException {
if (dcv instanceof DecimalColumnVector) {
((DecimalColumnVector) dcv).set(row, HiveDecimal.create(row * 2 + 1));
} else if (dcv instanceof DoubleColumnVector) {
((DoubleColumnVector) dcv).vector[row] = row * 2 + 1;
} else if (dcv instanceof BytesColumnVector) {
((BytesColumnVector) dcv).setVal(row, ((row * 2 + 1) + "").getBytes(StandardCharsets.UTF_8));
} else if (dcv instanceof LongColumnVector) {
((LongColumnVector) dcv).vector[row] = row * 2 + 1;
} else if (dcv instanceof TimestampColumnVector) {
((TimestampColumnVector) dcv).set(row, Timestamp.valueOf((1900 + row) + "-04-01 12:34:56.9"));
} else if (dcv instanceof DateColumnVector) {
String date = String.format("%04d-01-23", row * 2 + 1);
((DateColumnVector) dcv).vector[row] = TimeUnit.MILLISECONDS.toDays(dateFormat.parse(date).getTime());
} else {
throw new IllegalStateException("Writing File with a large array of "+ expectedColumnType + " is not supported!");
}
}
public <TExpectedColumnVector extends ColumnVector> TExpectedColumnVector readORCFileWithLargeArray(
String typeString, Class<TExpectedColumnVector> expectedColumnType) throws Exception {
Reader.Options options = new Reader.Options();
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeString + ">>");
options.schema(schema);
String expected = options.toString();
Configuration conf = new Configuration();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
RecordReader rows = reader.rows(options);
VectorizedRowBatch batch = schema.createRowBatchV2();
while (rows.nextBatch(batch)) {
assertTrue(batch.size > 0);
}
assertEquals(expected, options.toString());
assertEquals(batch.cols.length, 1);
assertTrue(batch.cols[0] instanceof ListColumnVector);
assertEquals(((ListColumnVector) batch.cols[0]).child.getClass(), expectedColumnType);
return (TExpectedColumnVector) ((ListColumnVector) batch.cols[0]).child;
}
public void readORCFileIncreasingBatchSize(String typeString, Class<?> expectedColumnType) throws Exception {
Reader.Options options = new Reader.Options();
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeString + ">");
options.schema(schema);
String expected = options.toString();
Configuration conf = new Configuration();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
RecordReader rows = reader.rows(options);
VectorizedRowBatch batch = schema.createRowBatchV2();
rows.nextBatch(batch);
    assertEquals(INCREASING_BATCH_SIZE_FIRST, batch.size);
assertEquals(expected, options.toString());
assertEquals(batch.cols.length, 1);
assertEquals(batch.cols[0].getClass(), expectedColumnType);
rows.nextBatch(batch);
    assertEquals(INCREASING_BATCH_SIZE_SECOND, batch.size);
assertEquals(expected, options.toString());
assertEquals(batch.cols.length, 1);
assertEquals(batch.cols[0].getClass(), expectedColumnType);
}
public void testConvertToDecimal() throws Exception {
Decimal64ColumnVector columnVector =
readORCFileWithLargeArray("decimal(6,1)", Decimal64ColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
public void testConvertToVarchar() throws Exception {
BytesColumnVector columnVector = readORCFileWithLargeArray("varchar(10)", BytesColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
public void testConvertToBinary() throws Exception {
BytesColumnVector columnVector = readORCFileWithLargeArray("binary", BytesColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
public void testConvertToDouble() throws Exception {
DoubleColumnVector columnVector = readORCFileWithLargeArray("double", DoubleColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
public void testConvertToInteger() throws Exception {
LongColumnVector columnVector = readORCFileWithLargeArray("int", LongColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
public void testConvertToFloat() throws Exception {
DoubleColumnVector columnVector = readORCFileWithLargeArray("float", DoubleColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
public void testConvertToTimestamp() throws Exception {
TimestampColumnVector columnVector =
readORCFileWithLargeArray("timestamp", TimestampColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.time.length);
}
public void testConvertToDate() throws Exception {
DateColumnVector columnVector = readORCFileWithLargeArray("date", DateColumnVector.class);
assertEquals(LARGE_BATCH_SIZE, columnVector.vector.length);
}
@Test
public void testDecimalArrayBiggerThanDefault() throws Exception {
String typeStr = "decimal(6,1)";
Class typeClass = DecimalColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createDecimalConvertTreeReader
testConvertToInteger();
testConvertToDouble();
testConvertToVarchar();
testConvertToTimestamp();
testConvertToDecimal();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testDecimal64ArrayBiggerThanDefault() throws Exception {
String typeStr = "decimal(6,1)";
Class typeClass = Decimal64ColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createDecimalConvertTreeReader
testConvertToInteger();
testConvertToDouble();
testConvertToVarchar();
testConvertToTimestamp();
testConvertToDecimal();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testStringArrayBiggerThanDefault() throws Exception {
String typeStr = "varchar(10)";
Class typeClass = BytesColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createStringConvertTreeReader
testConvertToInteger();
testConvertToDouble();
testConvertToDecimal();
testConvertToVarchar();
testConvertToBinary();
testConvertToTimestamp();
testConvertToDate();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testBinaryArrayBiggerThanDefault() throws Exception {
String typeStr = "binary";
Class typeClass = BytesColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createBinaryConvertTreeReader
testConvertToVarchar();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testDoubleArrayBiggerThanDefault() throws Exception {
String typeStr = "double";
Class typeClass = DoubleColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createDoubleConvertTreeReader
testConvertToDouble();
testConvertToInteger();
testConvertToFloat();
testConvertToDecimal();
testConvertToVarchar();
testConvertToTimestamp();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testIntArrayBiggerThanDefault() throws Exception {
String typeStr = "int";
Class typeClass = LongColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createAnyIntegerConvertTreeReader
testConvertToInteger();
testConvertToDouble();
testConvertToDecimal();
testConvertToVarchar();
testConvertToTimestamp();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testTimestampArrayBiggerThanDefault() throws Exception {
String typeStr = "timestamp";
Class typeClass = TimestampColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createTimestampConvertTreeReader
testConvertToInteger();
testConvertToDouble();
testConvertToDecimal();
testConvertToVarchar();
testConvertToTimestamp();
testConvertToDate();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testDateArrayBiggerThanDefault() throws Exception {
String typeStr = "date";
Class typeClass = DateColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:array<" + typeStr + ">>");
createORCFileWithLargeArray(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
// Test all possible conversions
// check ConvertTreeReaderFactory.createDateConvertTreeReader
testConvertToVarchar();
testConvertToTimestamp();
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testDecimalVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "decimal(6,1)";
Class typeClass = DecimalColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass, typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToIntegerIncreasingSize();
testConvertToDoubleIncreasingSize();
testConvertToVarcharIncreasingSize();
testConvertToTimestampIncreasingSize();
testConvertToDecimalIncreasingSize();
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testDecimal64VectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "decimal(6,1)";
Class typeClass = Decimal64ColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToIntegerIncreasingSize();
testConvertToDoubleIncreasingSize();
testConvertToVarcharIncreasingSize();
testConvertToTimestampIncreasingSize();
testConvertToDecimalIncreasingSize();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testStringVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "varchar(10)";
Class typeClass = BytesColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToIntegerIncreasingSize();
testConvertToDoubleIncreasingSize();
testConvertToDecimalIncreasingSize();
testConvertToVarcharIncreasingSize();
testConvertToBinaryIncreasingSize();
testConvertToTimestampIncreasingSize();
testConvertToDateIncreasingSize();
} finally {
// Make sure we delete file across tests
fs.delete(testFilePath, false);
}
}
@Test
public void testReadOrcByteArraysException() {
InStream stream = mock(InStream.class);
RunLengthIntegerReaderV2 lengths = mock(RunLengthIntegerReaderV2.class);
int batchSize = 1024;
LongColumnVector defaultBatchSizeScratchlcv = new LongColumnVector(batchSize);
for (int i = 0; i < batchSize; i++) {
defaultBatchSizeScratchlcv.vector[i] = Integer.MAX_VALUE - 8;
}
BytesColumnVector defaultBatchSizeResult = new BytesColumnVector(batchSize);
IOException defaultBatchSizeException = assertThrows(
IOException.class,
() -> TreeReaderFactory.BytesColumnVectorUtil.readOrcByteArrays(stream, lengths,
defaultBatchSizeScratchlcv, defaultBatchSizeResult, batchSize));
assertEquals("totalLength:-9216 is a negative number. " +
"The current batch size is 1024, " +
"you can reduce the value by 'orc.row.batch.size'.",
defaultBatchSizeException.getMessage());
int batchSizeOne = 1;
LongColumnVector batchSizeOneScratchlcv = new LongColumnVector(batchSizeOne);
for (int i = 0; i < batchSizeOne; i++) {
batchSizeOneScratchlcv.vector[i] = Long.MAX_VALUE;
}
BytesColumnVector batchSizeOneResult = new BytesColumnVector(batchSizeOne);
IOException batchSizeOneException = assertThrows(
IOException.class,
() -> TreeReaderFactory.BytesColumnVectorUtil.readOrcByteArrays(stream, lengths,
batchSizeOneScratchlcv, batchSizeOneResult, batchSizeOne));
assertEquals("totalLength:-1 is a negative number.",
batchSizeOneException.getMessage());
}
public void testBinaryVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "binary";
Class typeClass = BytesColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToVarcharIncreasingSize();
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testDoubleVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "double";
Class typeClass = DoubleColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToDoubleIncreasingSize();
testConvertToIntegerIncreasingSize();
testConvertToFloatIncreasingSize();
testConvertToDecimalIncreasingSize();
testConvertToVarcharIncreasingSize();
testConvertToTimestampIncreasingSize();
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testIntVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "int";
Class typeClass = LongColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToIntegerIncreasingSize();
testConvertToDoubleIncreasingSize();
testConvertToDecimalIncreasingSize();
testConvertToVarcharIncreasingSize();
testConvertToTimestampIncreasingSize();
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testTimestampVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "timestamp";
Class typeClass = TimestampColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToIntegerIncreasingSize();
testConvertToDoubleIncreasingSize();
testConvertToDecimalIncreasingSize();
testConvertToVarcharIncreasingSize();
testConvertToTimestampIncreasingSize();
testConvertToDateIncreasingSize();
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testDateVectorIncreasingSizeInDifferentStripes() throws Exception {
String typeStr = "date";
Class typeClass = DateColumnVector.class;
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
createORCFileWithBatchesOfIncreasingSizeInDifferentStripes(schema, typeClass,
typeClass.equals(Decimal64ColumnVector.class));
try {
testConvertToVarcharIncreasingSize();
testConvertToTimestampIncreasingSize();
} finally {
fs.delete(testFilePath, false);
}
}
private void testConvertToDoubleIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("double", DoubleColumnVector.class);
}
private void testConvertToIntegerIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("int", LongColumnVector.class);
}
private void testConvertToFloatIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("float", DoubleColumnVector.class);
}
public void testConvertToDecimalIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("decimal(6,1)", Decimal64ColumnVector.class);
}
private void testConvertToVarcharIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("varchar(10)", BytesColumnVector.class);
}
private void testConvertToTimestampIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("timestamp", TimestampColumnVector.class);
}
private void testConvertToDateIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("date", DateColumnVector.class);
}
private void testConvertToBinaryIncreasingSize() throws Exception {
readORCFileIncreasingBatchSize("binary", BytesColumnVector.class);
}
@Test
public void testDecimalConvertInNullStripe() throws Exception {
try {
Configuration decimalConf = new Configuration(conf);
decimalConf.set(OrcConf.STRIPE_ROW_COUNT.getAttribute(), "1024");
decimalConf.set(OrcConf.ROWS_BETWEEN_CHECKS.getAttribute(), "1");
String typeStr = "decimal(5,1)";
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeStr + ">");
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(decimalConf).setSchema(schema));
VectorizedRowBatch b = schema.createRowBatch();
DecimalColumnVector f1 = (DecimalColumnVector) b.cols[0];
f1.isRepeating = true;
f1.set(0, (HiveDecimal) null);
b.size = 1024;
w.addRowBatch(b);
b.reset();
for (int i = 0; i < 1024; i++) {
f1.set(i, HiveDecimal.create(i + 1));
}
b.size = 1024;
w.addRowBatch(b);
b.reset();
f1.isRepeating = true;
f1.set(0, HiveDecimal.create(1));
b.size = 1024;
w.addRowBatch(b);
b.reset();
w.close();
testDecimalConvertToLongInNullStripe();
testDecimalConvertToDoubleInNullStripe();
testDecimalConvertToStringInNullStripe();
testDecimalConvertToTimestampInNullStripe();
testDecimalConvertToDecimalInNullStripe();
} finally {
fs.delete(testFilePath, false);
}
}
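  // Reads back the three 1024-row batches written above (repeating null, values 1..1024,
  // repeating 1) as the given type and checks the stringified value at row 1023 of each.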
private void readDecimalInNullStripe(String typeString, Class<?> expectedColumnType,
String[] expectedResult) throws Exception {
Reader.Options options = new Reader.Options();
TypeDescription schema = TypeDescription.fromString("struct<col1:" + typeString + ">");
options.schema(schema);
String expected = options.toString();
Configuration conf = new Configuration();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
RecordReader rows = reader.rows(options);
VectorizedRowBatch batch = schema.createRowBatch();
rows.nextBatch(batch);
assertEquals(1024, batch.size);
assertEquals(expected, options.toString());
assertEquals(batch.cols.length, 1);
assertEquals(batch.cols[0].getClass(), expectedColumnType);
assertTrue(batch.cols[0].isRepeating);
StringBuilder sb = new StringBuilder();
batch.cols[0].stringifyValue(sb, 1023);
assertEquals(sb.toString(), expectedResult[0]);
rows.nextBatch(batch);
assertEquals(1024, batch.size);
assertEquals(expected, options.toString());
assertEquals(batch.cols.length, 1);
assertEquals(batch.cols[0].getClass(), expectedColumnType);
assertFalse(batch.cols[0].isRepeating);
StringBuilder sb2 = new StringBuilder();
batch.cols[0].stringifyValue(sb2, 1023);
assertEquals(sb2.toString(), expectedResult[1]);
rows.nextBatch(batch);
assertEquals(1024, batch.size);
assertEquals(expected, options.toString());
assertEquals(batch.cols.length, 1);
assertEquals(batch.cols[0].getClass(), expectedColumnType);
assertTrue(batch.cols[0].isRepeating);
StringBuilder sb3 = new StringBuilder();
batch.cols[0].stringifyValue(sb3, 1023);
assertEquals(sb3.toString(), expectedResult[2]);
}
private void testDecimalConvertToLongInNullStripe() throws Exception {
readDecimalInNullStripe("bigint", LongColumnVector.class,
new String[]{"null", "1024", "1"});
}
private void testDecimalConvertToDoubleInNullStripe() throws Exception {
readDecimalInNullStripe("double", DoubleColumnVector.class,
new String[]{"null", "1024.0", "1.0"});
}
private void testDecimalConvertToStringInNullStripe() throws Exception {
readDecimalInNullStripe("string", BytesColumnVector.class,
new String[]{"null", "\"1024\"", "\"1\""});
}
private void testDecimalConvertToTimestampInNullStripe() throws Exception {
readDecimalInNullStripe("timestamp", TimestampColumnVector.class,
new String[]{"null", "1970-01-01 00:17:04.0", "1970-01-01 00:00:01.0"});
}
private void testDecimalConvertToDecimalInNullStripe() throws Exception {
readDecimalInNullStripe("decimal(18,2)", DecimalColumnVector.class,
new String[]{"null", "1024", "1"});
}
}
| 29,949 | 38.511873 | 135 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestCryptoUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.InMemoryKeystore;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcProto;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.security.Key;
import java.util.List;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestCryptoUtils {
@Test
public void testCreateStreamIv() throws Exception {
EncryptionAlgorithm aes128 = EncryptionAlgorithm.AES_CTR_128;
byte[] iv = new byte[aes128.getIvLength()];
CryptoUtils.modifyIvForStream(0x234567, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8,
0x123456).accept(iv);
assertEquals(16, iv.length);
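    // the first 8 bytes of the IV encode the column id (3 bytes), stream kind (2 bytes),
    // and stripe id (3 bytes)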
assertEquals(0x23, iv[0]);
assertEquals(0x45, iv[1]);
assertEquals(0x67, iv[2]);
assertEquals(0x0, iv[3]);
assertEquals(0x8, iv[4]);
assertEquals(0x12, iv[5]);
assertEquals(0x34, iv[6]);
assertEquals(0x56, iv[7]);
}
@Test
public void testMemoryKeyProvider() throws IOException {
Configuration conf = new Configuration();
OrcConf.KEY_PROVIDER.setString(conf, "memory");
// Hard code the random so that we know the bytes that will come out.
InMemoryKeystore provider =
(InMemoryKeystore) CryptoUtils.getKeyProvider(conf, new Random(24));
byte[] piiKey = new byte[]{0,1,2,3,4,5,6,7,8,9,0xa,0xb,0xc,0xd,0xe,0xf};
provider.addKey("pii", EncryptionAlgorithm.AES_CTR_128, piiKey);
byte[] piiKey2 = new byte[]{0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f};
provider.addKey("pii", 1, EncryptionAlgorithm.AES_CTR_128, piiKey2);
byte[] secretKey = new byte[]{0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,
0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f};
provider.addKey("secret", EncryptionAlgorithm.AES_CTR_128, secretKey);
List<String> keyNames = provider.getKeyNames();
assertEquals(2, keyNames.size());
assertTrue(keyNames.contains("pii"));
assertTrue(keyNames.contains("secret"));
HadoopShims.KeyMetadata meta = provider.getCurrentKeyVersion("pii");
assertEquals(1, meta.getVersion());
LocalKey localKey = provider.createLocalKey(meta);
byte[] encrypted = localKey.getEncryptedKey();
// make sure that we get exactly what we expect to test the encryption
assertEquals("c7 ab 4f bb 38 f4 de ad d0 b3 59 e2 21 2a 95 32",
new BytesWritable(encrypted).toString());
// now check to make sure that we get the expected bytes back
assertEquals("c7 a1 d0 41 7b 24 72 44 1a 58 c7 72 4a d4 be b3",
new BytesWritable(localKey.getDecryptedKey().getEncoded()).toString());
Key key = provider.decryptLocalKey(meta, encrypted);
assertEquals(new BytesWritable(localKey.getDecryptedKey().getEncoded()).toString(),
new BytesWritable(key.getEncoded()).toString());
}
@Test
public void testInvalidKeyProvider() throws IOException {
Configuration conf = new Configuration();
OrcConf.KEY_PROVIDER.setString(conf, "");
assertNull(CryptoUtils.getKeyProvider(conf, new Random()));
}
}
| 4,127 | 40.69697 | 87 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestDataReaderProperties.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.CompressionKind;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.function.Supplier;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.mock;
public class TestDataReaderProperties {
private Supplier<FileSystem> mockedSupplier = mock(Supplier.class);
private Path mockedPath = mock(Path.class);
private boolean mockedZeroCopy = false;
@Test
public void testCompleteBuild() throws IOException {
InStream.StreamOptions options = InStream.options()
.withCodec(OrcCodecPool.getCodec(CompressionKind.ZLIB));
DataReaderProperties properties = DataReaderProperties.builder()
.withFileSystemSupplier(mockedSupplier)
.withPath(mockedPath)
.withCompression(options)
.withZeroCopy(mockedZeroCopy)
.build();
assertEquals(mockedSupplier, properties.getFileSystemSupplier());
assertEquals(mockedPath, properties.getPath());
assertEquals(CompressionKind.ZLIB,
properties.getCompression().getCodec().getKind());
assertEquals(mockedZeroCopy, properties.getZeroCopy());
}
@Test
public void testFileSystemSupplier() throws IOException {
DataReaderProperties properties = DataReaderProperties.builder()
.withFileSystemSupplier(mockedSupplier)
.withPath(mockedPath)
.build();
assertEquals(mockedSupplier, properties.getFileSystemSupplier());
}
@Test
public void testWhenFilesystemIsProvidedGetFileSystemSupplierReturnsSupplier() throws IOException {
DataReaderProperties properties = DataReaderProperties.builder()
.withFileSystemSupplier(mockedSupplier)
.withPath(mockedPath)
.build();
Supplier<FileSystem> supplierFromProperties = properties.getFileSystemSupplier();
assertEquals(mockedSupplier, supplierFromProperties);
}
@Test
public void testMissingNonRequiredArgs() throws IOException {
DataReaderProperties properties = DataReaderProperties.builder()
.withFileSystemSupplier(mockedSupplier)
.withPath(mockedPath)
.build();
assertEquals(mockedSupplier, properties.getFileSystemSupplier());
assertEquals(mockedPath, properties.getPath());
assertNull(properties.getCompression());
assertFalse(properties.getZeroCopy());
}
@Test
public void testEmptyBuild() {
assertThrows(NullPointerException.class, () -> {
DataReaderProperties.builder().build();
});
}
@Test
public void testMissingPath() {
assertThrows(NullPointerException.class, () -> {
DataReaderProperties.builder()
.withFileSystemSupplier(mockedSupplier)
.withCompression(InStream.options())
.withZeroCopy(mockedZeroCopy)
.build();
});
}
@Test
public void testMissingFileSystem() {
assertThrows(NullPointerException.class, () -> {
DataReaderProperties.builder()
.withPath(mockedPath)
.withCompression(InStream.options())
.withZeroCopy(mockedZeroCopy)
.build();
});
}
}
| 4,140 | 33.22314 | 101 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestDateUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestDateUtils {
/**
* Test case for DateColumnVector's changeCalendar
* epoch days, hybrid representation, proleptic representation
* 16768: hybrid: 2015-11-29 proleptic: 2015-11-29
* -141418: hybrid: 1582-10-24 proleptic: 1582-10-24
* -141427: hybrid: 1582-10-15 proleptic: 1582-10-15
* -141428: hybrid: 1582-10-04 proleptic: 1582-10-14
* -141430: hybrid: 1582-10-02 proleptic: 1582-10-12
* -141437: hybrid: 1582-09-25 proleptic: 1582-10-05
* -141438: hybrid: 1582-09-24 proleptic: 1582-10-04
* -499952: hybrid: 0601-03-04 proleptic: 0601-03-07
* -499955: hybrid: 0601-03-01 proleptic: 0601-03-04
* @throws Exception
*/
@Test
public void testConversion() throws Exception {
checkConversion(16768, "2015-11-29", "2015-11-29");
checkConversion(-141418, "1582-10-24", "1582-10-24");
checkConversion(-141427, "1582-10-15", "1582-10-15");
checkConversion(-141428, "1582-10-04", "1582-10-14");
checkConversion(-141430, "1582-10-02", "1582-10-12");
checkConversion(-141437, "1582-09-25", "1582-10-05");
checkConversion(-499952, "0601-03-04", "0601-03-07");
checkConversion(-499955, "0601-03-01", "0601-03-04");
}
void checkConversion(int dayOfEpoch, String hybrid, String proleptic) {
String result = DateUtils.printDate(dayOfEpoch, false);
assertEquals(hybrid, result, "day " + dayOfEpoch);
assertEquals(dayOfEpoch, (int) DateUtils.parseDate(result, false));
result = DateUtils.printDate(dayOfEpoch, true);
assertEquals(proleptic, result, "day " + dayOfEpoch);
assertEquals(dayOfEpoch, (int) DateUtils.parseDate(result, true));
}
}
| 2,585 | 41.393443 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestDynamicArray.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.junit.jupiter.api.Test;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class TestDynamicArray {
@Test
public void testByteArray() throws Exception {
DynamicByteArray dba = new DynamicByteArray(3, 10);
dba.add((byte) 0);
dba.add((byte) 1);
dba.set(3, (byte) 3);
dba.set(2, (byte) 2);
dba.add((byte) 4);
assertEquals("{0,1,2,3,4}", dba.toString());
assertEquals(5, dba.size());
byte[] val;
val = new byte[0];
assertEquals(0, dba.compare(val, 0, 0, 2, 0));
assertEquals(-1, dba.compare(val, 0, 0, 2, 1));
val = new byte[]{3,42};
assertEquals(1, dba.compare(val, 0, 1, 2, 0));
assertEquals(1, dba.compare(val, 0, 1, 2, 1));
assertEquals(0, dba.compare(val, 0, 1, 3, 1));
assertEquals(-1, dba.compare(val, 0, 1, 3, 2));
assertEquals(1, dba.compare(val, 0, 2, 3, 1));
val = new byte[256];
for(int b=-128; b < 128; ++b) {
dba.add((byte) b);
val[b+128] = (byte) b;
}
assertEquals(0, dba.compare(val, 0, 256, 5, 256));
assertEquals(1, dba.compare(val, 0, 1, 0, 1));
assertEquals(1, dba.compare(val, 254, 1, 0, 1));
assertEquals(1, dba.compare(val, 120, 1, 64, 1));
val = new byte[1024];
Random rand = new Random(1701);
for(int i = 0; i < val.length; ++i) {
rand.nextBytes(val);
}
dba.add(val, 0, 1024);
assertEquals(1285, dba.size());
assertEquals(0, dba.compare(val, 0, 1024, 261, 1024));
}
@Test
public void testIntArray() throws Exception {
DynamicIntArray dia = new DynamicIntArray(10);
for(int i=0; i < 10000; ++i) {
dia.add(2*i);
}
assertEquals(10000, dia.size());
for(int i=0; i < 10000; ++i) {
assertEquals(2*i, dia.get(i));
}
dia.clear();
assertEquals(0, dia.size());
dia.add(3);
dia.add(12);
dia.add(65);
assertEquals("{3,12,65}", dia.toString());
for(int i=0; i < 5; ++i) {
dia.increment(i, 3);
}
assertEquals("{6,15,68,3,3}", dia.toString());
}
@Test
public void testEmptyIntArrayToString() {
DynamicIntArray dia = new DynamicIntArray();
assertEquals("{}", dia.toString());
}
@Test
public void testByteArrayOverflow() {
DynamicByteArray dba = new DynamicByteArray();
byte[] val = new byte[1024];
dba.add(val, 0, val.length);
byte[] bigVal = new byte[2048];
RuntimeException exception = assertThrows(
RuntimeException.class,
        // Constructing a genuinely oversized array is limited by the unit-test heap
        // and could cause OOM, so this passes a length larger than the actual byte[];
        // add() does not check that the buffer and the given length are consistent.
() -> dba.add(bigVal, 0, Integer.MAX_VALUE - 16));
assertEquals("chunkIndex overflow:-65535. " +
"You can set orc.column.encoding.direct=columnName, " +
"or orc.dictionary.key.threshold=0 to turn off dictionary encoding.",
exception.getMessage());
}
}
| 3,855 | 31.957265 | 89 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestEncryption.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.InMemoryKeystore;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestEncryption {
Path workDir = new Path(System.getProperty("test.tmp.dir"));
Configuration conf;
FileSystem fs;
Path testFilePath;
TypeDescription schema;
KeyProvider keyProvider;
String encryption;
String mask;
@BeforeEach
public void openFileSystem() throws Exception {
conf = new Configuration();
conf.setInt(OrcConf.ROW_INDEX_STRIDE.getAttribute(), VectorizedRowBatch.DEFAULT_SIZE);
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path("testWriterImpl.orc");
fs.create(testFilePath, true);
schema = TypeDescription.fromString("struct<id:int,name:string>");
byte[] kmsKey = "secret123".getBytes(StandardCharsets.UTF_8);
keyProvider = new InMemoryKeystore()
.addKey("pii", EncryptionAlgorithm.AES_CTR_128, kmsKey);
encryption = "pii:id,name";
mask = "sha256:id,name";
}
@AfterEach
public void deleteTestFile() throws Exception {
fs.delete(testFilePath, false);
}
private void write() throws IOException {
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.overwrite(true)
.setKeyProvider(keyProvider)
.encrypt(encryption)
.masks(mask));
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector id = (LongColumnVector) batch.cols[0];
BytesColumnVector name = (BytesColumnVector) batch.cols[1];
for (int r = 0; r < VectorizedRowBatch.DEFAULT_SIZE * 2; ++r) {
int row = batch.size++;
id.vector[row] = r;
byte[] buffer = ("name-" + (r * 3)).getBytes(StandardCharsets.UTF_8);
name.setRef(row, buffer, 0, buffer.length);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
}
private void read(boolean pushDown) throws IOException {
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).setKeyProvider(keyProvider));
SearchArgument searchArgument = pushDown ? SearchArgumentFactory.newBuilder()
.equals("id", PredicateLeaf.Type.LONG, (long) VectorizedRowBatch.DEFAULT_SIZE)
.build() : null;
VectorizedRowBatch batch = schema.createRowBatch();
Reader.Options options = reader.options().schema(this.schema);
if (pushDown) {
options = options.searchArgument(searchArgument, new String[]{"id"});
}
RecordReader rowIterator = reader.rows(options);
LongColumnVector idColumn = (LongColumnVector) batch.cols[0];
BytesColumnVector nameColumn = (BytesColumnVector) batch.cols[1];
int batchNum = pushDown ? 1 : 0;
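    // With predicate push-down, the id == 1024 predicate selects only the second
    // row group, so the first batch read already starts at value 1024.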
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
long value = row + ((long) batchNum * VectorizedRowBatch.DEFAULT_SIZE);
assertEquals(value, idColumn.vector[row]);
assertEquals("name-" + (value * 3), nameColumn.toString(row));
}
batchNum ++;
}
rowIterator.close();
}
@Test
public void testReadEncryption() throws IOException {
write();
read(false);
}
@Test
public void testPushDownReadEncryption() throws IOException {
write();
read(true);
}
}
| 5,183 | 35 | 90 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestInStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import org.apache.orc.CompressionCodec;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.OrcProto;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.Test;
import javax.crypto.spec.SecretKeySpec;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.Key;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotSame;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.fail;
public class TestInStream {
public static class OutputCollector implements PhysicalWriter.OutputReceiver {
public DynamicByteArray buffer = new DynamicByteArray();
@Override
public void output(ByteBuffer buffer) {
this.buffer.add(buffer.array(), buffer.arrayOffset() + buffer.position(),
buffer.remaining());
}
@Override
public void suppress() {
// PASS
}
}
static class PositionCollector
implements PositionProvider, PositionRecorder {
private List<Long> positions = new ArrayList<>();
private int index = 0;
@Override
public long getNext() {
return positions.get(index++);
}
@Override
public void addPosition(long offset) {
positions.add(offset);
}
public void reset() {
index = 0;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder("position: ");
for(int i=0; i < positions.size(); ++i) {
if (i != 0) {
builder.append(", ");
}
builder.append(positions.get(i));
}
return builder.toString();
}
}
static byte[] getUncompressed(PositionCollector[] positions) throws IOException {
OutputCollector collect = new OutputCollector();
try (OutStream out = new OutStream("test", new StreamOptions(100), collect)) {
for (int i = 0; i < 1024; ++i) {
positions[i] = new PositionCollector();
out.getPosition(positions[i]);
out.write(i);
}
out.flush();
}
assertEquals(1024, collect.buffer.size());
for(int i=0; i < 1024; ++i) {
assertEquals((byte) i, collect.buffer.get(i));
}
return collect.buffer.get();
}
@Test
public void testUncompressed() throws Exception {
PositionCollector[] positions = new PositionCollector[1024];
byte[] bytes = getUncompressed(positions);
for(int i=0; i < 1024; ++i) {
assertEquals((byte) i, bytes[i]);
}
ByteBuffer inBuf = ByteBuffer.wrap(bytes);
InStream in = InStream.create("test", new BufferChunk(inBuf, 0),
0, inBuf.remaining());
assertEquals("uncompressed stream test position: 0 length: 1024" +
" range: 0 offset: 0 position: 0 limit: 1024",
in.toString());
for(int i=0; i < 1024; ++i) {
int x = in.read();
assertEquals(i & 0xff, x);
}
for(int i=1023; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i & 0xff, in.read());
}
}
@Test
public void testUncompressedPartial() throws Exception {
PositionCollector[] positions = new PositionCollector[1024];
byte[] bytes = getUncompressed(positions);
ByteBuffer inBuf = ByteBuffer.allocate(3 * 1024);
inBuf.position(123);
inBuf.put(bytes);
inBuf.clear();
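    // The stream data begins 123 bytes into a chunk whose file offset is 33,
    // so the stream's file offset is 33 + 123 = 156.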
InStream in = InStream.create("test", new BufferChunk(inBuf, 33),
156, 1024);
assertEquals("uncompressed stream test position: 0 length: 1024" +
" range: 0 offset: 33 position: 123 limit: 1147",
in.toString());
for(int i=0; i < 1024; ++i) {
int x = in.read();
assertEquals(i & 0xff, x, "value " + i);
}
for(int i=1023; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i & 0xff, in.read(), "value " + i);
}
}
static byte[] getEncrypted(PositionCollector[] positions,
byte[] key,
byte[] iv,
EncryptionAlgorithm algorithm,
int ROW_COUNT,
long DATA_CONST) throws IOException {
OutputCollector collect = new OutputCollector();
for(int i=0; i < key.length; ++i) {
key[i] = (byte) i;
}
Key decryptKey = new SecretKeySpec(key, algorithm.getAlgorithm());
StreamOptions writerOptions = new StreamOptions(100)
.withEncryption(algorithm, decryptKey);
writerOptions.modifyIv(CryptoUtils.modifyIvForStream(0,
OrcProto.Stream.Kind.DATA, 1));
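    // Copy out the stream-specific IV derived by the writer so the test can
    // create a reader that uses the same IV.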
System.arraycopy(writerOptions.getIv(), 0, iv, 0, iv.length);
try (OutStream out = new OutStream("test", writerOptions, collect);
DataOutputStream outStream = new DataOutputStream(out)) {
for (int i = 0; i < ROW_COUNT; ++i) {
positions[i] = new PositionCollector();
out.getPosition(positions[i]);
outStream.writeLong(i * DATA_CONST);
}
out.flush();
}
byte[] result = collect.buffer.get();
assertEquals(ROW_COUNT * 8L, result.length);
return result;
}
@Test
public void testEncrypted() throws Exception {
final long DATA_CONST = 0x1_0000_0003L;
final int ROW_COUNT = 1024;
PositionCollector[] positions = new PositionCollector[ROW_COUNT];
EncryptionAlgorithm algorithm = EncryptionAlgorithm.AES_CTR_128;
byte[] rawKey = new byte[algorithm.keyLength()];
byte[] iv = new byte[algorithm.getIvLength()];
byte[] bytes = getEncrypted(positions, rawKey, iv, algorithm, ROW_COUNT,
DATA_CONST);
    // Allocate the stream into three ranges, making sure that they don't fall
    // on the 16 byte AES boundaries.
int[] rangeSizes = {1965, ROW_COUNT * 8 - 1965 - 15, 15};
int offset = 0;
BufferChunkList list = new BufferChunkList();
for(int size: rangeSizes) {
ByteBuffer buffer = ByteBuffer.allocate(size);
buffer.put(bytes, offset, size);
buffer.flip();
list.add(new BufferChunk(buffer, offset));
offset += size;
}
try (InStream in = InStream.create("test", list.get(), 0, bytes.length,
InStream.options().withEncryption(EncryptionAlgorithm.AES_CTR_128,
new SecretKeySpec(rawKey, algorithm.getAlgorithm()), iv));
DataInputStream inputStream = new DataInputStream(in)) {
assertEquals("encrypted uncompressed stream test position: 0 length: 8192" +
" range: 0 offset: 0 position: 0 limit: 1965",
in.toString());
for (int i = 0; i < ROW_COUNT; ++i) {
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
for (int i = ROW_COUNT - 1; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
}
}
@Test
public void testEncryptedPartial() throws Exception {
final long DATA_CONST = 0x1_0000_0003L;
final int ROW_COUNT = 1024;
PositionCollector[] positions = new PositionCollector[ROW_COUNT];
EncryptionAlgorithm algorithm = EncryptionAlgorithm.AES_CTR_128;
byte[] rawKey = new byte[algorithm.keyLength()];
byte[] iv = new byte[algorithm.getIvLength()];
byte[] bytes = getEncrypted(positions, rawKey, iv, algorithm, ROW_COUNT,
DATA_CONST);
    // Allocate the stream into three ranges, making sure that they don't fall
    // on the 16 byte AES boundaries.
BufferChunkList list = new BufferChunkList();
ByteBuffer buffer = ByteBuffer.allocate(2000);
buffer.position(35);
buffer.put(bytes, 0, 1965);
buffer.clear();
list.add(new BufferChunk(buffer, 0));
int SECOND_SIZE = ROW_COUNT * 8 - 1965 - 15;
buffer = ByteBuffer.allocate(SECOND_SIZE);
buffer.put(bytes, 1965, buffer.remaining());
buffer.clear();
list.add(new BufferChunk(buffer, 2000));
buffer = ByteBuffer.allocate(2000);
buffer.put(bytes, 1965 + SECOND_SIZE, 15);
buffer.clear();
list.add(new BufferChunk(buffer, 2000 + SECOND_SIZE));
try (InStream in = InStream.create("test", list.get(), 35, bytes.length,
InStream.options().withEncryption(EncryptionAlgorithm.AES_CTR_128,
new SecretKeySpec(rawKey, algorithm.getAlgorithm()), iv));
DataInputStream inputStream = new DataInputStream(in)) {
assertEquals("encrypted uncompressed stream test position: 0 length: 8192" +
" range: 0 offset: 0 position: 0 limit: 1965",
in.toString());
for (int i = 0; i < ROW_COUNT; ++i) {
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
for (int i = ROW_COUNT - 1; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
}
}
static byte[] getCompressedEncrypted(byte[] key,
byte[] iv,
PositionCollector[] positions,
EncryptionAlgorithm algorithm,
int ROW_COUNT,
long DATA_CONST) throws IOException {
OutputCollector collect = new OutputCollector();
for(int i=0; i < key.length; ++i) {
key[i] = (byte) i;
}
Key decryptKey = new SecretKeySpec(key, algorithm.getAlgorithm());
CompressionCodec codec = new ZlibCodec();
StreamOptions writerOptions = new StreamOptions(500)
.withCodec(codec, codec.getDefaultOptions())
.withEncryption(algorithm, decryptKey);
writerOptions.modifyIv(CryptoUtils.modifyIvForStream(0,
OrcProto.Stream.Kind.DATA, 1));
System.arraycopy(writerOptions.getIv(), 0, iv, 0, iv.length);
try (OutStream out = new OutStream("test", writerOptions, collect);
DataOutputStream outStream = new DataOutputStream(out)) {
for (int i = 0; i < ROW_COUNT; ++i) {
positions[i] = new PositionCollector();
out.getPosition(positions[i]);
outStream.writeLong(i * DATA_CONST);
}
out.flush();
}
return collect.buffer.get();
}
@Test
public void testCompressedEncrypted() throws Exception {
final long DATA_CONST = 0x1_0000_0003L;
final int ROW_COUNT = 1024;
EncryptionAlgorithm algorithm = EncryptionAlgorithm.AES_CTR_128;
byte[] key = new byte[algorithm.keyLength()];
byte[] iv = new byte[algorithm.getIvLength()];
PositionCollector[] positions = new PositionCollector[ROW_COUNT];
    byte[] bytes = getCompressedEncrypted(key, iv, positions, algorithm, ROW_COUNT, DATA_CONST);
// currently 3957 bytes
assertEquals(3957, bytes.length);
    // Allocate the stream into three ranges, making sure that they don't fall
    // on the 16 byte AES boundaries.
int[] rangeSizes = {1998, bytes.length - 1998 - 15, 15};
int offset = 0;
BufferChunkList list = new BufferChunkList();
for(int size: rangeSizes) {
ByteBuffer buffer = ByteBuffer.allocate(size);
buffer.put(bytes, offset, size);
buffer.flip();
list.add(new BufferChunk(buffer, offset));
offset += size;
}
try (InStream in = InStream.create("test", list.get(), 0, bytes.length,
InStream.options()
.withCodec(new ZlibCodec()).withBufferSize(500)
.withEncryption(algorithm, new SecretKeySpec(key,
algorithm.getAlgorithm()), iv));
DataInputStream inputStream = new DataInputStream(in)) {
assertEquals("encrypted compressed stream test position: 0 length: " +
bytes.length +
" range: 0 offset: 0 limit: 1998 range 0 = 0 to" +
" 1998; range 1 = 1998 to " + (bytes.length - 15) +
"; range 2 = " +
(bytes.length - 15) + " to " + bytes.length,
in.toString());
for (int i = 0; i < ROW_COUNT; ++i) {
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
for (int i = ROW_COUNT - 1; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
}
}
@Test
public void testCompressedEncryptedPartial() throws Exception {
final long DATA_CONST = 0x1_0000_0003L;
final int ROW_COUNT = 1024;
EncryptionAlgorithm algorithm = EncryptionAlgorithm.AES_CTR_128;
byte[] key = new byte[algorithm.keyLength()];
byte[] iv = new byte[algorithm.getIvLength()];
PositionCollector[] positions = new PositionCollector[ROW_COUNT];
    byte[] bytes = getCompressedEncrypted(key, iv, positions, algorithm, ROW_COUNT, DATA_CONST);
// currently 3957 bytes
assertEquals(3957, bytes.length);
    // Allocate the stream into three ranges, making sure that they don't fall
    // on the 16 byte AES boundaries.
BufferChunkList list = new BufferChunkList();
ByteBuffer buffer = ByteBuffer.allocate(2000);
buffer.position(2);
    buffer.put(bytes, 0, 1998);
buffer.clear();
list.add(new BufferChunk(buffer, 100));
int SECOND_SIZE = bytes.length - 1998 - 15;
buffer = ByteBuffer.allocate(SECOND_SIZE);
buffer.put(bytes, 1998, SECOND_SIZE);
buffer.clear();
list.add(new BufferChunk(buffer, 2100));
buffer = ByteBuffer.allocate(1000);
buffer.put(bytes, 1998 + SECOND_SIZE, 15);
buffer.clear();
list.add(new BufferChunk(buffer, 2100 + SECOND_SIZE));
try (InStream in = InStream.create("test", list.get(), 102, bytes.length,
InStream.options()
.withCodec(new ZlibCodec()).withBufferSize(500)
.withEncryption(algorithm, new SecretKeySpec(key,
algorithm.getAlgorithm()), iv));
DataInputStream inputStream = new DataInputStream(in)) {
assertEquals("encrypted compressed stream test position: 0 length: " +
bytes.length +
" range: 0 offset: 0 limit: 1998 range 0 = 100 to 2100;" +
" range 1 = 2100 to 4044; range 2 = 4044 to 5044",
in.toString());
for (int i = 0; i < ROW_COUNT; ++i) {
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
for (int i = ROW_COUNT - 1; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i * DATA_CONST, inputStream.readLong(), "row " + i);
}
}
}
byte[] getCompressed(PositionCollector[] positions) throws IOException {
CompressionCodec codec = new ZlibCodec();
StreamOptions options = new StreamOptions(300)
.withCodec(codec, codec.getDefaultOptions());
OutputCollector collect = new OutputCollector();
try (OutStream out = new OutStream("test", options, collect)) {
for (int i = 0; i < 1024; ++i) {
positions[i] = new PositionCollector();
out.getPosition(positions[i]);
out.write(i);
}
out.flush();
assertEquals("test", out.toString());
}
return collect.buffer.get();
}
@Test
public void testCompressed() throws Exception {
PositionCollector[] positions = new PositionCollector[1024];
byte[] bytes = getCompressed(positions);
assertEquals(961, bytes.length);
InStream in = InStream.create("test", new BufferChunk(ByteBuffer.wrap(bytes), 0), 0,
bytes.length, InStream.options().withCodec(new ZlibCodec()).withBufferSize(300));
assertEquals("compressed stream test position: 0 length: 961 range: 0" +
" offset: 0 limit: 961 range 0 = 0 to 961",
in.toString());
for(int i=0; i < 1024; ++i) {
int x = in.read();
assertEquals(i & 0xff, x);
}
assertEquals(0, in.available());
for(int i=1023; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i & 0xff, in.read());
}
}
private long seekPosition(long prevPos,
PositionCollector[] positions,
int posIdx,
InStream in,
boolean needsSeek)
throws IOException {
if (needsSeek) {
assertNotEquals(prevPos, positions[posIdx].getNext());
} else {
assertEquals(prevPos, positions[posIdx].getNext());
}
positions[posIdx].reset();
ByteBuffer c = ((InStream.CompressedStream) in).compressed;
in.seek(positions[posIdx]);
assertEquals(posIdx & 0xff, in.read());
if (needsSeek) {
assertNotSame(c, ((InStream.CompressedStream) in).compressed);
} else {
assertSame(c, ((InStream.CompressedStream) in).compressed);
}
positions[posIdx].reset();
return positions[posIdx].getNext();
}
@Test
public void testCompressedSeeks() throws Exception {
    // We test two scenarios: one where the stream is perfectly aligned with the DiskRange and the
    // other where it requires an offset.
for (int offset : new int[]{0, 10}) {
int compValues = 1024;
int origValues = 100;
PositionCollector[] positions = new PositionCollector[compValues + origValues];
byte[] compBytes = getCompressed(positions);
assertEquals(961, compBytes.length);
// Add an original chunk at the end
byte[] bytes = new byte[compBytes.length + 3 + origValues + offset];
System.arraycopy(compBytes, 0, bytes, offset, compBytes.length);
int startPos = offset + compBytes.length;
// Write original header
bytes[startPos] = (byte) ((origValues << 1) + 1);
bytes[startPos + 1] = (byte) (origValues >> 7);
bytes[startPos + 2] = (byte) (origValues >> 15);
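      // Each position records the chunk's offset within the compressed stream
      // followed by the offset within the uncompressed chunk.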
for (int i = 0; i < 100; i++) {
positions[compValues + i] = new PositionCollector();
positions[compValues + i].addPosition(compBytes.length);
positions[compValues + i].addPosition(i);
bytes[startPos + 3 + i] = (byte) (compValues + i);
}
InStream in = InStream.create("test", new BufferChunk(ByteBuffer.wrap(bytes), 0), offset,
compBytes.length + 3 + origValues,
InStream.options()
.withCodec(new ZlibCodec())
.withBufferSize(300));
assertEquals("compressed stream test position: 0 length: 1064 range: 0" +
String.format(" offset: %d limit: %d range 0 = 0 to %d",
offset,
bytes.length,
bytes.length),
in.toString());
// Position to the last
long currPos = positions[positions.length - 1].getNext();
positions[positions.length - 1].reset();
in.seek(positions[positions.length - 1]);
// Seek to the first should reposition compressed
currPos = seekPosition(currPos, positions, 0, in, true);
// Seek to next position should not require a seek
currPos = seekPosition(currPos, positions, 1, in, false);
// Seek to 301 which should require a seek
currPos = seekPosition(currPos, positions, 301, in, true);
// Seek to next position should not require a seek
seekPosition(currPos, positions, 302, in, false);
// Seek to 601 which should require a seek
currPos = seekPosition(currPos, positions, 601, in, true);
// Seek to next position should not require a seek
seekPosition(currPos, positions, 602, in, false);
// Seek to 1024 which should seek to original
currPos = seekPosition(currPos, positions, 1024, in, true);
// Seek to next position should not require a seek
seekPosition(currPos, positions, 1025, in, false);
seekPosition(currPos, positions, 1026, in, false);
}
}
@Test
public void testInvalidSeek() throws Exception {
PositionCollector[] positions = new PositionCollector[1024];
byte[] bytes = getCompressed(positions);
assertEquals(961, bytes.length);
InStream in = InStream.create("test", new BufferChunk(ByteBuffer.wrap(bytes), 0), 0,
bytes.length, InStream.options().withCodec(new ZlibCodec()).withBufferSize(300));
assertEquals("compressed stream test position: 0 length: 961 range: 0" +
" offset: 0 limit: 961 range 0 = 0 to 961",
in.toString());
PositionCollector invalidPosition = new PositionCollector();
invalidPosition.addPosition(-1);
invalidPosition.addPosition(0);
in.seek(invalidPosition);
assertEquals(0, in.read());
assertEquals(1, in.read());
}
@Test
public void testCompressedPartial() throws Exception {
PositionCollector[] positions = new PositionCollector[1024];
byte[] bytes = getCompressed(positions);
assertEquals(961, bytes.length);
ByteBuffer buffer = ByteBuffer.allocate(1500);
buffer.position(39);
buffer.put(bytes, 0, bytes.length);
buffer.clear();
InStream in = InStream.create("test", new BufferChunk(buffer, 100), 139,
bytes.length, InStream.options().withCodec(new ZlibCodec()).withBufferSize(300));
assertEquals("compressed stream test position: 0 length: 961 range: 0" +
" offset: 39 limit: 1000 range 0 = 100 to 1600",
in.toString());
for(int i=0; i < 1024; ++i) {
int x = in.read();
assertEquals(i & 0xff, x);
}
assertEquals(0, in.available());
for(int i=1023; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i & 0xff, in.read());
}
}
@Test
public void testCorruptStream() throws Exception {
OutputCollector collect = new OutputCollector();
CompressionCodec codec = new ZlibCodec();
StreamOptions options = new StreamOptions(500)
.withCodec(codec, codec.getDefaultOptions());
try (OutStream out = new OutStream("test", options, collect)) {
for (int i = 0; i < 1024; ++i) {
out.write(i);
}
out.flush();
}
// now try to read the stream with a buffer that is too small
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
InStream in = InStream.create("test", new BufferChunk(inBuf, 0), 0,
inBuf.remaining(),
InStream.options().withCodec(codec).withBufferSize(100));
byte[] contents = new byte[1024];
try {
in.read(contents);
fail();
} catch(IllegalArgumentException iae) {
// EXPECTED
}
// make a corrupted header
inBuf.clear();
inBuf.put((byte) 32);
inBuf.put((byte) 0);
inBuf.flip();
in = InStream.create("test2", new BufferChunk(inBuf, 0), 0,
inBuf.remaining(),
InStream.options().withCodec(codec).withBufferSize(300));
try {
in.read();
fail();
} catch (IllegalStateException ise) {
// EXPECTED
}
}
@Test
public void testDisjointBuffers() throws Exception {
OutputCollector collect = new OutputCollector();
CompressionCodec codec = new ZlibCodec();
StreamOptions options = new StreamOptions(400)
.withCodec(codec, codec.getDefaultOptions());
PositionCollector[] positions = new PositionCollector[1024];
try (OutStream out = new OutStream("test", options, collect);
DataOutputStream stream = new DataOutputStream(out)) {
for (int i = 0; i < 1024; ++i) {
positions[i] = new PositionCollector();
out.getPosition(positions[i]);
stream.writeInt(i);
}
out.flush();
}
assertEquals(1674, collect.buffer.size());
ByteBuffer[] inBuf = new ByteBuffer[3];
inBuf[0] = ByteBuffer.allocate(500);
inBuf[1] = ByteBuffer.allocate(1200);
inBuf[2] = ByteBuffer.allocate(500);
collect.buffer.setByteBuffer(inBuf[0], 0, 483);
collect.buffer.setByteBuffer(inBuf[1], 483, 1625 - 483);
collect.buffer.setByteBuffer(inBuf[2], 1625, 1674 - 1625);
BufferChunkList buffers = new BufferChunkList();
int offset = 0;
for(ByteBuffer buffer: inBuf) {
buffer.flip();
buffers.add(new BufferChunk(buffer, offset));
offset += buffer.remaining();
}
InStream.StreamOptions inOptions = InStream.options()
.withCodec(codec).withBufferSize(400);
try (InStream in = InStream.create("test", buffers.get(), 0, 1674, inOptions);
DataInputStream inStream = new DataInputStream(in)) {
assertEquals("compressed stream test position: 0 length: 1674 range: 0" +
" offset: 0 limit: 483 range 0 = 0 to 483;" +
" range 1 = 483 to 1625; range 2 = 1625 to 1674",
in.toString());
for (int i = 0; i < 1024; ++i) {
int x = inStream.readInt();
assertEquals(i, x);
}
assertEquals(0, in.available());
for (int i = 1023; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i, inStream.readInt());
}
}
buffers.clear();
buffers.add(new BufferChunk(inBuf[1], 483));
buffers.add(new BufferChunk(inBuf[2], 1625));
try (InStream in = InStream.create("test", buffers.get(), 0, 1674, inOptions);
DataInputStream inStream = new DataInputStream(in)) {
positions[303].reset();
in.seek(positions[303]);
for (int i = 303; i < 1024; ++i) {
assertEquals(i, inStream.readInt());
}
}
buffers.clear();
buffers.add(new BufferChunk(inBuf[0], 0));
buffers.add(new BufferChunk(inBuf[2], 1625));
try (InStream in = InStream.create("test", buffers.get(), 0, 1674, inOptions);
DataInputStream inStream = new DataInputStream(in)) {
positions[1001].reset();
for (int i = 0; i < 300; ++i) {
assertEquals(i, inStream.readInt());
}
in.seek(positions[1001]);
for (int i = 1001; i < 1024; ++i) {
assertEquals(i, inStream.readInt());
}
}
}
@Test
public void testUncompressedDisjointBuffers() throws Exception {
OutputCollector collect = new OutputCollector();
PositionCollector[] positions = new PositionCollector[1024];
try (OutStream out = new OutStream("test", new StreamOptions(400), collect);
DataOutputStream stream = new DataOutputStream(out)) {
for (int i = 0; i < 1024; ++i) {
positions[i] = new PositionCollector();
out.getPosition(positions[i]);
stream.writeInt(i);
}
out.flush();
}
assertEquals(4096, collect.buffer.size());
ByteBuffer[] inBuf = new ByteBuffer[3];
inBuf[0] = ByteBuffer.allocate(1100);
inBuf[1] = ByteBuffer.allocate(2200);
inBuf[2] = ByteBuffer.allocate(1100);
collect.buffer.setByteBuffer(inBuf[0], 0, 1024);
collect.buffer.setByteBuffer(inBuf[1], 1024, 2048);
collect.buffer.setByteBuffer(inBuf[2], 3072, 1024);
for(ByteBuffer buffer: inBuf) {
buffer.flip();
}
BufferChunkList buffers = new BufferChunkList();
buffers.add(new BufferChunk(inBuf[0], 0));
buffers.add(new BufferChunk(inBuf[1], 1024));
buffers.add(new BufferChunk(inBuf[2], 3072));
try (InStream in = InStream.create("test", buffers.get(), 0, 4096);
DataInputStream inStream = new DataInputStream(in)) {
assertEquals("uncompressed stream test position: 0 length: 4096" +
" range: 0 offset: 0 position: 0 limit: 1024",
in.toString());
for (int i = 0; i < 1024; ++i) {
int x = inStream.readInt();
assertEquals(i, x);
}
assertEquals(0, in.available());
for (int i = 1023; i >= 0; --i) {
in.seek(positions[i]);
assertEquals(i, inStream.readInt());
}
}
buffers.clear();
buffers.add(new BufferChunk(inBuf[1], 1024));
buffers.add(new BufferChunk(inBuf[2], 3072));
try (InStream in = InStream.create("test", buffers.get(), 0, 4096);
DataInputStream inStream = new DataInputStream(in)) {
positions[256].reset();
in.seek(positions[256]);
for (int i = 256; i < 1024; ++i) {
assertEquals(i, inStream.readInt());
}
}
buffers.clear();
buffers.add(new BufferChunk(inBuf[0], 0));
buffers.add(new BufferChunk(inBuf[2], 3072));
try (InStream in = InStream.create("test", buffers.get(), 0, 4096);
DataInputStream inStream = new DataInputStream(in)) {
positions[768].reset();
for (int i = 0; i < 256; ++i) {
assertEquals(i, inStream.readInt());
}
in.seek(positions[768]);
for (int i = 768; i < 1024; ++i) {
assertEquals(i, inStream.readInt());
}
}
}
@Test
public void testEmptyDiskRange() throws IOException {
DiskRangeList range = new BufferChunk(ByteBuffer.allocate(0), 0);
try (InStream stream = new InStream.UncompressedStream("test", range, 0, 0)) {
assertEquals(0, stream.available());
stream.seek(new PositionProvider() {
@Override
public long getNext() {
return 0;
}
});
assertEquals(0, stream.available());
}
}
private static byte[] input(int... data) {
byte[] result = new byte[data.length];
for(int i = 0; i < data.length; ++i) {
result[i] = (byte) data[i];
}
return result;
}
// a zlib stream of 16 sequences of [0..255]
private static final byte[] compressed = input(
106, 2, 0, 99, 96, 100, 98, 102, 97, 101, 99, -25,
-32, -28, -30, -26, -31, -27, -29, 23, 16, 20, 18, 22,
17, 21, 19, -105, -112, -108, -110, -106, -111, -107, -109, 87,
80, 84, 82, 86, 81, 85, 83, -41, -48, -44, -46, -42,
-47, -43, -45, 55, 48, 52, 50, 54, 49, 53, 51, -73,
-80, -76, -78, -74, -79, -75, -77, 119, 112, 116, 114, 118,
113, 117, 115, -9, -16, -12, -14, -10, -15, -11, -13, 15,
8, 12, 10, 14, 9, 13, 11, -113, -120, -116, -118, -114,
-119, -115, -117, 79, 72, 76, 74, 78, 73, 77, 75, -49,
-56, -52, -54, -50, -55, -51, -53, 47, 40, 44, 42, 46,
41, 45, 43, -81, -88, -84, -86, -82, -87, -83, -85, 111,
104, 108, 106, 110, 105, 109, 107, -17, -24, -20, -22, -18,
-23, -19, -21, -97, 48, 113, -46, -28, 41, 83, -89, 77,
-97, 49, 115, -42, -20, 57, 115, -25, -51, 95, -80, 112,
-47, -30, 37, 75, -105, 45, 95, -79, 114, -43, -22, 53,
107, -41, -83, -33, -80, 113, -45, -26, 45, 91, -73, 109,
-33, -79, 115, -41, -18, 61, 123, -9, -19, 63, 112, -16,
-48, -31, 35, 71, -113, 29, 63, 113, -14, -44, -23, 51,
103, -49, -99, -65, 112, -15, -46, -27, 43, 87, -81, 93,
-65, 113, -13, -42, -19, 59, 119, -17, -35, 127, -16, -16,
-47, -29, 39, 79, -97, 61, 127, -15, -14, -43, -21, 55,
111, -33, -67, -1, -16, -15, -45, -25, 47, 95, -65, 125,
-1, -15, -13, -41, -17, 63, 127, -1, -3, 103, 24, -11,
-1, -88, -1, 71, -3, 63, -22, -1, 81, -1, -113, -6,
127, -44, -1, -93, -2, 31, -11, -1, -88, -1, 71, -3,
63, -22, -1, 81, -1, -113, -6, 127, 4, -8, 31, 0);
@Test
public void testMultiRangeCompressed() throws IOException {
// Set up an initial buffer of PREVIOUS_LENGTH followed by our stream
// at START.
final long START = 1_000_000_000;
final int PREVIOUS_LENGTH = 3000;
BufferChunkList list = new BufferChunkList();
byte[] previous = new byte[PREVIOUS_LENGTH];
Arrays.fill(previous, (byte) -1);
list.add(new BufferChunk(ByteBuffer.wrap(previous), START - PREVIOUS_LENGTH));
list.add(new BufferChunk(ByteBuffer.wrap(compressed), START));
InStream.StreamOptions options =
InStream.options().withCodec(new ZlibCodec()).withBufferSize(4096);
InStream inStream = InStream.create("test", list.get(), START, 4096, options);
byte[] inBuffer = new byte[4096];
assertEquals(4096, inStream.read(inBuffer));
for(int i=0; i < inBuffer.length; ++i) {
assertEquals((byte)i, inBuffer[i], "position " + i);
}
}
@Test
public void testExtraFrontUncompressed() throws IOException {
    // Set up a stream that starts at START, which is divided into regions
// of CHUNK_LENGTH. There are two EXTRA_FRONT byte buffers in front of the
// stream.
final long START = 1_000_000_000;
final int EXTRA_FRONT = 3_000;
final int CHUNK_LENGTH = 100;
final int STREAM_LENGTH = 4096;
BufferChunkList list = new BufferChunkList();
list.add(new BufferChunk(ByteBuffer.allocate(EXTRA_FRONT),
START - 2 * EXTRA_FRONT));
byte[] extraFront = new byte[EXTRA_FRONT + CHUNK_LENGTH];
Arrays.fill(extraFront, (byte) -1);
for(int i=0; i < CHUNK_LENGTH; ++i) {
extraFront[EXTRA_FRONT + i] = (byte) i;
}
list.add(new BufferChunk(ByteBuffer.wrap(extraFront), START - EXTRA_FRONT));
byte[] expected = new byte[STREAM_LENGTH];
for(int i=CHUNK_LENGTH; i < expected.length; ++i) {
expected[i] = (byte) i;
}
int posn = CHUNK_LENGTH;
while (posn <= expected.length) {
list.add(new BufferChunk(
ByteBuffer.wrap(expected, posn,
Math.min(CHUNK_LENGTH, expected.length - posn)),
START + posn));
posn += CHUNK_LENGTH;
}
// now set up the stream to read it
InStream.StreamOptions options = InStream.options();
InStream inStream = InStream.create("test", list.get(), START, STREAM_LENGTH,
options);
// ensure the data is correct
byte[] inBuffer = new byte[STREAM_LENGTH];
posn = 0;
int read = inStream.read(inBuffer);
while (read != -1) {
assertEquals(Math.min(STREAM_LENGTH - posn, CHUNK_LENGTH), read, "Read length at " + posn);
for(int i=0; i < read; ++i) {
assertEquals((byte)(posn + i), inBuffer[i], "posn " + posn + " + " + i);
}
posn += read;
read = inStream.read(inBuffer);
}
}
@Test
public void testExtraFrontCompressed() throws IOException {
    // Set up a stream that starts at START, which is divided into regions
// of CHUNK_LENGTH. There are two EXTRA_FRONT byte buffers in front of the
// stream.
final long START = 1_000_000_000;
final int EXTRA_FRONT = 3_000;
final int CHUNK_LENGTH = 100;
BufferChunkList list = new BufferChunkList();
list.add(new BufferChunk(ByteBuffer.allocate(EXTRA_FRONT),
START - 2 * EXTRA_FRONT));
byte[] extraFront = new byte[EXTRA_FRONT + CHUNK_LENGTH];
Arrays.fill(extraFront, (byte) -1);
System.arraycopy(compressed, 0, extraFront, EXTRA_FRONT, CHUNK_LENGTH);
list.add(new BufferChunk(ByteBuffer.wrap(extraFront), START - EXTRA_FRONT));
int posn = CHUNK_LENGTH;
while (posn < compressed.length) {
list.add(new BufferChunk(
ByteBuffer.wrap(compressed, posn,
Math.min(CHUNK_LENGTH, compressed.length - posn)),
START + posn));
posn += CHUNK_LENGTH;
}
// now set up the stream to read it
InStream.StreamOptions options =
InStream.options().withCodec(new ZlibCodec()).withBufferSize(4096);
InStream inStream = InStream.create("test", list.get(), START, 4096, options);
// ensure the data is correct
byte[] inBuffer = new byte[4096];
assertEquals(4096, inStream.read(inBuffer));
for(int i=0; i < inBuffer.length; ++i) {
assertEquals((byte)i, inBuffer[i], "position " + i);
}
}
@Test
public void testMultiRangeCompressHeader() throws IOException {
// Set up a buffer where the first 5 bytes are each a chunk and then the
// rest of the stream follows.
final long START = 1_000_000_000;
BufferChunkList list = new BufferChunkList();
for(int i=0; i < 5; ++i) {
list.add(new BufferChunk(ByteBuffer.wrap(compressed, i, 1), START + i));
}
list.add(new BufferChunk(
ByteBuffer.wrap(compressed, 5, compressed.length - 5), START + 5));
InStream.StreamOptions options =
InStream.options().withCodec(new ZlibCodec()).withBufferSize(4096);
InStream inStream = InStream.create("test", list.get(), START, 4096, options);
byte[] inBuffer = new byte[4096];
assertEquals(4096, inStream.read(inBuffer));
for(int i=0; i < inBuffer.length; ++i) {
assertEquals((byte)i, inBuffer[i], "position " + i);
}
}
private static final byte[] uncompressed = input(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
@Test
public void testStreamResetWithIncreasedLength() throws IOException {
// Set up an initial buffer of PREVIOUS_LENGTH followed by our stream
// at START.
final long START = 1_000;
final int PREVIOUS_LENGTH = 30;
BufferChunkList list = new BufferChunkList();
byte[] previous = new byte[PREVIOUS_LENGTH];
Arrays.fill(previous, (byte) -1);
list.add(new BufferChunk(ByteBuffer.wrap(previous), START - PREVIOUS_LENGTH));
list.add(new BufferChunk(ByteBuffer.wrap(uncompressed), START));
// Creating a stream of 10 bytes, but with a length of 5
InStream inStream = InStream.create("test", list.get(), START, 5, new InStream.StreamOptions());
// Resetting the stream with the increased length
inStream.reset(list.get(), 10);
// Reading the stream and expecting to read 10 bytes
byte[] inBuffer = new byte[10];
assertEquals(10, inStream.read(inBuffer));
}
@Test
public void testStreamResetWithoutIncreasedLength() throws IOException {
// Set up an initial buffer of PREVIOUS_LENGTH followed by our stream
// at START.
final long START = 1_000;
final int PREVIOUS_LENGTH = 30;
BufferChunkList list = new BufferChunkList();
byte[] previous = new byte[PREVIOUS_LENGTH];
Arrays.fill(previous, (byte) -1);
list.add(new BufferChunk(ByteBuffer.wrap(previous), START - PREVIOUS_LENGTH));
list.add(new BufferChunk(ByteBuffer.wrap(uncompressed), START));
// Creating a stream of 10 bytes, but with a shorter length of 5
InStream inStream = InStream.create("test", list.get(), START, 5, new InStream.StreamOptions());
// Resetting the stream without updating its length
inStream.reset(list.get());
// Reading the stream and expecting to read 5 bytes as the initial stream length
byte[] inBuffer = new byte[5];
assertEquals(5, inStream.read(inBuffer));
}
}
| 39,718 | 38.560757 | 115 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestIntegerCompressionReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestIntegerCompressionReader {
public void runSeekTest(CompressionCodec codec) throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
StreamOptions options = new StreamOptions(1000);
if (codec != null) {
options.withCodec(codec, codec.getDefaultOptions());
}
RunLengthIntegerWriterV2 out = new RunLengthIntegerWriterV2(
new OutStream("test", options, collect), true);
TestInStream.PositionCollector[] positions =
new TestInStream.PositionCollector[4096];
Random random = new Random(99);
int[] junk = new int[2048];
for(int i=0; i < junk.length; ++i) {
junk[i] = random.nextInt();
}
for(int i=0; i < 4096; ++i) {
positions[i] = new TestInStream.PositionCollector();
out.getPosition(positions[i]);
// test runs, incrementing runs, non-runs
if (i < 1024) {
out.write(i/4);
} else if (i < 2048) {
out.write(2*i);
} else {
out.write(junk[i-2048]);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReaderV2 in =
new RunLengthIntegerReaderV2(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining(),
InStream.options().withCodec(codec).withBufferSize(1000)), true, false);
for(int i=0; i < 2048; ++i) {
int x = (int) in.next();
if (i < 1024) {
assertEquals(i/4, x);
} else {
assertEquals(2*i, x);
}
}
for(int i=2047; i >= 0; --i) {
in.seek(positions[i]);
int x = (int) in.next();
if (i < 1024) {
assertEquals(i/4, x);
} else {
assertEquals(2*i, x);
}
}
}
@Test
public void testUncompressedSeek() throws Exception {
runSeekTest(null);
}
@Test
public void testCompressedSeek() throws Exception {
runSeekTest(new ZlibCodec());
}
@Test
public void testSkips() throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
RunLengthIntegerWriterV2 out = new RunLengthIntegerWriterV2(
new OutStream("test", new StreamOptions(100), collect), true);
for(int i=0; i < 2048; ++i) {
if (i < 1024) {
out.write(i);
} else {
out.write(256 * i);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReaderV2 in =
new RunLengthIntegerReaderV2(InStream.create("test",
new BufferChunk(inBuf, 0), 0,
inBuf.remaining()), true, false);
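    // Read every 10th value and skip the 9 values in between, plus a zero-length skip.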
for(int i=0; i < 2048; i += 10) {
int x = (int) in.next();
if (i < 1024) {
assertEquals(i, x);
} else {
assertEquals(256 * i, x);
}
if (i < 2038) {
in.skip(9);
}
in.skip(0);
}
}
}
| 4,192 | 31.503876 | 84 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestMemoryManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.orc.MemoryManager;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.lang.management.ManagementFactory;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.eq;
/**
* Test the ORC memory manager.
*/
public class TestMemoryManager {
private static final double ERROR = 0.000001;
private static class NullCallback implements MemoryManagerImpl.Callback {
public boolean checkMemory(double newScale) {
return false;
}
}
@Test
public void testBasics() throws Exception {
Configuration conf = new Configuration();
MemoryManagerImpl mgr = new MemoryManagerImpl(conf);
NullCallback callback = new NullCallback();
long poolSize = mgr.getTotalMemoryPool();
assertEquals(Math.round(ManagementFactory.getMemoryMXBean().
getHeapMemoryUsage().getMax() * 0.5d), poolSize);
assertEquals(1.0, mgr.getAllocationScale(), 0.00001);
mgr.addWriter(new Path("p1"), 1000, callback);
assertEquals(1.0, mgr.getAllocationScale(), 0.00001);
mgr.addWriter(new Path("p1"), poolSize / 2, callback);
assertEquals(1.0, mgr.getAllocationScale(), 0.00001);
mgr.addWriter(new Path("p2"), poolSize / 2, callback);
assertEquals(1.0, mgr.getAllocationScale(), 0.00001);
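    // Re-adding a writer with the same path replaces its allocation; once p1, p2,
    // and p3 each hold poolSize / 2 the total is 1.5x the pool and the scale drops to 2/3.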
mgr.addWriter(new Path("p3"), poolSize / 2, callback);
assertEquals(0.6666667, mgr.getAllocationScale(), 0.00001);
mgr.addWriter(new Path("p4"), poolSize / 2, callback);
assertEquals(0.5, mgr.getAllocationScale(), 0.000001);
mgr.addWriter(new Path("p4"), 3 * poolSize / 2, callback);
assertEquals(0.3333333, mgr.getAllocationScale(), 0.000001);
mgr.removeWriter(new Path("p1"));
mgr.removeWriter(new Path("p2"));
assertEquals(0.5, mgr.getAllocationScale(), 0.00001);
mgr.removeWriter(new Path("p4"));
assertEquals(1.0, mgr.getAllocationScale(), 0.00001);
}
@Test
public void testConfig() throws Exception {
Configuration conf = new Configuration();
conf.set("hive.exec.orc.memory.pool", "0.9");
MemoryManagerImpl mgr = new MemoryManagerImpl(conf);
long mem =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
System.err.print("Memory = " + mem);
long pool = mgr.getTotalMemoryPool();
assertTrue(mem * 0.899 < pool, "Pool too small: " + pool);
assertTrue(pool < mem * 0.901, "Pool too big: " + pool);
}
@Test
public void testCallback() throws Exception {
Configuration conf = new Configuration();
MemoryManagerImpl mgr = new MemoryManagerImpl(conf);
long pool = mgr.getTotalMemoryPool();
MemoryManager.Callback[] calls = new MemoryManager.Callback[20];
for(int i=0; i < calls.length; ++i) {
calls[i] = Mockito.mock(MemoryManager.Callback.class);
mgr.addWriter(new Path(Integer.toString(i)), pool/4, calls[i]);
}
// check to make sure that they get scaled down
for(int i=0; i < calls.length; ++i) {
mgr.checkMemory(0, calls[i]);
}
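    // Each of the 20 writers holds pool / 4, so every callback should be scaled
    // down to pool / (20 * pool / 4) = 0.2.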
for(int call=0; call < calls.length; ++call) {
Mockito.verify(calls[call]).checkMemory(eq(0.2d));
}
}
}
| 4,104 | 38.471154 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestOrcFilterContextImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestOrcFilterContextImpl {
private final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createStruct()
.addField("f2a", TypeDescription.createLong())
.addField("f2b", TypeDescription.createString()))
.addField("f3", TypeDescription.createString());
@Test
public void testSuccessfulRetrieval() {
VectorizedRowBatch b = createBatch();
OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
fc.setBatch(b);
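    // findColumnVector returns the chain of vectors from the top-level column
    // down to the named field, so nested names such as f2.f2a yield two vectors.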
validateF1Vector(fc.findColumnVector("f1"), 1);
validateF2Vector(fc.findColumnVector("f2"));
validateF2AVector(fc.findColumnVector("f2.f2a"));
validateF2BVector(fc.findColumnVector("f2.f2b"));
validateF3Vector(fc.findColumnVector("f3"));
}
@Test
public void testSuccessfulRetrievalWithBatchChange() {
VectorizedRowBatch b1 = createBatch();
VectorizedRowBatch b2 = createBatch();
((LongColumnVector) b2.cols[0]).vector[0] = 100;
OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
fc.setBatch(b1);
validateF1Vector(fc.findColumnVector("f1"), 1);
// Change the batch
fc.setBatch(b2);
validateF1Vector(fc.findColumnVector("f1"), 100);
}
@Test
public void testMissingFieldTopLevel() {
VectorizedRowBatch b = createBatch();
OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
fc.setBatch(b);
// Missing field at top level
IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
() -> fc.findColumnVector("f4"));
assertTrue(e.getMessage().contains("Field f4 not found in"));
}
@Test
public void testMissingFieldNestedLevel() {
VectorizedRowBatch b = createBatch();
OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
fc.setBatch(b);
    // Missing field at nested level
IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
() -> fc.findColumnVector("f2.c"));
assertTrue(e.getMessage().contains(
"Field c not found in struct<f2a:bigint,f2b:string>"));
}
@Test
public void testPropagations() {
OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
assertNull(fc.getBatch());
fc.setBatch(schema.createRowBatch());
assertNotNull(fc.getBatch());
assertFalse(fc.isSelectedInUse());
// Set selections
fc.setSelectedInUse(true);
fc.getSelected()[0] = 5;
fc.setSelectedSize(1);
assertTrue(fc.isSelectedInUse());
assertEquals(1, fc.getSelectedSize());
assertEquals(fc.getBatch().getMaxSize(), fc.getSelected().length);
assertArrayEquals(new int[] {5}, Arrays.copyOf(fc.getSelected(), fc.getSelectedSize()));
assertTrue(fc.validateSelected());
fc.setSelectedSize(2);
assertFalse(fc.validateSelected());
// Use a new selected vector
fc.setSelected(new int[fc.getBatch().getMaxSize()]);
assertArrayEquals(new int[] {0, 0}, Arrays.copyOf(fc.getSelected(), fc.getSelectedSize()));
// Increase the size of the vector
fc.reset();
assertFalse(fc.isSelectedInUse());
int currSize = fc.getBatch().getMaxSize();
assertEquals(currSize, fc.getSelected().length);
fc.updateSelected(currSize + 1);
assertEquals(currSize + 1, fc.getSelected().length);
// Set the filter context
fc.setFilterContext(true, new int[3], 1);
assertTrue(fc.isSelectedInUse());
assertEquals(3, fc.getBatch().getMaxSize());
assertEquals(1, fc.getSelectedSize());
}
private VectorizedRowBatch createBatch() {
VectorizedRowBatch b = schema.createRowBatch();
LongColumnVector v1 = (LongColumnVector) b.cols[0];
StructColumnVector v2 = (StructColumnVector) b.cols[1];
LongColumnVector v2a = (LongColumnVector) v2.fields[0];
BytesColumnVector v2b = (BytesColumnVector) v2.fields[1];
BytesColumnVector v3 = (BytesColumnVector) b.cols[2];
v1.vector[0] = 1;
v2a.vector[0] = 2;
v2b.setVal(0, "3".getBytes(StandardCharsets.UTF_8));
v3.setVal(0, "4".getBytes(StandardCharsets.UTF_8));
return b;
}
private void validateF1Vector(ColumnVector[] v, long headValue) {
assertEquals(1, v.length);
validateF1Vector(v[0], headValue);
}
private void validateF1Vector(ColumnVector v, long headValue) {
LongColumnVector l = (LongColumnVector) v;
assertEquals(headValue, l.vector[0]);
}
private void validateF2Vector(ColumnVector[] v) {
assertEquals(1, v.length);
validateF2Vector(v[0]);
}
private void validateF2Vector(ColumnVector v) {
StructColumnVector s = (StructColumnVector) v;
validateF2AVector(s.fields[0]);
validateF2BVector(s.fields[1]);
}
private void validateF2AVector(ColumnVector[] v) {
assertEquals(2, v.length);
validateF2Vector(v[0]);
validateF2AVector(v[1]);
}
private void validateF2AVector(ColumnVector v) {
LongColumnVector l = (LongColumnVector) v;
assertEquals(2, l.vector[0]);
}
private void validateF2BVector(ColumnVector[] v) {
assertEquals(2, v.length);
validateF2Vector(v[0]);
validateF2BVector(v[1]);
}
private void validateF2BVector(ColumnVector v) {
BytesColumnVector b = (BytesColumnVector) v;
assertEquals("3", b.toString(0));
}
private void validateF3Vector(ColumnVector[] v) {
assertEquals(1, v.length);
BytesColumnVector b = (BytesColumnVector) v[0];
assertEquals("4", b.toString(0));
}
}
| 7,268 | 34.807882 | 95 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestOrcLargeStripe.java | /*
* Copyright 2015 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@ExtendWith(MockitoExtension.class)
public class TestOrcLargeStripe {
private Path workDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test"
+ File.separator + "tmp"));
Configuration conf;
FileSystem fs;
private Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@Mock
private FSDataInputStream mockDataInput;
static class RangeBuilder {
BufferChunkList result = new BufferChunkList();
RangeBuilder range(long offset, int length) {
result.add(new BufferChunk(offset, length));
return this;
}
BufferChunkList build() {
return result;
}
}
@Test
public void testZeroCopy() throws Exception {
BufferChunkList ranges = new RangeBuilder().range(1000, 3000).build();
HadoopShims.ZeroCopyReaderShim mockZcr = mock(HadoopShims.ZeroCopyReaderShim.class);
when(mockZcr.readBuffer(anyInt(), anyBoolean()))
.thenAnswer(invocation -> ByteBuffer.allocate(1000));
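    // The mock returns 1000 byte buffers, so the single 3000 byte range is read
    // with three readBuffer calls of decreasing remaining length.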
RecordReaderUtils.readDiskRanges(mockDataInput, mockZcr, ranges, true);
verify(mockDataInput).seek(1000);
verify(mockZcr).readBuffer(3000, false);
verify(mockZcr).readBuffer(2000, false);
verify(mockZcr).readBuffer(1000, false);
}
@Test
public void testRangeMerge() throws Exception {
BufferChunkList rangeList = new RangeBuilder()
.range(100, 1000)
.range(1000, 10000)
.range(3000, 30000).build();
RecordReaderUtils.readDiskRanges(mockDataInput, null, rangeList, false);
verify(mockDataInput).readFully(eq(100L), any(), eq(0), eq(32900));
}
@Test
public void testRangeSkip() throws Exception {
BufferChunkList rangeList = new RangeBuilder()
.range(1000, 1000)
.range(2000, 1000)
.range(4000, 1000)
.range(4100, 100)
.range(8000, 1000).build();
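    // Adjacent ranges merge into a single read, the range at 4100 is contained in
    // the read at 4000, and the gap before 8000 forces a separate read.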
RecordReaderUtils.readDiskRanges(mockDataInput, null, rangeList, false);
verify(mockDataInput).readFully(eq(1000L), any(), eq(0), eq(2000));
verify(mockDataInput).readFully(eq(4000L), any(), eq(0), eq(1000));
verify(mockDataInput).readFully(eq(8000L), any(), eq(0), eq(1000));
}
@Test
public void testEmpty() throws Exception {
BufferChunkList rangeList = new RangeBuilder().build();
RecordReaderUtils.readDiskRanges(mockDataInput, null, rangeList, false);
verify(mockDataInput, never()).readFully(anyLong(), any(), anyInt(), anyInt());
}
@Test
public void testConfigMaxChunkLimit() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
TypeDescription schema = TypeDescription.createTimestamp();
fs.delete(testFilePath, false);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000).bufferSize(10000)
.version(OrcFile.Version.V_0_11).fileSystem(fs));
writer.close();
OrcFile.ReaderOptions opts = OrcFile.readerOptions(conf);
Reader reader = OrcFile.createReader(testFilePath, opts);
RecordReader recordReader = reader.rows(new Reader.Options().range(0L, Long.MAX_VALUE));
assertTrue(recordReader instanceof RecordReaderImpl);
assertEquals(Integer.MAX_VALUE - 1024, ((RecordReaderImpl) recordReader).getMaxDiskRangeChunkLimit());
conf = new Configuration();
conf.setInt(OrcConf.ORC_MAX_DISK_RANGE_CHUNK_LIMIT.getHiveConfName(), 1000);
opts = OrcFile.readerOptions(conf);
reader = OrcFile.createReader(testFilePath, opts);
recordReader = reader.rows(new Reader.Options().range(0L, Long.MAX_VALUE));
assertTrue(recordReader instanceof RecordReaderImpl);
assertEquals(1000, ((RecordReaderImpl) recordReader).getMaxDiskRangeChunkLimit());
}
@Test
public void testStringDirectGreaterThan2GB() throws IOException {
final Runtime rt = Runtime.getRuntime();
assumeTrue(rt.maxMemory() > 4_000_000_000L);
TypeDescription schema = TypeDescription.createString();
conf.setDouble("hive.exec.orc.dictionary.key.size.threshold", 0.0);
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE));
// 5000 is the lower bound for a stripe
int size = 5000;
int width = 500_000;
    // generate a random string whose UTF-8 encoding is about width bytes long
Random random = new Random(123);
    char[] randomChars = new char[width];
int posn = 0;
for(int length = 0; length < width && posn < randomChars.length; ++posn) {
char cp = (char) random.nextInt(Character.MIN_SUPPLEMENTARY_CODE_POINT);
      // make sure we get a valid, non-surrogate character
while (Character.isSurrogate(cp)) {
cp = (char) random.nextInt(Character.MIN_SUPPLEMENTARY_CODE_POINT);
}
      // compute the UTF-8 encoded length of this code point (1, 2, or 3 bytes)
length += cp < 0x80 ? 1 : (cp < 0x800 ? 2 : 3);
randomChars[posn] = cp;
}
// put the random characters in as a repeating value.
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector string = (BytesColumnVector) batch.cols[0];
string.setVal(0, new String(randomChars, 0, posn).getBytes(StandardCharsets.UTF_8));
string.isRepeating = true;
for(int rows=size; rows > 0; rows -= batch.size) {
batch.size = Math.min(rows, batch.getMaxSize());
writer.addRowBatch(batch);
}
writer.close();
try {
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
int rowsRead = 0;
while (rows.nextBatch(batch)) {
rowsRead += batch.size;
}
assertEquals(size, rowsRead);
} finally {
fs.delete(testFilePath, false);
}
}
@Test
public void testAdjustRowBatchSizeWhenReadLargeString() throws IOException {
final Runtime rt = Runtime.getRuntime();
assumeTrue(rt.maxMemory() > 4_000_000_000L);
TypeDescription schema = TypeDescription.createString();
conf.setDouble("hive.exec.orc.dictionary.key.size.threshold", 0.0);
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema)
.compress(CompressionKind.NONE));
// default batch size
int size = 1024;
int width = Integer.MAX_VALUE / 1000;
// generate a random string that is width characters long
Random random = new Random(123);
    char[] randomChars = new char[width];
int posn = 0;
for(int length = 0; length < width && posn < randomChars.length; ++posn) {
char cp = (char) random.nextInt(Character.MIN_SUPPLEMENTARY_CODE_POINT);
      // make sure we get a valid, non-surrogate character
while (Character.isSurrogate(cp)) {
cp = (char) random.nextInt(Character.MIN_SUPPLEMENTARY_CODE_POINT);
}
// compute the length of the utf8
length += cp < 0x80 ? 1 : (cp < 0x800 ? 2 : 3);
randomChars[posn] = cp;
}
// put the random characters in as a repeating value.
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector string = (BytesColumnVector) batch.cols[0];
string.setVal(0, new String(randomChars, 0, posn).getBytes(StandardCharsets.UTF_8));
string.isRepeating = true;
for(int rows=size; rows > 0; rows -= batch.size) {
batch.size = Math.min(rows, batch.getMaxSize());
writer.addRowBatch(batch);
}
writer.close();
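    // Rough arithmetic behind the failure asserted below: each row carries about
    // Integer.MAX_VALUE / 1000 ~= 2,147,483 bytes of UTF-8, so a full 1024-row batch
    // needs roughly 2.2 GB of contiguous bytes; that total overflows a signed 32-bit
    // int and surfaces as the negative totalLength in the exception message.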
    // reading with the default batch size (1024) should overflow and throw
IOException exception = assertThrows(
IOException.class,
() -> {
try (Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs))) {
RecordReader rows = reader.rows();
rows.nextBatch(reader.getSchema().createRowBatch());
}
}
);
assertEquals("totalLength:-2095944704 is a negative number. " +
"The current batch size is 1024, " +
"you can reduce the value by 'orc.row.batch.size'.",
exception.getCause().getMessage());
try {
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows();
      // reduce the row batch max size from the default 1024 down to 2
batch = reader.getSchema().createRowBatch(2);
int rowsRead = 0;
while (rows.nextBatch(batch)) {
rowsRead += batch.size;
}
assertEquals(size, rowsRead);
} finally {
fs.delete(testFilePath, false);
}
}
}
| 11,288 | 37.660959 | 106 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestOrcWideTable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
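/**
 * Checks WriterImpl's per-stream buffer size estimate for increasingly wide schemas.
 * The expected values below line up with a heuristic of roughly
 * stripeSize / (20 * columnCount), rounded to the closest power-of-two buffer size and
 * capped at the requested buffer size, e.g. 512 MB / (20 * 1000) is about 26 KB, which
 * rounds to the 32 KB expected for the 1000-column case.
 */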
public class TestOrcWideTable {
@Test
public void testBufferSizeFor1Col() throws IOException {
assertEquals(128 * 1024, WriterImpl.getEstimatedBufferSize(512 * 1024 * 1024,
1, 128*1024));
}
@Test
public void testBufferSizeFor50Col() throws IOException {
assertEquals(256 * 1024, WriterImpl.getEstimatedBufferSize(256 * 1024 * 1024,
50, 256*1024));
}
@Test
public void testBufferSizeFor1000Col() throws IOException {
assertEquals(32 * 1024, WriterImpl.getEstimatedBufferSize(512 * 1024 * 1024,
1000, 128*1024));
}
@Test
public void testBufferSizeFor2000Col() throws IOException {
assertEquals(16 * 1024, WriterImpl.getEstimatedBufferSize(512 * 1024 * 1024,
2000, 256*1024));
}
@Test
public void testBufferSizeFor4000Col() throws IOException {
assertEquals(8 * 1024, WriterImpl.getEstimatedBufferSize(512 * 1024 * 1024,
4000, 256*1024));
}
@Test
public void testBufferSizeFor25000Col() throws IOException {
assertEquals(4 * 1024, WriterImpl.getEstimatedBufferSize(512 * 1024 * 1024,
25000, 256*1024));
}
}
| 2,082 | 31.046154 | 81 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestOutStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.InMemoryKeystore;
import org.apache.orc.OrcProto;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.Key;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class TestOutStream {
@Test
public void testFlush() throws Exception {
PhysicalWriter.OutputReceiver receiver =
Mockito.mock(PhysicalWriter.OutputReceiver.class);
CompressionCodec codec = new ZlibCodec();
StreamOptions options = new StreamOptions(128 * 1024)
.withCodec(codec, codec.getDefaultOptions());
try (OutStream stream = new OutStream("test", options, receiver)) {
assertEquals(0L, stream.getBufferSize());
stream.write(new byte[]{0, 1, 2});
stream.flush();
Mockito.verify(receiver).output(Mockito.any(ByteBuffer.class));
assertEquals(0L, stream.getBufferSize());
}
}
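  // The cap checked below follows from ORC's 3-byte compressed-chunk header: one bit
  // flags an uncompressed ("original") chunk, leaving 23 bits for the chunk length,
  // so buffer sizes of 2^23 and above cannot be encoded.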
@Test
public void testAssertBufferSizeValid() {
try {
OutStream.assertBufferSizeValid(1 + (1<<23));
fail("Invalid buffer-size " + (1 + (1<<23)) + " should have been blocked.");
}
catch (IllegalArgumentException expected) {
// Pass.
}
OutStream.assertBufferSizeValid((1<<23) - 1);
}
@Test
public void testEncryption() throws Exception {
TestInStream.OutputCollector receiver = new TestInStream.OutputCollector();
EncryptionAlgorithm aes128 = EncryptionAlgorithm.AES_CTR_128;
byte[] keyBytes = new byte[aes128.keyLength()];
for(int i=0; i < keyBytes.length; ++i) {
keyBytes[i] = (byte) i;
}
Key material = new SecretKeySpec(keyBytes, aes128.getAlgorithm());
// test out stripe 18
StreamOptions options = new StreamOptions(50)
.withEncryption(aes128, material);
options.modifyIv(CryptoUtils.modifyIvForStream(0x34,
OrcProto.Stream.Kind.DATA, 18));
try (OutStream stream = new OutStream("test", options, receiver)) {
byte[] data = new byte[210];
for (int i = 0; i < data.length; ++i) {
data[i] = (byte) (i + 3);
}
// make 17 empty stripes for the stream
for (int i = 0; i < 18; ++i) {
stream.flush();
}
stream.write(data);
stream.flush();
byte[] output = receiver.buffer.get();
      // These are the outputs of aes128 with the key and incrementing IVs.
// I included these hardcoded values to make sure that we are getting
// AES128 encryption.
//
// I used http://extranet.cryptomathic.com/aescalc/index to compute these:
// key: 000102030405060708090a0b0c0d0e0f
// input: 00003400010000120000000000000000
// ecb encrypt output: 822252A81CC7E7FE3E51F50E0E9B64B1
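      //
      // The input above is the CTR IV, assuming the layout of 3 bytes column id,
      // 2 bytes stream kind and 3 bytes stripe id followed by an 8-byte counter:
      // column 0x34, kind DATA (1), stripe 18 (0x12), counter 0.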
int[] generated = new int[]{
0x82, 0x22, 0x52, 0xA8, 0x1C, 0xC7, 0xE7, 0xFE, // block 0
0x3E, 0x51, 0xF5, 0x0E, 0x0E, 0x9B, 0x64, 0xB1,
0xF6, 0x4D, 0x36, 0xA9, 0xD9, 0xD7, 0x55, 0xDE, // block 1
0xCB, 0xD5, 0x62, 0x0E, 0x6D, 0xA6, 0x6B, 0x16,
0x00, 0x0B, 0xE8, 0xBA, 0x9D, 0xDE, 0x78, 0xEC, // block 2
0x73, 0x05, 0xF6, 0x1E, 0x76, 0xD7, 0x9B, 0x7A,
0x47, 0xE9, 0x61, 0x90, 0x65, 0x8B, 0x54, 0xAC, // block 3
0xF2, 0x3F, 0x67, 0xAE, 0x25, 0x63, 0x1D, 0x4B,
0x41, 0x48, 0xC4, 0x15, 0x5F, 0x2A, 0x7F, 0x91, // block 4
0x9A, 0x87, 0xA1, 0x09, 0xFF, 0x68, 0x68, 0xCC,
0xC0, 0x80, 0x52, 0xD4, 0xA5, 0x07, 0x4B, 0x79, // block 5
0xC7, 0x08, 0x46, 0x46, 0x8C, 0x74, 0x2C, 0x0D,
0x9F, 0x55, 0x7E, 0xA7, 0x17, 0x47, 0x91, 0xFD, // block 6
0x01, 0xD4, 0x24, 0x1F, 0x76, 0xA1, 0xDC, 0xC3,
0xEA, 0x13, 0x4C, 0x29, 0xCA, 0x68, 0x1E, 0x4F, // block 7
0x0D, 0x19, 0xE5, 0x09, 0xF9, 0xC5, 0xF4, 0x15,
0x9A, 0xAD, 0xC4, 0xA1, 0x0F, 0x28, 0xD4, 0x3D, // block 8
0x59, 0xF0, 0x68, 0xD3, 0xC4, 0x98, 0x74, 0x68,
0x37, 0xA4, 0xF4, 0x7C, 0x02, 0xCE, 0xC6, 0xCA, // block 9
0xA1, 0xF8, 0xC3, 0x8C, 0x7B, 0x72, 0x38, 0xD1,
0xAA, 0x52, 0x90, 0xDE, 0x28, 0xA1, 0x53, 0x6E, // block a
0xA6, 0x5C, 0xC0, 0x89, 0xC4, 0x21, 0x76, 0xC0,
0x1F, 0xED, 0x0A, 0xF9, 0xA2, 0xA7, 0xC1, 0x8D, // block b
0xA0, 0x92, 0x44, 0x4F, 0x60, 0x51, 0x7F, 0xD8,
0x6D, 0x16, 0xAF, 0x46, 0x1C, 0x27, 0x20, 0x1C, // block c
0x01, 0xBD, 0xC5, 0x0B, 0x62, 0x3F, 0xEF, 0xEE,
0x37, 0xae // block d
};
assertEquals(generated.length, output.length);
for (int i = 0; i < generated.length; ++i) {
assertEquals((byte) (generated[i] ^ data[i]), output[i], "i = " + i);
}
receiver.buffer.clear();
stream.changeIv(CryptoUtils.modifyIvForStripe(19));
data = new byte[]{0x47, 0x77, 0x65, 0x6e};
stream.write(data);
stream.flush();
output = receiver.buffer.get();
generated = new int[]{0x16, 0x03, 0xE6, 0xC3};
assertEquals(generated.length, output.length);
for (int i = 0; i < generated.length; ++i) {
assertEquals((byte) (generated[i] ^ data[i]), output[i], "i = " + i);
}
}
}
@Test
public void testCompression256Encryption() throws Exception {
// disable test if AES_256 is not available
assumeTrue(InMemoryKeystore.SUPPORTS_AES_256);
TestInStream.OutputCollector receiver = new TestInStream.OutputCollector();
EncryptionAlgorithm aes256 = EncryptionAlgorithm.AES_CTR_256;
byte[] keyBytes = new byte[aes256.keyLength()];
for(int i=0; i < keyBytes.length; ++i) {
keyBytes[i] = (byte) (i * 13);
}
Key material = new SecretKeySpec(keyBytes, aes256.getAlgorithm());
CompressionCodec codec = new ZlibCodec();
StreamOptions options = new StreamOptions(1024)
.withCodec(codec, codec.getDefaultOptions())
.withEncryption(aes256, material)
.modifyIv(CryptoUtils.modifyIvForStream(0x1, OrcProto.Stream.Kind.DATA, 1));
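    // With both a codec and encryption configured, the stream compresses each chunk and
    // then encrypts it, so the checks below undo the steps in reverse order: decrypt
    // first, then let InStream decompress.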
try (OutStream stream = new OutStream("test", options, receiver)) {
for (int i = 0; i < 10000; ++i) {
stream.write(("The Cheesy Poofs " + i + "\n")
.getBytes(StandardCharsets.UTF_8));
}
stream.flush();
}
// get the compressed, encrypted data
byte[] encrypted = receiver.buffer.get();
// decrypt it
Cipher decrypt = aes256.createCipher();
decrypt.init(Cipher.DECRYPT_MODE, material,
new IvParameterSpec(options.getIv()));
byte[] compressed = decrypt.doFinal(encrypted);
// use InStream to decompress it
BufferChunkList ranges = new BufferChunkList();
ranges.add(new BufferChunk(ByteBuffer.wrap(compressed), 0));
try (InStream decompressedStream = InStream.create("test", ranges.get(), 0,
compressed.length,
InStream.options().withCodec(new ZlibCodec()).withBufferSize(1024));
BufferedReader reader
= new BufferedReader(new InputStreamReader(decompressedStream,
StandardCharsets.UTF_8))) {
// check the contents of the decompressed stream
for (int i = 0; i < 10000; ++i) {
assertEquals("The Cheesy Poofs " + i, reader.readLine(), "i = " + i);
}
assertNull(reader.readLine());
}
}
}
| 8,635 | 37.726457 | 84 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestPhysicalFsWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestPhysicalFsWriter {
final Configuration conf = new Configuration();
static class MemoryOutputStream extends OutputStream {
private final List<byte[]> contents;
MemoryOutputStream(List<byte[]> contents) {
this.contents = contents;
}
@Override
public void write(int b) {
contents.add(new byte[]{(byte) b});
}
@Override
public void write(byte[] a, int offset, int length) {
byte[] buffer = new byte[length];
System.arraycopy(a, offset, buffer, 0, length);
contents.add(buffer);
}
}
static class MemoryFileSystem extends FileSystem {
@Override
public URI getUri() {
try {
return new URI("test:///");
} catch (URISyntaxException e) {
throw new IllegalStateException("bad url", e);
}
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize,
short replication, long blockSize,
Progressable progress) throws IOException {
List<byte[]> contents = new ArrayList<>();
fileContents.put(f, contents);
return new FSDataOutputStream(new MemoryOutputStream(contents), null);
}
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) {
throw new UnsupportedOperationException("append not supported");
}
@Override
public boolean rename(Path src, Path dst) {
boolean result = fileContents.containsKey(src) &&
!fileContents.containsKey(dst);
if (result) {
List<byte[]> contents = fileContents.remove(src);
fileContents.put(dst, contents);
}
return result;
}
@Override
public boolean delete(Path f, boolean recursive) {
boolean result = fileContents.containsKey(f);
fileContents.remove(f);
return result;
}
@Override
public FileStatus[] listStatus(Path f) {
return new FileStatus[]{getFileStatus(f)};
}
@Override
public void setWorkingDirectory(Path new_dir) {
currentWorkingDirectory = new_dir;
}
@Override
public Path getWorkingDirectory() {
return currentWorkingDirectory;
}
@Override
public boolean mkdirs(Path f, FsPermission permission) {
return false;
}
@Override
public FileStatus getFileStatus(Path f) {
List<byte[]> contents = fileContents.get(f);
if (contents != null) {
long sum = 0;
for(byte[] b: contents) {
sum += b.length;
}
return new FileStatus(sum, false, 1, 256 * 1024, 0, f);
}
return null;
}
private final Map<Path, List<byte[]>> fileContents = new HashMap<>();
private Path currentWorkingDirectory = new Path("/");
}
@Test
public void testStripePadding() throws IOException {
TypeDescription schema = TypeDescription.fromString("int");
OrcFile.WriterOptions opts =
OrcFile.writerOptions(conf)
.stripeSize(32 * 1024)
.blockSize(64 * 1024)
.compress(CompressionKind.NONE)
.setSchema(schema);
MemoryFileSystem fs = new MemoryFileSystem();
PhysicalFsWriter writer = new PhysicalFsWriter(fs, new Path("test1.orc"),
opts);
writer.writeHeader();
StreamName stream0 = new StreamName(0, OrcProto.Stream.Kind.DATA);
PhysicalWriter.OutputReceiver output = writer.createDataStream(stream0);
byte[] buffer = new byte[1024];
for(int i=0; i < buffer.length; ++i) {
buffer[i] = (byte) i;
}
for(int i=0; i < 63; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
OrcProto.StripeFooter.Builder footer = OrcProto.StripeFooter.newBuilder();
OrcProto.StripeInformation.Builder dirEntry =
OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// check to make sure that it laid it out without padding
assertEquals(0L, dirEntry.getIndexLength());
assertEquals(63 * 1024L, dirEntry.getDataLength());
assertEquals(3, dirEntry.getOffset());
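    // The first stripe begins right after the 3-byte ORC header and ends at
    // 3 + 63 * 1024 = 64,515. A second 62 KB stripe would straddle the 64 KB block
    // boundary, so with padding enabled the writer should pad out to the next block,
    // which is why the following stripe is expected at offset 64 * 1024.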
for(int i=0; i < 62; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
footer = OrcProto.StripeFooter.newBuilder();
dirEntry = OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// the second one should pad
assertEquals(64 * 1024, dirEntry.getOffset());
assertEquals(62 * 1024, dirEntry.getDataLength());
long endOfStripe = dirEntry.getOffset() + dirEntry.getIndexLength() +
dirEntry.getDataLength() + dirEntry.getFooterLength();
for(int i=0; i < 3; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
footer = OrcProto.StripeFooter.newBuilder();
dirEntry = OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// the third one should be over the padding limit
assertEquals(endOfStripe, dirEntry.getOffset());
assertEquals(3 * 1024, dirEntry.getDataLength());
}
@Test
public void testNoStripePadding() throws IOException {
TypeDescription schema = TypeDescription.fromString("int");
OrcFile.WriterOptions opts =
OrcFile.writerOptions(conf)
.blockPadding(false)
.stripeSize(32 * 1024)
.blockSize(64 * 1024)
.compress(CompressionKind.NONE)
.setSchema(schema);
MemoryFileSystem fs = new MemoryFileSystem();
PhysicalFsWriter writer = new PhysicalFsWriter(fs, new Path("test1.orc"),
opts);
writer.writeHeader();
StreamName stream0 = new StreamName(0, OrcProto.Stream.Kind.DATA);
PhysicalWriter.OutputReceiver output = writer.createDataStream(stream0);
byte[] buffer = new byte[1024];
for(int i=0; i < buffer.length; ++i) {
buffer[i] = (byte) i;
}
for(int i=0; i < 63; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
OrcProto.StripeFooter.Builder footer = OrcProto.StripeFooter.newBuilder();
OrcProto.StripeInformation.Builder dirEntry =
OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// check to make sure that it laid it out without padding
assertEquals(0L, dirEntry.getIndexLength());
assertEquals(63 * 1024L, dirEntry.getDataLength());
assertEquals(3, dirEntry.getOffset());
long endOfStripe = dirEntry.getOffset() + dirEntry.getDataLength()
+ dirEntry.getFooterLength();
for(int i=0; i < 62; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
footer = OrcProto.StripeFooter.newBuilder();
dirEntry = OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// no padding, because we turned it off
assertEquals(endOfStripe, dirEntry.getOffset());
assertEquals(62 * 1024, dirEntry.getDataLength());
}
static class MockHadoopShim implements HadoopShims {
long lastShortBlock = -1;
@Override
public DirectDecompressor getDirectDecompressor(DirectCompressionType codec) {
return null;
}
@Override
public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in,
ByteBufferPoolShim pool) {
return null;
}
@Override
public boolean endVariableLengthBlock(OutputStream output) throws IOException {
if (output instanceof FSDataOutputStream) {
lastShortBlock = ((FSDataOutputStream) output).getPos();
return true;
}
return false;
}
@Override
public KeyProvider getHadoopKeyProvider(Configuration conf, Random random) {
return null;
}
}
@Test
public void testShortBlock() throws IOException {
MockHadoopShim shim = new MockHadoopShim();
TypeDescription schema = TypeDescription.fromString("int");
OrcFile.WriterOptions opts =
OrcFile.writerOptions(conf)
.blockPadding(false)
.stripeSize(32 * 1024)
.blockSize(64 * 1024)
.compress(CompressionKind.NONE)
.setSchema(schema)
.setShims(shim)
.writeVariableLengthBlocks(true);
MemoryFileSystem fs = new MemoryFileSystem();
PhysicalFsWriter writer = new PhysicalFsWriter(fs, new Path("test1.orc"),
opts);
writer.writeHeader();
StreamName stream0 = new StreamName(0, OrcProto.Stream.Kind.DATA);
PhysicalWriter.OutputReceiver output = writer.createDataStream(stream0);
byte[] buffer = new byte[1024];
for(int i=0; i < buffer.length; ++i) {
buffer[i] = (byte) i;
}
for(int i=0; i < 63; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
OrcProto.StripeFooter.Builder footer = OrcProto.StripeFooter.newBuilder();
OrcProto.StripeInformation.Builder dirEntry =
OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// check to make sure that it laid it out without padding
assertEquals(0L, dirEntry.getIndexLength());
assertEquals(63 * 1024L, dirEntry.getDataLength());
assertEquals(3, dirEntry.getOffset());
long endOfStripe = dirEntry.getOffset() + dirEntry.getDataLength()
+ dirEntry.getFooterLength();
for(int i=0; i < 62; ++i) {
output.output(ByteBuffer.wrap(buffer));
}
footer = OrcProto.StripeFooter.newBuilder();
dirEntry = OrcProto.StripeInformation.newBuilder();
writer.finalizeStripe(footer, dirEntry);
// we should get a short block and no padding
assertEquals(endOfStripe, dirEntry.getOffset());
assertEquals(62 * 1024, dirEntry.getDataLength());
assertEquals(endOfStripe, shim.lastShortBlock);
}
}
| 11,569 | 33.640719 | 83 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestPredicatePushDownBounds.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.IntegerColumnStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.util.BloomFilter;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import static org.apache.orc.impl.TestRecordReaderImpl.createPredicateLeaf;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestPredicatePushDownBounds {
/**
* This test case handles the Equals corner case where the predicate is equal
* to truncated upper and lower bounds.
*
* @throws Exception
*/
@Test
public void testCornerCases() {
int stringLength = 1100;
byte[] utf8F;
byte[] utf8P;
final TypeDescription schema = TypeDescription.createString();
final ColumnStatisticsImpl stat = ColumnStatisticsImpl.create(schema);
BloomFilter bf = new BloomFilter(100);
// FFF... to PPP...
for (int i = 70; i <= 80; i++) {
final String inputString = StringUtils
.repeat(Character.toString((char) i), stringLength);
bf.addString(inputString);
}
final String longStringF = StringUtils
.repeat(Character.toString('F'), stringLength);
final String longStringP = StringUtils
.repeat(Character.toString('P'), stringLength);
/* String that matches the upperbound value after truncation */
final String upperboundString =
StringUtils.repeat(Character.toString('P'), 1023) + "Q";
/* String that matches the lower value after truncation */
final String lowerboundString = StringUtils
.repeat(Character.toString('F'), 1024);
final String shortStringF = StringUtils.repeat(Character.toString('F'), 50);
final String shortStringP =
StringUtils.repeat(Character.toString('P'), 50) + "Q";
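    /* These literals mirror how long string minimums and maximums get truncated in the
       column statistics (assuming a 1024-character limit): the stored minimum is the
       first 1024 characters of the long 'F' string, and the stored maximum is the first
       1023 characters of the long 'P' string with its last character bumped up to 'Q'
       so that it still bounds the original value from above. */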
/* Test for a case EQUALS where only upperbound is set */
final PredicateLeaf predicateUpperBoundEquals = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.EQUALS,
PredicateLeaf.Type.STRING, "x", upperboundString, null);
/* Test for a case LESS_THAN where only upperbound is set */
final PredicateLeaf predicateUpperBoundLessThan = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN,
PredicateLeaf.Type.STRING, "x", upperboundString, null);
/* Test for a case LESS_THAN_EQUALS where only upperbound is set */
final PredicateLeaf predicateUpperBoundLessThanEquals = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.LESS_THAN_EQUALS,
PredicateLeaf.Type.STRING, "x", upperboundString, null);
utf8F = shortStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
utf8P = longStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl
.evaluatePredicate(stat, predicateUpperBoundEquals, null));
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl
.evaluatePredicate(stat, predicateUpperBoundLessThan, null));
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl
.evaluatePredicate(stat, predicateUpperBoundLessThanEquals, null));
stat.reset();
utf8F = longStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
utf8P = shortStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
/* Test for a case Equals where only lowerbound is set */
final PredicateLeaf predicateLowerBoundEquals = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.STRING, "x",
lowerboundString, null);
/* Test for a case LESS_THAN where only lowerbound is set */
final PredicateLeaf predicateLowerBoundLessThan = createPredicateLeaf(
PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.STRING, "x",
lowerboundString, null);
/* Test for a case LESS_THAN_EQUALS where only lowerbound is set */
final PredicateLeaf predicateLowerBoundLessThanEquals = createPredicateLeaf(
PredicateLeaf.Operator.LESS_THAN_EQUALS, PredicateLeaf.Type.STRING, "x",
lowerboundString, null);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl
.evaluatePredicate(stat, predicateLowerBoundEquals, null));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl
.evaluatePredicate(stat, predicateLowerBoundLessThan, bf));
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl
.evaluatePredicate(stat, predicateLowerBoundLessThanEquals, null));
}
/**
* A case where the search values fall within the upperbound and lower bound
* range.
*
* @throws Exception
*/
@Test
public void testNormalCase() throws Exception {
int stringLength = 1100;
/* length of string in BF */
int bfStringLength = 50;
//int stringLength = 11;
byte[] utf8F;
byte[] utf8P;
final TypeDescription schema = TypeDescription.createString();
final ColumnStatisticsImpl stat = ColumnStatisticsImpl.create(schema);
BloomFilter bf = new BloomFilter(100);
// FFF... to PPP...
for (int i = 70; i <= 80; i++) {
final String inputString = StringUtils
.repeat(Character.toString((char) i), bfStringLength);
bf.addString(inputString);
}
final String longStringF = StringUtils
.repeat(Character.toString('F'), stringLength);
final String longStringP = StringUtils
.repeat(Character.toString('P'), stringLength);
final String predicateString = StringUtils
.repeat(Character.toString('I'), 50);
/* Test for a case where only upperbound is set */
final PredicateLeaf predicateEquals = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.STRING, "x",
predicateString, null);
/* trigger lower bound */
utf8F = longStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
/* trigger upper bound */
utf8P = longStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
assertEquals(SearchArgument.TruthValue.YES_NO,
RecordReaderImpl.evaluatePredicate(stat, predicateEquals, bf));
}
/**
* Test for IN search arg when upper and lower bounds are set.
*
* @throws Exception
*/
@Test
public void testIN() throws Exception {
int stringLength = 1100;
byte[] utf8F;
byte[] utf8P;
final TypeDescription schema = TypeDescription.createString();
final ColumnStatisticsImpl stat = ColumnStatisticsImpl.create(schema);
final BloomFilter bf = new BloomFilter(100);
// FFF... to PPP...
for (int i = 70; i <= 80; i++) {
final String inputString = StringUtils
.repeat(Character.toString((char) i), stringLength);
bf.addString(inputString);
}
final String longStringF = StringUtils
.repeat(Character.toString('F'), stringLength);
final String longStringP = StringUtils
.repeat(Character.toString('P'), stringLength);
/* String that matches the upperbound value after truncation */
final String upperboundString =
StringUtils.repeat(Character.toString('P'), 1023) + "Q";
/* String that matches the lower value after truncation */
final String lowerboundString = StringUtils
.repeat(Character.toString('F'), 1024);
final String shortStringF = StringUtils.repeat(Character.toString('F'), 50);
final String shortStringP =
StringUtils.repeat(Character.toString('P'), 50) + "Q";
final List<Object> args = new ArrayList<Object>();
args.add(upperboundString);
/* set upper bound */
utf8F = shortStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
utf8P = longStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
/* Test for a case IN where only upper bound is set and test literal is equal to upperbound */
final PredicateLeaf predicateUpperBoundSet = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.IN,
PredicateLeaf.Type.STRING, "x", null, args);
assertEquals(SearchArgument.TruthValue.NO,
RecordReaderImpl.evaluatePredicate(stat, predicateUpperBoundSet, null));
/* Test for lower bound set only */
args.clear();
args.add(lowerboundString);
stat.reset();
/* set lower bound */
utf8F = longStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
utf8P = shortStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
/* Test for a case IN where only lower bound is set and the test literal is lowerbound string */
final PredicateLeaf predicateLowerBoundSet = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.IN,
PredicateLeaf.Type.STRING, "x", null, args);
assertEquals(SearchArgument.TruthValue.NO,
RecordReaderImpl.evaluatePredicate(stat, predicateLowerBoundSet, null));
/* Test the case were both upper and lower bounds are set */
args.clear();
args.add(lowerboundString);
args.add(upperboundString);
stat.reset();
/* set upper and lower bound */
utf8F = longStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
utf8P = longStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
final PredicateLeaf predicateUpperLowerBoundSet = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.IN,
PredicateLeaf.Type.STRING, "x", null, args);
assertEquals(SearchArgument.TruthValue.NO, RecordReaderImpl
.evaluatePredicate(stat, predicateUpperLowerBoundSet, null));
/* test the boundary condition */
args.clear();
args.add(longStringF);
args.add(longStringP);
stat.reset();
/* set upper and lower bound */
utf8F = longStringF.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8F, 0, utf8F.length, 1);
utf8P = longStringP.getBytes(StandardCharsets.UTF_8);
stat.increment();
stat.updateString(utf8P, 0, utf8P.length, 1);
final PredicateLeaf predicateUpperLowerBoundSetBoundary = TestRecordReaderImpl
.createPredicateLeaf(PredicateLeaf.Operator.IN,
PredicateLeaf.Type.STRING, "x", null, args);
assertEquals(SearchArgument.TruthValue.YES_NO, RecordReaderImpl
.evaluatePredicate(stat, predicateUpperLowerBoundSetBoundary, null));
}
/**
* Test for LESS_THAN_EQUALS search arg when upper and lower bounds are the same.
*
* @throws Exception
*/
@Test
public void testLessThanEquals() {
final TypeDescription schema = TypeDescription.createInt();
final ColumnStatisticsImpl stat = ColumnStatisticsImpl.create(schema);
stat.increment();
stat.updateInteger(1, 100);
stat.updateInteger(3, 100);
IntegerColumnStatistics typed = (IntegerColumnStatistics) stat;
assertEquals(1, typed.getMinimum());
assertEquals(3, typed.getMaximum());
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThanEquals("c", PredicateLeaf.Type.LONG, 3L)
.end()
.build();
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl
.evaluatePredicate(stat, sArg.getLeaves().get(0), null));
// Corner case where MIN == MAX == 3
final ColumnStatisticsImpl newStat = ColumnStatisticsImpl.create(schema);
newStat.increment();
newStat.updateInteger(3, 100);
typed = (IntegerColumnStatistics) newStat;
assertEquals(3, typed.getMinimum());
assertEquals(3, typed.getMaximum());
sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.lessThanEquals("c", PredicateLeaf.Type.LONG, 3L)
.end()
.build();
assertEquals(SearchArgument.TruthValue.YES, RecordReaderImpl
.evaluatePredicate(newStat, sArg.getLeaves().get(0), null));
}
}
| 13,485 | 35.058824 | 100 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestReaderImpl.java | /*
* Copyright 2016 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.Progressable;
import org.apache.orc.FileFormatException;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.OrcUtils;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TestVectorOrcFile;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestReaderImpl {
private Path workDir = new Path(System.getProperty("example.dir",
"../../examples/"));
private final Path path = new Path("test-file.orc");
private FSDataInputStream in;
private int psLen;
private ByteBuffer buffer;
@BeforeEach
public void setup() {
in = null;
}
@Test
public void testEnsureOrcFooterSmallTextFile() throws IOException {
prepareTestCase("1".getBytes(StandardCharsets.UTF_8));
assertThrows(FileFormatException.class, () -> {
ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
});
}
@Test
public void testEnsureOrcFooterLargeTextFile() throws IOException {
prepareTestCase("This is Some Text File".getBytes(StandardCharsets.UTF_8));
assertThrows(FileFormatException.class, () -> {
ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
});
}
@Test
public void testEnsureOrcFooter011ORCFile() throws IOException {
prepareTestCase(composeContent(OrcFile.MAGIC, "FOOTER"));
ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
}
@Test
public void testEnsureOrcFooterCorrectORCFooter() throws IOException {
prepareTestCase(composeContent("", OrcFile.MAGIC));
ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
}
@Test
public void testOptionSafety() throws IOException {
Reader.Options options = new Reader.Options();
String expected = options.toString();
Configuration conf = new Configuration();
Path path = new Path(TestVectorOrcFile.getFileFromClasspath
("orc-file-11-format.orc"));
try (Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
RecordReader rows = reader.rows(options)) {
VectorizedRowBatch batch = reader.getSchema().createRowBatchV2();
while (rows.nextBatch(batch)) {
assertTrue(batch.size > 0);
}
}
assertEquals(expected, options.toString());
}
private void prepareTestCase(byte[] bytes) throws IOException {
buffer = ByteBuffer.wrap(bytes);
psLen = buffer.get(bytes.length - 1) & 0xff;
in = new FSDataInputStream(new SeekableByteArrayInputStream(bytes));
}
private byte[] composeContent(String headerStr, String footerStr) throws CharacterCodingException {
ByteBuffer header = Text.encode(headerStr);
ByteBuffer footer = Text.encode(footerStr);
int headerLen = header.remaining();
int footerLen = footer.remaining() + 1;
ByteBuffer buf = ByteBuffer.allocate(headerLen + footerLen);
buf.put(header);
buf.put(footer);
buf.put((byte) footerLen);
return buf.array();
}
private static final class SeekableByteArrayInputStream extends ByteArrayInputStream
implements Seekable, PositionedReadable {
public SeekableByteArrayInputStream(byte[] buf) {
super(buf);
}
@Override
public void seek(long pos) throws IOException {
this.reset();
this.skip(pos);
}
@Override
public long getPos() throws IOException {
return pos;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
long oldPos = getPos();
int nread = -1;
try {
seek(position);
nread = read(buffer, offset, length);
} finally {
seek(oldPos);
}
return nread;
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
int nread = 0;
while (nread < length) {
int nbytes = read(position + nread, buffer, offset + nread, length - nread);
if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully.");
}
nread += nbytes;
}
}
@Override
public void readFully(long position, byte[] buffer)
throws IOException {
readFully(position, buffer, 0, buffer.length);
}
}
static byte[] byteArray(int... input) {
byte[] result = new byte[input.length];
for(int i=0; i < result.length; ++i) {
result[i] = (byte) input[i];
}
return result;
}
static class MockInputStream extends FSDataInputStream {
MockFileSystem fs;
// A single row ORC file
static final byte[] SIMPLE_ORC = byteArray(
0x4f, 0x52, 0x43, 0x42, 0x00, 0x80, 0x0a, 0x06, 0x08, 0x01, 0x10, 0x01, 0x18, 0x03, 0x12, 0x02,
0x08, 0x00, 0x12, 0x02, 0x08, 0x02, 0x0a, 0x12, 0x0a, 0x04, 0x08, 0x00, 0x50, 0x00, 0x0a, 0x0a,
0x08, 0x00, 0x12, 0x02, 0x18, 0x00, 0x50, 0x00, 0x58, 0x03, 0x08, 0x03, 0x10, 0x16, 0x1a, 0x0a,
0x08, 0x03, 0x10, 0x00, 0x18, 0x03, 0x20, 0x10, 0x28, 0x01, 0x22, 0x08, 0x08, 0x0c, 0x12, 0x01,
0x01, 0x1a, 0x01, 0x78, 0x22, 0x02, 0x08, 0x03, 0x30, 0x01, 0x3a, 0x04, 0x08, 0x00, 0x50, 0x00,
0x3a, 0x0a, 0x08, 0x00, 0x12, 0x02, 0x18, 0x00, 0x50, 0x00, 0x58, 0x03, 0x40, 0x00, 0x48, 0x00,
0x08, 0x36, 0x10, 0x00, 0x22, 0x02, 0x00, 0x0c, 0x28, 0x14, 0x30, 0x07, 0x82, 0xf4, 0x03, 0x03,
0x4f, 0x52, 0x43, 0x13);
public MockInputStream(MockFileSystem fs) throws IOException {
super(new SeekableByteArrayInputStream(SIMPLE_ORC));
this.fs = fs;
}
public void close() {
fs.removeStream(this);
}
}
static class MockFileSystem extends FileSystem {
final List<MockInputStream> streams = new ArrayList<>();
public MockFileSystem(Configuration conf) {
setConf(conf);
}
@Override
public URI getUri() {
try {
return new URI("mock:///");
} catch (URISyntaxException e) {
throw new IllegalArgumentException("bad uri", e);
}
}
@Override
public FSDataInputStream open(Path path, int i) throws IOException {
MockInputStream result = new MockInputStream(this);
streams.add(result);
return result;
}
void removeStream(MockInputStream stream) {
streams.remove(stream);
}
int streamCount() {
return streams.size();
}
@Override
public FSDataOutputStream create(Path path, FsPermission fsPermission,
boolean b, int i, short i1, long l,
Progressable progressable) throws IOException {
throw new IOException("Can't create");
}
@Override
public FSDataOutputStream append(Path path, int i,
Progressable progressable) throws IOException {
throw new IOException("Can't append");
}
@Override
public boolean rename(Path path, Path path1) {
return false;
}
@Override
public boolean delete(Path path, boolean b) {
return false;
}
@Override
public FileStatus[] listStatus(Path path) {
return new FileStatus[0];
}
@Override
public void setWorkingDirectory(Path path) {
// ignore
}
@Override
public Path getWorkingDirectory() {
return new Path("/");
}
@Override
public boolean mkdirs(Path path, FsPermission fsPermission) {
return false;
}
@Override
public FileStatus getFileStatus(Path path) {
return new FileStatus(MockInputStream.SIMPLE_ORC.length, false, 1, 4096,
0, path);
}
}
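  // The stream-counting tests below rely on MockFileSystem handing out a fresh
  // MockInputStream for every open() and dropping it on close(), so streamCount()
  // tracks how many handles the reader and its record readers still hold.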
@Test
public void testClosingRowsFirst() throws Exception {
Configuration conf = new Configuration();
MockFileSystem fs = new MockFileSystem(conf);
Reader reader = OrcFile.createReader(new Path("/foo"),
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(1, fs.streamCount());
RecordReader rows = reader.rows();
assertEquals(1, fs.streamCount());
RecordReader rows2 = reader.rows();
assertEquals(2, fs.streamCount());
rows.close();
assertEquals(1, fs.streamCount());
rows2.close();
assertEquals(0, fs.streamCount());
reader.close();
assertEquals(0, fs.streamCount());
}
@Test
public void testClosingReaderFirst() throws Exception {
Configuration conf = new Configuration();
MockFileSystem fs = new MockFileSystem(conf);
Reader reader = OrcFile.createReader(new Path("/foo"),
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(1, fs.streamCount());
RecordReader rows = reader.rows();
assertEquals(1, fs.streamCount());
reader.close();
assertEquals(1, fs.streamCount());
rows.close();
assertEquals(0, fs.streamCount());
}
@Test
public void testClosingMultiple() throws Exception {
Configuration conf = new Configuration();
MockFileSystem fs = new MockFileSystem(conf);
Reader reader = OrcFile.createReader(new Path("/foo"),
OrcFile.readerOptions(conf).filesystem(fs));
Reader reader2 = OrcFile.createReader(new Path("/bar"),
OrcFile.readerOptions(conf).filesystem(fs));
assertEquals(2, fs.streamCount());
reader.close();
assertEquals(1, fs.streamCount());
reader2.close();
assertEquals(0, fs.streamCount());
}
@Test
public void testOrcTailStripeStats() throws Exception {
Configuration conf = new Configuration();
Path path = new Path(workDir, "orc_split_elim_new.orc");
FileSystem fs = path.getFileSystem(conf);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
OrcTail tail = reader.extractFileTail(fs, path, Long.MAX_VALUE);
List<StripeStatistics> stats = tail.getStripeStatistics();
assertEquals(1, stats.size());
OrcProto.TimestampStatistics tsStats =
stats.get(0).getColumn(5).getTimestampStatistics();
assertEquals(-28800000, tsStats.getMinimumUtc());
assertEquals(-28550000, tsStats.getMaximumUtc());
// Test Tail and Stats extraction from ByteBuffer
ByteBuffer tailBuffer = tail.getSerializedTail();
OrcTail extractedTail = ReaderImpl.extractFileTail(tailBuffer);
assertEquals(tail.getTailBuffer(), extractedTail.getTailBuffer());
assertEquals(tail.getTailBuffer().getData(), extractedTail.getTailBuffer().getData());
assertEquals(tail.getTailBuffer().getOffset(), extractedTail.getTailBuffer().getOffset());
assertEquals(tail.getTailBuffer().getEnd(), extractedTail.getTailBuffer().getEnd());
assertEquals(tail.getMetadataOffset(), extractedTail.getMetadataOffset());
assertEquals(tail.getMetadataSize(), extractedTail.getMetadataSize());
Reader dummyReader = new ReaderImpl(null,
OrcFile.readerOptions(OrcFile.readerOptions(conf).getConfiguration())
.orcTail(extractedTail));
List<StripeStatistics> tailBufferStats = dummyReader.getVariantStripeStatistics(null);
assertEquals(stats.size(), tailBufferStats.size());
OrcProto.TimestampStatistics bufferTsStats = tailBufferStats.get(0).getColumn(5).getTimestampStatistics();
assertEquals(tsStats.getMinimumUtc(), bufferTsStats.getMinimumUtc());
assertEquals(tsStats.getMaximumUtc(), bufferTsStats.getMaximumUtc());
}
}
@Test
public void testGetRawDataSizeFromColIndices() throws Exception {
Configuration conf = new Configuration();
Path path = new Path(workDir, "orc_split_elim_new.orc");
FileSystem fs = path.getFileSystem(conf);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
TypeDescription schema = reader.getSchema();
List<OrcProto.Type> types = OrcUtils.getOrcTypes(schema);
boolean[] include = new boolean[schema.getMaximumId() + 1];
List<Integer> list = new ArrayList<Integer>();
for (int i = 0; i < include.length; i++) {
include[i] = true;
list.add(i);
}
List<OrcProto.ColumnStatistics> stats = reader.getFileTail().getFooter().getStatisticsList();
assertEquals(
ReaderImpl.getRawDataSizeFromColIndices(include, schema, stats),
ReaderImpl.getRawDataSizeFromColIndices(list, types, stats));
}
}
  private void checkFileWithSargs(String fileName, String softwareVersion)
throws IOException {
Configuration conf = new Configuration();
Path path = new Path(workDir, fileName);
FileSystem fs = path.getFileSystem(conf);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
assertEquals(softwareVersion, reader.getSoftwareVersion());
Reader.Options opt = new Reader.Options();
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(conf);
builder.equals("id", PredicateLeaf.Type.LONG, 18000000000L);
opt.searchArgument(builder.build(), new String[]{"id"});
TypeDescription schema = reader.getSchema();
VectorizedRowBatch batch = schema.createRowBatch();
try (RecordReader rows = reader.rows(opt)) {
assertTrue(rows.nextBatch(batch), "No rows read out!");
assertEquals(5, batch.size);
assertFalse(rows.nextBatch(batch));
}
}
}
@Test
public void testSkipBadBloomFilters() throws IOException {
CheckFileWithSargs("bad_bloom_filter_1.6.11.orc", "ORC C++ 1.6.11");
CheckFileWithSargs("bad_bloom_filter_1.6.0.orc", "ORC C++ ");
}
@Test
public void testReadDecimalV2File() throws IOException {
Configuration conf = new Configuration();
Path path = new Path(workDir, "decimal64_v2_cplusplus.orc");
FileSystem fs = path.getFileSystem(conf);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
assertEquals("ORC C++ 1.8.0-SNAPSHOT", reader.getSoftwareVersion());
OrcTail tail = reader.extractFileTail(fs, path, Long.MAX_VALUE);
List<StripeStatistics> stats = tail.getStripeStatistics();
assertEquals(1, stats.size());
try (RecordReader rows = reader.rows()) {
TypeDescription schema = reader.getSchema();
assertEquals("struct<a:bigint,b:decimal(10,2),c:decimal(2,2),d:decimal(2,2),e:decimal(2,2)>",
schema.toString());
VectorizedRowBatch batch = schema.createRowBatchV2();
assertTrue(rows.nextBatch(batch), "No rows read out!");
assertEquals(10, batch.size);
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
Decimal64ColumnVector col2 = (Decimal64ColumnVector) batch.cols[1];
Decimal64ColumnVector col3 = (Decimal64ColumnVector) batch.cols[2];
Decimal64ColumnVector col4 = (Decimal64ColumnVector) batch.cols[3];
Decimal64ColumnVector col5 = (Decimal64ColumnVector) batch.cols[4];
for (int i = 0; i < batch.size; ++i) {
assertEquals(17292380420L + i, col1.vector[i]);
if (i == 0) {
long scaleNum = (long) Math.pow(10, col2.scale);
assertEquals(164.16 * scaleNum, col2.vector[i]);
} else {
assertEquals(col2.vector[i - 1] * 2, col2.vector[i]);
}
assertEquals(col3.vector[i] + col4.vector[i], col5.vector[i]);
}
assertFalse(rows.nextBatch(batch));
}
}
}
@Test
public void testExtractFileTailIndexOutOfBoundsException() throws Exception {
Configuration conf = new Configuration();
Path path = new Path(workDir, "demo-11-none.orc");
FileSystem fs = path.getFileSystem(conf);
FileStatus fileStatus = fs.getFileStatus(path);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
OrcTail tail = reader.extractFileTail(fs, path, Long.MAX_VALUE);
ByteBuffer tailBuffer = tail.getSerializedTail();
OrcTail extractedTail = ReaderImpl.extractFileTail(tailBuffer, fileStatus.getLen(), fileStatus.getModificationTime());
assertEquals(tail.getFileLength(), extractedTail.getFileLength());
assertEquals(tail.getFooter().getMetadataList(), extractedTail.getFooter().getMetadataList());
assertEquals(tail.getFooter().getStripesList(), extractedTail.getFooter().getStripesList());
}
}
@Test
public void testWithoutCompressionBlockSize() throws IOException {
Configuration conf = new Configuration();
Path path = new Path(workDir, "TestOrcFile.testWithoutCompressionBlockSize.orc");
FileSystem fs = path.getFileSystem(conf);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
try (RecordReader rows = reader.rows()) {
TypeDescription schema = reader.getSchema();
assertEquals("bigint", schema.toString());
VectorizedRowBatch batch = schema.createRowBatchV2();
assertTrue(rows.nextBatch(batch), "No rows read out!");
assertEquals(100, batch.size);
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
for (int i = 0; i < batch.size; ++i) {
assertEquals(1L + i, col1.vector[i]);
}
assertFalse(rows.nextBatch(batch));
}
}
}
@Test
public void testSargSkipPickupGroupWithoutIndex() throws IOException {
Configuration conf = new Configuration();
    // We test with ORC files written by both the C++ and the Java API. The previous Java
    // reader could not handle orc.row.index.stride > 0 combined with orc.create.index=false,
    // but it can now skip those row groups.
Path[] paths = new Path[] {
        // Written by the C++ API with schema struct<x:int,y:string>, orc.row.index.stride=0
new Path(workDir, "TestOrcFile.testSargSkipPickupGroupWithoutIndexCPlusPlus.orc"),
        // Written by the old Java API with schema struct<x:int,y:string>, orc.row.index.stride=1000, orc.create.index=false
new Path(workDir, "TestOrcFile.testSargSkipPickupGroupWithoutIndexJava.orc"),
};
for (Path path: paths) {
FileSystem fs = path.getFileSystem(conf);
try (ReaderImpl reader = (ReaderImpl) OrcFile.createReader(path,
OrcFile.readerOptions(conf).filesystem(fs))) {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("x", PredicateLeaf.Type.LONG, 100L)
.end().build();
try (RecordReader rows = reader.rows(reader.options().searchArgument(sarg, new String[]{"x"}))) {
TypeDescription schema = reader.getSchema();
assertEquals("struct<x:int,y:string>", schema.toString());
VectorizedRowBatch batch = schema.createRowBatchV2();
assertTrue(rows.nextBatch(batch), "No rows read out!");
assertEquals(1024, batch.size);
LongColumnVector col1 = (LongColumnVector) batch.cols[0];
for (int i = 0; i < batch.size; ++i) {
assertEquals(i, col1.vector[i]);
}
assertTrue(rows.nextBatch(batch));
}
}
}
}
}
| 21,256 | 36.424296 | 124 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestRecordReaderImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hive.common.io.DiskRangeList;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import org.apache.orc.DataReader;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TestVectorOrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.impl.RecordReaderImpl.Location;
import org.apache.orc.impl.RecordReaderImpl.SargApplier;
import org.apache.orc.impl.reader.ReaderEncryption;
import org.apache.orc.impl.reader.StripePlanner;
import org.apache.orc.impl.reader.tree.TypeReader;
import org.apache.orc.impl.writer.StreamOptions;
import org.apache.orc.util.BloomFilter;
import org.apache.orc.util.BloomFilterIO;
import org.apache.orc.util.BloomFilterUtf8;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.sql.Timestamp;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.chrono.ChronoLocalDate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TimeZone;
import static org.apache.orc.impl.RecordReaderUtils.MAX_BYTE_WIDTH;
import static org.apache.orc.impl.RecordReaderUtils.MAX_VALUES_LENGTH;
import static org.apache.orc.OrcFile.CURRENT_WRITER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TestRecordReaderImpl {
  // This is a workaround until we update storage-api to allow ChronoLocalDate in
// predicates.
static Date toDate(ChronoLocalDate date) {
return new Date(date.atTime(LocalTime.MIDNIGHT).atZone(ZoneId.systemDefault())
.toEpochSecond() * 1000);
}
@Test
public void testFindColumn() throws Exception {
Configuration conf = new Configuration();
TypeDescription file = TypeDescription.fromString("struct<a:int,c:string,e:int>");
TypeDescription reader = TypeDescription.fromString("struct<a:int,b:double,c:string,d:double,e:bigint>");
SchemaEvolution evo = new SchemaEvolution(file, reader, new Reader.Options(conf));
assertEquals(1, RecordReaderImpl.findColumns(evo, "a"));
assertEquals(-1, RecordReaderImpl.findColumns(evo, "b"));
assertEquals(2, RecordReaderImpl.findColumns(evo, "c"));
assertEquals(-1, RecordReaderImpl.findColumns(evo, "d"));
assertEquals(3, RecordReaderImpl.findColumns(evo, "e"));
}
@Test
public void testFindColumnCaseInsensitively() throws Exception {
Configuration conf = new Configuration();
TypeDescription file = TypeDescription.fromString("struct<A:int>");
TypeDescription reader = TypeDescription.fromString("struct<a:int>");
conf.setBoolean("orc.schema.evolution.case.sensitive", false);
SchemaEvolution evo = new SchemaEvolution(file, reader, new Reader.Options(conf));
assertEquals(1, RecordReaderImpl.findColumns(evo, "A"));
}
@Test
public void testForcePositionalEvolution() throws Exception {
Configuration conf = new Configuration();
Path oldFilePath = new Path(TestVectorOrcFile.getFileFromClasspath("orc-file-11-format.orc"));
Reader reader = OrcFile.createReader(oldFilePath,
OrcFile.readerOptions(conf).filesystem(FileSystem.getLocal(conf)));
TypeDescription fileSchema =
TypeDescription.fromString("struct<col0:boolean,col1:tinyint,col2:smallint,"
+ "col3:int,col4:bigint,col5:float,col6:double,col7:"
+ "binary,col8:string,col9:struct<list:array<struct<int1:int,"
+ "string1:string>>>,col10:array<struct<int1:int,string1:string>>,"
+ "col11:map<string,struct<int1:int,string1:string>>,col12:timestamp,"
+ "col13:decimal(38,10)>");
SchemaEvolution evo = new SchemaEvolution(fileSchema, reader.getSchema(),
new Reader.Options(conf).forcePositionalEvolution(true));
assertEquals(4, RecordReaderImpl.findColumns(evo, "int1"));
evo = new SchemaEvolution(fileSchema, reader.getSchema(),
new Reader.Options(conf).forcePositionalEvolution(false));
assertEquals(-1, RecordReaderImpl.findColumns(evo, "int1"));
TypeDescription acidSchema = SchemaEvolution.createEventSchema(fileSchema);
SchemaEvolution evoAcid =
new SchemaEvolution(acidSchema, reader.getSchema(),
new Reader.Options(conf).forcePositionalEvolution(true));
// ahead by 6 for 1 struct + 5 for row-id
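    // SchemaEvolution.createEventSchema wraps the file schema in the ACID event layout
    // (operation, originalTransaction, bucket, rowId, currentTransaction, row:<file schema>),
    // so every original column id shifts by those 5 metadata columns plus the wrapping struct.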
assertEquals(6+4, RecordReaderImpl.findColumns(evoAcid, "int1"));
evoAcid =
new SchemaEvolution(acidSchema, reader.getSchema(),
new Reader.Options(conf).forcePositionalEvolution(false));
assertEquals(-1, RecordReaderImpl.findColumns(evoAcid, "int1"));
}
/**
* Create a predicate leaf. This is used by another test.
*/
public static PredicateLeaf createPredicateLeaf(PredicateLeaf.Operator operator,
PredicateLeaf.Type type,
String columnName,
Object literal,
List<Object> literalList) {
if (literal instanceof ChronoLocalDate) {
literal = toDate((ChronoLocalDate) literal);
}
return new SearchArgumentImpl.PredicateLeafImpl(operator, type, columnName,
literal, literalList);
}
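  /**
   * An in-memory stand-in for an HDFS input stream: it serves bytes from a byte[] while
   * implementing the PositionedReadable and Seekable contracts that FSDataInputStream needs,
   * so tests can hand the reader a fabricated file tail.
   */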
static class BufferInStream
extends InputStream implements PositionedReadable, Seekable {
private final byte[] buffer;
private final int length;
private int position = 0;
BufferInStream(byte[] bytes, int length) {
this.buffer = bytes;
this.length = length;
}
@Override
public int read() {
if (position < length) {
return buffer[position++];
}
return -1;
}
@Override
public int read(byte[] bytes, int offset, int length) {
int lengthToRead = Math.min(length, this.length - this.position);
if (lengthToRead >= 0) {
for(int i=0; i < lengthToRead; ++i) {
bytes[offset + i] = buffer[position++];
}
return lengthToRead;
} else {
return -1;
}
}
@Override
public int read(long position, byte[] bytes, int offset, int length) {
this.position = (int) position;
return read(bytes, offset, length);
}
@Override
public void readFully(long position, byte[] bytes, int offset,
int length) throws IOException {
this.position = (int) position;
while (length > 0) {
int result = read(bytes, offset, length);
offset += result;
length -= result;
if (result < 0) {
throw new IOException("Read past end of buffer at " + offset);
}
}
}
@Override
public void readFully(long position, byte[] bytes) throws IOException {
readFully(position, bytes, 0, bytes.length);
}
@Override
public void seek(long position) {
this.position = (int) position;
}
@Override
public long getPos() {
return position;
}
@Override
public boolean seekToNewSource(long position) throws IOException {
this.position = (int) position;
return false;
}
}
@Test
public void testMaxLengthToReader() throws Exception {
Configuration conf = new Configuration();
OrcProto.Type rowType = OrcProto.Type.newBuilder()
.setKind(OrcProto.Type.Kind.STRUCT).build();
OrcProto.Footer footer = OrcProto.Footer.newBuilder()
.setHeaderLength(0).setContentLength(0).setNumberOfRows(0)
.setRowIndexStride(0).addTypes(rowType).build();
OrcProto.PostScript ps = OrcProto.PostScript.newBuilder()
.setCompression(OrcProto.CompressionKind.NONE)
.setFooterLength(footer.getSerializedSize())
.setMagic("ORC").addVersion(0).addVersion(11).build();
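    // Assemble a minimal ORC file tail in memory: the serialized footer, then the postscript,
    // and finally one byte holding the postscript length (the last byte of every ORC file).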
DataOutputBuffer buffer = new DataOutputBuffer();
footer.writeTo(buffer);
ps.writeTo(buffer);
buffer.write(ps.getSerializedSize());
FileSystem fs = mock(FileSystem.class);
FSDataInputStream file =
new FSDataInputStream(new BufferInStream(buffer.getData(),
buffer.getLength()));
Path p = new Path("/dir/file.orc");
when(fs.open(eq(p))).thenReturn(file);
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf);
options.filesystem(fs);
options.maxLength(buffer.getLength());
when(fs.getFileStatus(eq(p)))
.thenReturn(new FileStatus(10, false, 3, 3000, 0, p));
Reader reader = OrcFile.createReader(p, options);
assertEquals(0, reader.getNumberOfRows());
}
static class StubPredicate implements PredicateLeaf {
final PredicateLeaf.Type type;
StubPredicate(PredicateLeaf.Type type) {
this.type = type;
}
@Override
public Operator getOperator() {
return null;
}
@Override
public Type getType() {
return type;
}
@Override
public String getColumnName() {
return null;
}
@Override
public Object getLiteral() {
return null;
}
@Override
public int getId() { return -1; }
@Override
public List<Object> getLiteralList() {
return null;
}
}
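  /**
   * Wrap (min, max) in a ValueRange for the given predicate type and report where the probe
   * value falls relative to it: BEFORE, MIN, MIDDLE, MAX or AFTER.
   */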
static Location compareToRange(PredicateLeaf.Type type, Comparable point,
Comparable min, Comparable max) {
PredicateLeaf predicate = new StubPredicate(type);
return new RecordReaderImpl.ValueRange(predicate, min, max, true)
.compare(point);
}
@Test
public void testCompareToRangeInt() {
assertEquals(Location.BEFORE,
compareToRange(PredicateLeaf.Type.LONG, 19L, 20L, 40L));
assertEquals(Location.AFTER,
compareToRange(PredicateLeaf.Type.LONG, 41L, 20L, 40L));
assertEquals(Location.MIN,
compareToRange(PredicateLeaf.Type.LONG, 20L, 20L, 40L));
assertEquals(Location.MIDDLE,
compareToRange(PredicateLeaf.Type.LONG, 21L, 20L, 40L));
assertEquals(Location.MAX,
compareToRange(PredicateLeaf.Type.LONG, 40L, 20L, 40L));
assertEquals(Location.BEFORE,
compareToRange(PredicateLeaf.Type.LONG, 0L, 1L, 1L));
assertEquals(Location.MIN,
compareToRange(PredicateLeaf.Type.LONG, 1L, 1L, 1L));
assertEquals(Location.AFTER,
compareToRange(PredicateLeaf.Type.LONG, 2L, 1L, 1L));
}
@Test
public void testCompareToRangeString() {
assertEquals(Location.BEFORE,
compareToRange(PredicateLeaf.Type.STRING, "a", "b", "c"));
assertEquals(Location.AFTER,
compareToRange(PredicateLeaf.Type.STRING, "d", "b", "c"));
assertEquals(Location.MIN,
compareToRange(PredicateLeaf.Type.STRING, "b", "b", "c"));
assertEquals(Location.MIDDLE,
compareToRange(PredicateLeaf.Type.STRING, "bb", "b", "c"));
assertEquals(Location.MAX,
compareToRange(PredicateLeaf.Type.STRING, "c", "b", "c"));
assertEquals(Location.BEFORE,
compareToRange(PredicateLeaf.Type.STRING, "a", "b", "b"));
assertEquals(Location.MIN,
compareToRange(PredicateLeaf.Type.STRING, "b", "b", "b"));
assertEquals(Location.AFTER,
compareToRange(PredicateLeaf.Type.STRING, "c", "b", "b"));
}
@Test
public void testCompareToCharNeedConvert() {
assertEquals(Location.BEFORE,
compareToRange(PredicateLeaf.Type.STRING, "apple", "hello", "world"));
assertEquals(Location.AFTER,
compareToRange(PredicateLeaf.Type.STRING, "zombie", "hello", "world"));
assertEquals(Location.MIN,
compareToRange(PredicateLeaf.Type.STRING, "hello", "hello", "world"));
assertEquals(Location.MIDDLE,
compareToRange(PredicateLeaf.Type.STRING, "pilot", "hello", "world"));
assertEquals(Location.MAX,
compareToRange(PredicateLeaf.Type.STRING, "world", "hello", "world"));
assertEquals(Location.BEFORE,
compareToRange(PredicateLeaf.Type.STRING, "apple", "hello", "hello"));
assertEquals(Location.MIN,
compareToRange(PredicateLeaf.Type.STRING, "hello", "hello", "hello"));
assertEquals(Location.AFTER,
compareToRange(PredicateLeaf.Type.STRING, "zombie", "hello", "hello"));
}
@Test
public void testGetMin() throws Exception {
assertEquals(10L, RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null, createIntStats(10L, 100L)),
new StubPredicate(PredicateLeaf.Type.LONG), true).lower);
assertEquals(10.0d, RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setDoubleStatistics(OrcProto.DoubleStatistics.newBuilder()
.setMinimum(10.0d).setMaximum(100.0d).build()).build()),
new StubPredicate(PredicateLeaf.Type.FLOAT), true).lower);
assertNull(RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setStringStatistics(OrcProto.StringStatistics.newBuilder().build())
.build()), new StubPredicate(PredicateLeaf.Type.STRING), true).lower);
assertEquals("a", RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setStringStatistics(OrcProto.StringStatistics.newBuilder()
.setMinimum("a").setMaximum("b").build()).build()),
new StubPredicate(PredicateLeaf.Type.STRING), true).lower);
assertEquals("hello", RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null, createStringStats("hello", "world")),
new StubPredicate(PredicateLeaf.Type.STRING), true).lower);
assertEquals(new HiveDecimalWritable("111.1"), RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
createDecimalStats("111.1", "112.1")),
new StubPredicate(PredicateLeaf.Type.DECIMAL), true).lower);
}
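  // The create*Stats helpers below build OrcProto.ColumnStatistics protos directly so the
  // predicate-evaluation tests can exercise RecordReaderImpl without writing real files.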
private static OrcProto.ColumnStatistics createIntStats(Long min,
Long max) {
OrcProto.ColumnStatistics.Builder result =
OrcProto.ColumnStatistics.newBuilder();
OrcProto.IntegerStatistics.Builder intStats =
OrcProto.IntegerStatistics.newBuilder();
if (min != null) {
intStats.setMinimum(min);
result.setNumberOfValues(1);
}
if (max != null) {
intStats.setMaximum(max);
}
return result.setIntStatistics(intStats.build()).build();
}
private static OrcProto.ColumnStatistics createBooleanStats(int n, int trueCount) {
OrcProto.BucketStatistics.Builder boolStats = OrcProto.BucketStatistics.newBuilder();
boolStats.addCount(trueCount);
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(n).setBucketStatistics(
boolStats.build()).build();
}
private static OrcProto.ColumnStatistics createIntStats(int min, int max) {
OrcProto.IntegerStatistics.Builder intStats = OrcProto.IntegerStatistics.newBuilder();
intStats.setMinimum(min);
intStats.setMaximum(max);
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setIntStatistics(intStats.build()).build();
}
private static OrcProto.ColumnStatistics createDoubleStats(double min, double max) {
OrcProto.DoubleStatistics.Builder dblStats = OrcProto.DoubleStatistics.newBuilder();
dblStats.setMinimum(min);
dblStats.setMaximum(max);
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setDoubleStatistics(dblStats.build()).build();
}
  // FIXME: near-duplicate of createStringStats(String, String) below; this overload only sets hasNull explicitly
private static OrcProto.ColumnStatistics createStringStats(String min, String max,
boolean hasNull) {
OrcProto.StringStatistics.Builder strStats = OrcProto.StringStatistics.newBuilder();
strStats.setMinimum(min);
strStats.setMaximum(max);
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setStringStatistics(strStats.build())
.setHasNull(hasNull).build();
}
private static OrcProto.ColumnStatistics createStringStats(String min, String max) {
OrcProto.StringStatistics.Builder strStats = OrcProto.StringStatistics.newBuilder();
strStats.setMinimum(min);
strStats.setMaximum(max);
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setStringStatistics(strStats.build())
.build();
}
private static OrcProto.ColumnStatistics createDateStats(int min, int max) {
OrcProto.DateStatistics.Builder dateStats = OrcProto.DateStatistics.newBuilder();
dateStats.setMinimum(min);
dateStats.setMaximum(max);
return OrcProto.ColumnStatistics.newBuilder().setNumberOfValues(1)
.setDateStatistics(dateStats.build()).build();
}
private static final TimeZone utcTz = TimeZone.getTimeZone("UTC");
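  // Timestamp statistics below are populated through their UTC fields, so expected values in
  // the timestamp tests are derived with getUtcTimestamp() rather than local wall-clock time.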
private static OrcProto.ColumnStatistics createTimestampStats(String min, String max) {
OrcProto.TimestampStatistics.Builder tsStats = OrcProto.TimestampStatistics.newBuilder();
tsStats.setMinimumUtc(getUtcTimestamp(min));
tsStats.setMaximumUtc(getUtcTimestamp(max));
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setTimestampStatistics(tsStats.build()).build();
}
private static OrcProto.ColumnStatistics createDecimalStats(String min, String max) {
return createDecimalStats(min, max, true);
}
private static OrcProto.ColumnStatistics createDecimalStats(String min, String max,
boolean hasNull) {
OrcProto.DecimalStatistics.Builder decStats = OrcProto.DecimalStatistics.newBuilder();
decStats.setMinimum(min);
decStats.setMaximum(max);
return OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setDecimalStatistics(decStats.build())
.setHasNull(hasNull).build();
}
@Test
public void testGetMax() {
assertEquals(100L, RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null, createIntStats(10L, 100L)),
new StubPredicate(PredicateLeaf.Type.LONG), true).upper);
assertEquals(100.0d, RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setDoubleStatistics(OrcProto.DoubleStatistics.newBuilder()
.setMinimum(10.0d).setMaximum(100.0d).build()).build()),
new StubPredicate(PredicateLeaf.Type.FLOAT), true).upper);
assertNull(RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setStringStatistics(OrcProto.StringStatistics.newBuilder().build())
.build()), new StubPredicate(PredicateLeaf.Type.STRING), true).upper);
assertEquals("b", RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
OrcProto.ColumnStatistics.newBuilder()
.setNumberOfValues(1)
.setStringStatistics(OrcProto.StringStatistics.newBuilder()
.setMinimum("a").setMaximum("b").build()).build()),
new StubPredicate(PredicateLeaf.Type.STRING), true).upper);
assertEquals("world", RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
createStringStats("hello", "world")),
new StubPredicate(PredicateLeaf.Type.STRING), true).upper);
assertEquals(new HiveDecimalWritable("112.1"), RecordReaderImpl.getValueRange(
ColumnStatisticsImpl.deserialize(null,
createDecimalStats("111.1", "112.1")),
new StubPredicate(PredicateLeaf.Type.DECIMAL), true).upper);
}
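  // The evaluate* helpers below wrap RecordReaderImpl.evaluatePredicateProto with a synthetic
  // column encoding and a fixed writer version, so each test only supplies the statistics
  // proto and the predicate leaf.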
static TruthValue evaluateBoolean(OrcProto.ColumnStatistics stats,
PredicateLeaf predicate) {
OrcProto.ColumnEncoding encoding =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT)
.build();
return RecordReaderImpl.evaluatePredicateProto(stats, predicate, null,
encoding, null,
OrcFile.WriterVersion.ORC_135, TypeDescription.createBoolean());
}
static TruthValue evaluateInteger(OrcProto.ColumnStatistics stats,
PredicateLeaf predicate) {
OrcProto.ColumnEncoding encoding =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2)
.build();
return RecordReaderImpl.evaluatePredicateProto(stats, predicate, null,
encoding, null,
OrcFile.WriterVersion.ORC_135, TypeDescription.createLong());
}
static TruthValue evaluateDouble(OrcProto.ColumnStatistics stats,
PredicateLeaf predicate) {
OrcProto.ColumnEncoding encoding =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT)
.build();
return RecordReaderImpl.evaluatePredicateProto(stats, predicate, null,
encoding, null,
OrcFile.WriterVersion.ORC_135, TypeDescription.createDouble());
}
static TruthValue evaluateTimestamp(OrcProto.ColumnStatistics stats,
PredicateLeaf predicate,
boolean include135,
boolean useUTCTimestamp) {
OrcProto.ColumnEncoding encoding =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT)
.build();
return RecordReaderImpl.evaluatePredicateProto(stats, predicate, null,
encoding, null,
include135 ? OrcFile.WriterVersion.ORC_135: OrcFile.WriterVersion.ORC_101,
TypeDescription.createTimestamp(), true, useUTCTimestamp);
}
static TruthValue evaluateTimestampWithWriterCalendar(OrcProto.ColumnStatistics stats,
PredicateLeaf predicate,
boolean include135,
boolean writerUsedProlepticGregorian,
boolean useUTCTimestamp) {
OrcProto.ColumnEncoding encoding =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT)
.build();
return RecordReaderImpl.evaluatePredicateProto(stats, predicate, null,
encoding, null,
include135 ? OrcFile.WriterVersion.ORC_135: OrcFile.WriterVersion.ORC_101,
TypeDescription.createTimestamp(), writerUsedProlepticGregorian, useUTCTimestamp);
}
static TruthValue evaluateTimestampBloomfilter(OrcProto.ColumnStatistics stats,
PredicateLeaf predicate,
BloomFilter bloom,
OrcFile.WriterVersion version,
boolean useUTCTimestamp) {
OrcProto.ColumnEncoding.Builder encoding =
OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT);
if (version.includes(OrcFile.WriterVersion.ORC_135)) {
encoding.setBloomEncoding(BloomFilterIO.Encoding.UTF8_UTC.getId());
}
OrcProto.Stream.Kind kind =
version.includes(OrcFile.WriterVersion.ORC_101) ?
OrcProto.Stream.Kind.BLOOM_FILTER_UTF8 :
OrcProto.Stream.Kind.BLOOM_FILTER;
OrcProto.BloomFilter.Builder builder =
OrcProto.BloomFilter.newBuilder();
BloomFilterIO.serialize(builder, bloom);
return RecordReaderImpl.evaluatePredicateProto(stats, predicate, kind,
encoding.build(), builder.build(), version,
TypeDescription.createTimestamp(), true, useUTCTimestamp);
}
@Test
public void testPredEvalWithBooleanStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.BOOLEAN, "x", true, null);
assertEquals(TruthValue.YES_NO,
evaluateBoolean(createBooleanStats(10, 10), pred));
assertEquals(TruthValue.NO,
evaluateBoolean(createBooleanStats(10, 0), pred));
pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.BOOLEAN, "x", true, null);
assertEquals(TruthValue.YES_NO,
evaluateBoolean(createBooleanStats(10, 10), pred));
assertEquals(TruthValue.NO,
evaluateBoolean(createBooleanStats(10, 0), pred));
pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.BOOLEAN, "x", false, null);
assertEquals(TruthValue.NO,
evaluateBoolean(createBooleanStats(10, 10), pred));
assertEquals(TruthValue.YES_NO,
evaluateBoolean(createBooleanStats(10, 0), pred));
}
@Test
public void testPredEvalWithIntStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG, "x", 15L, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.FLOAT, "x", 15.0, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(10, 100), pred));
    // Stats get converted to the column type. "15" is outside of "10" and "100"
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "15", null);
assertEquals(TruthValue.NO,
evaluateInteger(createIntStats(10, 100), pred));
    // Integer stats will not be converted to a date because of days/seconds/millis ambiguity
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", LocalDate.ofEpochDay(15), null);
try {
evaluateInteger(createIntStats(10, 100), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from Long to DATE", ia.getMessage());
}
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DECIMAL, "x", new HiveDecimalWritable("15"), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(15), null);
try {
evaluateInteger(createIntStats(10, 100), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from Long to TIMESTAMP", ia.getMessage());
}
}
@Test
public void testPredEvalWithDoubleStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG, "x", 15L, null);
assertEquals(TruthValue.YES_NO,
evaluateDouble(createDoubleStats(10.0, 100.0), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.FLOAT, "x", 15.0, null);
assertEquals(TruthValue.YES_NO,
evaluateDouble(createDoubleStats(10.0, 100.0), pred));
    // Stats get converted to the column type. "15" is outside of "10.0" and "100.0"
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "15", null);
assertEquals(TruthValue.NO,
evaluateDouble(createDoubleStats(10.0, 100.0), pred));
// Double is not converted to date type because of days/seconds/millis ambiguity
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", LocalDate.ofEpochDay(15), null);
try {
evaluateDouble(createDoubleStats(10.0, 100.0), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from Double to DATE", ia.getMessage());
}
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DECIMAL, "x", new HiveDecimalWritable("15"), null);
assertEquals(TruthValue.YES_NO,
evaluateDouble(createDoubleStats(10.0, 100.0), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(15*1000L), null);
assertEquals(TruthValue.YES_NO,
evaluateDouble(createDoubleStats(10.0, 100.0), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(150*1000L), null);
assertEquals(TruthValue.NO,
evaluateDouble(createDoubleStats(10.0, 100.0), pred));
}
@Test
public void testPredEvalWithStringStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG, "x", 100L, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("10", "1000"), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.FLOAT, "x", 100.0, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("10", "1000"), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "100", null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("10", "1000"), pred));
// IllegalArgumentException is thrown when converting String to Date, hence YES_NO
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", LocalDate.ofEpochDay(100), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDateStats(10, 1000), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DECIMAL, "x", new HiveDecimalWritable("100"), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("10", "1000"), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(100), null);
try {
evaluateInteger(createStringStats("10", "1000"), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from String to TIMESTAMP", ia.getMessage());
}
}
@Test
public void testPredEvalWithDateStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG, "x", 15L, null);
// Date to Integer conversion is not possible.
try {
evaluateInteger(createDateStats(10, 100), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from LocalDate to LONG", ia.getMessage());
}
// Date to Float conversion is also not possible.
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.FLOAT, "x", 15.0, null);
try {
evaluateInteger(createDateStats(10, 100), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from LocalDate to FLOAT", ia.getMessage());
}
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "15", null);
assertEquals(TruthValue.NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "1970-01-11", null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "15.1", null);
assertEquals(TruthValue.NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "__a15__1", null);
assertEquals(TruthValue.NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "2000-01-16", null);
assertEquals(TruthValue.NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "1970-01-16", null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", LocalDate.ofEpochDay(15), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", LocalDate.ofEpochDay(150), null);
assertEquals(TruthValue.NO,
evaluateInteger(createDateStats(10, 100), pred));
// Date to Decimal conversion is also not possible.
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DECIMAL, "x", new HiveDecimalWritable("15"), null);
try {
evaluateInteger(createDateStats(10, 100), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from LocalDate to DECIMAL", ia.getMessage());
}
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(15), null);
assertEquals(TruthValue.NO,
evaluateInteger(createDateStats(10, 100), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(15L * 24L * 60L * 60L * 1000L), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDateStats(10, 100), pred));
}
@Test
public void testPredEvalWithDecimalStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG, "x", 15L, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDecimalStats("10.0", "100.0"), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.FLOAT, "x", 15.0, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDecimalStats("10.0", "100.0"), pred));
// "15" out of range of "10.0" and "100.0"
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", "15", null);
assertEquals(TruthValue.NO,
evaluateInteger(createDecimalStats("10.0", "100.0"), pred));
// Decimal to Date not possible.
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", LocalDate.ofEpochDay(15), null);
try {
evaluateInteger(createDecimalStats("10.0", "100.0"), pred);
fail("evaluate should throw");
} catch (RecordReaderImpl.SargCastException ia) {
assertEquals("ORC SARGS could not convert from HiveDecimal to DATE", ia.getMessage());
}
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DECIMAL, "x", new HiveDecimalWritable("15"), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDecimalStats("10.0", "100.0"), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(15 * 1000L), null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createDecimalStats("10.0", "100.0"), pred));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(150 * 1000L), null);
assertEquals(TruthValue.NO,
evaluateInteger(createDecimalStats("10.0", "100.0"), pred));
}
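  // The four cases below cover the writer-calendar / reader-time-zone combinations
  // (proleptic vs. hybrid Gregorian, local vs. UTC); the inline comments record the timestamp
  // each predicate literal corresponds to after conversion.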
@Test
public void testPredEvalTimestampStatsDiffWriter() {
// Proleptic - NoUTC
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x",
Timestamp.valueOf("1017-01-01 00:00:00"), null);
assertEquals(TruthValue.YES_NO,
evaluateTimestampWithWriterCalendar(createTimestampStats("1017-01-01 00:00:00", "1017-01-01 00:00:00"),
pred, true, true, false));
// NoProleptic - NoUTC -> 1016-12-26 00:00:00.0
long predTime = DateUtils.convertTimeToProleptic(Timestamp.valueOf("1017-01-01 00:00:00").getTime(), false);
PredicateLeaf pred2 = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(predTime), null);
assertEquals(TruthValue.YES_NO,
evaluateTimestampWithWriterCalendar(createTimestampStats("1017-01-01 00:00:00", "1017-01-01 00:00:00"),
pred2, true, false, false));
// NoProleptic - UTC -> 1016-12-25 16:00:00.0
predTime = DateUtils.convertTimeToProleptic(getUtcTimestamp("1017-01-01 00:00:00"), true);
PredicateLeaf pred3 = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(predTime), null);
assertEquals(TruthValue.YES_NO,
evaluateTimestampWithWriterCalendar(createTimestampStats("1017-01-01 00:00:00", "1017-01-01 00:00:00"),
pred3, true, false, true));
// Proleptic - UTC -> 1016-12-31 16:00:00.0
predTime = getUtcTimestamp("1017-01-01 00:00:00");
PredicateLeaf pred4 = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", new Timestamp(predTime), null);
assertEquals(TruthValue.YES_NO,
evaluateTimestampWithWriterCalendar(createTimestampStats("1017-01-01 00:00:00", "1017-01-01 00:00:00"),
pred4, true, true, true));
}
@Test
public void testPredEvalWithTimestampStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.TIMESTAMP,
"x", Timestamp.valueOf("2017-01-01 00:00:00"), null);
assertEquals(TruthValue.YES_NO,
evaluateTimestamp(createTimestampStats("2017-01-01 00:00:00",
"2018-01-01 00:00:00"), pred, true, false));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.FLOAT, "x", 15.0, null);
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestamp(createTimestampStats("2017-01-01 00:00:00", "2018-01-01 00:00:00"),
pred, true, false));
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestamp(createTimestampStats("2017-01-01 00:00:00", "2018-01-01 00:00:00"),
pred, true, false));
    // pre ORC-135 writer versions should always yield YES_NO_NULL.
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.TIMESTAMP, "x", Timestamp.valueOf("2017-01-01 00:00:00"), null);
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestamp(createTimestampStats("2017-01-01 00:00:00", "2017-01-01 00:00:00"),
pred, false, false));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.STRING, "x", Timestamp.valueOf("2017-01-01 00:00:00").toString(), null);
assertEquals(TruthValue.YES_NO,
evaluateTimestamp(createTimestampStats("2017-01-01 00:00:00", "2018-01-01 00:00:00"),
pred, true, false));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DATE, "x", Date.valueOf("2016-01-01"), null);
assertEquals(TruthValue.NO,
evaluateTimestamp(createTimestampStats("2017-01-01 00:00:00", "2017-01-01 00:00:00"),
pred, true, false));
assertEquals(TruthValue.YES_NO,
evaluateTimestamp(createTimestampStats("2015-01-01 00:00:00", "2016-01-01 00:00:00"),
pred, true, false));
pred = createPredicateLeaf(PredicateLeaf.Operator.NULL_SAFE_EQUALS,
PredicateLeaf.Type.DECIMAL, "x", new HiveDecimalWritable("15"), null);
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestamp(createTimestampStats("2015-01-01 00:00:00", "2016-01-01 00:00:00"),
pred, true, false));
}
@Test
public void testEquals() throws Exception {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.LONG,
"x", 15L, null);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(20L, 30L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(15L, 30L), pred)) ;
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 30L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 15L), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(0L, 10L), pred));
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(15L, 15L), pred));
}
@Test
public void testNullSafeEquals() throws Exception {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG,
"x", 15L, null);
assertEquals(TruthValue.NO,
evaluateInteger(createIntStats(20L, 30L), pred));
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(15L, 30L), pred));
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(10L, 30L), pred));
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(10L, 15L), pred));
assertEquals(TruthValue.NO,
evaluateInteger(createIntStats(0L, 10L), pred));
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(15L, 15L), pred));
}
@Test
public void testLessThan() throws Exception {
PredicateLeaf lessThan = createPredicateLeaf
(PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.LONG,
"x", 15L, null);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(20L, 30L), lessThan));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(15L, 30L), lessThan));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 30L), lessThan));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 15L), lessThan));
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(0L, 10L), lessThan));
}
@Test
public void testLessThanEquals() throws Exception {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.LESS_THAN_EQUALS, PredicateLeaf.Type.LONG,
"x", 15L, null);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(20L, 30L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(15L, 30L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 30L), pred));
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(10L, 15L), pred));
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(0L, 10L), pred));
}
@Test
public void testIn() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add(10L);
args.add(20L);
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.LONG,
"x", null, args);
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(20L, 20L), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(30L, 30L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 30L), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(12L, 18L), pred));
}
@Test
public void testInDatePredConversion() {
List<Object> args = new ArrayList<>();
args.add(toDate(LocalDate.ofEpochDay(15)));
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.DATE,
"x", null, args);
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createDateStats(15, 15), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createDateStats(10, 30), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createDateStats(5, 10), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createDateStats(16, 30), pred));
}
@Test
public void testBetween() {
List<Object> args = new ArrayList<Object>();
args.add(10L);
args.add(20L);
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.BETWEEN, PredicateLeaf.Type.LONG,
"x", null, args);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(0L, 5L), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createIntStats(30L, 40L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(5L, 15L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(15L, 25L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(5L, 25L), pred));
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(10L, 20L), pred));
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createIntStats(12L, 18L), pred));
// check with empty predicate list
args.clear();
pred = createPredicateLeaf
(PredicateLeaf.Operator.BETWEEN, PredicateLeaf.Type.LONG,
"x", null, args);
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(0L, 5L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(30L, 40L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(5L, 15L), pred));
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createIntStats(10L, 20L), pred));
}
@Test
public void testIsNull() {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IS_NULL, PredicateLeaf.Type.LONG,
"x", null, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createIntStats(20L, 30L), pred));
}
@Test
public void testEqualsWithNullInStats() {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.STRING,
"x", "c", null);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("d", "e", true), pred)); // before
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("a", "b", true), pred)); // after
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "c", true), pred)); // max
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("c", "d", true), pred)); // min
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "d", true), pred)); // middle
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createStringStats("c", "c", true), pred)); // same
}
@Test
public void testNullSafeEqualsWithNullInStats() {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.STRING,
"x", "c", null);
assertEquals(TruthValue.NO,
evaluateInteger(createStringStats("d", "e", true), pred)); // before
assertEquals(TruthValue.NO,
evaluateInteger(createStringStats("a", "b", true), pred)); // after
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("b", "c", true), pred)); // max
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("c", "d", true), pred)); // min
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("b", "d", true), pred)); // middle
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("c", "c", true), pred)); // same
}
@Test
public void testLessThanWithNullInStats() {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.LESS_THAN, PredicateLeaf.Type.STRING,
"x", "c", null);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("d", "e", true), pred)); // before
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createStringStats("a", "b", true), pred)); // after
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "c", true), pred)); // max
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("c", "d", true), pred)); // min
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "d", true), pred)); // middle
assertEquals(TruthValue.NO_NULL, // min, same stats
evaluateInteger(createStringStats("c", "c", true), pred));
}
@Test
public void testLessThanEqualsWithNullInStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.LESS_THAN_EQUALS, PredicateLeaf.Type.STRING,
"x", "c", null);
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("d", "e", true), pred)); // before
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createStringStats("a", "b", true), pred)); // after
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createStringStats("b", "c", true), pred)); // max
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("c", "d", true), pred)); // min
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "d", true), pred)); // middle
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createStringStats("c", "c", true), pred)); // same
}
@Test
public void testInWithNullInStats() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add("c");
args.add("f");
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.STRING,
"x", null, args);
assertEquals(TruthValue.NO_NULL, // before & after
evaluateInteger(createStringStats("d", "e", true), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("a", "b", true), pred)); // after
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("e", "f", true), pred)); // max
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("c", "d", true), pred)); // min
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "d", true), pred)); // middle
assertEquals(TruthValue.YES_NULL,
evaluateInteger(createStringStats("c", "c", true), pred)); // same
}
@Test
public void testBetweenWithNullInStats() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add("c");
args.add("f");
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.BETWEEN, PredicateLeaf.Type.STRING,
"x", null, args);
assertEquals(TruthValue.YES_NULL, // before & after
evaluateInteger(createStringStats("d", "e", true), pred));
assertEquals(TruthValue.YES_NULL, // before & max
evaluateInteger(createStringStats("e", "f", true), pred));
assertEquals(TruthValue.NO_NULL, // before & before
evaluateInteger(createStringStats("h", "g", true), pred));
assertEquals(TruthValue.YES_NO_NULL, // before & min
evaluateInteger(createStringStats("f", "g", true), pred));
assertEquals(TruthValue.YES_NO_NULL, // before & middle
evaluateInteger(createStringStats("e", "g", true), pred));
assertEquals(TruthValue.YES_NULL, // min & after
evaluateInteger(createStringStats("c", "e", true), pred));
assertEquals(TruthValue.YES_NULL, // min & max
evaluateInteger(createStringStats("c", "f", true), pred));
assertEquals(TruthValue.YES_NO_NULL, // min & middle
evaluateInteger(createStringStats("c", "g", true), pred));
assertEquals(TruthValue.NO_NULL,
evaluateInteger(createStringStats("a", "b", true), pred)); // after
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("a", "c", true), pred)); // max
assertEquals(TruthValue.YES_NO_NULL,
evaluateInteger(createStringStats("b", "d", true), pred)); // middle
assertEquals(TruthValue.YES_NULL, // min & after, same stats
evaluateInteger(createStringStats("c", "c", true), pred));
}
@Test
public void testTimestampStatsOldFiles() throws Exception {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP,
"x", Timestamp.valueOf("2000-01-01 00:00:00"), null);
OrcProto.ColumnStatistics cs = createTimestampStats("2000-01-01 00:00:00", "2001-01-01 00:00:00");
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestampBloomfilter(cs, pred, new BloomFilterUtf8(10000, 0.01), OrcFile.WriterVersion.ORC_101, false));
BloomFilterUtf8 bf = new BloomFilterUtf8(10, 0.05);
bf.addLong(getUtcTimestamp("2000-06-01 00:00:00"));
assertEquals(TruthValue.NO_NULL,
evaluateTimestampBloomfilter(cs, pred, bf, OrcFile.WriterVersion.ORC_135, false));
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestampBloomfilter(cs, pred, bf, OrcFile.WriterVersion.ORC_101, false));
}
@Test
public void testTimestampUTC() throws Exception {
DateFormat f = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
f.setTimeZone(TimeZone.getTimeZone("UTC"));
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP,
"x", new Timestamp(f.parse("2015-01-01 00:00:00").getTime()), null);
PredicateLeaf pred2 = createPredicateLeaf
(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP,
"x", new Timestamp(f.parse("2014-12-31 23:59:59").getTime()), null);
PredicateLeaf pred3 = createPredicateLeaf
(PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.TIMESTAMP,
"x", new Timestamp(f.parse("2016-01-01 00:00:01").getTime()), null);
OrcProto.ColumnStatistics cs = createTimestampStats("2015-01-01 00:00:00", "2016-01-01 00:00:00");
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestamp(cs, pred, true, true));
assertEquals(TruthValue.NO_NULL,
evaluateTimestamp(cs, pred2, true, true));
assertEquals(TruthValue.NO_NULL,
evaluateTimestamp(cs, pred3, true, true));
assertEquals(TruthValue.NO_NULL,
evaluateTimestampBloomfilter(cs, pred, new BloomFilterUtf8(10000, 0.01), OrcFile.WriterVersion.ORC_135, true));
assertEquals(TruthValue.NO_NULL,
evaluateTimestampBloomfilter(cs, pred2, new BloomFilterUtf8(10000, 0.01), OrcFile.WriterVersion.ORC_135, true));
BloomFilterUtf8 bf = new BloomFilterUtf8(10, 0.05);
bf.addLong(getUtcTimestamp("2015-06-01 00:00:00"));
assertEquals(TruthValue.NO_NULL,
evaluateTimestampBloomfilter(cs, pred, bf, OrcFile.WriterVersion.ORC_135, true));
bf.addLong(getUtcTimestamp("2015-01-01 00:00:00"));
assertEquals(TruthValue.YES_NO_NULL,
evaluateTimestampBloomfilter(cs, pred, bf, OrcFile.WriterVersion.ORC_135, true));
}
private static long getUtcTimestamp(String ts) {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
dateFormat.setTimeZone(utcTz);
try {
return dateFormat.parse(ts).getTime();
} catch (ParseException e) {
throw new IllegalArgumentException("Can't parse " + ts, e);
}
}
@Test
public void testIsNullWithNullInStats() throws Exception {
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IS_NULL, PredicateLeaf.Type.STRING,
"x", null, null);
assertEquals(TruthValue.YES_NO,
evaluateInteger(createStringStats("c", "d", true), pred));
assertEquals(TruthValue.NO,
evaluateInteger(createStringStats("c", "d", false), pred));
}
@Test
public void testOverlap() throws Exception {
assertFalse(RecordReaderUtils.overlap(0, 10, -10, -1));
assertTrue(RecordReaderUtils.overlap(0, 10, -1, 0));
assertTrue(RecordReaderUtils.overlap(0, 10, -1, 1));
assertTrue(RecordReaderUtils.overlap(0, 10, 2, 8));
assertTrue(RecordReaderUtils.overlap(0, 10, 5, 10));
assertTrue(RecordReaderUtils.overlap(0, 10, 10, 11));
assertTrue(RecordReaderUtils.overlap(0, 10, 0, 10));
assertTrue(RecordReaderUtils.overlap(0, 10, -1, 11));
assertFalse(RecordReaderUtils.overlap(0, 10, 11, 12));
}
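  /**
   * Build a linked DiskRangeList from consecutive (offset, end) pairs.
   */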
private static DiskRangeList diskRanges(Integer... points) {
DiskRangeList head = null, tail = null;
for(int i = 0; i < points.length; i += 2) {
DiskRangeList range = new DiskRangeList(points[i], points[i+1]);
if (tail == null) {
head = tail = range;
} else {
tail = tail.insertAfter(range);
}
}
return head;
}
@Test
public void testGetIndexPosition() throws Exception {
boolean uncompressed = false;
boolean compressed = true;
assertEquals(0, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.INT,
OrcProto.Stream.Kind.PRESENT, compressed, true));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.INT,
OrcProto.Stream.Kind.DATA, compressed, true));
assertEquals(3, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.INT,
OrcProto.Stream.Kind.DATA, uncompressed, true));
assertEquals(0, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.INT,
OrcProto.Stream.Kind.DATA, compressed, false));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DICTIONARY, TypeDescription.Category.STRING,
OrcProto.Stream.Kind.DATA, compressed, true));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.BINARY,
OrcProto.Stream.Kind.DATA, compressed, true));
assertEquals(3, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.BINARY,
OrcProto.Stream.Kind.DATA, uncompressed, true));
assertEquals(6, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.BINARY,
OrcProto.Stream.Kind.LENGTH, compressed, true));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.BINARY,
OrcProto.Stream.Kind.LENGTH, uncompressed, true));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.DECIMAL,
OrcProto.Stream.Kind.DATA, compressed, true));
assertEquals(3, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.DECIMAL,
OrcProto.Stream.Kind.DATA, uncompressed, true));
assertEquals(6, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.DECIMAL,
OrcProto.Stream.Kind.SECONDARY, compressed, true));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.DECIMAL,
OrcProto.Stream.Kind.SECONDARY, uncompressed, true));
assertEquals(4, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.TIMESTAMP,
OrcProto.Stream.Kind.DATA, compressed, true));
assertEquals(3, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.TIMESTAMP,
OrcProto.Stream.Kind.DATA, uncompressed, true));
assertEquals(7, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.TIMESTAMP,
OrcProto.Stream.Kind.SECONDARY, compressed, true));
assertEquals(5, RecordReaderUtils.getIndexPosition
(OrcProto.ColumnEncoding.Kind.DIRECT, TypeDescription.Category.TIMESTAMP,
OrcProto.Stream.Kind.SECONDARY, uncompressed, true));
}
@Test
public void testPartialPlan() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:int>");
MockDataReader dataReader = new MockDataReader(schema)
.addStream(1, OrcProto.Stream.Kind.ROW_INDEX,
createRowIndex(entry(0, -1, -1, 0),
entry(100, -1, -1, 10000),
entry(200, -1, -1, 20000),
entry(300, -1, -1, 30000),
entry(400, -1, -1, 40000),
entry(500, -1, -1, 50000)))
.addStream(2, OrcProto.Stream.Kind.ROW_INDEX,
createRowIndex(entry(0, -1, -1, 0),
entry(200, -1, -1, 20000),
entry(400, -1, -1, 40000),
entry(600, -1, -1, 60000),
entry(800, -1, -1, 80000),
entry(1000, -1, -1, 100000)))
.addStream(1, OrcProto.Stream.Kind.PRESENT, createDataStream(1, 1000))
.addStream(1, OrcProto.Stream.Kind.DATA, createDataStream(2, 99000))
.addStream(2, OrcProto.Stream.Kind.PRESENT, createDataStream(3, 2000))
.addStream(2, OrcProto.Stream.Kind.DATA, createDataStream(4, 198000))
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addStripeFooter(1000, null);
MockStripe stripe = dataReader.getStripe(0);
// get the start of the data streams
final long START = stripe.getStream(1, OrcProto.Stream.Kind.PRESENT).offset;
boolean[] columns = new boolean[]{true, true, false};
boolean[] rowGroups = new boolean[]{true, true, false, false, true, false};
// filter by rows and groups
StripePlanner planner = new StripePlanner(schema, new ReaderEncryption(),
dataReader, OrcFile.WriterVersion.ORC_14, false, Integer.MAX_VALUE);
planner.parseStripe(stripe, columns);
OrcIndex index = planner.readRowIndex(null, null);
BufferChunkList result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
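    // Row groups 0, 1 and 4 are selected, so the plan should cover the whole 1000-byte PRESENT
    // stream of column 1 plus its DATA bytes [0, 20000) and [40000, 50000), each stretched by
    // WORST_UNCOMPRESSED_SLOP, which is what the assertions below verify.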
assertEquals(START, result.get(0).getOffset());
assertEquals(1000, result.get(0).getLength());
assertEquals(START + 1000, result.get(1).getOffset());
assertEquals(20000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP, result.get(1).getLength());
assertEquals(START + 41000, result.get(2).getOffset());
assertEquals(10000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP, result.get(2).getLength());
assertNull(result.get(3));
// if we read no rows, don't read any bytes
rowGroups = new boolean[]{false, false, false, false, false, false};
result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertNull(result.get(0));
// all rows, but only columns 0 and 2.
rowGroups = null;
columns = new boolean[]{true, false, true};
planner.parseStripe(stripe, columns).readRowIndex(null, index);
result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertEquals(START + 100000, result.get(0).getOffset());
assertEquals(2000, result.get(0).getLength());
assertEquals(START + 102000, result.get(1).getOffset());
assertEquals(198000, result.get(1).getLength());
assertNull(result.get(2));
rowGroups = new boolean[]{false, true, false, false, false, false};
result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertEquals(START + 100200, result.get(0).getOffset());
assertEquals(1800, result.get(0).getLength());
assertEquals(START + 122000, result.get(1).getOffset());
assertEquals(20000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP,
result.get(1).getLength());
assertNull(result.get(2));
rowGroups = new boolean[]{false, false, false, false, false, true};
columns = new boolean[]{true, true, true};
planner.parseStripe(stripe, columns).readRowIndex(null, index);
result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertEquals(START + 500, result.get(0).getOffset());
assertEquals(500, result.get(0).getLength());
assertEquals(START + 51000, result.get(1).getOffset());
assertEquals(49000, result.get(1).getLength());
assertEquals(START + 101000, result.get(2).getOffset());
assertEquals(1000, result.get(2).getLength());
assertEquals(START + 202000, result.get(3).getOffset());
assertEquals(98000, result.get(3).getLength());
assertNull(result.get(4));
}
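  // Same planning checks as above, but with a ZLIB codec: the expected
  // over-read for a truncated range is a whole number of compression chunks
  // (header plus buffer size), computed as SLOP below.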
@Test
public void testPartialPlanCompressed() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:int>");
InStream.StreamOptions options =
new InStream.StreamOptions()
.withCodec(OrcCodecPool.getCodec(CompressionKind.ZLIB))
.withBufferSize(1024);
int stretchFactor = 2 + (MAX_VALUES_LENGTH * MAX_BYTE_WIDTH - 1) / options.getBufferSize();
final int SLOP = stretchFactor * (OutStream.HEADER_SIZE + options.getBufferSize());
MockDataReader dataReader = new MockDataReader(schema, options)
.addStream(1, OrcProto.Stream.Kind.ROW_INDEX,
createRowIndex(options,
entry(0, -1, -1, -1, 0),
entry(100, -1, -1, -1, 10000),
entry(200, -1, -1, -1, 20000),
entry(300, -1, -1, -1, 30000),
entry(400, -1, -1, -1, 40000),
entry(500, -1, -1, -1, 50000)))
.addStream(2, OrcProto.Stream.Kind.ROW_INDEX,
createRowIndex(options,
entry(0, -1, -1, -1, 0),
entry(200, -1, -1, -1, 20000),
entry(400, -1, -1, -1, 40000),
entry(600, -1, -1, -1, 60000),
entry(800, -1, -1, -1, 80000),
entry(1000, -1, -1, -1, 100000)))
.addStream(1, OrcProto.Stream.Kind.PRESENT, createDataStream(1, 1000))
.addStream(1, OrcProto.Stream.Kind.DATA, createDataStream(2, 99000))
.addStream(2, OrcProto.Stream.Kind.PRESENT, createDataStream(3, 2000))
.addStream(2, OrcProto.Stream.Kind.DATA, createDataStream(4, 198000))
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addStripeFooter(1000, null);
MockStripe stripe = dataReader.getStripe(0);
// get the start of the data streams
final long START = stripe.getStream(1, OrcProto.Stream.Kind.PRESENT).offset;
StripePlanner planner = new StripePlanner(schema, new ReaderEncryption(),
dataReader, OrcFile.WriterVersion.ORC_14, false, Integer.MAX_VALUE);
// filter by rows and groups
boolean[] columns = new boolean[]{true, true, false};
boolean[] rowGroups = new boolean[]{true, true, false, false, true, false};
planner.parseStripe(stripe, columns);
OrcIndex index = planner.readRowIndex(null, null);
BufferChunkList result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertEquals(START, result.get(0).getOffset());
assertEquals(1000, result.get(0).getLength());
assertEquals(START + 1000, result.get(1).getOffset());
assertEquals(20000 + SLOP, result.get(1).getLength());
assertEquals(START + 41000, result.get(2).getOffset());
assertEquals(10000 + SLOP, result.get(2).getLength());
assertNull(result.get(3));
rowGroups = new boolean[]{false, false, false, false, false, true};
result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertEquals(START + 500, result.get(0).getOffset());
assertEquals(500, result.get(0).getLength());
assertEquals(START + 51000, result.get(1).getOffset());
assertEquals(49000, result.get(1).getLength());
assertNull(result.get(2));
}
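  // With a dictionary-encoded string column the LENGTH and DICTIONARY_DATA
  // streams are planned in full when any of the column's row groups is
  // selected; with no row groups selected nothing is read at all.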
@Test
public void testPartialPlanString() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:string,y:int>");
MockDataReader dataReader =
new MockDataReader(schema)
.addStream(1, OrcProto.Stream.Kind.ROW_INDEX,
createRowIndex(entry(0, -1, -1, 0),
entry(100, -1, -1, 10000),
entry(200, -1, -1, 20000),
entry(300, -1, -1, 30000),
entry(400, -1, -1, 40000),
entry(500, -1, -1, 50000)))
.addStream(2, OrcProto.Stream.Kind.ROW_INDEX,
createRowIndex(entry(0, -1, -1, 0),
entry(200, -1, -1, 20000),
entry(400, -1, -1, 40000),
entry(600, -1, -1, 60000),
entry(800, -1, -1, 80000),
entry(1000, -1, -1, 100000)))
.addStream(1, OrcProto.Stream.Kind.PRESENT, createDataStream(1, 1000))
.addStream(1, OrcProto.Stream.Kind.DATA, createDataStream(2, 94000))
.addStream(1, OrcProto.Stream.Kind.LENGTH, createDataStream(3, 2000))
.addStream(1, OrcProto.Stream.Kind.DICTIONARY_DATA, createDataStream(4, 3000))
.addStream(2, OrcProto.Stream.Kind.PRESENT, createDataStream(5, 2000))
.addStream(2, OrcProto.Stream.Kind.DATA, createDataStream(6, 198000))
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addEncoding(OrcProto.ColumnEncoding.Kind.DICTIONARY)
.addEncoding(OrcProto.ColumnEncoding.Kind.DIRECT)
.addStripeFooter(1000, null);
MockStripe stripe = dataReader.getStripe(0);
// get the start of the data streams
final long START = stripe.getStream(1, OrcProto.Stream.Kind.PRESENT).offset;
StripePlanner planner = new StripePlanner(schema, new ReaderEncryption(),
dataReader, OrcFile.WriterVersion.ORC_14, false, Integer.MAX_VALUE);
// filter by rows and groups
boolean[] columns = new boolean[]{true, true, false};
boolean[] rowGroups = new boolean[]{false, true, false, false, true, true};
planner.parseStripe(stripe, columns);
OrcIndex index = planner.readRowIndex(null, null);
BufferChunkList result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertEquals(START + 100, result.get(0).getOffset());
assertEquals(900, result.get(0).getLength());
assertEquals(START + 11000, result.get(1).getOffset());
assertEquals(10000 + RecordReaderUtils.WORST_UNCOMPRESSED_SLOP,
result.get(1).getLength());
assertEquals(START + 41000, result.get(2).getOffset());
assertEquals(54000, result.get(2).getLength());
assertEquals(START + 95000, result.get(3).getOffset());
assertEquals(2000, result.get(3).getLength());
assertEquals(START + 97000, result.get(4).getOffset());
assertEquals(3000, result.get(4).getLength());
assertNull(result.get(5));
// Don't read anything if no groups are selected
rowGroups = new boolean[6];
result = planner.readData(index, rowGroups, false, TypeReader.ReadPhase.ALL);
assertNull(result.get(0));
}
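  // The bloom filter predicate tests below share one pattern: populate a
  // filter with the values 20..999, check that a predicate on a missing value
  // evaluates to NO (or NO_NULL), then add the value and expect YES_NO (or
  // YES_NO_NULL).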
@Test
public void testIntNullSafeEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.LONG, "x", 15L, null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addLong(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createIntStats(10, 100));
assertEquals(TruthValue.NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(15);
assertEquals(TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testIntEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.LONG, "x", 15L, null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addLong(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createIntStats(10, 100));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(15);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testIntInBloomFilter() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add(15L);
args.add(19L);
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.LONG,
"x", null, args);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addLong(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createIntStats(10, 100));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(19);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(15);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDoubleNullSafeEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.FLOAT, "x", 15.0, null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addDouble(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDoubleStats(10.0, 100.0));
assertEquals(TruthValue.NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addDouble(15.0);
assertEquals(TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDoubleEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.FLOAT, "x", 15.0, null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addDouble(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDoubleStats(10.0, 100.0));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addDouble(15.0);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDoubleInBloomFilter() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add(15.0);
args.add(19.0);
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.FLOAT,
"x", null, args);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addDouble(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDoubleStats(10.0, 100.0));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addDouble(19.0);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addDouble(15.0);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testStringNullSafeEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.STRING, "x", "str_15", null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addString("str_" + i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createStringStats("str_10", "str_200"));
assertEquals(TruthValue.NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString("str_15");
assertEquals(TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testStringEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.STRING, "x", "str_15", null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addString("str_" + i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createStringStats("str_10", "str_200"));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString("str_15");
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testStringInBloomFilter() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add("str_15");
args.add("str_19");
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.STRING,
"x", null, args);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addString("str_" + i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createStringStats("str_10", "str_200"));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString("str_19");
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString("str_15");
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDateWritableNullSafeEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.NULL_SAFE_EQUALS, PredicateLeaf.Type.DATE, "x",
LocalDate.ofEpochDay(15), null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addLong(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDateStats(10, 100));
assertEquals(TruthValue.NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(15);
assertEquals(TruthValue.YES_NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDateWritableEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.DATE, "x",
LocalDate.ofEpochDay(15), null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addLong(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDateStats(10, 100));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(15);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDateWritableInBloomFilter() throws Exception {
List<Object> args = new ArrayList<>();
args.add(toDate(LocalDate.ofEpochDay(15)));
args.add(toDate(LocalDate.ofEpochDay(19)));
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.DATE,
"x", null, args);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addLong(i);
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDateStats(10, 100));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(19);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addLong(15);
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDecimalEqualsBloomFilter() throws Exception {
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.DECIMAL, "x",
new HiveDecimalWritable("15"),
null);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addString(HiveDecimal.create(i).toString());
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDecimalStats("10", "200"));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString(HiveDecimal.create(15).toString());
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testDecimalInBloomFilter() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add(new HiveDecimalWritable("15"));
args.add(new HiveDecimalWritable("19"));
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.DECIMAL,
"x", null, args);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addString(HiveDecimal.create(i).toString());
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDecimalStats("10", "200"));
assertEquals(TruthValue.NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString(HiveDecimal.create(19).toString());
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString(HiveDecimal.create(15).toString());
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
@Test
public void testNullsInBloomFilter() throws Exception {
List<Object> args = new ArrayList<Object>();
args.add(new HiveDecimalWritable("15"));
args.add(null);
args.add(new HiveDecimalWritable("19"));
PredicateLeaf pred = createPredicateLeaf
(PredicateLeaf.Operator.IN, PredicateLeaf.Type.DECIMAL,
"x", null, args);
BloomFilter bf = new BloomFilter(10000);
for (int i = 20; i < 1000; i++) {
bf.addString(HiveDecimal.create(i).toString());
}
ColumnStatistics cs = ColumnStatisticsImpl.deserialize(null, createDecimalStats("10", "200", false));
// hasNull is false, so bloom filter should return NO
assertEquals(TruthValue.NO, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
cs = ColumnStatisticsImpl.deserialize(null, createDecimalStats("10", "200", true));
// hasNull is true, so bloom filter should return YES_NO_NULL
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString(HiveDecimal.create(19).toString());
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
bf.addString(HiveDecimal.create(15).toString());
assertEquals(TruthValue.YES_NO_NULL, RecordReaderImpl.evaluatePredicate(cs, pred, bf));
}
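  // Closing the RecordReader must close the cloned DataReader, even when the
  // clone's close() itself throws.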
@Test
public void testClose() throws Exception {
DataReader mockedDataReader = mock(DataReader.class);
DataReader cloned = mock(DataReader.class);
when(mockedDataReader.clone()).thenReturn(cloned);
closeMockedRecordReader(mockedDataReader);
verify(cloned, atLeastOnce()).close();
}
@Test
public void testCloseWithException() throws Exception {
DataReader mockedDataReader = mock(DataReader.class);
DataReader cloned = mock(DataReader.class);
when(mockedDataReader.clone()).thenReturn(cloned);
doThrow(IOException.class).when(cloned).close();
try {
closeMockedRecordReader(mockedDataReader);
fail("Exception should have been thrown when Record Reader was closed");
} catch (IOException expected) {
}
verify(cloned, atLeastOnce()).close();
}
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
private void closeMockedRecordReader(DataReader mockedDataReader) throws IOException {
Configuration conf = new Configuration();
Path path = new Path(workDir, "empty.orc");
FileSystem.get(conf).delete(path, true);
Writer writer = OrcFile.createWriter(path, OrcFile.writerOptions(conf)
.setSchema(TypeDescription.createLong()));
writer.close();
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
RecordReader recordReader = reader.rows(reader.options()
.dataReader(mockedDataReader));
recordReader.close();
}
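  // Each int in the generated stream stores the tag in its high byte and the
  // byte offset in the low bytes, so a chunk's contents identify which stream
  // and position it was read from.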
static ByteBuffer createDataStream(int tag, int bytes) {
ByteBuffer result = ByteBuffer.allocate(bytes);
IntBuffer iBuf = result.asIntBuffer();
    for(int i = 0; i < bytes; i += 4) {
iBuf.put((tag << 24) + i);
}
result.limit(bytes);
return result;
}
static OrcProto.RowIndexEntry entry(int... values) {
OrcProto.RowIndexEntry.Builder builder = OrcProto.RowIndexEntry.newBuilder();
for(int v: values) {
builder.addPositions(v);
}
return builder.build();
}
static ByteBuffer createRowIndex(InStream.StreamOptions options,
OrcProto.RowIndexEntry... entries
) throws IOException {
ByteBuffer uncompressed = createRowIndex(entries);
if (options.getCodec() != null) {
CompressionCodec codec = options.getCodec();
PhysicalFsWriter.BufferedStream buffer =
new PhysicalFsWriter.BufferedStream();
StreamOptions writerOptions = new StreamOptions(options.getBufferSize())
.withCodec(codec, codec.getDefaultOptions());
try (OutStream out = new OutStream("row index", writerOptions, buffer)) {
out.write(uncompressed.array(),
uncompressed.arrayOffset() + uncompressed.position(),
uncompressed.remaining());
out.flush();
}
return buffer.getByteBuffer();
} else {
return uncompressed;
}
}
static ByteBuffer createRowIndex(OrcProto.RowIndexEntry... entries) {
OrcProto.RowIndex.Builder builder = OrcProto.RowIndex.newBuilder();
for(OrcProto.RowIndexEntry entry: entries) {
builder.addEntry(entry);
}
return ByteBuffer.wrap(builder.build().toByteArray());
}
static ByteBuffer createRowIndex(int value) {
OrcProto.RowIndexEntry entry =
OrcProto.RowIndexEntry.newBuilder().addPositions(value).build();
return ByteBuffer.wrap(OrcProto.RowIndex.newBuilder().addEntry(entry)
.build().toByteArray());
}
static ByteBuffer createBloomFilter(int value) {
OrcProto.BloomFilter entry =
OrcProto.BloomFilter.newBuilder().setNumHashFunctions(value).build();
return ByteBuffer.wrap(OrcProto.BloomFilterIndex.newBuilder()
.addBloomFilter(entry).build().toByteArray());
}
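  // Mock stripes for the bloom filter selection tests: old-style BLOOM_FILTER
  // streams only, both kinds side by side, and utf8-only (except column 1,
  // which keeps the old kind).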
static MockDataReader createOldBlooms(TypeDescription schema) {
return new MockDataReader(schema)
.addStream(1, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(10))
.addStream(1, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(11))
.addStream(2, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(20))
.addStream(2, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(21))
.addStream(3, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(30))
.addStream(3, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(31))
.addStripeFooter(1000, null);
}
static MockDataReader createMixedBlooms(TypeDescription schema) {
return new MockDataReader(schema)
.addStream(1, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(10))
.addStream(1, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(11))
.addStream(2, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(20))
.addStream(2, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(21))
.addStream(2, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, createBloomFilter(22))
.addStream(3, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(30))
.addStream(3, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(31))
.addStream(3, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, createBloomFilter(32))
.addStripeFooter(1000, null);
}
static MockDataReader createNewBlooms(TypeDescription schema) {
return new MockDataReader(schema)
.addStream(1, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(10))
.addStream(1, OrcProto.Stream.Kind.BLOOM_FILTER, createBloomFilter(11))
.addStream(2, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(20))
.addStream(2, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, createBloomFilter(22))
.addStream(3, OrcProto.Stream.Kind.ROW_INDEX, createRowIndex(30))
.addStream(3, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, createBloomFilter(32))
.addStripeFooter(1000, null);
}
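  // Checks which ROW_INDEX and bloom filter streams are actually read for old
  // writer versions, depending on whether non-utf8 bloom filters are ignored
  // and which columns are included.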
@Test
public void testOldBloomFilters() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:decimal(10,2),z:string>");
MockDataReader dataReader = createOldBlooms(schema);
MockStripe stripe = dataReader.getStripe(0);
// use old blooms
ReaderEncryption encryption = new ReaderEncryption();
StripePlanner planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, false, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, false, true});
OrcIndex index =
planner.readRowIndex(new boolean[]{false, true, false, true}, null);
OrcProto.Stream.Kind[] bloomFilterKinds = index.getBloomFilterKinds();
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[3]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
// ignore non-utf8 bloom filter
dataReader.resetCounts();
Arrays.fill(bloomFilterKinds, null);
planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, true, false});
planner.readRowIndex(new boolean[]{false, true, true, false}, index);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertNull(bloomFilterKinds[2]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
// check that we are handling the post hive-12055 strings correctly
dataReader.resetCounts();
Arrays.fill(bloomFilterKinds, null);
planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_12055, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, true, true});
planner.readRowIndex(new boolean[]{false, true, true, true}, index);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertNull(bloomFilterKinds[2]);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[3]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
// ignore non-utf8 bloom filter on decimal
dataReader.resetCounts();
Arrays.fill(bloomFilterKinds, null);
planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, false, true, false});
planner.readRowIndex(new boolean[]{false, false, true, false}, index);
assertNull(bloomFilterKinds[1]);
assertNull(bloomFilterKinds[2]);
assertEquals(0, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
}
@Test
public void testCompatibleBloomFilters() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:decimal(10,2),z:string>");
MockDataReader dataReader = createMixedBlooms(schema);
MockStripe stripe = dataReader.getStripe(0);
    // mixed old and utf8 bloom filters: the utf8 version is preferred when both exist
ReaderEncryption encryption = new ReaderEncryption();
StripePlanner planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, false, true});
OrcIndex index =
planner.readRowIndex(new boolean[]{false, true, false, true}, null);
OrcProto.Stream.Kind[] bloomFilterKinds = index.getBloomFilterKinds();
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, bloomFilterKinds[3]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
// ignore non-utf8 bloom filter
Arrays.fill(bloomFilterKinds, null);
dataReader.resetCounts();
planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, true, false});
planner.readRowIndex(new boolean[]{false, true, true, false}, index);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, bloomFilterKinds[2]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
}
@Test
public void testNewBloomFilters() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:decimal(10,2),z:string>");
MockDataReader dataReader = createNewBlooms(schema);
MockStripe stripe = dataReader.getStripe(0);
    // utf8 bloom filters for columns 2 and 3; column 1 still has the old kind
ReaderEncryption encryption = new ReaderEncryption();
StripePlanner planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, false, true});
OrcIndex index =
planner.readRowIndex(new boolean[]{false, true, false, true}, null);
OrcProto.Stream.Kind[] bloomFilterKinds = index.getBloomFilterKinds();
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, bloomFilterKinds[3]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
// ignore non-utf8 bloom filter
Arrays.fill(bloomFilterKinds, null);
dataReader.resetCounts();
planner = new StripePlanner(schema, encryption, dataReader,
OrcFile.WriterVersion.HIVE_4243, true, Integer.MAX_VALUE);
planner.parseStripe(stripe, new boolean[]{true, true, true, false});
planner.readRowIndex(new boolean[]{false, true, true, false}, index);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER, bloomFilterKinds[1]);
assertEquals(OrcProto.Stream.Kind.BLOOM_FILTER_UTF8, bloomFilterKinds[2]);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(1, OrcProto.Stream.Kind.BLOOM_FILTER).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(1, stripe.getStream(2, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.ROW_INDEX).readCount);
assertEquals(0, stripe.getStream(3, OrcProto.Stream.Kind.BLOOM_FILTER_UTF8).readCount);
}
static OrcProto.RowIndexEntry createIndexEntry(Long min, Long max) {
return OrcProto.RowIndexEntry.newBuilder()
.setStatistics(createIntStats(min, max)).build();
}
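  // SargApplier.pickRowGroups should keep only the row groups whose index
  // statistics can satisfy both equality predicates (x = 100 and y = 10);
  // here only the last of the four row groups qualifies.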
@Test
public void testPickRowGroups() throws Exception {
Configuration conf = new Configuration();
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:int>");
SchemaEvolution evolution = new SchemaEvolution(schema, schema,
new Reader.Options(conf));
SearchArgument sarg =
SearchArgumentFactory.newBuilder()
.startAnd()
.equals("x", PredicateLeaf.Type.LONG, 100L)
.equals("y", PredicateLeaf.Type.LONG, 10L)
.end().build();
RecordReaderImpl.SargApplier applier =
new RecordReaderImpl.SargApplier(sarg, 1000, evolution,
OrcFile.WriterVersion.ORC_135, false, false, false);
OrcProto.StripeInformation stripe =
OrcProto.StripeInformation.newBuilder().setNumberOfRows(4000).build();
OrcProto.RowIndex[] indexes = new OrcProto.RowIndex[3];
indexes[1] = OrcProto.RowIndex.newBuilder()
.addEntry(createIndexEntry(0L, 10L))
.addEntry(createIndexEntry(100L, 200L))
.addEntry(createIndexEntry(300L, 500L))
.addEntry(createIndexEntry(100L, 100L))
.build();
indexes[2] = OrcProto.RowIndex.newBuilder()
.addEntry(createIndexEntry(0L, 9L))
.addEntry(createIndexEntry(11L, 20L))
.addEntry(createIndexEntry(10L, 10L))
.addEntry(createIndexEntry(0L, 100L))
.build();
List<OrcProto.ColumnEncoding> encodings = new ArrayList<>();
encodings.add(OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT).build());
encodings.add(OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2).build());
encodings.add(OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2).build());
boolean[] rows = applier.pickRowGroups(
new ReaderImpl.StripeInformationImpl(stripe, 1, -1, null),
indexes, null, encodings, null, false);
assertEquals(4, rows.length);
assertFalse(rows[0]);
assertFalse(rows[1]);
assertFalse(rows[2]);
assertTrue(rows[3]);
assertEquals(0, applier.getExceptionCount()[0]);
assertEquals(0, applier.getExceptionCount()[1]);
}
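  // A DATE predicate against an int column can't be evaluated; the failure is
  // counted in getExceptionCount() and only the y predicate is used to prune.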
@Test
public void testPickRowGroupsError() throws Exception {
Configuration conf = new Configuration();
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:int>");
SchemaEvolution evolution = new SchemaEvolution(schema, schema,
new Reader.Options(conf));
SearchArgument sarg =
SearchArgumentFactory.newBuilder()
.startAnd()
.equals("x", PredicateLeaf.Type.DATE, Date.valueOf("2017-01-02"))
.equals("y", PredicateLeaf.Type.LONG, 10L)
.end().build();
RecordReaderImpl.SargApplier applier =
new RecordReaderImpl.SargApplier(sarg, 1000, evolution,
OrcFile.WriterVersion.ORC_135, false, false, false);
OrcProto.StripeInformation stripe =
OrcProto.StripeInformation.newBuilder().setNumberOfRows(3000).build();
OrcProto.RowIndex[] indexes = new OrcProto.RowIndex[3];
indexes[1] = OrcProto.RowIndex.newBuilder()
.addEntry(createIndexEntry(0L, 10L))
.addEntry(createIndexEntry(10L, 20L))
.addEntry(createIndexEntry(20L, 30L))
.build();
indexes[2] = OrcProto.RowIndex.newBuilder()
.addEntry(createIndexEntry(0L, 9L))
.addEntry(createIndexEntry(10L, 20L))
.addEntry(createIndexEntry(0L, 30L))
.build();
List<OrcProto.ColumnEncoding> encodings = new ArrayList<>();
encodings.add(OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT).build());
encodings.add(OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2).build());
encodings.add(OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2).build());
boolean[] rows = applier.pickRowGroups(
new ReaderImpl.StripeInformationImpl(stripe, 1, -1, null),
indexes, null, encodings, null, false);
assertEquals(3, rows.length);
assertFalse(rows[0]);
assertTrue(rows[1]);
assertTrue(rows[2]);
assertEquals(1, applier.getExceptionCount()[0]);
assertEquals(0, applier.getExceptionCount()[1]);
}
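  // With forced positional evolution, a predicate on a column that only exists
  // in the reader schema can't prune anything, so READ_ALL_RGS is returned.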
@Test
public void testPositionalEvolutionAddColumnPPD() throws IOException {
Reader.Options opts = new Reader.Options();
opts.forcePositionalEvolution(true);
TypeDescription file = TypeDescription.fromString("struct<x:int>");
// new column added on reader side
TypeDescription read = TypeDescription.fromString("struct<x:int,y:boolean>");
opts.include(includeAll(read));
SchemaEvolution evo = new SchemaEvolution(file, read, opts);
SearchArgument sarg = SearchArgumentFactory.newBuilder().startAnd()
.equals("y", PredicateLeaf.Type.BOOLEAN, true).end().build();
RecordReaderImpl.SargApplier applier =
new RecordReaderImpl.SargApplier(sarg, 1000, evo,
OrcFile.WriterVersion.ORC_135, false, false, false);
OrcProto.StripeInformation stripe =
OrcProto.StripeInformation.newBuilder().setNumberOfRows(2000).build();
OrcProto.RowIndex[] indexes = new OrcProto.RowIndex[3];
indexes[1] = OrcProto.RowIndex.newBuilder() // index for original x column
.addEntry(createIndexEntry(0L, 10L))
.addEntry(createIndexEntry(100L, 200L))
.build();
    indexes[2] = null; // no-op, just to clarify that the new reader column doesn't have an index
List<OrcProto.ColumnEncoding> encodings = new ArrayList<>();
encodings.add(OrcProto.ColumnEncoding.newBuilder().setKind(OrcProto.ColumnEncoding.Kind.DIRECT).build());
boolean[] rows = applier.pickRowGroups(new ReaderImpl.StripeInformationImpl(stripe, 1, -1, null),
indexes, null, encodings, null, false);
    assertTrue(Arrays.equals(SargApplier.READ_ALL_RGS, rows)); // cannot filter on the new column, so all row groups are returned
}
private boolean[] includeAll(TypeDescription readerType) {
int numColumns = readerType.getMaximumId() + 1;
boolean[] result = new boolean[numColumns];
Arrays.fill(result, true);
return result;
}
@Test
public void testSkipDataReaderOpen() throws Exception {
IOException ioe = new IOException("Don't open when there is no stripe");
DataReader mockedDataReader = mock(DataReader.class);
doThrow(ioe).when(mockedDataReader).open();
when(mockedDataReader.clone()).thenReturn(mockedDataReader);
doNothing().when(mockedDataReader).close();
Configuration conf = new Configuration();
Path path = new Path(workDir, "empty.orc");
FileSystem.get(conf).delete(path, true);
OrcFile.WriterOptions options = OrcFile.writerOptions(conf).setSchema(TypeDescription.createLong());
Writer writer = OrcFile.createWriter(path, options);
writer.close();
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
Reader.Options readerOptions = reader.options().dataReader(mockedDataReader);
RecordReader recordReader = reader.rows(readerOptions);
recordReader.close();
}
@Test
public void testCloseAtConstructorException() throws Exception {
Configuration conf = new Configuration();
Path path = new Path(workDir, "oneRow.orc");
FileSystem.get(conf).delete(path, true);
TypeDescription schema = TypeDescription.createLong();
OrcFile.WriterOptions options = OrcFile.writerOptions(conf).setSchema(schema);
Writer writer = OrcFile.createWriter(path, options);
VectorizedRowBatch writeBatch = schema.createRowBatch();
int row = writeBatch.size++;
((LongColumnVector) writeBatch.cols[0]).vector[row] = 0;
writer.addRowBatch(writeBatch);
writer.close();
DataReader mockedDataReader = mock(DataReader.class);
when(mockedDataReader.clone()).thenReturn(mockedDataReader);
doThrow(new IOException()).when(mockedDataReader).readStripeFooter(any());
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
Reader.Options readerOptions = reader.options().dataReader(mockedDataReader);
boolean isCalled = false;
try {
reader.rows(readerOptions);
} catch (IOException ie) {
isCalled = true;
}
assertTrue(isCalled);
verify(mockedDataReader, times(1)).close();
}
@Test
public void testSargApplier() throws Exception {
Configuration conf = new Configuration();
TypeDescription schema = TypeDescription.createLong();
SearchArgument sarg = SearchArgumentFactory.newBuilder().build();
SchemaEvolution evo = new SchemaEvolution(schema, schema, new Reader.Options(conf));
RecordReaderImpl.SargApplier applier1 =
new RecordReaderImpl.SargApplier(sarg, 0, evo, OrcFile.WriterVersion.ORC_135, false);
Field f1 = RecordReaderImpl.SargApplier.class.getDeclaredField("writerUsedProlepticGregorian");
f1.setAccessible(true);
assertFalse((boolean)f1.get(applier1));
Field f2 = RecordReaderImpl.SargApplier.class.getDeclaredField("convertToProlepticGregorian");
f2.setAccessible(true);
assertFalse((boolean)f2.get(applier1));
}
@Test
public void testWithoutStatistics() {
OrcProto.ColumnEncoding encoding = OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2)
.build();
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.LONG, "x", 2L, null);
TruthValue truthValue = RecordReaderImpl.evaluatePredicateProto(
RecordReaderImpl.EMPTY_COLUMN_STATISTICS,
pred, null, encoding, null,
CURRENT_WRITER, TypeDescription.createInt());
assertEquals(TruthValue.YES_NO_NULL, truthValue);
}
@Test
public void testMissMinOrMaxInStatistics() {
OrcProto.ColumnEncoding encoding = OrcProto.ColumnEncoding.newBuilder()
.setKind(OrcProto.ColumnEncoding.Kind.DIRECT_V2)
.build();
PredicateLeaf pred = createPredicateLeaf(
PredicateLeaf.Operator.EQUALS, PredicateLeaf.Type.LONG, "x", 2L, null);
OrcProto.ColumnStatistics hasValuesAndHasNullStatistics =
OrcProto.ColumnStatistics.newBuilder().setNumberOfValues(10)
.setHasNull(true)
.setBytesOnDisk(40)
.build();
OrcProto.ColumnStatistics hasValuesAndNoHasNullStatistics =
OrcProto.ColumnStatistics.newBuilder().setNumberOfValues(5)
.setHasNull(false)
.setBytesOnDisk(20)
.build();
OrcProto.ColumnStatistics noHasValuesAndHasNullStatistics =
OrcProto.ColumnStatistics.newBuilder().setNumberOfValues(0)
.setHasNull(true)
.setBytesOnDisk(0)
.build();
TruthValue whenHasValuesAndHasNullTruthValue = RecordReaderImpl.evaluatePredicateProto(
hasValuesAndHasNullStatistics,
pred, null, encoding, null,
CURRENT_WRITER, TypeDescription.createInt());
assertEquals(TruthValue.YES_NO_NULL, whenHasValuesAndHasNullTruthValue);
TruthValue whenHasValuesAndNoHasNullTruthValue = RecordReaderImpl.evaluatePredicateProto(
hasValuesAndNoHasNullStatistics,
pred, null, encoding, null,
CURRENT_WRITER, TypeDescription.createInt());
assertEquals(TruthValue.YES_NO, whenHasValuesAndNoHasNullTruthValue);
TruthValue whenNoHasValuesAndHasNullStatistics = RecordReaderImpl.evaluatePredicateProto(
noHasValuesAndHasNullStatistics,
pred, null, encoding, null,
CURRENT_WRITER, TypeDescription.createInt());
assertEquals(TruthValue.NULL, whenNoHasValuesAndHasNullStatistics);
}
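  // Exercises row group end offset handling while doubling the compression
  // buffer size from 64 bytes to 2 KB: only the second row group matches the
  // filter, and its values must be read from the correct offset.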
@Test
public void testRgEndOffset() throws IOException {
for (int compressionSize = 64; compressionSize < 4096; compressionSize *= 2) {
testSmallCompressionSizeOrc(compressionSize);
}
}
private void testSmallCompressionSizeOrc(int compressionSize) throws IOException {
Configuration conf = new Configuration();
Path path = new Path(workDir, "smallCompressionSize.orc");
FileSystem.get(conf).delete(path, true);
TypeDescription schema = TypeDescription.fromString("struct<x:int>");
conf.setLong(OrcConf.BUFFER_SIZE.getAttribute(), compressionSize);
OrcFile.WriterOptions options = OrcFile.writerOptions(conf).setSchema(schema);
Writer writer = OrcFile.createWriter(path, options);
VectorizedRowBatch writeBatch = schema.createRowBatch();
LongColumnVector writeX = (LongColumnVector) writeBatch.cols[0];
for (int row = 0; row < 30_000; ++row) {
int idx = writeBatch.size++;
writeX.vector[idx] = row >= 10_000 && row < 20_000 ? row + 100_000 : row;
if (writeBatch.size == writeBatch.getMaxSize()) {
writer.addRowBatch(writeBatch);
writeBatch.reset();
}
}
if (writeBatch.size != 0) {
writer.addRowBatch(writeBatch);
}
writer.close();
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
// only the second row group will be selected
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("x", PredicateLeaf.Type.LONG, 100000L)
.end().build();
VectorizedRowBatch batch = reader.getSchema().createRowBatch();
LongColumnVector readX = (LongColumnVector) batch.cols[0];
try (RecordReader rows = reader.rows(reader.options().searchArgument(sarg, null))) {
int row = 10_000;
while (rows.nextBatch(batch)) {
for (int i = 0; i < batch.size; i++) {
final int current_row = row++;
final int expectedVal = current_row >= 10_000 && current_row < 20_000 ? current_row + 100_000 : current_row;
assertEquals(expectedVal, readX.vector[i]);
}
}
}
}
@Test
public void testRowIndexStrideNegativeFilter() throws Exception {
Path testFilePath = new Path(workDir, "rowIndexStrideNegative.orc");
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
fs.delete(testFilePath, true);
TypeDescription schema =
TypeDescription.fromString("struct<str:string>");
Writer writer = OrcFile.createWriter(
testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).rowIndexStride(-1));
VectorizedRowBatch batch = schema.createRowBatch();
BytesColumnVector strVector = (BytesColumnVector) batch.cols[0];
for (int i = 0; i < 32 * 1024; i++) {
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
byte[] value = String.format("row %06d", i).getBytes(StandardCharsets.UTF_8);
strVector.setRef(batch.size, value, 0, value.length);
++batch.size;
}
writer.addRowBatch(batch);
batch.reset();
writer.close();
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
SearchArgument sarg = SearchArgumentFactory.newBuilder(conf)
.startNot().isNull("str", PredicateLeaf.Type.STRING).end()
.build();
RecordReader recordReader = reader.rows(reader.options().searchArgument(sarg, null));
batch = reader.getSchema().createRowBatch();
strVector = (BytesColumnVector) batch.cols[0];
long base = 0;
while (recordReader.nextBatch(batch)) {
for (int r = 0; r < batch.size; ++r) {
String value = String.format("row %06d", r + base);
assertEquals(value, strVector.toString(r), "row " + (r + base));
}
base += batch.size;
}
}
}
| 118,066 | 44.046547 | 120 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestRecordReaderUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.util.Objects;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
class TestRecordReaderUtils {
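  // Shared ranges for the chunk merge tests: two adjacent 1000 byte reads, an
  // overlapping pair at offset 4000 and an isolated read at offset 8000.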
private final BufferChunkList rangeList = new TestOrcLargeStripe.RangeBuilder()
.range(1000, 1000)
.range(2000, 1000)
.range(4000, 1000)
.range(4100, 100)
.range(8000, 1000).build();
private static void assertChunkEquals(BufferChunk expected, BufferChunk actual) {
assertTrue(Objects.equals(expected, actual)
&& expected.getOffset() == actual.getOffset()
&& expected.getLength() == actual.getLength());
}
@Test
public void testDeterminationOfSingleRead() {
BufferChunk toChunk = RecordReaderUtils.ChunkReader.create(rangeList.get(), 0).getTo();
assertChunkEquals(rangeList.get(1), toChunk);
assertTrue(RecordReaderUtils.ChunkReader.create(rangeList.get(), toChunk)
.getExtraBytesFraction()
< 0.001);
toChunk = RecordReaderUtils.ChunkReader.create(rangeList.get(), 1000).getTo();
assertChunkEquals(rangeList.get(3), toChunk);
assertTrue(RecordReaderUtils.ChunkReader.create(rangeList.get(), toChunk)
.getExtraBytesFraction()
>= .2);
toChunk = RecordReaderUtils.ChunkReader.create(rangeList.get(), 999).getTo();
assertChunkEquals(rangeList.get(1), toChunk);
assertTrue(RecordReaderUtils.ChunkReader.create(rangeList.get(), toChunk)
.getExtraBytesFraction()
< 0.001);
}
@Test
public void testNoGapCombine() {
BufferChunk toChunk = RecordReaderUtils.findSingleRead(rangeList.get());
assertChunkEquals(rangeList.get(1), toChunk);
}
@Test
public void testReadExtraBytes() {
RecordReaderUtils.ChunkReader chunkReader =
RecordReaderUtils.ChunkReader.create(rangeList.get(),
1000);
assertChunkEquals(rangeList.get(3), chunkReader.getTo());
populateAndValidateChunks(chunkReader, false);
}
@Test
public void testRemoveBytes() {
RecordReaderUtils.ChunkReader chunkReader =
RecordReaderUtils.ChunkReader.create(rangeList.get(),
1000);
assertChunkEquals(rangeList.get(3), chunkReader.getTo());
populateAndValidateChunks(chunkReader, true);
}
@Test
public void testRemoveBytesSmallerOverlapFirst() {
BufferChunkList rangeList = new TestOrcLargeStripe.RangeBuilder()
.range(1000, 1000)
.range(2000, 1000)
.range(4000, 100)
.range(4000, 1000)
.range(8000, 1000).build();
RecordReaderUtils.ChunkReader chunkReader =
RecordReaderUtils.ChunkReader.create(rangeList.get(),
1000);
assertChunkEquals(rangeList.get(3), chunkReader.getTo());
populateAndValidateChunks(chunkReader, true);
}
@Test
public void testRemoveBytesWithOverlap() {
BufferChunkList rangeList = new TestOrcLargeStripe.RangeBuilder()
.range(1000, 1000)
.range(1800, 400)
.range(2000, 1000)
.range(4000, 100)
.range(4000, 1000)
.range(8000, 1000).build();
RecordReaderUtils.ChunkReader chunkReader =
RecordReaderUtils.ChunkReader.create(rangeList.get(),
1000);
assertChunkEquals(rangeList.get(4), chunkReader.getTo());
populateAndValidateChunks(chunkReader, true);
}
@Test
public void testExtraBytesReadWithinThreshold() {
BufferChunkList rangeList = new TestOrcLargeStripe.RangeBuilder()
.range(1000, 1000)
.range(1800, 400)
.range(2000, 1000)
.range(4000, 100)
.range(4000, 1000)
.range(8000, 1000).build();
RecordReaderUtils.ChunkReader chunkReader =
RecordReaderUtils.ChunkReader.create(rangeList.get(),
1000);
assertChunkEquals(rangeList.get(4), chunkReader.getTo());
chunkReader.populateChunks(makeByteBuffer(chunkReader.getReadBytes(),
chunkReader.getFrom().getOffset()),
false,
1.0);
validateChunks(chunkReader);
assertNotEquals(chunkReader.getReadBytes(), chunkReader.getReqBytes());
assertEquals(chunkReader.getReadBytes(), chunkReader.getFrom().getData().array().length);
}
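  // Fills the buffer with bytes derived from their absolute stream offset so
  // validateChunks can check that every chunk ends up with the right slice.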
private ByteBuffer makeByteBuffer(int length, long offset) {
byte[] readBytes = new byte[length];
for (int i = 0; i < readBytes.length; i++) {
readBytes[i] = (byte) ((i + offset) % Byte.MAX_VALUE);
}
return ByteBuffer.wrap(readBytes);
}
private void populateAndValidateChunks(RecordReaderUtils.ChunkReader chunkReader,
boolean withRemove) {
if (withRemove) {
assertTrue(chunkReader.getReadBytes() > chunkReader.getReqBytes());
}
ByteBuffer bytes = makeByteBuffer(chunkReader.getReadBytes(),
chunkReader.getFrom().getOffset());
if (withRemove) {
chunkReader.populateChunksReduceSize(bytes, false);
assertEquals(chunkReader.getReqBytes(), chunkReader.getFrom().getData().array().length);
} else {
chunkReader.populateChunksAsIs(bytes);
assertEquals(chunkReader.getReadBytes(), chunkReader.getFrom().getData().array().length);
}
validateChunks(chunkReader);
}
private void validateChunks(RecordReaderUtils.ChunkReader chunkReader) {
BufferChunk current = chunkReader.getFrom();
while (current != chunkReader.getTo().next) {
assertTrue(current.hasData());
assertEquals(current.getOffset() % Byte.MAX_VALUE, current.getData().get(),
String.format("Failed for %s", current));
current = (BufferChunk) current.next;
}
}
}
| 6,804 | 37.01676 | 95 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestRunLengthByteReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestRunLengthByteReader {
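  // The seek tests write 2048 bytes (repeating values first, then literals)
  // while recording positions, read them back, and then seek to every recorded
  // position in reverse order.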
@Test
public void testUncompressedSeek() throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
RunLengthByteWriter out = new RunLengthByteWriter(new OutStream("test",
new StreamOptions(100), collect));
TestInStream.PositionCollector[] positions =
new TestInStream.PositionCollector[2048];
for(int i=0; i < 2048; ++i) {
positions[i] = new TestInStream.PositionCollector();
out.getPosition(positions[i]);
if (i < 1024) {
out.write((byte) (i/4));
} else {
out.write((byte) i);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthByteReader in = new RunLengthByteReader(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining()));
for(int i=0; i < 2048; ++i) {
int x = in.next() & 0xff;
if (i < 1024) {
assertEquals((i/4) & 0xff, x);
} else {
assertEquals(i & 0xff, x);
}
}
for(int i=2047; i >= 0; --i) {
in.seek(positions[i]);
int x = in.next() & 0xff;
if (i < 1024) {
assertEquals((i/4) & 0xff, x);
} else {
assertEquals(i & 0xff, x);
}
}
}
@Test
public void testCompressedSeek() throws Exception {
CompressionCodec codec = new SnappyCodec();
StreamOptions options = new StreamOptions(500)
.withCodec(codec, codec.getDefaultOptions());
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
RunLengthByteWriter out = new RunLengthByteWriter(
new OutStream("test", options, collect));
TestInStream.PositionCollector[] positions =
new TestInStream.PositionCollector[2048];
for(int i=0; i < 2048; ++i) {
positions[i] = new TestInStream.PositionCollector();
out.getPosition(positions[i]);
if (i < 1024) {
out.write((byte) (i/4));
} else {
out.write((byte) i);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthByteReader in = new RunLengthByteReader(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining(),
InStream.options().withCodec(codec).withBufferSize(500)));
for(int i=0; i < 2048; ++i) {
int x = in.next() & 0xff;
if (i < 1024) {
assertEquals((i/4) & 0xff, x);
} else {
assertEquals(i & 0xff, x);
}
}
for(int i=2047; i >= 0; --i) {
in.seek(positions[i]);
int x = in.next() & 0xff;
if (i < 1024) {
assertEquals((i/4) & 0xff, x);
} else {
assertEquals(i & 0xff, x);
}
}
}
@Test
public void testSkips() throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
RunLengthByteWriter out = new RunLengthByteWriter(new OutStream("test",
new StreamOptions(100), collect));
for(int i=0; i < 2048; ++i) {
if (i < 1024) {
out.write((byte) (i/16));
} else {
out.write((byte) i);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthByteReader in = new RunLengthByteReader(InStream.create("test",
new BufferChunk(inBuf, 0), 0, inBuf.remaining()));
for(int i=0; i < 2048; i += 10) {
int x = in.next() & 0xff;
if (i < 1024) {
assertEquals((i/16) & 0xff, x);
} else {
assertEquals(i & 0xff, x);
}
if (i < 2038) {
in.skip(9);
}
in.skip(0);
}
}
}
| 4,944 | 32.412162 | 78 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestRunLengthIntegerReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.apache.orc.impl.writer.StreamOptions;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestRunLengthIntegerReader {
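  // Shared body for the seek tests: writes runs, an incrementing sequence and
  // random values while recording positions, reads the first half back, then
  // seeks to each recorded position in reverse.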
public void runSeekTest(CompressionCodec codec) throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
StreamOptions options = new StreamOptions(1000);
if (codec != null) {
options.withCodec(codec, codec.getDefaultOptions());
}
RunLengthIntegerWriter out = new RunLengthIntegerWriter(
new OutStream("test", options, collect), true);
TestInStream.PositionCollector[] positions =
new TestInStream.PositionCollector[4096];
Random random = new Random(99);
int[] junk = new int[2048];
for(int i=0; i < junk.length; ++i) {
junk[i] = random.nextInt();
}
for(int i=0; i < 4096; ++i) {
positions[i] = new TestInStream.PositionCollector();
out.getPosition(positions[i]);
// test runs, incrementing runs, non-runs
if (i < 1024) {
out.write(i/4);
} else if (i < 2048) {
out.write(2*i);
} else {
out.write(junk[i-2048]);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReader in = new RunLengthIntegerReader(InStream.create
("test", new BufferChunk(inBuf, 0), 0, inBuf.remaining(),
InStream.options().withCodec(codec).withBufferSize(1000)), true);
for(int i=0; i < 2048; ++i) {
int x = (int) in.next();
if (i < 1024) {
assertEquals(i/4, x);
} else {
assertEquals(2*i, x);
}
}
for(int i=2047; i >= 0; --i) {
in.seek(positions[i]);
int x = (int) in.next();
if (i < 1024) {
assertEquals(i/4, x);
} else {
assertEquals(2*i, x);
}
}
}
@Test
public void testUncompressedSeek() throws Exception {
runSeekTest(null);
}
@Test
public void testCompressedSeek() throws Exception {
runSeekTest(new ZlibCodec());
}
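  /**
   * Writes 2048 integers (0..1023, then 256 * i), reads every tenth value, and
   * exercises skip(9) plus a no-op skip(0) between reads.
   */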
@Test
public void testSkips() throws Exception {
TestInStream.OutputCollector collect = new TestInStream.OutputCollector();
RunLengthIntegerWriter out = new RunLengthIntegerWriter(
new OutStream("test", new StreamOptions(100), collect), true);
for(int i=0; i < 2048; ++i) {
if (i < 1024) {
out.write(i);
} else {
out.write(256 * i);
}
}
out.flush();
ByteBuffer inBuf = ByteBuffer.allocate(collect.buffer.size());
collect.buffer.setByteBuffer(inBuf, 0, collect.buffer.size());
inBuf.flip();
RunLengthIntegerReader in = new RunLengthIntegerReader(InStream.create
("test", new BufferChunk(inBuf, 0), 0, inBuf.remaining()), true);
for(int i=0; i < 2048; i += 10) {
int x = (int) in.next();
if (i < 1024) {
assertEquals(i, x);
} else {
assertEquals(256 * i, x);
}
if (i < 2038) {
in.skip(9);
}
in.skip(0);
}
}
}
| 4,056 | 31.198413 | 78 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestSchemaEvolution.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.impl.reader.ReaderEncryption;
import org.apache.orc.impl.reader.StripePlanner;
import org.apache.orc.impl.reader.tree.BatchReader;
import org.apache.orc.impl.reader.tree.StructBatchReader;
import org.apache.orc.impl.reader.tree.TypeReader;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoField;
import java.util.Arrays;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
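/**
 * Tests for schema evolution: detection of type conversions, implicit (lossless)
 * conversions, predicate push-down safety, and the mapping of reader columns to
 * file columns when fields are added, removed, reordered, or differ only in case.
 */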
public class TestSchemaEvolution {
Configuration conf;
Reader.Options options;
Path testFilePath;
FileSystem fs;
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
@BeforeEach
public void setup(TestInfo testInfo) throws Exception {
conf = new Configuration();
options = new Reader.Options(conf);
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
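  /**
   * Identical file and reader structs need no conversion, widening int to long is
   * an implicit-only conversion, and narrowing decimal precision is not.
   */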
@Test
public void testDataTypeConversion1() throws IOException {
TypeDescription fileStruct1 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10));
SchemaEvolution same1 = new SchemaEvolution(fileStruct1, null, options);
assertFalse(same1.hasConversion());
TypeDescription readerStruct1 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10));
SchemaEvolution both1 = new SchemaEvolution(fileStruct1, readerStruct1, options);
assertFalse(both1.hasConversion());
TypeDescription readerStruct1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10));
SchemaEvolution both1diff = new SchemaEvolution(fileStruct1, readerStruct1diff, options);
assertTrue(both1diff.hasConversion());
assertTrue(both1diff.isOnlyImplicitConversion());
TypeDescription readerStruct1diffPrecision = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(12).withScale(10));
SchemaEvolution both1diffPrecision = new SchemaEvolution(fileStruct1,
readerStruct1diffPrecision, options);
assertTrue(both1diffPrecision.hasConversion());
assertFalse(both1diffPrecision.isOnlyImplicitConversion());
}
@Test
public void testDataTypeConversion2() throws IOException {
TypeDescription fileStruct2 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createByte())
.addUnionChild(TypeDescription.createDecimal()
.withPrecision(20).withScale(10)))
.addField("f2", TypeDescription.createStruct()
.addField("f3", TypeDescription.createDate())
.addField("f4", TypeDescription.createDouble())
.addField("f5", TypeDescription.createBoolean()))
.addField("f6", TypeDescription.createChar().withMaxLength(100));
SchemaEvolution same2 = new SchemaEvolution(fileStruct2, null, options);
assertFalse(same2.hasConversion());
TypeDescription readerStruct2 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createByte())
.addUnionChild(TypeDescription.createDecimal()
.withPrecision(20).withScale(10)))
.addField("f2", TypeDescription.createStruct()
.addField("f3", TypeDescription.createDate())
.addField("f4", TypeDescription.createDouble())
.addField("f5", TypeDescription.createBoolean()))
.addField("f6", TypeDescription.createChar().withMaxLength(100));
SchemaEvolution both2 = new SchemaEvolution(fileStruct2, readerStruct2, options);
assertFalse(both2.hasConversion());
TypeDescription readerStruct2diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createByte())
.addUnionChild(TypeDescription.createDecimal()
.withPrecision(20).withScale(10)))
.addField("f2", TypeDescription.createStruct()
.addField("f3", TypeDescription.createDate())
.addField("f4", TypeDescription.createDouble())
.addField("f5", TypeDescription.createByte()))
.addField("f6", TypeDescription.createChar().withMaxLength(100));
SchemaEvolution both2diff = new SchemaEvolution(fileStruct2, readerStruct2diff, options);
assertTrue(both2diff.hasConversion());
assertFalse(both2diff.isOnlyImplicitConversion());
TypeDescription readerStruct2diffChar = TypeDescription.createStruct()
.addField("f1", TypeDescription.createUnion()
.addUnionChild(TypeDescription.createByte())
.addUnionChild(TypeDescription.createDecimal()
.withPrecision(20).withScale(10)))
.addField("f2", TypeDescription.createStruct()
.addField("f3", TypeDescription.createDate())
.addField("f4", TypeDescription.createDouble())
.addField("f5", TypeDescription.createBoolean()))
.addField("f6", TypeDescription.createChar().withMaxLength(80));
SchemaEvolution both2diffChar = new SchemaEvolution(fileStruct2, readerStruct2diffChar, options);
assertTrue(both2diffChar.hasConversion());
assertFalse(both2diffChar.isOnlyImplicitConversion());
}
@Test
public void testIntegerImplicitConversion() throws IOException {
TypeDescription fileStructByte = TypeDescription.createStruct()
.addField("f1", TypeDescription.createByte())
.addField("f2", TypeDescription.createString());
SchemaEvolution sameByte = new SchemaEvolution(fileStructByte, null, options);
assertFalse(sameByte.hasConversion());
TypeDescription readerStructByte = TypeDescription.createStruct()
.addField("f1", TypeDescription.createByte())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothByte = new SchemaEvolution(fileStructByte, readerStructByte, options);
assertFalse(bothByte.hasConversion());
TypeDescription readerStructByte1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothByte1diff = new SchemaEvolution(fileStructByte, readerStructByte1diff, options);
assertTrue(bothByte1diff.hasConversion());
assertTrue(bothByte1diff.isOnlyImplicitConversion());
TypeDescription readerStructByte2diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothByte2diff = new SchemaEvolution(fileStructByte, readerStructByte2diff, options);
assertTrue(bothByte2diff.hasConversion());
assertTrue(bothByte2diff.isOnlyImplicitConversion());
TypeDescription readerStruct3diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothByte3diff = new SchemaEvolution(fileStructByte, readerStruct3diff, options);
assertTrue(bothByte3diff.hasConversion());
assertTrue(bothByte3diff.isOnlyImplicitConversion());
TypeDescription fileStructShort = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort())
.addField("f2", TypeDescription.createString());
SchemaEvolution sameShort = new SchemaEvolution(fileStructShort, null, options);
assertFalse(sameShort.hasConversion());
TypeDescription readerStructShort = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothShort = new SchemaEvolution(fileStructShort, readerStructShort, options);
assertFalse(bothShort.hasConversion());
TypeDescription readerStructShort1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothShort1diff = new SchemaEvolution(fileStructShort, readerStructShort1diff, options);
assertTrue(bothShort1diff.hasConversion());
assertTrue(bothShort1diff.isOnlyImplicitConversion());
TypeDescription readerStructShort2diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothShort2diff = new SchemaEvolution(fileStructShort, readerStructShort2diff, options);
assertTrue(bothShort2diff.hasConversion());
assertTrue(bothShort2diff.isOnlyImplicitConversion());
TypeDescription fileStructInt = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString());
SchemaEvolution sameInt = new SchemaEvolution(fileStructInt, null, options);
assertFalse(sameInt.hasConversion());
TypeDescription readerStructInt = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothInt = new SchemaEvolution(fileStructInt, readerStructInt, options);
assertFalse(bothInt.hasConversion());
TypeDescription readerStructInt1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothInt1diff = new SchemaEvolution(fileStructInt, readerStructInt1diff, options);
assertTrue(bothInt1diff.hasConversion());
assertTrue(bothInt1diff.isOnlyImplicitConversion());
}
@Test
public void testFloatImplicitConversion() throws IOException {
TypeDescription fileStructFloat = TypeDescription.createStruct()
.addField("f1", TypeDescription.createFloat())
.addField("f2", TypeDescription.createString());
SchemaEvolution sameFloat = new SchemaEvolution(fileStructFloat, null, options);
assertFalse(sameFloat.hasConversion());
TypeDescription readerStructFloat = TypeDescription.createStruct()
.addField("f1", TypeDescription.createFloat())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothFloat = new SchemaEvolution(fileStructFloat, readerStructFloat, options);
assertFalse(bothFloat.hasConversion());
TypeDescription readerStructFloat1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createDouble())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothFloat1diff = new SchemaEvolution(fileStructFloat, readerStructFloat1diff, options);
assertTrue(bothFloat1diff.hasConversion());
assertTrue(bothFloat1diff.isOnlyImplicitConversion());
}
@Test
public void testCharImplicitConversion() throws IOException {
TypeDescription fileStructChar = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar().withMaxLength(15))
.addField("f2", TypeDescription.createString());
SchemaEvolution sameChar = new SchemaEvolution(fileStructChar, null, options);
assertFalse(sameChar.hasConversion());
TypeDescription readerStructChar = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar().withMaxLength(15))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothChar = new SchemaEvolution(fileStructChar, readerStructChar, options);
assertFalse(bothChar.hasConversion());
TypeDescription readerStructChar1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createString())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothChar1diff = new SchemaEvolution(fileStructChar, readerStructChar1diff, options);
assertTrue(bothChar1diff.hasConversion());
assertTrue(bothChar1diff.isOnlyImplicitConversion());
TypeDescription readerStructChar2diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar().withMaxLength(14))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothChar2diff = new SchemaEvolution(fileStructChar, readerStructChar2diff, options);
assertTrue(bothChar2diff.hasConversion());
assertFalse(bothChar2diff.isOnlyImplicitConversion());
TypeDescription readerStructChar3diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar().withMaxLength(15))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothChar3diff = new SchemaEvolution(fileStructChar, readerStructChar3diff, options);
assertTrue(bothChar3diff.hasConversion());
assertTrue(bothChar3diff.isOnlyImplicitConversion());
TypeDescription readerStructChar4diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar().withMaxLength(14))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothChar4diff = new SchemaEvolution(fileStructChar, readerStructChar4diff, options);
assertTrue(bothChar4diff.hasConversion());
assertFalse(bothChar4diff.isOnlyImplicitConversion());
}
@Test
public void testVarcharImplicitConversion() throws IOException {
TypeDescription fileStructVarchar = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar().withMaxLength(15))
.addField("f2", TypeDescription.createString());
SchemaEvolution sameVarchar = new SchemaEvolution(fileStructVarchar, null, options);
assertFalse(sameVarchar.hasConversion());
TypeDescription readerStructVarchar = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar().withMaxLength(15))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothVarchar = new SchemaEvolution(fileStructVarchar, readerStructVarchar, options);
assertFalse(bothVarchar.hasConversion());
TypeDescription readerStructVarchar1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createString())
.addField("f2", TypeDescription.createString());
SchemaEvolution bothVarchar1diff = new SchemaEvolution(fileStructVarchar, readerStructVarchar1diff, options);
assertTrue(bothVarchar1diff.hasConversion());
assertTrue(bothVarchar1diff.isOnlyImplicitConversion());
TypeDescription readerStructVarchar2diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar().withMaxLength(14))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothVarchar2diff = new SchemaEvolution(fileStructVarchar, readerStructVarchar2diff, options);
assertTrue(bothVarchar2diff.hasConversion());
assertFalse(bothVarchar2diff.isOnlyImplicitConversion());
TypeDescription readerStructVarchar3diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar().withMaxLength(15))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothVarchar3diff = new SchemaEvolution(fileStructVarchar, readerStructVarchar3diff, options);
assertTrue(bothVarchar3diff.hasConversion());
assertTrue(bothVarchar3diff.isOnlyImplicitConversion());
TypeDescription readerStructVarchar4diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar().withMaxLength(14))
.addField("f2", TypeDescription.createString());
SchemaEvolution bothVarchar4diff = new SchemaEvolution(fileStructVarchar, readerStructVarchar4diff, options);
assertTrue(bothVarchar4diff.hasConversion());
assertFalse(bothVarchar4diff.isOnlyImplicitConversion());
}
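  /**
   * Writes a single float value and reads it back through a double reader schema.
   */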
@Test
public void testFloatToDoubleEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createFloat();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DoubleColumnVector dcv = new DoubleColumnVector(1024);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = 74.72f;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDouble();
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals(74.72, ((DoubleColumnVector) batch.cols[0]).vector[0], 0.00001);
rows.close();
}
@Test
public void testFloatToDecimalEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createFloat();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DoubleColumnVector dcv = new DoubleColumnVector(1024);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = 74.72f;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(38).withScale(2);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals("74.72", ((DecimalColumnVector) batch.cols[0]).vector[0].toString());
rows.close();
}
@Test
public void testFloatToDecimal64Evolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createFloat();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DoubleColumnVector dcv = new DoubleColumnVector(1024);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = 74.72f;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(10).withScale(2);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatchV2();
rows.nextBatch(batch);
assertEquals("74.72", ((Decimal64ColumnVector) batch.cols[0]).getScratchWritable().toString());
rows.close();
}
@Test
public void testDoubleToDecimalEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createDouble();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DoubleColumnVector dcv = new DoubleColumnVector(1024);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = 74.72d;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(38).withScale(2);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals("74.72", ((DecimalColumnVector) batch.cols[0]).vector[0].toString());
rows.close();
}
@Test
public void testDoubleToDecimal64Evolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createDouble();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DoubleColumnVector dcv = new DoubleColumnVector(1024);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = 74.72d;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(10).withScale(2);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatchV2();
rows.nextBatch(batch);
assertEquals("74.72", ((Decimal64ColumnVector) batch.cols[0]).getScratchWritable().toString());
rows.close();
}
@Test
public void testLongToDecimalEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createLong();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
LongColumnVector lcv = new LongColumnVector(1024);
batch.cols[0] = lcv;
batch.reset();
batch.size = 1;
lcv.vector[0] = 74L;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(38).withScale(2);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals("74", ((DecimalColumnVector) batch.cols[0]).vector[0].toString());
rows.close();
}
@Test
public void testLongToDecimal64Evolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createLong();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
LongColumnVector lcv = new LongColumnVector(1024);
batch.cols[0] = lcv;
batch.reset();
batch.size = 1;
lcv.vector[0] = 74L;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(10).withScale(2);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatchV2();
rows.nextBatch(batch);
assertEquals("74", ((Decimal64ColumnVector) batch.cols[0]).getScratchWritable().toString());
rows.close();
}
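  /**
   * Writes the decimal 74.19 and reads it with a scale-1 reader schema, which
   * rounds the value to 74.2.
   */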
@Test
public void testDecimalToDecimalEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createDecimal().withPrecision(38).withScale(0);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DecimalColumnVector dcv = new DecimalColumnVector(1024, 38, 2);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = new HiveDecimalWritable("74.19");
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(38).withScale(1);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals("74.2", ((DecimalColumnVector) batch.cols[0]).vector[0].toString());
rows.close();
}
@Test
public void testDecimalToDecimal64Evolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createDecimal().withPrecision(38).withScale(2);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
DecimalColumnVector dcv = new DecimalColumnVector(1024, 38, 0);
batch.cols[0] = dcv;
batch.reset();
batch.size = 1;
dcv.vector[0] = new HiveDecimalWritable("74.19");
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(10).withScale(1);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatchV2();
rows.nextBatch(batch);
assertEquals(742, ((Decimal64ColumnVector) batch.cols[0]).vector[0]);
rows.close();
}
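  /**
   * Writes boolean values and reads them back as the strings "TRUE" and "FALSE".
   */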
@Test
public void testBooleanToStringEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createBoolean();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
LongColumnVector lcv = new LongColumnVector(1024);
batch.cols[0] = lcv;
batch.reset();
batch.size = 3;
lcv.vector[0] = 1L; // True
lcv.vector[1] = 0L; // False
lcv.vector[2] = 1L; // True
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createString();
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals("TRUE", ((BytesColumnVector) batch.cols[0]).toString(0));
assertEquals("FALSE", ((BytesColumnVector) batch.cols[0]).toString(1));
assertEquals("TRUE", ((BytesColumnVector) batch.cols[0]).toString(2));
rows.close();
}
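  /**
   * Reading char(10) or varchar(10) data with a string reader schema should build a
   * string-group conversion reader under the struct reader.
   */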
@Test
public void testCharToStringEvolution() throws IOException {
TypeDescription fileType = TypeDescription.fromString("struct<x:char(10)>");
TypeDescription readType = TypeDescription.fromString("struct<x:string>");
SchemaEvolution evo = new SchemaEvolution(fileType, readType, options);
TreeReaderFactory.Context treeContext =
new TreeReaderFactory.ReaderContext().setSchemaEvolution(evo);
TypeReader reader =
TreeReaderFactory.createTreeReader(readType, treeContext);
// Make sure the tree reader is built properly
assertEquals(TreeReaderFactory.StructTreeReader.class, reader.getClass());
TypeReader[] children =
((TreeReaderFactory.StructTreeReader) reader).getChildReaders();
assertEquals(1, children.length);
assertEquals(ConvertTreeReaderFactory.StringGroupFromStringGroupTreeReader.class, children[0].getClass());
// Make sure that varchar behaves the same as char
fileType = TypeDescription.fromString("struct<x:varchar(10)>");
evo = new SchemaEvolution(fileType, readType, options);
treeContext = new TreeReaderFactory.ReaderContext().setSchemaEvolution(evo);
reader = TreeReaderFactory.createTreeReader(readType, treeContext);
// Make sure the tree reader is built properly
assertEquals(TreeReaderFactory.StructTreeReader.class, reader.getClass());
children = ((TreeReaderFactory.StructTreeReader) reader).getChildReaders();
assertEquals(1, children.length);
assertEquals(ConvertTreeReaderFactory.StringGroupFromStringGroupTreeReader.class, children[0].getClass());
}
@Test
public void testStringToDecimalEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createString();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
BytesColumnVector bcv = new BytesColumnVector(1024);
batch.cols[0] = bcv;
batch.reset();
batch.size = 1;
bcv.vector[0] = "74.19".getBytes(StandardCharsets.UTF_8);
bcv.length[0] = "74.19".getBytes(StandardCharsets.UTF_8).length;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(38).withScale(1);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
rows.nextBatch(batch);
assertEquals("74.2", ((DecimalColumnVector) batch.cols[0]).vector[0].toString());
rows.close();
}
@Test
public void testStringToDecimal64Evolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createString();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
BytesColumnVector bcv = new BytesColumnVector(1024);
batch.cols[0] = bcv;
batch.reset();
batch.size = 1;
bcv.vector[0] = "74.19".getBytes(StandardCharsets.UTF_8);
bcv.length[0] = "74.19".getBytes(StandardCharsets.UTF_8).length;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(10).withScale(1);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatchV2();
rows.nextBatch(batch);
assertEquals(742, ((Decimal64ColumnVector) batch.cols[0]).vector[0]);
rows.close();
}
@Test
public void testTimestampToDecimalEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createTimestamp();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
TimestampColumnVector tcv = new TimestampColumnVector(1024);
batch.cols[0] = tcv;
batch.reset();
batch.size = 3;
tcv.time[0] = 74000L;
tcv.nanos[0] = 123456789;
tcv.time[1] = 123000L;
tcv.nanos[1] = 456000000;
tcv.time[2] = 987000;
tcv.nanos[2] = 0;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(38).withScale(9);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatch();
assertTrue(rows.nextBatch(batch));
assertEquals(3, batch.size);
DecimalColumnVector dcv = (DecimalColumnVector) batch.cols[0];
assertEquals("74.123456789", dcv.vector[0].toString());
assertEquals("123.456", dcv.vector[1].toString());
assertEquals("987", dcv.vector[2].toString());
rows.close();
}
@Test
public void testTimestampToDecimal64Evolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.createTimestamp();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000));
VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
TimestampColumnVector tcv = new TimestampColumnVector(1024);
batch.cols[0] = tcv;
batch.reset();
batch.size = 1;
tcv.time[0] = 74000L;
writer.addRowBatch(batch);
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
TypeDescription schemaOnRead = TypeDescription.createDecimal().withPrecision(10).withScale(1);
RecordReader rows = reader.rows(reader.options().schema(schemaOnRead));
batch = schemaOnRead.createRowBatchV2();
rows.nextBatch(batch);
assertEquals(740, ((Decimal64ColumnVector) batch.cols[0]).vector[0]);
rows.close();
}
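  /**
   * Writes UTC timestamps and reads them back as strings, with trailing zeros in
   * the fractional seconds trimmed.
   */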
@Test
public void testTimestampToStringEvolution(TestInfo testInfo) throws Exception {
testFilePath = new Path(workDir, "TestSchemaEvolution." +
testInfo.getTestMethod().get().getName() + ".orc");
TypeDescription schema = TypeDescription.fromString("timestamp");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
.bufferSize(10000).useUTCTimestamp(true));
VectorizedRowBatch batch = schema.createRowBatchV2();
TimestampColumnVector tcv = (TimestampColumnVector) batch.cols[0];
batch.size = 3;
tcv.time[0] = 74000L;
tcv.nanos[0] = 123456789;
tcv.time[1] = 123000L;
tcv.nanos[1] = 456000000;
tcv.time[2] = 987000;
tcv.nanos[2] = 0;
writer.addRowBatch(batch);
writer.close();
schema = TypeDescription.fromString("string");
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows(reader.options().schema(schema));
batch = schema.createRowBatchV2();
BytesColumnVector bcv = (BytesColumnVector) batch.cols[0];
assertTrue(rows.nextBatch(batch));
assertEquals(3, batch.size);
assertEquals("1970-01-01 00:01:14.123456789", bcv.toString(0));
assertEquals("1970-01-01 00:02:03.456", bcv.toString(1));
assertEquals("1970-01-01 00:16:27", bcv.toString(2));
rows.close();
}
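  /**
   * Checks which conversions are safe for predicate push-down: identical schemas and
   * int-to-long widening are safe, narrowing decimal precision is not, and columns
   * that are not included or not present in the file are never safe.
   */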
@Test
public void testSafePpdEvaluation() throws IOException {
TypeDescription fileStruct1 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10));
SchemaEvolution same1 = new SchemaEvolution(fileStruct1, null, options);
assertTrue(same1.isPPDSafeConversion(0));
assertFalse(same1.hasConversion());
TypeDescription readerStruct1 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10));
SchemaEvolution both1 = new SchemaEvolution(fileStruct1, readerStruct1, options);
assertFalse(both1.hasConversion());
assertTrue(both1.isPPDSafeConversion(0));
assertTrue(both1.isPPDSafeConversion(1));
assertTrue(both1.isPPDSafeConversion(2));
assertTrue(both1.isPPDSafeConversion(3));
// int -> long
TypeDescription readerStruct1diff = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10));
SchemaEvolution both1diff = new SchemaEvolution(fileStruct1, readerStruct1diff, options);
assertTrue(both1diff.hasConversion());
assertFalse(both1diff.isPPDSafeConversion(0));
assertTrue(both1diff.isPPDSafeConversion(1));
assertTrue(both1diff.isPPDSafeConversion(2));
assertTrue(both1diff.isPPDSafeConversion(3));
// decimal(38,10) -> decimal(12, 10)
TypeDescription readerStruct1diffPrecision = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(12).withScale(10));
options.include(new boolean[] {true, false, false, true});
SchemaEvolution both1diffPrecision = new SchemaEvolution(fileStruct1,
readerStruct1diffPrecision, options);
assertTrue(both1diffPrecision.hasConversion());
assertFalse(both1diffPrecision.isPPDSafeConversion(0));
assertFalse(both1diffPrecision.isPPDSafeConversion(1)); // column not included
assertFalse(both1diffPrecision.isPPDSafeConversion(2)); // column not included
assertFalse(both1diffPrecision.isPPDSafeConversion(3));
// add columns
readerStruct1 = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(10))
.addField("f4", TypeDescription.createBoolean());
options.include(null);
both1 = new SchemaEvolution(fileStruct1, readerStruct1, options);
assertTrue(both1.hasConversion());
assertFalse(both1.isPPDSafeConversion(0));
assertTrue(both1.isPPDSafeConversion(1));
assertTrue(both1.isPPDSafeConversion(2));
assertTrue(both1.isPPDSafeConversion(3));
assertFalse(both1.isPPDSafeConversion(4));
// column pruning
readerStruct1 = TypeDescription.createStruct()
.addField("f2", TypeDescription.createString());
both1 = new SchemaEvolution(fileStruct1, readerStruct1, options);
assertTrue(both1.hasConversion());
assertFalse(both1.isPPDSafeConversion(0));
assertFalse(both1.isPPDSafeConversion(1));
assertTrue(both1.isPPDSafeConversion(2));
assertFalse(both1.isPPDSafeConversion(3));
}
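  /**
   * Widening integer conversions (byte to short/int/long, short to int/long, int to
   * long) are safe for predicate push-down; narrowing conversions and conversions to
   * string, float, or timestamp are not.
   */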
@Test
public void testSafePpdEvaluationForInts() throws IOException {
// byte -> short -> int -> long
TypeDescription fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createByte());
SchemaEvolution schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertFalse(schemaEvolution.hasConversion());
// byte -> short
TypeDescription readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// byte -> int
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// byte -> long
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// short -> int -> long
fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort());
schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertFalse(schemaEvolution.hasConversion());
// unsafe conversion short -> byte
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createByte());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// short -> int
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// short -> long
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// int -> long
fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt());
schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertFalse(schemaEvolution.hasConversion());
// unsafe conversion int -> byte
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createByte());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// unsafe conversion int -> short
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// int -> long
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// long
fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong());
schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertTrue(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.hasConversion());
// unsafe conversion long -> byte
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createByte());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// unsafe conversion long -> short
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createShort());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// unsafe conversion long -> int
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// invalid
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createString());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// invalid
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createFloat());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// invalid
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createTimestamp());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
}
@Test
public void testSafePpdEvaluationForStrings() throws IOException {
TypeDescription fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createString());
SchemaEvolution schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertTrue(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.hasConversion());
// string -> char
TypeDescription readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// string -> varchar
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar());
schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertTrue(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.hasConversion());
// char -> string
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createString());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// char -> varchar
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
fileSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createVarchar());
schemaEvolution = new SchemaEvolution(fileSchema, null, options);
assertTrue(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.hasConversion());
// varchar -> string
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createString());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertTrue(schemaEvolution.isPPDSafeConversion(1));
// varchar -> char
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createChar());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// invalid
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createDecimal());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// invalid
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createDate());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
// invalid
readerSchema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt());
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertTrue(schemaEvolution.hasConversion());
assertFalse(schemaEvolution.isPPDSafeConversion(0));
assertFalse(schemaEvolution.isPPDSafeConversion(1));
}
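  /** Builds an include array that selects every column id of the reader type. */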
private boolean[] includeAll(TypeDescription readerType) {
int numColumns = readerType.getMaximumId() + 1;
boolean[] result = new boolean[numColumns];
Arrays.fill(result, true);
return result;
}
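  /**
   * A reader column added after the existing fields has no file type, while the
   * original columns still map to their file counterparts.
   */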
@Test
public void testAddFieldToEnd() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:int,b:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:int,b:string,c:double>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// b -> b
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(1);
assertSame(original, mapped);
// c -> null
reader = readerType.getChildren().get(2);
mapped = transition.getFileType(reader);
original = null;
assertSame(original, mapped);
}
@Test
public void testAddFieldBeforeEnd() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:int,b:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:int,c:double,b:string>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// c -> null
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = null;
assertSame(original, mapped);
// b -> b
reader = readerType.getChildren().get(2);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(1);
assertSame(original, mapped);
}
@Test
public void testRemoveLastField() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:int,b:string,c:double>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:int,b:string>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// b -> b
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(1);
assertSame(original, mapped);
}
@Test
public void testRemoveFieldBeforeEnd() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:int,b:string,c:double>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:int,c:double>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// c -> b
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(2);
assertSame(original, mapped);
}
@Test
public void testRemoveAndAddField() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:int,b:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:int,c:double>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// c -> null
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = null;
assertSame(original, mapped);
}
@Test
public void testReorderFields() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:int,b:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<b:string,a:int>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// b -> b
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(1);
assertSame(original, mapped);
// a -> a
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(0);
assertSame(original, mapped);
}
@Test
public void testAddFieldEndOfStruct() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:struct<b:int>,c:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:struct<b:int,d:double>,c:string>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// a.b -> a.b
TypeDescription readerChild = reader.getChildren().get(0);
mapped = transition.getFileType(readerChild);
TypeDescription originalChild = original.getChildren().get(0);
assertSame(originalChild, mapped);
// a.d -> null
readerChild = reader.getChildren().get(1);
mapped = transition.getFileType(readerChild);
originalChild = null;
assertSame(originalChild, mapped);
// c -> c
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(1);
assertSame(original, mapped);
}
@Test
public void testAddFieldBeforeEndOfStruct() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:struct<b:int>,c:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:struct<d:double,b:int>,c:string>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// a.b -> a.b
TypeDescription readerChild = reader.getChildren().get(1);
mapped = transition.getFileType(readerChild);
TypeDescription originalChild = original.getChildren().get(0);
assertSame(originalChild, mapped);
// a.d -> null
readerChild = reader.getChildren().get(0);
mapped = transition.getFileType(readerChild);
originalChild = null;
assertSame(originalChild, mapped);
// c -> c
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(1);
assertSame(original, mapped);
}
@Test
public void testCaseMismatchInReaderAndWriterSchema() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:struct<b:int>,c:string>");
TypeDescription readerType =
TypeDescription.fromString("struct<A:struct<b:int>,c:string>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included).isSchemaEvolutionCaseAware(false));
    // A -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// a.b -> a.b
TypeDescription readerChild = reader.getChildren().get(0);
mapped = transition.getFileType(readerChild);
TypeDescription originalChild = original.getChildren().get(0);
assertSame(originalChild, mapped);
// c -> c
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = fileType.getChildren().get(1);
assertSame(original, mapped);
}
/**
   * Two structs can have equal types but live in different locations. The new,
   * similar struct in the reader schema should not be mapped to the existing one.
*/
@Test
public void testAddSimilarField() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:struct<b:int>>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:struct<b:int>,c:struct<b:int>>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// a.b -> a.b
TypeDescription readerChild = reader.getChildren().get(0);
mapped = transition.getFileType(readerChild);
TypeDescription originalChild = original.getChildren().get(0);
assertSame(originalChild, mapped);
// c -> null
reader = readerType.getChildren().get(1);
mapped = transition.getFileType(reader);
original = null;
assertSame(original, mapped);
// c.b -> null
readerChild = reader.getChildren().get(0);
mapped = transition.getFileType(readerChild);
original = null;
assertSame(original, mapped);
}
/**
* Two structs can be equal but in different locations. They can converge to this.
*/
@Test
public void testConvergentEvolution() {
TypeDescription fileType = TypeDescription
.fromString("struct<a:struct<a:int,b:string>,c:struct<a:int>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<a:struct<a:int,b:string>,c:struct<a:int,b:string>>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// c -> c
TypeDescription reader = readerType.getChildren().get(1);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(1);
assertSame(original, mapped);
// c.a -> c.a
TypeDescription readerchild = reader.getChildren().get(0);
mapped = transition.getFileType(readerchild);
original = original.getChildren().get(0);
assertSame(original, mapped);
// c.b -> null
readerchild = reader.getChildren().get(1);
mapped = transition.getFileType(readerchild);
original = null;
assertSame(original, mapped);
}
@Test
public void testMapEvolution() {
TypeDescription fileType =
TypeDescription
.fromString("struct<a:map<struct<a:int>,struct<a:int>>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<a:map<struct<a:int,b:string>,struct<a:int,c:string>>>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// a.key -> a.key
TypeDescription readerchild = reader.getChildren().get(0);
mapped = transition.getFileType(readerchild);
original = original.getChildren().get(0);
assertSame(original, mapped);
// a.value -> a.value
readerchild = reader.getChildren().get(1);
mapped = transition.getFileType(readerchild);
original = fileType.getChildren().get(0).getChildren().get(1);
assertSame(original, mapped);
}
@Test
public void testListEvolution() {
TypeDescription fileType =
TypeDescription.fromString("struct<a:array<struct<b:int>>>");
TypeDescription readerType =
TypeDescription.fromString("struct<a:array<struct<b:int,c:string>>>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
SchemaEvolution transition =
new SchemaEvolution(fileType, readerType, options.include(included));
// a -> a
TypeDescription reader = readerType.getChildren().get(0);
TypeDescription mapped = transition.getFileType(reader);
TypeDescription original = fileType.getChildren().get(0);
assertSame(original, mapped);
// a.element -> a.element
TypeDescription readerchild = reader.getChildren().get(0);
mapped = transition.getFileType(readerchild);
original = original.getChildren().get(0);
assertSame(original, mapped);
// a.b -> a.b
readerchild = reader.getChildren().get(0).getChildren().get(0);
mapped = transition.getFileType(readerchild);
original = original.getChildren().get(0);
assertSame(original, mapped);
// a.c -> null
readerchild = reader.getChildren().get(0).getChildren().get(1);
mapped = transition.getFileType(readerchild);
original = null;
assertSame(original, mapped);
}
@Test
public void testIncompatibleTypes() {
assertThrows(SchemaEvolution.IllegalEvolutionException.class, () -> {
TypeDescription fileType = TypeDescription.fromString("struct<a:int>");
TypeDescription readerType = TypeDescription.fromString("struct<a:date>");
boolean[] included = includeAll(readerType);
options.tolerateMissingSchema(false);
new SchemaEvolution(fileType, readerType, options.include(included));
});
}
@Test
public void testAcidNamedEvolution() {
TypeDescription fileType = TypeDescription.fromString(
"struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<x:int,z:bigint,y:string>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<x:int,y:string,z:bigint>");
SchemaEvolution evo = new SchemaEvolution(fileType, readerType, options);
assertTrue(evo.isAcid());
assertEquals("struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<x:int,y:string,z:bigint>>", evo.getReaderSchema().toString());
assertEquals("struct<x:int,y:string,z:bigint>",
evo.getReaderBaseSchema().toString());
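    // File column ids: 0 = wrapper struct, 1-5 = ACID metadata, 6 = row struct,
    // 7-9 = row fields x, z, y; the reader orders the row fields as x, y, z.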
    // the first 8 ids (0-7) should map to themselves
for(int c=0; c < 8; ++c) {
assertEquals(c, evo.getFileType(c).getId(), "column " + c);
}
// y and z should swap places
assertEquals(9, evo.getFileType(8).getId());
assertEquals(8, evo.getFileType(9).getId());
}
@Test
public void testAcidPositionEvolutionAddField() {
TypeDescription fileType = TypeDescription.fromString(
"struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<_col0:int,_col1:string>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<x:int,y:string,z:bigint>");
SchemaEvolution evo = new SchemaEvolution(fileType, readerType, options);
assertTrue(evo.isAcid());
assertEquals("struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<x:int,y:string,z:bigint>>", evo.getReaderSchema().toString());
assertEquals("struct<x:int,y:string,z:bigint>",
evo.getReaderBaseSchema().toString());
    // the first 9 ids (0-8) should map to themselves
for(int c=0; c < 9; ++c) {
assertEquals(c, evo.getFileType(c).getId(), "column " + c);
}
// the file doesn't have z
assertNull(evo.getFileType(9));
}
@Test
public void testAcidPositionEvolutionSkipAcid() {
TypeDescription fileType = TypeDescription.fromString(
"struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<_col0:int,_col1:string,_col2:double>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<x:int,y:string>");
SchemaEvolution evo = new SchemaEvolution(fileType, readerType,
options.includeAcidColumns(false));
assertTrue(evo.isAcid());
assertEquals("struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<x:int,y:string>>", evo.getReaderSchema().toString());
assertEquals("struct<x:int,y:string>",
evo.getReaderBaseSchema().toString());
    // only the wrapper struct and the row columns map through; the ACID metadata columns (1-5) are skipped
boolean[] fileInclude = evo.getFileIncluded();
//get top level struct col
assertEquals(0, evo.getFileType(0).getId(), "column " + 0);
assertTrue(fileInclude[0], "column " + 0);
for(int c=1; c < 6; ++c) {
assertNull(evo.getFileType(c), "column " + c);
//skip all acid metadata columns
assertFalse(fileInclude[c], "column " + c);
}
for(int c=6; c < 9; ++c) {
assertEquals(c, evo.getFileType(c).getId(), "column " + c);
assertTrue(fileInclude[c], "column " + c);
}
// don't read the last column
assertFalse(fileInclude[9]);
}
@Test
public void testAcidPositionEvolutionRemoveField() {
TypeDescription fileType = TypeDescription.fromString(
"struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<_col0:int,_col1:string,_col2:double>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<x:int,y:string>");
SchemaEvolution evo = new SchemaEvolution(fileType, readerType, options);
assertTrue(evo.isAcid());
assertEquals("struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<x:int,y:string>>", evo.getReaderSchema().toString());
assertEquals("struct<x:int,y:string>",
evo.getReaderBaseSchema().toString());
    // the first 9 ids (0-8) should map to themselves and be read
boolean[] fileInclude = evo.getFileIncluded();
for(int c=0; c < 9; ++c) {
assertEquals(c, evo.getFileType(c).getId(), "column " + c);
assertTrue(fileInclude[c], "column " + c);
}
// don't read the last column
assertFalse(fileInclude[9]);
}
@Test
public void testAcidPositionSubstructure() {
TypeDescription fileType = TypeDescription.fromString(
"struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<_col0:int,_col1:struct<z:int,x:double,y:string>," +
"_col2:double>>");
TypeDescription readerType = TypeDescription.fromString(
"struct<a:int,b:struct<x:double,y:string,z:int>,c:double>");
SchemaEvolution evo = new SchemaEvolution(fileType, readerType, options);
assertTrue(evo.isAcid());
    // the first 9 ids (0-8) should map to themselves
boolean[] fileInclude = evo.getFileIncluded();
for(int c=0; c < 9; ++c) {
assertEquals(c, evo.getFileType(c).getId(), "column " + c);
}
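    // inside the row: reader b's x, y, z (ids 9-11) match the file's _col1 fields
    // by name, which the file stores in the order z, x, y (ids 9-11), hence the
    // permuted ids below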
assertEquals(10, evo.getFileType(9).getId());
assertEquals(11, evo.getFileType(10).getId());
assertEquals(9, evo.getFileType(11).getId());
assertEquals(12, evo.getFileType(12).getId());
assertEquals(13, fileInclude.length);
for(int c=0; c < fileInclude.length; ++c) {
assertTrue(fileInclude[c], "column " + c);
}
}
@Test
public void testNonAcidPositionSubstructure() {
TypeDescription fileType = TypeDescription.fromString(
"struct<_col0:int,_col1:struct<x:double,z:int>," +
"_col2:double>");
TypeDescription readerType = TypeDescription.fromString(
"struct<a:int,b:struct<x:double,y:string,z:int>,c:double>");
SchemaEvolution evo = new SchemaEvolution(fileType, readerType, options);
assertFalse(evo.isAcid());
    // ids 0-3 should map to themselves
boolean[] fileInclude = evo.getFileIncluded();
assertEquals(0, evo.getFileType(0).getId());
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
assertEquals(3, evo.getFileType(3).getId());
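    // reader b.y has no counterpart in the file, so it maps to null and the
    // remaining reader ids (z and c) map to file ids shifted down by one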
assertNull(evo.getFileType(4));
assertEquals(4, evo.getFileType(5).getId());
assertEquals(5, evo.getFileType(6).getId());
assertEquals(6, fileInclude.length);
for(int c=0; c < fileInclude.length; ++c) {
assertTrue(fileInclude[c], "column " + c);
}
}
@Test
public void testFileIncludeWithNoEvolution() {
TypeDescription fileType = TypeDescription.fromString(
"struct<a:int,b:double,c:string>");
SchemaEvolution evo = new SchemaEvolution(fileType, null,
options.include(new boolean[]{true, false, true, false}));
assertFalse(evo.isAcid());
assertEquals("struct<a:int,b:double,c:string>",
evo.getReaderBaseSchema().toString());
boolean[] fileInclude = evo.getFileIncluded();
assertTrue(fileInclude[0]);
assertFalse(fileInclude[1]);
assertTrue(fileInclude[2]);
assertFalse(fileInclude[3]);
}
static ByteBuffer createBuffer(int... values) {
ByteBuffer result = ByteBuffer.allocate(values.length);
for(int v: values) {
result.put((byte) v);
}
result.flip();
return result;
}
@Test
public void testTypeConversion() throws IOException {
TypeDescription fileType = TypeDescription.fromString("struct<x:int,y:string>");
TypeDescription readType = TypeDescription.fromString("struct<z:int,y:string,x:bigint>");
SchemaEvolution evo = new SchemaEvolution(fileType, readType, options);
// check to make sure the fields are mapped correctly
assertNull(evo.getFileType(1));
assertEquals(2, evo.getFileType(2).getId());
assertEquals(1, evo.getFileType(3).getId());
TreeReaderFactory.Context treeContext =
new TreeReaderFactory.ReaderContext().setSchemaEvolution(evo);
BatchReader reader =
TreeReaderFactory.createRootReader(readType, treeContext);
// check to make sure the tree reader is built right
assertEquals(StructBatchReader.class, reader.getClass());
assertEquals(TreeReaderFactory.StructTreeReader.class, reader.rootType.getClass());
TypeReader[] children =
((TreeReaderFactory.StructTreeReader) reader.rootType).getChildReaders();
assertEquals(3, children.length);
assertEquals(TreeReaderFactory.NullTreeReader.class, children[0].getClass());
assertEquals(TreeReaderFactory.StringTreeReader.class, children[1].getClass());
assertEquals(ConvertTreeReaderFactory.AnyIntegerFromAnyIntegerTreeReader.class,
children[2].getClass());
// check to make sure the data is read correctly
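    // The mock streams hold RLEv1-encoded data: (7, 1, 0) is a run of 10 ints
    // starting at 0 with delta 1, column 2's DATA holds the raw bytes 'A'..'J',
    // and its LENGTH stream (7, 0, 1) is a run of 10 ones.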
MockDataReader dataReader = new MockDataReader(fileType)
.addStream(1, OrcProto.Stream.Kind.DATA, createBuffer(7, 1, 0))
.addStream(2, OrcProto.Stream.Kind.DATA, createBuffer(65, 66, 67, 68,
69, 70, 71, 72, 73, 74))
.addStream(2, OrcProto.Stream.Kind.LENGTH, createBuffer(7, 0, 1))
.addStripeFooter(100, null);
StripePlanner planner = new StripePlanner(fileType, new ReaderEncryption(),
dataReader, OrcFile.WriterVersion.ORC_14, true, Integer.MAX_VALUE);
boolean[] columns = new boolean[]{true, true, true};
planner.parseStripe(dataReader.getStripe(0), columns)
.readData(null, null, false, TypeReader.ReadPhase.ALL);
reader.startStripe(planner, TypeReader.ReadPhase.ALL);
VectorizedRowBatch batch = readType.createRowBatch();
reader.nextBatch(batch, 10, TypeReader.ReadPhase.ALL);
final String EXPECTED = "ABCDEFGHIJ";
assertTrue(batch.cols[0].isRepeating);
assertTrue(batch.cols[0].isNull[0]);
for(int r=0; r < 10; ++r) {
assertEquals(EXPECTED.substring(r, r+1),
((BytesColumnVector) batch.cols[1]).toString(r), "col1." + r);
assertEquals(r, ((LongColumnVector) batch.cols[2]).vector[r], "col2." + r);
}
}
@Test
public void testTypeConversionShouldIgnoreAttributes() throws IOException {
TypeDescription fileType = TypeDescription.fromString("struct<x:int,y:smallint>");
TypeDescription readType = TypeDescription.fromString("struct<x:int,y:int>");
readType.findSubtype("x").setAttribute("iceberg.id", "12");
readType.findSubtype("y").setAttribute("iceberg.id", "13");
SchemaEvolution evo = new SchemaEvolution(fileType, readType, options);
// check to make sure the fields are mapped correctly
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
TreeReaderFactory.Context treeContext =
new TreeReaderFactory.ReaderContext().setSchemaEvolution(evo);
TypeReader reader =
TreeReaderFactory.createTreeReader(readType, treeContext);
// check to make sure the tree reader is built right
assertEquals(TreeReaderFactory.StructTreeReader.class, reader.getClass());
TypeReader[] children =
((TreeReaderFactory.StructTreeReader) reader).getChildReaders();
assertEquals(2, children.length);
assertEquals(TreeReaderFactory.IntTreeReader.class, children[0].getClass());
assertEquals(ConvertTreeReaderFactory.AnyIntegerFromAnyIntegerTreeReader.class,
children[1].getClass());
}
@Test
public void testPositionalEvolution() throws IOException {
options.forcePositionalEvolution(true);
TypeDescription file = TypeDescription.fromString("struct<x:int,y:int,z:int>");
TypeDescription read = TypeDescription.fromString("struct<z:int,x:int,a:int,b:int>");
SchemaEvolution evo = new SchemaEvolution(file, read, options);
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
assertEquals(3, evo.getFileType(3).getId());
assertNull(evo.getFileType(4));
}
@Test
public void testPositionalEvolutionLevel() throws IOException {
options.forcePositionalEvolution(true);
options.positionalEvolutionLevel(2);
TypeDescription file = TypeDescription.fromString("struct<a:int,b:struct<y:int,y:int>>");
TypeDescription read = TypeDescription.fromString("struct<a:int,b:struct<y:int,y:int>>");
SchemaEvolution evo = new SchemaEvolution(file, read, options);
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
assertEquals(3, evo.getFileType(3).getId());
assertEquals(4, evo.getFileType(4).getId());
}
@Test
public void testStructInArrayWithoutPositionalEvolution() throws IOException {
options.forcePositionalEvolution(false);
options.positionalEvolutionLevel(Integer.MAX_VALUE);
TypeDescription file = TypeDescription.fromString("array<struct<x:string,y:int,z:double>>");
TypeDescription read = TypeDescription.fromString("array<struct<z:double,x:string,a:int,b:int>>");
SchemaEvolution evo = new SchemaEvolution(file, read, options);
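    // name-based matching inside the element struct: reader z (id 2) -> file z
    // (id 4), reader x (id 3) -> file x (id 2); a and b do not exist in the file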
assertEquals(1, evo.getFileType(1).getId());
assertEquals(4, evo.getFileType(2).getId());
assertEquals(2, evo.getFileType(3).getId());
assertNull(evo.getFileType(4));
assertNull(evo.getFileType(5));
}
@Test
public void testPositionalEvolutionForStructInArray() throws IOException {
options.forcePositionalEvolution(true);
options.positionalEvolutionLevel(Integer.MAX_VALUE);
TypeDescription file = TypeDescription.fromString("array<struct<x:int,y:int,z:int>>");
TypeDescription read = TypeDescription.fromString("array<struct<z:int,x:int,a:int,b:int>>");
SchemaEvolution evo = new SchemaEvolution(file, read, options);
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
assertEquals(3, evo.getFileType(3).getId());
assertEquals(4, evo.getFileType(4).getId());
assertNull(evo.getFileType(5));
}
@Test
public void testPositionalEvolutionForTwoLayerNestedStruct() throws IOException {
options.forcePositionalEvolution(true);
options.positionalEvolutionLevel(Integer.MAX_VALUE);
TypeDescription file = TypeDescription.fromString("struct<s:struct<x:int,y:int,z:int>>");
TypeDescription read = TypeDescription.fromString("struct<s:struct<z:int,x:int,a:int,b:int>>");
SchemaEvolution evo = new SchemaEvolution(file, read, options);
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
assertEquals(3, evo.getFileType(3).getId());
assertEquals(4, evo.getFileType(4).getId());
assertNull(evo.getFileType(5));
}
@Test
public void testPositionalEvolutionForThreeLayerNestedStruct() throws IOException {
options.forcePositionalEvolution(true);
options.positionalEvolutionLevel(Integer.MAX_VALUE);
TypeDescription file = TypeDescription.fromString("struct<s1:struct<s2:struct<x:int,y:int,z:int>>>");
TypeDescription read = TypeDescription.fromString("struct<s1:struct<s:struct<z:int,x:int,a:int,b:int>>>");
SchemaEvolution evo = new SchemaEvolution(file, read, options);
assertEquals(1, evo.getFileType(1).getId());
assertEquals(2, evo.getFileType(2).getId());
assertEquals(3, evo.getFileType(3).getId());
assertEquals(4, evo.getFileType(4).getId());
assertEquals(5, evo.getFileType(5).getId());
assertNull(evo.getFileType(6));
}
// These are helper methods that pull some of the common code into one
// place.
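  // Formats a decimal(14,2) timestamp value, i.e. hundredths of a second since
  // the epoch, as a timestamp string in the given zone.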
static String decimalTimestampToString(long centiseconds, ZoneId zone) {
int nano = (int) (Math.floorMod(centiseconds, 100) * 10_000_000);
return timestampToString(centiseconds * 10, nano, zone);
}
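  // Formats a double timestamp (seconds since the epoch, with the fractional
  // part carrying milliseconds) as a timestamp string in the given zone.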
static String doubleTimestampToString(double seconds, ZoneId zone) {
long sec = (long) Math.floor(seconds);
int nano = 1_000_000 * (int) Math.round((seconds - sec) * 1000);
return timestampToString(sec * 1000, nano, zone);
}
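  // millis only contributes whole seconds (via floorDiv); the sub-second part
  // comes entirely from the separate nanos argument.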
static String timestampToString(long millis, int nanos, ZoneId zone) {
return timestampToString(Instant.ofEpochSecond(Math.floorDiv(millis, 1000),
nanos), zone);
}
static String longTimestampToString(long seconds, ZoneId zone) {
return timestampToString(Instant.ofEpochSecond(seconds), zone);
}
static String timestampToString(Instant time, ZoneId zone) {
return time.atZone(zone)
.format(ConvertTreeReaderFactory.INSTANT_TIMESTAMP_FORMAT);
}
static void writeTimestampDataFile(Path path,
Configuration conf,
ZoneId writerZone,
DateTimeFormatter formatter,
String[] values) throws IOException {
TimeZone oldDefault = TimeZone.getDefault();
try {
TimeZone.setDefault(TimeZone.getTimeZone(writerZone));
TypeDescription fileSchema =
TypeDescription.fromString("struct<t1:timestamp," +
"t2:timestamp with local time zone>");
Writer writer = OrcFile.createWriter(path,
OrcFile.writerOptions(conf).setSchema(fileSchema).stripeSize(10000));
VectorizedRowBatch batch = fileSchema.createRowBatch(1024);
TimestampColumnVector t1 = (TimestampColumnVector) batch.cols[0];
TimestampColumnVector t2 = (TimestampColumnVector) batch.cols[1];
for (int r = 0; r < values.length; ++r) {
int row = batch.size++;
Instant t = Instant.from(formatter.parse(values[r]));
t1.time[row] = t.getEpochSecond() * 1000;
t1.nanos[row] = t.getNano();
t2.time[row] = t1.time[row];
t2.nanos[row] = t1.nanos[row];
if (batch.size == 1024) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
} finally {
TimeZone.setDefault(oldDefault);
}
}
/**
* Tests the various conversions from timestamp and timestamp with local
* timezone.
*
* It writes an ORC file with timestamp and timestamp with local time zone
* and then reads it back in with each of the relevant types.
*
   * This test runs both with and without the useUtc flag.
*
* It uses Australia/Sydney and America/New_York because they both have
* DST and they move in opposite directions on different days. Thus, we
* end up with four sets of offsets.
*
* Picking the 27th of the month puts it around when DST changes.
*/
@Test
public void testEvolutionFromTimestamp() throws Exception {
// The number of rows in the file that we test with.
final int VALUES = 1024;
// The different timezones that we'll use for this test.
final ZoneId UTC = ZoneId.of("UTC");
final ZoneId WRITER_ZONE = ZoneId.of("America/New_York");
final ZoneId READER_ZONE = ZoneId.of("Australia/Sydney");
final TimeZone oldDefault = TimeZone.getDefault();
// generate the timestamps to use
String[] timeStrings = new String[VALUES];
for(int r=0; r < timeStrings.length; ++r) {
timeStrings[r] = String.format("%04d-%02d-27 23:45:56.7",
2000 + (r / 12), (r % 12) + 1);
}
final DateTimeFormatter WRITER_FORMAT =
ConvertTreeReaderFactory.TIMESTAMP_FORMAT.withZone(WRITER_ZONE);
writeTimestampDataFile(testFilePath, conf, WRITER_ZONE, WRITER_FORMAT, timeStrings);
try {
TimeZone.setDefault(TimeZone.getTimeZone(READER_ZONE));
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf);
Reader.Options rowOptions = new Reader.Options();
try (Reader reader = OrcFile.createReader(testFilePath, options)) {
// test conversion to long
TypeDescription readerSchema = TypeDescription.fromString("struct<t1:bigint,t2:bigint>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatch(VALUES);
LongColumnVector t1 = (LongColumnVector) batch.cols[0];
LongColumnVector t2 = (LongColumnVector) batch.cols[1];
int current = 0;
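          // converting to bigint keeps only whole seconds, so the expected text
          // drops the ".7" fraction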
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(
(timeStrings[r] + " " + READER_ZONE.getId()).replace(".7 ", " "),
longTimestampToString(t1.vector[current], READER_ZONE),
"row " + r);
assertEquals(
(timeStrings[r] + " " + WRITER_ZONE.getId()).replace(".7 ", " "),
longTimestampToString(t2.vector[current], WRITER_ZONE),
"row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to decimal
readerSchema = TypeDescription.fromString("struct<t1:decimal(14,2),t2:decimal(14,2)>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
Decimal64ColumnVector t1 = (Decimal64ColumnVector) batch.cols[0];
Decimal64ColumnVector t2 = (Decimal64ColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals( timeStrings[r] + " " + READER_ZONE.getId(),
decimalTimestampToString(t1.vector[current], READER_ZONE), "row " + r);
assertEquals(timeStrings[r] + " " + WRITER_ZONE.getId(),
decimalTimestampToString(t2.vector[current], WRITER_ZONE), "row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to double
readerSchema = TypeDescription.fromString("struct<t1:double,t2:double>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
DoubleColumnVector t1 = (DoubleColumnVector) batch.cols[0];
DoubleColumnVector t2 = (DoubleColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals( timeStrings[r] + " " + READER_ZONE.getId(),
doubleTimestampToString(t1.vector[current], READER_ZONE), "row " + r);
assertEquals( timeStrings[r] + " " + WRITER_ZONE.getId(),
doubleTimestampToString(t2.vector[current], WRITER_ZONE), "row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to date
readerSchema = TypeDescription.fromString("struct<t1:date,t2:date>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
LongColumnVector t1 = (LongColumnVector) batch.cols[0];
LongColumnVector t2 = (LongColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
String date = timeStrings[r].substring(0, 10);
assertEquals(date,
ConvertTreeReaderFactory.DATE_FORMAT.format(
LocalDate.ofEpochDay(t1.vector[current])), "row " + r);
// NYC -> Sydney moves forward a day for instant
assertEquals(date.replace("-27", "-28"),
ConvertTreeReaderFactory.DATE_FORMAT.format(
LocalDate.ofEpochDay(t2.vector[current])), "row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to string
readerSchema = TypeDescription.fromString("struct<t1:string,t2:string>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatch(VALUES);
BytesColumnVector bytesT1 = (BytesColumnVector) batch.cols[0];
BytesColumnVector bytesT2 = (BytesColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(timeStrings[r], bytesT1.toString(current), "row " + r);
            Instant t = Instant.from(WRITER_FORMAT.parse(timeStrings[r]));
            assertEquals(timestampToString(t, READER_ZONE),
                bytesT2.toString(current), "row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion between timestamps
readerSchema = TypeDescription.fromString("struct<t1:timestamp with local time zone,t2:timestamp>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatch(VALUES);
TimestampColumnVector timeT1 = (TimestampColumnVector) batch.cols[0];
TimestampColumnVector timeT2 = (TimestampColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(timeStrings[r] + " " + READER_ZONE.getId(),
timestampToString(timeT1.time[current], timeT1.nanos[current], READER_ZONE),
"row " + r);
assertEquals(
timestampToString(Instant.from(WRITER_FORMAT.parse(timeStrings[r])), READER_ZONE),
timestampToString(timeT2.time[current], timeT2.nanos[current], READER_ZONE),
"row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
}
// Now test using UTC as local
options.useUTCTimestamp(true);
try (Reader reader = OrcFile.createReader(testFilePath, options)) {
DateTimeFormatter UTC_FORMAT =
ConvertTreeReaderFactory.TIMESTAMP_FORMAT.withZone(UTC);
// test conversion to int in UTC
TypeDescription readerSchema =
TypeDescription.fromString("struct<t1:bigint,t2:bigint>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatch(VALUES);
LongColumnVector t1 = (LongColumnVector) batch.cols[0];
LongColumnVector t2 = (LongColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(
(timeStrings[r] + " " + UTC.getId()).replace(".7 ", " "),
longTimestampToString(t1.vector[current], UTC), "row " + r);
assertEquals(
(timeStrings[r] + " " + WRITER_ZONE.getId()).replace(".7 ", " "),
longTimestampToString(t2.vector[current], WRITER_ZONE), "row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to decimal
readerSchema = TypeDescription.fromString("struct<t1:decimal(14,2),t2:decimal(14,2)>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
Decimal64ColumnVector t1 = (Decimal64ColumnVector) batch.cols[0];
Decimal64ColumnVector t2 = (Decimal64ColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(timeStrings[r] + " " + UTC.getId(),
decimalTimestampToString(t1.vector[current], UTC), "row " + r);
assertEquals(timeStrings[r] + " " + WRITER_ZONE.getId(),
decimalTimestampToString(t2.vector[current], WRITER_ZONE), "row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to double
readerSchema = TypeDescription.fromString("struct<t1:double,t2:double>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
DoubleColumnVector t1 = (DoubleColumnVector) batch.cols[0];
DoubleColumnVector t2 = (DoubleColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(timeStrings[r] + " " + UTC.getId(),
doubleTimestampToString(t1.vector[current], UTC),
"row " + r);
assertEquals(timeStrings[r] + " " + WRITER_ZONE.getId(),
doubleTimestampToString(t2.vector[current], WRITER_ZONE),
"row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to date
readerSchema = TypeDescription.fromString("struct<t1:date,t2:date>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
LongColumnVector t1 = (LongColumnVector) batch.cols[0];
LongColumnVector t2 = (LongColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
String date = timeStrings[r].substring(0, 10);
assertEquals(date,
ConvertTreeReaderFactory.DATE_FORMAT.format(
LocalDate.ofEpochDay(t1.vector[current])),
"row " + r);
// NYC -> UTC still moves forward a day
assertEquals(date.replace("-27", "-28"),
ConvertTreeReaderFactory.DATE_FORMAT.format(
LocalDate.ofEpochDay(t2.vector[current])),
"row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion to string in UTC
readerSchema = TypeDescription.fromString("struct<t1:string,t2:string>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatch(VALUES);
BytesColumnVector bytesT1 = (BytesColumnVector) batch.cols[0];
BytesColumnVector bytesT2 = (BytesColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(timeStrings[r], bytesT1.toString(current), "row " + r);
assertEquals(
timestampToString(Instant.from(WRITER_FORMAT.parse(timeStrings[r])),
UTC),
bytesT2.toString(current),
"row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// test conversion between timestamps in UTC
readerSchema = TypeDescription.fromString("struct<t1:timestamp with local time zone,t2:timestamp>");
try (RecordReader rows = reader.rows(rowOptions.schema(readerSchema))) {
VectorizedRowBatch batch = readerSchema.createRowBatch(VALUES);
TimestampColumnVector timeT1 = (TimestampColumnVector) batch.cols[0];
TimestampColumnVector timeT2 = (TimestampColumnVector) batch.cols[1];
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(timeStrings[r] + " UTC",
timestampToString(timeT1.time[current], timeT1.nanos[current], UTC),
"row " + r);
assertEquals(
timestampToString(Instant.from(WRITER_FORMAT.parse(timeStrings[r])), UTC),
timestampToString(timeT2.time[current], timeT2.nanos[current], UTC),
"row " + r);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
}
} finally {
TimeZone.setDefault(oldDefault);
}
}
static void writeEvolutionToTimestamp(Path path,
Configuration conf,
ZoneId writerZone,
String[] values) throws IOException {
TypeDescription fileSchema =
TypeDescription.fromString("struct<l1:bigint,l2:bigint," +
"t1:tinyint,t2:tinyint," +
"d1:decimal(14,2),d2:decimal(14,2)," +
"dbl1:double,dbl2:double," +
"dt1:date,dt2:date," +
"s1:string,s2:string>");
ZoneId UTC = ZoneId.of("UTC");
DateTimeFormatter WRITER_FORMAT = ConvertTreeReaderFactory.TIMESTAMP_FORMAT
.withZone(writerZone);
DateTimeFormatter UTC_FORMAT = ConvertTreeReaderFactory.TIMESTAMP_FORMAT
.withZone(UTC);
DateTimeFormatter UTC_DATE = ConvertTreeReaderFactory.DATE_FORMAT
.withZone(UTC);
Writer writer = OrcFile.createWriter(path,
OrcFile.writerOptions(conf).setSchema(fileSchema).stripeSize(10000));
VectorizedRowBatch batch = fileSchema.createRowBatchV2();
int batchSize = batch.getMaxSize();
LongColumnVector l1 = (LongColumnVector) batch.cols[0];
LongColumnVector l2 = (LongColumnVector) batch.cols[1];
LongColumnVector t1 = (LongColumnVector) batch.cols[2];
LongColumnVector t2 = (LongColumnVector) batch.cols[3];
Decimal64ColumnVector d1 = (Decimal64ColumnVector) batch.cols[4];
Decimal64ColumnVector d2 = (Decimal64ColumnVector) batch.cols[5];
DoubleColumnVector dbl1 = (DoubleColumnVector) batch.cols[6];
DoubleColumnVector dbl2 = (DoubleColumnVector) batch.cols[7];
LongColumnVector dt1 = (LongColumnVector) batch.cols[8];
LongColumnVector dt2 = (LongColumnVector) batch.cols[9];
BytesColumnVector s1 = (BytesColumnVector) batch.cols[10];
BytesColumnVector s2 = (BytesColumnVector) batch.cols[11];
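    // The long/decimal/double pairs hold the same wall-clock text parsed in UTC
    // (*1) and in the writer zone (*2); both date columns hold the text's
    // calendar day, s1 holds the raw text, and s2 appends the writer zone id.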
for (int r = 0; r < values.length; ++r) {
int row = batch.size++;
Instant utcTime = Instant.from(UTC_FORMAT.parse(values[r]));
Instant writerTime = Instant.from(WRITER_FORMAT.parse(values[r]));
l1.vector[row] = utcTime.getEpochSecond();
l2.vector[row] = writerTime.getEpochSecond();
t1.vector[row] = r % 128;
t2.vector[row] = r % 128;
// balance out the 2 digits of scale
d1.vector[row] = utcTime.toEpochMilli() / 10;
d2.vector[row] = writerTime.toEpochMilli() / 10;
// convert to double
dbl1.vector[row] = utcTime.toEpochMilli() / 1000.0;
dbl2.vector[row] = writerTime.toEpochMilli() / 1000.0;
// convert to date
dt1.vector[row] = UTC_DATE.parse(values[r].substring(0, 10))
.getLong(ChronoField.EPOCH_DAY);
dt2.vector[row] = dt1.vector[row];
// set the strings
s1.setVal(row, values[r].getBytes(StandardCharsets.UTF_8));
String withZone = values[r] + " " + writerZone.getId();
s2.setVal(row, withZone.getBytes(StandardCharsets.UTF_8));
if (batch.size == batchSize) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
}
/**
* Tests the various conversions to timestamp.
*
   * It writes an ORC file with pairs of longs, tinyints, decimals, doubles,
   * dates, and strings and then reads it back with the types converted to
   * timestamp and timestamp with local time zone.
*
* This test is run both with and without setting the useUtc flag.
*
* It uses Australia/Sydney and America/New_York because they both have
* DST and they move in opposite directions on different days. Thus, we
* end up with four sets of offsets.
*/
@Test
public void testEvolutionToTimestamp() throws Exception {
// The number of rows in the file that we test with.
final int VALUES = 1024;
// The different timezones that we'll use for this test.
final ZoneId WRITER_ZONE = ZoneId.of("America/New_York");
final ZoneId READER_ZONE = ZoneId.of("Australia/Sydney");
final TimeZone oldDefault = TimeZone.getDefault();
final ZoneId UTC = ZoneId.of("UTC");
// generate the timestamps to use
String[] timeStrings = new String[VALUES];
for(int r=0; r < timeStrings.length; ++r) {
timeStrings[r] = String.format("%04d-%02d-27 12:34:56.1",
1960 + (r / 12), (r % 12) + 1);
}
writeEvolutionToTimestamp(testFilePath, conf, WRITER_ZONE, timeStrings);
try {
TimeZone.setDefault(TimeZone.getTimeZone(READER_ZONE));
// test timestamp, timestamp with local time zone to long
TypeDescription readerSchema = TypeDescription.fromString(
"struct<l1:timestamp," +
"l2:timestamp with local time zone," +
"t1:timestamp," +
"t2:timestamp with local time zone," +
"d1:timestamp," +
"d2:timestamp with local time zone," +
"dbl1:timestamp," +
"dbl2:timestamp with local time zone," +
"dt1:timestamp," +
"dt2:timestamp with local time zone," +
"s1:timestamp," +
"s2:timestamp with local time zone>");
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
TimestampColumnVector l1 = (TimestampColumnVector) batch.cols[0];
TimestampColumnVector l2 = (TimestampColumnVector) batch.cols[1];
TimestampColumnVector t1 = (TimestampColumnVector) batch.cols[2];
TimestampColumnVector t2 = (TimestampColumnVector) batch.cols[3];
TimestampColumnVector d1 = (TimestampColumnVector) batch.cols[4];
TimestampColumnVector d2 = (TimestampColumnVector) batch.cols[5];
TimestampColumnVector dbl1 = (TimestampColumnVector) batch.cols[6];
TimestampColumnVector dbl2 = (TimestampColumnVector) batch.cols[7];
TimestampColumnVector dt1 = (TimestampColumnVector) batch.cols[8];
TimestampColumnVector dt2 = (TimestampColumnVector) batch.cols[9];
TimestampColumnVector s1 = (TimestampColumnVector) batch.cols[10];
TimestampColumnVector s2 = (TimestampColumnVector) batch.cols[11];
OrcFile.ReaderOptions options = OrcFile.readerOptions(conf);
Reader.Options rowOptions = new Reader.Options().schema(readerSchema);
int offset = READER_ZONE.getRules().getOffset(Instant.ofEpochSecond(0, 0)).getTotalSeconds();
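      // offset is the reader zone's UTC offset at the epoch; the tinyint values
      // are treated as seconds on the local clock, so the expected instant for
      // t1 is shifted back by this amount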
try (Reader reader = OrcFile.createReader(testFilePath, options);
RecordReader rows = reader.rows(rowOptions)) {
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
String expected1 = timeStrings[r] + " " + READER_ZONE.getId();
String expected2 = timeStrings[r] + " " + WRITER_ZONE.getId();
String midnight = timeStrings[r].substring(0, 10) + " 00:00:00";
String expectedDate1 = midnight + " " + READER_ZONE.getId();
String expectedDate2 = midnight + " " + UTC.getId();
String msg = "row " + r;
assertEquals(expected1.replace(".1 ", " "),
timestampToString(l1.time[current], l1.nanos[current], READER_ZONE),
msg);
assertEquals(expected2.replace(".1 ", " "),
timestampToString(l2.time[current], l2.nanos[current], WRITER_ZONE),
msg);
assertEquals(longTimestampToString(((r % 128) - offset), READER_ZONE),
timestampToString(t1.time[current], t1.nanos[current], READER_ZONE),
msg);
assertEquals(longTimestampToString((r % 128), WRITER_ZONE),
timestampToString(t2.time[current], t2.nanos[current], WRITER_ZONE),
msg);
assertEquals(expected1,
timestampToString(d1.time[current], d1.nanos[current], READER_ZONE),
msg);
assertEquals(expected2,
timestampToString(d2.time[current], d2.nanos[current], WRITER_ZONE),
msg);
assertEquals(expected1,
timestampToString(dbl1.time[current], dbl1.nanos[current], READER_ZONE),
msg);
assertEquals(expected2,
timestampToString(dbl2.time[current], dbl2.nanos[current], WRITER_ZONE),
msg);
assertEquals(expectedDate1,
timestampToString(dt1.time[current], dt1.nanos[current], READER_ZONE),
msg);
assertEquals(expectedDate2,
timestampToString(dt2.time[current], dt2.nanos[current], UTC),
msg);
assertEquals(expected1,
timestampToString(s1.time[current], s1.nanos[current], READER_ZONE),
msg);
assertEquals(expected2,
timestampToString(s2.time[current], s2.nanos[current], WRITER_ZONE),
msg);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
// try the tests with useUtc set on
options.useUTCTimestamp(true);
try (Reader reader = OrcFile.createReader(testFilePath, options);
RecordReader rows = reader.rows(rowOptions)) {
int current = 0;
for (int r = 0; r < VALUES; ++r) {
if (current == batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
String expected1 = timeStrings[r] + " " + UTC.getId();
String expected2 = timeStrings[r] + " " + WRITER_ZONE.getId();
String midnight = timeStrings[r].substring(0, 10) + " 00:00:00";
String expectedDate = midnight + " " + UTC.getId();
String msg = "row " + r;
assertEquals(expected1.replace(".1 ", " "),
timestampToString(l1.time[current], l1.nanos[current], UTC),
msg);
assertEquals(expected2.replace(".1 ", " "),
timestampToString(l2.time[current], l2.nanos[current], WRITER_ZONE),
msg);
assertEquals(expected1,
timestampToString(d1.time[current], d1.nanos[current], UTC),
msg);
assertEquals(expected2,
timestampToString(d2.time[current], d2.nanos[current], WRITER_ZONE),
msg);
assertEquals(expected1,
timestampToString(dbl1.time[current], dbl1.nanos[current], UTC),
msg);
assertEquals(expected2,
timestampToString(dbl2.time[current], dbl2.nanos[current], WRITER_ZONE),
msg);
assertEquals(expectedDate,
timestampToString(dt1.time[current], dt1.nanos[current], UTC),
msg);
assertEquals(expectedDate,
timestampToString(dt2.time[current], dt2.nanos[current], UTC),
msg);
assertEquals(expected1,
timestampToString(s1.time[current], s1.nanos[current], UTC),
msg);
assertEquals(expected2,
timestampToString(s2.time[current], s2.nanos[current], WRITER_ZONE),
msg);
current += 1;
}
assertFalse(rows.nextBatch(batch));
}
} finally {
TimeZone.setDefault(oldDefault);
}
}
@Test
public void doubleToTimeStampOverflow() throws Exception {
floatAndDoubleToTimeStampOverflow("double",
340282347000000000000000000000000000000000.0,
1e16,
9223372036854778.0,
9000000000000000.1,
10000000000.0,
10000000.123,
-1000000.123,
-10000000000.0,
-9000000000000000.1,
-9223372036854778.0,
-1e16,
-340282347000000000000000000000000000000000.0);
}
@Test
public void floatToTimeStampPositiveOverflow() throws Exception {
floatAndDoubleToTimeStampOverflow("float",
340282347000000000000000000000000000000000.0,
1e16,
9223372036854778.0,
9000000000000000.1,
10000000000.0,
10000000.123,
-1000000.123,
-10000000000.0,
-9000000000000000.1,
-9223372036854778.0,
-1e16,
-340282347000000000000000000000000000000000.0);
}
private void floatAndDoubleToTimeStampOverflow(String typeInFileSchema,
double... values) throws Exception {
boolean isFloat = typeInFileSchema.equals("float");
TypeDescription fileSchema =
TypeDescription.fromString(String.format("struct<c1:%s>", typeInFileSchema));
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(fileSchema)
.stripeSize(10000)
.useUTCTimestamp(true));
VectorizedRowBatch batch = fileSchema.createRowBatchV2();
DoubleColumnVector fl1 = (DoubleColumnVector) batch.cols[0];
for (double v : values) {
int row = batch.size++;
fl1.vector[row] = v;
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
TypeDescription readerSchema = TypeDescription.fromString("struct<c1:timestamp>");
VectorizedRowBatch batchTimeStamp = readerSchema.createRowBatchV2();
TimestampColumnVector t1 = (TimestampColumnVector) batchTimeStamp.cols[0];
OrcFile.ReaderOptions options = OrcFile
.readerOptions(conf)
.useUTCTimestamp(true);
try (Reader reader = OrcFile.createReader(testFilePath, options);
RecordReader rows = reader.rows(reader.options().schema(readerSchema))) {
int value = 0;
while (value < values.length) {
assertTrue(rows.nextBatch(batchTimeStamp), "value " + value);
for(int row=0; row < batchTimeStamp.size; ++row) {
double expected = values[value + row];
String rowName = String.format("value %d", value + row);
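          // if scaling to milliseconds overflows a long (or flips the sign), the
          // converted timestamp is expected to be null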
boolean isPositive = ((long)Math.floor(expected) * 1000) >= 0;
if (expected * 1000 < Long.MIN_VALUE ||
expected * 1000 > Long.MAX_VALUE ||
((expected >= 0) != isPositive)) {
assertFalse(t1.noNulls, rowName);
assertTrue(t1.isNull[row], rowName);
} else {
double actual = Math.floorDiv(t1.time[row], 1000) +
t1.nanos[row] / 1_000_000_000.0;
assertEquals(expected, actual,
Math.abs(expected * (isFloat ? 0.000001 : 0.0000000000000001)), rowName);
assertFalse(t1.isNull[row], rowName);
assertTrue(t1.nanos[row] >= 0 && t1.nanos[row] < 1_000_000_000,
String.format(
"%s nanos should be 0 to 1,000,000,000 instead it's: %d",
rowName, t1.nanos[row]));
}
}
value += batchTimeStamp.size;
}
assertFalse(rows.nextBatch(batchTimeStamp));
}
}
@Test
public void testCheckAcidSchema() {
String ccSchema = "struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<a:int,b:int>>";
String lcSchema = "struct<operation:int,originaltransaction:bigint,bucket:int," +
"rowid:bigint,currenttransaction:bigint," +
"row:struct<a:int,b:int>>";
TypeDescription typeCamelCaseColumns = TypeDescription.fromString(ccSchema);
TypeDescription typeLowerCaseColumns = TypeDescription.fromString(lcSchema);
SchemaEvolution evoCc = new SchemaEvolution(typeCamelCaseColumns, null, options);
SchemaEvolution evoLc = new SchemaEvolution(typeLowerCaseColumns, null, options);
    assertTrue(evoCc.isAcid(), "Schema (" + ccSchema + ") was found to be non-acid");
    assertTrue(evoLc.isAcid(), "Schema (" + lcSchema + ") was found to be non-acid");
}
@Test
public void testAcidReaderSchema() {
String acidSchema = "struct<operation:int,originalTransaction:bigint,bucket:int," +
"rowId:bigint,currentTransaction:bigint," +
"row:struct<a:int,b:int>>";
TypeDescription fileSchema = TypeDescription.fromString(acidSchema);
TypeDescription readerSchema = TypeDescription.fromString(acidSchema);
SchemaEvolution schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertEquals(acidSchema, schemaEvolution.getReaderSchema().toString(),
String.format("Reader schema %s is not acid", schemaEvolution.getReaderSchema().toString()));
    String notAcidSchema = "struct<a:int,b:int>";
readerSchema = TypeDescription.fromString(notAcidSchema);
schemaEvolution = new SchemaEvolution(fileSchema, readerSchema, options);
assertEquals(acidSchema, schemaEvolution.getReaderSchema().toString(),
String.format("Reader schema %s is not acid", schemaEvolution.getReaderSchema().toString()));
}
}
| 119,280 | 43.557714 | 115 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestSerializationUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import com.google.common.math.LongMath;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.math.BigInteger;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
public class TestSerializationUtils {
private InputStream fromBuffer(ByteArrayOutputStream buffer) {
return new ByteArrayInputStream(buffer.toByteArray());
}
@Test
public void testDoubles() throws Exception {
double tolerance = 0.0000000000000001;
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
SerializationUtils utils = new SerializationUtils();
utils.writeDouble(buffer, 1343822337.759);
assertEquals(1343822337.759, utils.readDouble(fromBuffer(buffer)), tolerance);
buffer = new ByteArrayOutputStream();
utils.writeDouble(buffer, 0.8);
double got = utils.readDouble(fromBuffer(buffer));
assertEquals(0.8, got, tolerance);
}
@Test
public void testBigIntegers() throws Exception {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
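    // writeBigInteger zigzag-encodes the value before writing it as a varint,
    // so n is stored as 2n when n >= 0 and as -2n - 1 when n < 0; that is what
    // the single-byte expectations below check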
SerializationUtils.writeBigInteger(buffer, BigInteger.valueOf(0));
assertArrayEquals(new byte[]{0}, buffer.toByteArray());
assertEquals(0L,
SerializationUtils.readBigInteger(fromBuffer(buffer)).longValue());
buffer.reset();
SerializationUtils.writeBigInteger(buffer, BigInteger.valueOf(1));
assertArrayEquals(new byte[]{2}, buffer.toByteArray());
assertEquals(1L,
SerializationUtils.readBigInteger(fromBuffer(buffer)).longValue());
buffer.reset();
SerializationUtils.writeBigInteger(buffer, BigInteger.valueOf(-1));
assertArrayEquals(new byte[]{1}, buffer.toByteArray());
assertEquals(-1L,
SerializationUtils.readBigInteger(fromBuffer(buffer)).longValue());
buffer.reset();
SerializationUtils.writeBigInteger(buffer, BigInteger.valueOf(50));
assertArrayEquals(new byte[]{100}, buffer.toByteArray());
assertEquals(50L,
SerializationUtils.readBigInteger(fromBuffer(buffer)).longValue());
buffer.reset();
SerializationUtils.writeBigInteger(buffer, BigInteger.valueOf(-50));
assertArrayEquals(new byte[]{99}, buffer.toByteArray());
assertEquals(-50L,
SerializationUtils.readBigInteger(fromBuffer(buffer)).longValue());
for(int i=-8192; i < 8192; ++i) {
buffer.reset();
SerializationUtils.writeBigInteger(buffer, BigInteger.valueOf(i));
assertEquals(i >= -64 && i < 64 ? 1 : 2, buffer.size(),
"compare length for " + i);
assertEquals(i, SerializationUtils.readBigInteger(fromBuffer(buffer)).intValue(),
"compare result for " + i);
}
buffer.reset();
SerializationUtils.writeBigInteger(buffer,
new BigInteger("123456789abcdef0",16));
assertEquals(new BigInteger("123456789abcdef0",16),
SerializationUtils.readBigInteger(fromBuffer(buffer)));
buffer.reset();
SerializationUtils.writeBigInteger(buffer,
new BigInteger("-123456789abcdef0",16));
assertEquals(new BigInteger("-123456789abcdef0",16),
SerializationUtils.readBigInteger(fromBuffer(buffer)));
StringBuilder buf = new StringBuilder();
for(int i=0; i < 256; ++i) {
String num = Integer.toHexString(i);
if (num.length() == 1) {
buf.append('0');
}
buf.append(num);
}
buffer.reset();
SerializationUtils.writeBigInteger(buffer,
new BigInteger(buf.toString(),16));
assertEquals(new BigInteger(buf.toString(),16),
SerializationUtils.readBigInteger(fromBuffer(buffer)));
buffer.reset();
SerializationUtils.writeBigInteger(buffer,
new BigInteger("ff000000000000000000000000000000000000000000ff",16));
assertEquals(
new BigInteger("ff000000000000000000000000000000000000000000ff",16),
SerializationUtils.readBigInteger(fromBuffer(buffer)));
}
@Test
public void testSubtractionOverflow() {
// cross check results with Guava results below
SerializationUtils utils = new SerializationUtils();
assertFalse(utils.isSafeSubtract(22222222222L, Long.MIN_VALUE));
assertFalse(utils.isSafeSubtract(-22222222222L, Long.MAX_VALUE));
assertFalse(utils.isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE));
assertTrue(utils.isSafeSubtract(-1553103058346370095L, 6553103058346370095L));
assertTrue(utils.isSafeSubtract(0, Long.MAX_VALUE));
assertTrue(utils.isSafeSubtract(Long.MIN_VALUE, 0));
}
@Test
public void testSubtractionOverflowGuava() {
try {
LongMath.checkedSubtract(22222222222L, Long.MIN_VALUE);
fail("expected ArithmeticException for overflow");
} catch (ArithmeticException ex) {
assertEquals("overflow: checkedSubtract(22222222222, -9223372036854775808)", ex.getMessage());
}
try {
LongMath.checkedSubtract(-22222222222L, Long.MAX_VALUE);
fail("expected ArithmeticException for overflow");
} catch (ArithmeticException ex) {
assertEquals("overflow: checkedSubtract(-22222222222, 9223372036854775807)", ex.getMessage());
}
try {
LongMath.checkedSubtract(Long.MIN_VALUE, Long.MAX_VALUE);
fail("expected ArithmeticException for overflow");
} catch (ArithmeticException ex) {
assertEquals("overflow: checkedSubtract(-9223372036854775808, 9223372036854775807)", ex.getMessage());
}
assertEquals(-8106206116692740190L,
LongMath.checkedSubtract(-1553103058346370095L, 6553103058346370095L));
assertEquals(-Long.MAX_VALUE, LongMath.checkedSubtract(0, Long.MAX_VALUE));
assertEquals(Long.MIN_VALUE, LongMath.checkedSubtract(Long.MIN_VALUE, 0));
}
@Test
public void testRandomFloats() throws Exception {
float tolerance = 0.0000000000000001f;
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
SerializationUtils utils = new SerializationUtils();
Random rand = new Random();
int n = 100_000;
float[] expected = new float[n];
for (int i = 0; i < n; i++) {
float f = rand.nextFloat();
expected[i] = f;
utils.writeFloat(buffer, f);
}
InputStream newBuffer = fromBuffer(buffer);
for (int i = 0; i < n; i++) {
float got = utils.readFloat(newBuffer);
assertEquals(expected[i], got, tolerance);
}
}
@Test
public void testRandomDoubles() throws Exception {
double tolerance = 0.0000000000000001;
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
SerializationUtils utils = new SerializationUtils();
Random rand = new Random();
int n = 100_000;
double[] expected = new double[n];
for (int i = 0; i < n; i++) {
double d = rand.nextDouble();
expected[i] = d;
utils.writeDouble(buffer, d);
}
InputStream newBuffer = fromBuffer(buffer);
for (int i = 0; i < n; i++) {
double got = utils.readDouble(newBuffer);
assertEquals(expected[i], got, tolerance);
}
}
}
| 8,033 | 38.970149 | 108 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestStreamName.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.OrcProto;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestStreamName {
@Test
public void test1() throws Exception {
StreamName s1 = new StreamName(3, OrcProto.Stream.Kind.DATA);
StreamName s2 = new StreamName(3,
OrcProto.Stream.Kind.DICTIONARY_DATA);
StreamName s3 = new StreamName(5, OrcProto.Stream.Kind.DATA);
StreamName s4 = new StreamName(5,
OrcProto.Stream.Kind.DICTIONARY_DATA);
StreamName s1p = new StreamName(3, OrcProto.Stream.Kind.DATA);
assertTrue(s1.equals(s1));
assertFalse(s1.equals(s2));
assertFalse(s1.equals(s3));
assertTrue(s1.equals(s1p));
assertNotEquals(null, s1);
assertTrue(s1.compareTo(s2) < 0);
assertTrue(s2.compareTo(s3) < 0);
assertTrue(s3.compareTo(s4) < 0);
assertTrue(s4.compareTo(s1p) > 0);
assertEquals(0, s1p.compareTo(s1));
}
}
| 1,956 | 36.634615 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestStringHashTableDictionary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.Text;
import org.apache.orc.StringDictTestingUtils;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestStringHashTableDictionary {
/**
   * Basic test cases that add bytes directly and use the real hash function.
*/
@Test
public void test0()
throws Exception {
StringHashTableDictionary htDict = new StringHashTableDictionary(5);
List<Text> testTexts =
Stream.of(new String[]{"Alice", "Bob", "Cindy", "David", "Eason"}).map(Text::new).collect(Collectors.toList());
List<byte[]> testBytes = testTexts.stream().map(Text::getBytes).collect(Collectors.toList());
assertEquals(0, htDict.getSizeInBytes());
assertEquals(0, htDict.add(testBytes.get(0), 0, testBytes.get(0).length));
assertEquals(1, htDict.add(testBytes.get(1), 0, testBytes.get(1).length));
assertEquals(0, htDict.add(testBytes.get(0), 0, testBytes.get(0).length));
assertEquals(1, htDict.add(testBytes.get(1), 0, testBytes.get(1).length));
assertEquals(2, htDict.add(testBytes.get(2), 0, testBytes.get(2).length));
Text text = new Text();
htDict.getText(text, 0);
assertEquals("Alice", text.toString());
htDict.getText(text, 1);
assertEquals("Bob", text.toString());
htDict.getText(text, 2);
assertEquals("Cindy", text.toString());
assertEquals(htDict.size(), 3);
    // entering the fourth and fifth elements, which triggers a rehash
assertEquals(3, htDict.add(testBytes.get(3), 0, testBytes.get(3).length));
htDict.getText(text, 3);
assertEquals("David", text.toString());
assertEquals(4, htDict.add(testBytes.get(4), 0, testBytes.get(4).length));
htDict.getText(text, 4);
assertEquals("Eason", text.toString());
assertEquals(htDict.size(), 5);
    // Re-check that all previously inserted strings still map to the correct encoded values
htDict.getText(text, 0);
assertEquals("Alice", text.toString());
htDict.getText(text, 1);
assertEquals("Bob", text.toString());
htDict.getText(text, 2);
assertEquals("Cindy", text.toString());
    // Peek into the hashtable to verify the traversal order, since the hashArray object needs to stay private.
StringDictTestingUtils.checkContents(htDict, new int[]{1, 2, 3, 0 ,4}, "Bob", "Cindy", "David", "Alice", "Eason");
htDict.clear();
assertEquals(0, htDict.size());
}
/**
   * An extension of {@link StringHashTableDictionary} for testing purposes that overrides the hash function,
   * just to save the effort of working out the order sequence manually.
*/
private static class SimpleHashDictionary extends StringHashTableDictionary {
public SimpleHashDictionary(int initialCapacity) {
super(initialCapacity);
}
/**
     * Use the numeric prefix of each string as the hash value.
     * Every string used in this test suite carries its hash value as a prefix of the string content,
     * so the order returned by the traverse() method is known in advance.
*/
@Override
int getIndex(byte[] bytes, int offset, int length) {
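      // e.g. "2_Alice" lands in bucket 2: the leading digit of the string is its index.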
return (char) bytes[0] - '0';
}
}
@Test
public void test1()
throws Exception {
SimpleHashDictionary hashTableDictionary = new SimpleHashDictionary(5);
// Non-resize trivial cases
assertEquals(0, hashTableDictionary.getSizeInBytes());
assertEquals(0, hashTableDictionary.add(new Text("2_Alice")));
assertEquals(1, hashTableDictionary.add(new Text("3_Bob")));
assertEquals(0, hashTableDictionary.add(new Text("2_Alice")));
assertEquals(1, hashTableDictionary.add(new Text("3_Bob")));
assertEquals(2, hashTableDictionary.add(new Text("1_Cindy")));
Text text = new Text();
hashTableDictionary.getText(text, 0);
assertEquals("2_Alice", text.toString());
hashTableDictionary.getText(text, 1);
assertEquals("3_Bob", text.toString());
hashTableDictionary.getText(text, 2);
assertEquals("1_Cindy", text.toString());
    // entering the fourth and fifth elements, which triggers a rehash
assertEquals(3, hashTableDictionary.add(new Text("0_David")));
hashTableDictionary.getText(text, 3);
assertEquals("0_David", text.toString());
assertEquals(4, hashTableDictionary.add(new Text("4_Eason")));
hashTableDictionary.getText(text, 4);
assertEquals("4_Eason", text.toString());
    // Re-check that all previously inserted strings still map to the correct encoded values
hashTableDictionary.getText(text, 0);
assertEquals("2_Alice", text.toString());
hashTableDictionary.getText(text, 1);
assertEquals("3_Bob", text.toString());
hashTableDictionary.getText(text, 2);
assertEquals("1_Cindy", text.toString());
    // The traversal order follows each string's numeric prefix, since that prefix determines its index in the hashArray.
StringDictTestingUtils
.checkContents(hashTableDictionary, new int[]{3, 2, 0, 1, 4}, "0_David", "1_Cindy", "2_Alice", "3_Bob",
"4_Eason");
hashTableDictionary.clear();
assertEquals(0, hashTableDictionary.size());
}
}
| 6,011 | 38.552632 | 119 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestStringRedBlackTree.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.IntWritable;
import org.apache.orc.StringDictTestingUtils;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Test the red-black tree with string keys.
*/
public class TestStringRedBlackTree {
/**
* Checks the red-black tree rules to make sure that we have correctly built
* a valid tree.
*
* Properties:
* 1. Red nodes must have black children
* 2. Each node must have the same black height on both sides.
*
* @param node The id of the root of the subtree to check for the red-black
* tree properties.
* @return The black-height of the subtree.
*/
private int checkSubtree(RedBlackTree tree, int node, IntWritable count
) throws IOException {
if (node == RedBlackTree.NULL) {
return 1;
}
count.set(count.get() + 1);
boolean is_red = tree.isRed(node);
int left = tree.getLeft(node);
int right = tree.getRight(node);
if (is_red) {
if (tree.isRed(left)) {
printTree(tree, "", tree.root);
throw new IllegalStateException("Left node of " + node + " is " + left +
" and both are red.");
}
if (tree.isRed(right)) {
printTree(tree, "", tree.root);
throw new IllegalStateException("Right node of " + node + " is " +
right + " and both are red.");
}
}
int left_depth = checkSubtree(tree, left, count);
int right_depth = checkSubtree(tree, right, count);
if (left_depth != right_depth) {
printTree(tree, "", tree.root);
throw new IllegalStateException("Lopsided tree at node " + node +
" with depths " + left_depth + " and " + right_depth);
}
if (is_red) {
return left_depth;
} else {
return left_depth + 1;
}
}
/**
* Checks the validity of the entire tree. Also ensures that the number of
* nodes visited is the same as the size of the set.
*/
void checkTree(RedBlackTree tree) throws IOException {
IntWritable count = new IntWritable(0);
if (tree.isRed(tree.root)) {
printTree(tree, "", tree.root);
throw new IllegalStateException("root is red");
}
checkSubtree(tree, tree.root, count);
if (count.get() != tree.size) {
printTree(tree, "", tree.root);
throw new IllegalStateException("Broken tree! visited= " + count.get() +
" size=" + tree.size);
}
}
void printTree(RedBlackTree tree, String indent, int node
) throws IOException {
if (node == RedBlackTree.NULL) {
System.err.println(indent + "NULL");
} else {
System.err.println(indent + "Node " + node + " color " +
(tree.isRed(node) ? "red" : "black"));
printTree(tree, indent + " ", tree.getLeft(node));
printTree(tree, indent + " ", tree.getRight(node));
}
}
StringRedBlackTree buildTree(String... params) throws IOException {
StringRedBlackTree result = new StringRedBlackTree(1000);
for(String word: params) {
result.add(word);
checkTree(result);
}
return result;
}
@Test
public void test1() throws Exception {
StringRedBlackTree tree = new StringRedBlackTree(5);
assertEquals(0, tree.getSizeInBytes());
checkTree(tree);
assertEquals(0, tree.add("owen"));
checkTree(tree);
assertEquals(1, tree.add("ashutosh"));
checkTree(tree);
assertEquals(0, tree.add("owen"));
checkTree(tree);
assertEquals(2, tree.add("alan"));
checkTree(tree);
assertEquals(2, tree.add("alan"));
checkTree(tree);
assertEquals(1, tree.add("ashutosh"));
checkTree(tree);
assertEquals(3, tree.add("greg"));
checkTree(tree);
assertEquals(4, tree.add("eric"));
checkTree(tree);
assertEquals(5, tree.add("arun"));
checkTree(tree);
assertEquals(6, tree.size());
checkTree(tree);
assertEquals(6, tree.add("eric14"));
checkTree(tree);
assertEquals(7, tree.add("o"));
checkTree(tree);
assertEquals(8, tree.add("ziggy"));
checkTree(tree);
assertEquals(9, tree.add("z"));
checkTree(tree);
StringDictTestingUtils.checkContents(tree, new int[]{2,5,1,4,6,3,7,0,9,8},
"alan", "arun", "ashutosh", "eric", "eric14", "greg",
"o", "owen", "z", "ziggy");
assertEquals(32888, tree.getSizeInBytes());
// check that adding greg again bumps the count
assertEquals(3, tree.add("greg"));
assertEquals(41, tree.getCharacterSize());
// add some more strings to test the different branches of the
// rebalancing
assertEquals(10, tree.add("zak"));
checkTree(tree);
assertEquals(11, tree.add("eric1"));
checkTree(tree);
assertEquals(12, tree.add("ash"));
checkTree(tree);
assertEquals(13, tree.add("harry"));
checkTree(tree);
assertEquals(14, tree.add("john"));
checkTree(tree);
tree.clear();
checkTree(tree);
assertEquals(0, tree.getSizeInBytes());
assertEquals(0, tree.getCharacterSize());
}
@Test
public void test2() throws Exception {
StringRedBlackTree tree =
buildTree("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
"m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z");
assertEquals(26, tree.size());
StringDictTestingUtils.checkContents(tree, new int[]{0,1,2, 3,4,5, 6,7,8, 9,10,11, 12,13,14,
15,16,17, 18,19,20, 21,22,23, 24,25},
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j","k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z");
}
@Test
public void test3() throws Exception {
StringRedBlackTree tree =
buildTree("z", "y", "x", "w", "v", "u", "t", "s", "r", "q", "p", "o", "n",
"m", "l", "k", "j", "i", "h", "g", "f", "e", "d", "c", "b", "a");
assertEquals(26, tree.size());
StringDictTestingUtils.checkContents(tree, new int[]{25,24,23, 22,21,20, 19,18,17, 16,15,14,
13,12,11, 10,9,8, 7,6,5, 4,3,2, 1,0},
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z");
}
}
| 6,999 | 33.653465 | 96 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestWriterImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestWriterImpl {
Path workDir = new Path(System.getProperty("test.tmp.dir"));
Configuration conf;
FileSystem fs;
Path testFilePath;
TypeDescription schema;
@BeforeEach
public void openFileSystem() throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path("testWriterImpl.orc");
fs.create(testFilePath, true);
schema = TypeDescription.fromString("struct<x:int,y:int>");
}
@AfterEach
public void deleteTestFile() throws Exception {
fs.delete(testFilePath, false);
}
@Test
public void testDefaultOverwriteFlagForWriter() throws Exception {
assertThrows(IOException.class, () -> {
// default value of the overwrite flag is false, so this should fail
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
w.close();
});
}
@Test
public void testOverriddenOverwriteFlagForWriter() throws Exception {
// overriding the flag should result in a successful write (no exception)
conf.set(OrcConf.OVERWRITE_OUTPUT_FILE.getAttribute(), "true");
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
w.close();
// We should have no stripes available
assertEquals(0, w.getStripes().size());
}
@Test
public void testNoBFIfNoIndex() throws Exception {
// overriding the flag should result in a successful write (no exception)
conf.set(OrcConf.OVERWRITE_OUTPUT_FILE.getAttribute(), "true");
// Enable bloomfilter, but disable index
conf.set(OrcConf.ROW_INDEX_STRIDE.getAttribute(), "0");
conf.set(OrcConf.BLOOM_FILTER_COLUMNS.getAttribute(), "*");
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
w.close();
}
@Test
public void testNoIndexIfEnableIndexIsFalse() throws Exception {
conf.set(OrcConf.OVERWRITE_OUTPUT_FILE.getAttribute(), "true");
conf.set(OrcConf.ROW_INDEX_STRIDE.getAttribute(), "0");
conf.setBoolean(OrcConf.ENABLE_INDEXES.getAttribute(), false);
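    // With indexes disabled, every stripe should report an index length of 0 (checked below).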
VectorizedRowBatch b = schema.createRowBatch();
LongColumnVector f1 = (LongColumnVector) b.cols[0];
LongColumnVector f2 = (LongColumnVector) b.cols[1];
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
long rowCount = 1000;
for (int i = 0; i < rowCount; i++) {
      f1.vector[b.size] = 1;
      f2.vector[b.size] = 2;
b.size += 1;
if (b.size == 10) {
w.addRowBatch(b);
b.reset();
}
}
w.close();
for (StripeInformation information: w.getStripes()) {
assertEquals(0, information.getIndexLength());
}
}
@Test
public void testEnableDisableIndex() {
conf.set(OrcConf.ROW_INDEX_STRIDE.getAttribute(), "10000");
OrcFile.WriterOptions writerOptions = OrcFile.writerOptions(conf);
writerOptions.buildIndex(false);
assertEquals(writerOptions.getRowIndexStride(), 0);
conf.set(OrcConf.ENABLE_INDEXES.getAttribute(), "true");
OrcFile.WriterOptions writerOptions2 = OrcFile.writerOptions(conf);
writerOptions2.rowIndexStride(0);
assertFalse(writerOptions2.isBuildIndex());
}
@Test
public void testStripes() throws Exception {
conf.set(OrcConf.OVERWRITE_OUTPUT_FILE.getAttribute(), "true");
VectorizedRowBatch b = schema.createRowBatch();
LongColumnVector f1 = (LongColumnVector) b.cols[0];
LongColumnVector f2 = (LongColumnVector) b.cols[1];
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
long value = 0;
long rowCount = 1024;
while (value < rowCount) {
f1.vector[b.size] = Long.MIN_VALUE + value;
f2.vector[b.size] = Long.MAX_VALUE - value;
value += 1;
b.size += 1;
if (b.size == b.getMaxSize()) {
w.addRowBatch(b);
b.reset();
}
}
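    // No stripes are visible until the writer is closed; close() flushes the final stripe.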
assertEquals(0, w.getStripes().size());
w.close();
assertEquals(1, w.getStripes().size());
assertEquals(rowCount, w.getNumberOfRows());
Reader r = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
assertEquals(r.getStripes(), w.getStripes());
}
@Test
public void testStripeRowCountLimit() throws Exception {
conf.set(OrcConf.OVERWRITE_OUTPUT_FILE.getAttribute(), "true");
    conf.set(OrcConf.STRIPE_ROW_COUNT.getAttribute(), "100");
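    // 1000 rows against a 100-row stripe limit should flush 10 stripes (asserted below).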
VectorizedRowBatch b = schema.createRowBatch();
LongColumnVector f1 = (LongColumnVector) b.cols[0];
LongColumnVector f2 = (LongColumnVector) b.cols[1];
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
long rowCount = 1000;
for (int i = 0; i < rowCount; i++) {
      f1.vector[b.size] = Long.MIN_VALUE;
      f2.vector[b.size] = Long.MAX_VALUE;
b.size += 1;
if (b.size == 10) {
w.addRowBatch(b);
b.reset();
}
}
w.close();
assertEquals(10, w.getStripes().size());
}
@Test
public void testCloseIsIdempotent() throws IOException {
conf.set(OrcConf.OVERWRITE_OUTPUT_FILE.getAttribute(), "true");
Writer w = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).setSchema(schema));
w.close();
w.close();
}
}
| 6,896 | 34.735751 | 99 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestZlib.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.CompressionCodec;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.ByteBuffer;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.fail;
public class TestZlib {
@Test
public void testNoOverflow() throws Exception {
ByteBuffer in = ByteBuffer.allocate(10);
ByteBuffer out = ByteBuffer.allocate(10);
in.put(new byte[]{1,2,3,4,5,6,7,10});
in.flip();
CompressionCodec codec = new ZlibCodec();
assertFalse(codec.compress(in, out, null,
codec.getDefaultOptions()));
}
@Test
public void testCorrupt() throws Exception {
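    // Deliberately malformed compressed data: decompress() is expected to throw IOException.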
ByteBuffer buf = ByteBuffer.allocate(1000);
buf.put(new byte[]{127,-128,0,99,98,-1});
buf.flip();
CompressionCodec codec = new ZlibCodec();
ByteBuffer out = ByteBuffer.allocate(1000);
try {
codec.decompress(buf, out);
fail();
} catch (IOException ioe) {
// EXPECTED
}
}
}
| 1,828 | 30.534483 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/TestZstd.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import io.airlift.compress.zstd.ZstdCompressor;
import io.airlift.compress.zstd.ZstdDecompressor;
import org.apache.orc.CompressionCodec;
import org.apache.orc.CompressionKind;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestZstd {
@Test
public void testNoOverflow() throws Exception {
ByteBuffer in = ByteBuffer.allocate(10);
ByteBuffer out = ByteBuffer.allocate(10);
in.put(new byte[]{1,2,3,4,5,6,7,10});
in.flip();
CompressionCodec codec = new AircompressorCodec(
CompressionKind.ZSTD, new ZstdCompressor(), new ZstdDecompressor());
assertFalse(codec.compress(in, out, null,
codec.getDefaultOptions()));
}
}
| 1,591 | 33.608696 | 76 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/ATestFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.OrcFilterContextImpl;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.Arrays;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ATestFilter {
protected final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createString())
.addField("f3", TypeDescription.createDecimal().withPrecision(38).withScale(2))
.addField("f4", TypeDescription.createDouble())
.addField("f5", TypeDescription.createTimestamp());
protected final OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
protected final VectorizedRowBatch batch = schema.createRowBatch();
protected void setBatch(Long[] f1Values, String[] f2Values) {
setBatch(f1Values, f2Values, null, null, null);
}
protected void setBatch(Long[] f1Values,
String[] f2Values,
HiveDecimalWritable[] f3Values,
Double[] f4Values,
Timestamp[] f5Values) {
batch.reset();
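    // A null entry in any of the value arrays marks the corresponding row as null in that column vector.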
for (int i = 0; i < f1Values.length; i++) {
setLong(f1Values[i], (LongColumnVector) batch.cols[0], i);
setString(f2Values[i], (BytesColumnVector) batch.cols[1], i);
if (f3Values != null) {
setDecimal(f3Values[i], (DecimalColumnVector) batch.cols[2], i);
}
if (f4Values != null) {
setDouble(f4Values[i], (DoubleColumnVector) batch.cols[3], i);
}
if (f5Values != null) {
setTimestamp(f5Values[i], (TimestampColumnVector) batch.cols[4], i);
}
}
batch.size = f1Values.length;
fc.setBatch(batch);
}
private void setTimestamp(Timestamp value, TimestampColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
v.getScratchTimestamp().setTime(value.getTime());
v.getScratchTimestamp().setNanos(value.getNanos());
v.setFromScratchTimestamp(idx);
}
}
private void setDouble(Double value, DoubleColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
v.vector[idx] = value;
}
}
private void setDecimal(HiveDecimalWritable value, DecimalColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
v.vector[idx] = value;
}
}
private void setString(String value, BytesColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
v.vector[idx] = bytes;
v.start[idx] = 0;
v.length[idx] = bytes.length;
}
}
private void setLong(Long value, LongColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
v.vector[idx] = value;
}
}
protected void validateSelected(int... v) {
validateSelected(fc, v);
}
static void validateSelected(OrcFilterContext fc, int... v) {
assertTrue(fc.isSelectedInUse());
assertEquals(v.length, fc.getSelectedSize());
assertArrayEquals(v, Arrays.copyOf(fc.getSelected(), v.length));
}
protected void validateAllSelected(int size) {
validateAllSelected(fc, size);
}
static void validateAllSelected(OrcFilterContext fc, int size) {
assertFalse(fc.isSelectedInUse());
assertEquals(size, fc.getSelectedSize());
}
protected void validateNoneSelected() {
validateNoneSelected(fc);
}
static void validateNoneSelected(OrcFilterContext fc) {
assertTrue(fc.isSelectedInUse());
assertEquals(0, fc.getSelectedSize());
}
protected void filter(VectorFilter filter) {
BatchFilterFactory.create(filter, null).accept(fc);
}
}
| 5,590 | 32.279762 | 86 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/FilterUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.filter.BatchFilter;
import org.apache.orc.impl.filter.leaf.TestFilters;
public class FilterUtils {
public static BatchFilter createVectorFilter(SearchArgument sArg,
TypeDescription readSchema) {
return TestFilters.createBatchFilter(sArg,
readSchema,
OrcFile.Version.UNSTABLE_PRE_2_0,
false);
}
}
| 1,469 | 38.72973 | 76 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/IsNullFilterTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class IsNullFilterTest extends ATestFilter {
@Test
public void nullFilterTest() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.isNull("f1", PredicateLeaf.Type.LONG)
.isNull("f2", PredicateLeaf.Type.STRING)
.end()
.build();
setBatch(new Long[] {1L, 2L, null, 4L, 5L, null},
new String[] {"a", "b", "c", null, "e", "f"});
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(2, 3, 5);
}
@Test
public void repeatedNullFilterTest() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.equals("f2", PredicateLeaf.Type.STRING, "c")
.isNull("f1", PredicateLeaf.Type.LONG)
.end()
.build();
setBatch(new Long[] {null, null, null, null, null, null},
new String[] {"a", "b", "c", "d", "e", "f"});
batch.cols[0].isRepeating = true;
batch.cols[0].noNulls = false;
batch.cols[1].isRepeating = false;
batch.cols[1].noNulls = false;
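    // f1 is a repeating null vector, so the isNull(f1) branch selects every row.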
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateAllSelected(6);
}
@Test
public void notNullFilterTest() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.startOr()
.isNull("f1", PredicateLeaf.Type.LONG)
.isNull("f2", PredicateLeaf.Type.STRING)
.end()
.end()
.build();
setBatch(new Long[] {1L, 2L, null, 4L, 5L, null},
new String[] {"a", "b", "c", null, "e", "f"});
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(0, 1, 4);
}
@Test
public void repeatedNotNullFilterTest() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.equals("f2", PredicateLeaf.Type.STRING, "c")
.startNot()
.isNull("f1", PredicateLeaf.Type.LONG)
.end()
.end()
.build();
setBatch(new Long[] {null, null, null, null, null, null},
new String[] {"a", "b", "c", "d", "e", "f"});
batch.cols[0].isRepeating = true;
batch.cols[0].noNulls = false;
batch.cols[1].isRepeating = false;
batch.cols[1].noNulls = true;
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(2);
}
@Test
public void repeatedNotNullFilterNoNullsTest() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.equals("f2", PredicateLeaf.Type.STRING, "c")
.startNot()
.isNull("f1", PredicateLeaf.Type.LONG)
.end()
.end()
.build();
setBatch(new Long[] {1L, 1L, 1L, 1L, 1L, 1L},
new String[] {"a", "b", "c", "d", "e", "f"});
batch.cols[0].isRepeating = true;
batch.cols[0].noNulls = true;
batch.cols[1].isRepeating = false;
batch.cols[1].noNulls = true;
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateAllSelected(6);
}
} | 4,213 | 31.167939 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/MyFilterService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import com.google.auto.service.AutoService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.orc.filter.BatchFilter;
import org.apache.orc.filter.PluginFilterService;
import java.util.Locale;
@AutoService(PluginFilterService.class)
public class MyFilterService implements PluginFilterService {
@Override
public BatchFilter getFilter(String filePath, Configuration conf) {
if (!filePath.matches(conf.get("my.filter.scope", ""))) {
return null;
}
switch (conf.get("my.filter.name", "")) {
case "my_str_i_eq":
return makeFilter(new StringIgnoreCaseEquals(conf));
case "my_long_abs_eq":
return makeFilter(new LongAbsEquals(conf));
default:
return null;
}
}
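  // The service is driven purely by configuration: "my.filter.name" selects the filter,
  // "my.filter.col.name"/"my.filter.col.value" parameterize it, and "my.filter.scope" is a
  // regex that the file path must match before a filter is returned.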
private static BatchFilter makeFilter(LeafFilter filter) {
return BatchFilterFactory.create(filter, new String[] {filter.getColName()});
}
public static class StringIgnoreCaseEquals extends LeafFilter {
private final String value;
private final Locale locale;
protected StringIgnoreCaseEquals(Configuration conf) {
this(conf.get("my.filter.col.name"),
conf.get("my.filter.col.value"),
conf.get("my.filter.lang_tag") == null ?
Locale.ROOT :
Locale.forLanguageTag(conf.get("my.filter.lang_tag")));
}
protected StringIgnoreCaseEquals(String colName, String value, Locale locale) {
super(colName, false);
if (colName.isEmpty()) {
throw new IllegalArgumentException("Filter needs a valid column name");
}
this.locale = locale;
this.value = value.toLowerCase(locale);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return ((BytesColumnVector) v).toString(rowIdx).toLowerCase(locale).equals(value);
}
}
public static class LongAbsEquals extends LeafFilter {
private final long value;
protected LongAbsEquals(Configuration conf) {
this(conf.get("my.filter.col.name"),
conf.getLong("my.filter.col.value", -1));
}
protected LongAbsEquals(String colName, long value) {
super(colName, false);
      if (colName.isEmpty()) {
        throw new IllegalArgumentException("Filter needs a valid column name");
      }
this.value = Math.abs(value);
}
@Override
protected boolean allow(ColumnVector v, int rowIdx) {
return Math.abs(((LongColumnVector) v).vector[rowIdx]) == value;
}
}
}
| 3,434 | 33.009901 | 88 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestAndFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.impl.filter.leaf.TestFilters;
import org.junit.jupiter.api.Test;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestAndFilter extends ATestFilter {
@Test
public void testAndSelectsNothing() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument s = SearchArgumentFactory.newBuilder()
.startAnd()
.equals("f1", PredicateLeaf.Type.LONG, 3L)
.equals("f1", PredicateLeaf.Type.LONG, 4L)
.end()
.build();
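    // f1 cannot equal both 3 and 4, so the AND matches no row.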
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(s,
schema,
OrcFile.Version.CURRENT);
assertFalse(fc.isSelectedInUse());
f.accept(fc);
validateNoneSelected();
}
@Test
public void testANDConversion() throws FilterFactory.UnSupportedSArgException {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startAnd()
.in("f1", PredicateLeaf.Type.LONG, 1L, 2L, 3L)
.in("f2", PredicateLeaf.Type.STRING, "a", "b", "c")
.end()
.build();
Set<String> colIds = new HashSet<>();
VectorFilter f = FilterFactory.createSArgFilter(sarg.getCompactExpression(),
colIds,
sarg.getLeaves(),
schema,
false,
OrcFile.Version.CURRENT);
assertNotNull(f);
assertTrue(f instanceof AndFilter);
assertEquals(2, ((AndFilter) f).filters.length);
assertEquals(2, colIds.size());
assertTrue(colIds.contains("f1"));
assertTrue(colIds.contains("f2"));
// Setup the data such that the AND condition should not select any row
setBatch(
new Long[] {1L, 0L, 2L, 4L, 3L},
new String[] {"z", "a", "y", "b", "x"});
fc.setBatch(batch);
filter(f);
assertTrue(fc.isSelectedInUse());
assertEquals(0, fc.getSelectedSize());
}
} | 3,534 | 37.010753 | 90 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestConvFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.OrcFilterContextImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.sql.Date;
import java.text.ParseException;
import java.text.SimpleDateFormat;
public class TestConvFilter {
private final int scale = 4;
private final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createBoolean())
.addField("f2", TypeDescription.createDate())
.addField("f3", TypeDescription.createDecimal().withPrecision(18).withScale(scale));
private final OrcFilterContextImpl fc = new OrcFilterContextImpl(schema, false);
private final VectorizedRowBatch batch = schema.createRowBatchV2();
@BeforeEach
public void setup() throws ParseException {
setBatch();
}
@Test
public void testBooleanEquals() {
// Equals
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f1", PredicateLeaf.Type.BOOLEAN, true)
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 0, 3, 4);
}
@Test
public void testBooleanIn() {
// Equals
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f1", PredicateLeaf.Type.BOOLEAN, false)
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 1, 2);
}
@Test
public void testBooleanBetween() {
    // Between
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.between("f1", PredicateLeaf.Type.BOOLEAN, false, true)
.end()
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 0, 1, 2, 3, 4);
}
@Test
public void testDateEquals() throws ParseException {
// Equals
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f2", PredicateLeaf.Type.DATE, date("2000-01-01"))
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 1);
}
@Test
public void testDateIn() throws ParseException {
    // In
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f2", PredicateLeaf.Type.DATE, date("2000-01-01"), date("2100-06-07"))
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 1, 4);
}
@Test
public void testDateBetween() throws ParseException {
    // Between
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.between("f2", PredicateLeaf.Type.DATE, date("2000-01-01"), date("2100-06-07"))
.end()
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 1, 2, 3, 4);
}
@Test
public void testDecimalEquals() {
// Equals
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f3", PredicateLeaf.Type.DECIMAL, decimal(12345678))
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 2);
}
@Test
public void testDecimalIn() {
    // In
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f3", PredicateLeaf.Type.DECIMAL, decimal(0), decimal(Long.MAX_VALUE / 18))
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 1, 3);
}
@Test
public void testDecimalBetween() {
    // Between
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.between("f3", PredicateLeaf.Type.DECIMAL, decimal(0), decimal(Long.MAX_VALUE / 18))
.end()
.build();
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
ATestFilter.validateSelected(fc, 1, 2, 3);
}
protected void setBatch(Boolean[] f1Values, Date[] f2Values, HiveDecimalWritable[] f3Values) {
batch.reset();
for (int i = 0; i < f1Values.length; i++) {
setBoolean(f1Values[i], (LongColumnVector) batch.cols[0], i);
setDate(f2Values[i], (LongColumnVector) batch.cols[1], i);
setDecimal(f3Values[i], (LongColumnVector) batch.cols[2], i);
}
batch.size = f1Values.length;
fc.setBatch(batch);
}
private void setDecimal(HiveDecimalWritable value, LongColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
assert (HiveDecimalWritable.isPrecisionDecimal64(value.precision())
&& value.scale() <= scale);
v.isNull[idx] = false;
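      // Decimal64 columns store the value as a scaled long; serialize64 renders it at the column scale.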
v.vector[idx] = value.serialize64(scale);
}
}
private void setBoolean(Boolean value, LongColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
v.vector[idx] = value ? 1 : 0;
}
}
private void setDate(Date value, LongColumnVector v, int idx) {
if (value == null) {
v.noNulls = false;
v.isNull[idx] = true;
} else {
v.isNull[idx] = false;
v.vector[idx] = value.toLocalDate().toEpochDay();
}
}
private final SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd");
private Date date(String value) throws ParseException {
return new Date(fmt.parse(value).getTime());
}
private HiveDecimalWritable decimal(long lValue) {
return new HiveDecimalWritable(HiveDecimal.create(lValue, scale));
}
private void setBatch() throws ParseException {
setBatch(new Boolean[] {true, false, false, true, true, null},
new Date[] {
date("1900-01-01"),
date("2000-01-01"),
date("2000-01-02"),
date("2019-12-31"),
date("2100-06-07"),
null
},
new HiveDecimalWritable[] {
new HiveDecimalWritable(HiveDecimal.create(Long.MIN_VALUE / 9, scale)),
new HiveDecimalWritable(HiveDecimal.create(0, scale)),
new HiveDecimalWritable(HiveDecimal.create(12345678, scale)),
new HiveDecimalWritable(HiveDecimal.create(Long.MAX_VALUE / 18, scale)),
new HiveDecimalWritable(HiveDecimal.create(Long.MAX_VALUE / 9, scale)),
null
});
}
}
| 7,568 | 31.625 | 96 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestNotFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.impl.filter.leaf.TestFilters;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestNotFilter extends ATestFilter {
@Test
public void testUnboundedNot() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f1", PredicateLeaf.Type.LONG, 3L, 5L)
.end()
.build();
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(sArg, schema);
f.accept(fc.setBatch(batch));
validateSelected(0, 1, 3, 5);
}
@Test
public void testEmptyUnbounded() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f1", PredicateLeaf.Type.LONG, 7L, 8L)
.end()
.build();
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(sArg, schema);
f.accept(fc.setBatch(batch));
assertEquals(6, fc.getSelectedSize());
assertArrayEquals(new int[] {0, 1, 2, 3, 4, 5},
Arrays.copyOf(fc.getSelected(), fc.getSelectedSize()));
}
@Test
public void testBounded() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.in("f2", PredicateLeaf.Type.STRING, "b", "c")
.startNot()
.in("f1", PredicateLeaf.Type.LONG, 2L, 8L)
.end()
.end()
.build();
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(sArg, schema);
f.accept(fc.setBatch(batch));
validateSelected(2);
}
@Test
public void testEmptyBounded() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.in("f2", PredicateLeaf.Type.STRING, "b", "c")
.startNot()
.in("f1", PredicateLeaf.Type.LONG, 7L, 8L)
.end()
.end()
.build();
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(sArg, schema);
f.accept(fc.setBatch(batch));
validateSelected(1, 2);
}
@Test
public void testNotAndPushDown() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.startAnd()
.equals("f1", PredicateLeaf.Type.LONG, 3L)
.equals("f2", PredicateLeaf.Type.STRING, "c")
.end()
.end()
.build();
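    // NOT(f1 = 3 AND f2 = "c") drops only row 2, where both predicates hold.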
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(sArg, schema);
f.accept(fc.setBatch(batch));
validateSelected(0, 1, 3, 4, 5);
}
@Test
public void testNotOrPushDown() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.startOr()
.equals("f1", PredicateLeaf.Type.LONG, 3L)
.equals("f2", PredicateLeaf.Type.STRING, "d")
.end()
.end()
.build();
Consumer<OrcFilterContext> f = TestFilters.createBatchFilter(sArg, schema);
f.accept(fc.setBatch(batch));
validateSelected(0, 1, 4, 5);
}
} | 4,636 | 31.886525 | 84 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestOrFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcFile;
import org.junit.jupiter.api.Test;
import java.util.HashSet;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestOrFilter extends ATestFilter {
@Test
public void testORConversion() throws FilterFactory.UnSupportedSArgException {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.startOr()
.in("f1", PredicateLeaf.Type.LONG, 1L, 2L, 3L)
.in("f2", PredicateLeaf.Type.STRING, "a", "b", "c")
.end()
.build();
Set<String> colIds = new HashSet<>();
VectorFilter f = FilterFactory.createSArgFilter(sarg.getCompactExpression(),
colIds,
sarg.getLeaves(),
schema,
false,
OrcFile.Version.CURRENT);
assertNotNull(f);
assertTrue(f instanceof OrFilter);
assertEquals(2, ((OrFilter) f).filters.length);
assertEquals(2, colIds.size());
assertTrue(colIds.contains("f1"));
assertTrue(colIds.contains("f2"));
// Setup the data such that the OR condition should select every row
setBatch(
new Long[] {1L, 0L, 2L, 4L, 3L},
new String[] {"z", "a", "y", "b", "x"});
fc.setBatch(batch);
filter(f);
validateAllSelected(5);
}
}
| 2,600 | 37.820896 | 80 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestPluginFilterService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.filter.BatchFilter;
import org.junit.jupiter.api.Test;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestPluginFilterService {
private final Configuration conf;
public TestPluginFilterService() {
conf = new Configuration();
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "aBcd");
conf.set("my.filter.scope", "file://db/table1/.*");
}
@Test
public void testFoundFilter() {
conf.set("my.filter.name", "my_str_i_eq");
assertNotNull(FilterFactory.findPluginFilters("file://db/table1/file1", conf));
}
@Test
public void testErrorCreatingFilter() {
Configuration localConf = new Configuration(conf);
localConf.set("my.filter.name", "my_str_i_eq");
localConf.set("my.filter.col.name", "");
assertThrows(IllegalArgumentException.class,
() -> FilterFactory.findPluginFilters("file://db/table1/file1", localConf),
"Filter needs a valid column name");
}
@Test
public void testMissingFilter() {
assertTrue(FilterFactory.findPluginFilters("file://db/table11/file1", conf).isEmpty());
}
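  // Reflectively exposes the private FilterFactory#getAllowedFilters(List, List) so the
  // allow-list behaviour can be exercised directly in the tests below.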
  private Method getAllowedFilters() {
    try {
      Method method = FilterFactory.class.getDeclaredMethod("getAllowedFilters", List.class, List.class);
      method.setAccessible(true);
      return method;
    } catch (NoSuchMethodException e) {
      throw new IllegalStateException("FilterFactory#getAllowedFilters(List, List) is missing", e);
    }
  }
@Test
public void testHitAllowListFilter() throws Exception {
conf.set("my.filter.name", "my_str_i_eq");
// Hit the allowlist.
List<String> allowListHit = new ArrayList<>();
allowListHit.add("org.apache.orc.impl.filter.BatchFilterFactory$BatchFilterImpl");
List<BatchFilter> pluginFilters = FilterFactory.findPluginFilters("file://db/table1/file1", conf);
List<BatchFilter> allowListFilter = (List<BatchFilter>)getAllowedFilters().invoke(null, pluginFilters, allowListHit);
assertEquals(1, allowListFilter.size());
}
@Test
public void testAllowListFilterAllowAll() throws Exception {
conf.set("my.filter.name", "my_str_i_eq");
    // Allow every discovered filter via the "*" wildcard entry.
List<String> allowListHit = new ArrayList<>();
allowListHit.add("*");
List<BatchFilter> pluginFilters = FilterFactory.findPluginFilters("file://db/table1/file1", conf);
List<BatchFilter> allowListFilter = (List<BatchFilter>)getAllowedFilters().invoke(null, pluginFilters, allowListHit);
assertEquals(1, allowListFilter.size());
}
@Test
public void testAllowListFilterDisallowAll() throws Exception {
conf.set("my.filter.name", "my_str_i_eq");
List<BatchFilter> pluginFilters = FilterFactory.findPluginFilters("file://db/table1/file1", conf);
List<BatchFilter> allowListFilter = (List<BatchFilter>)getAllowedFilters().invoke(null, pluginFilters, new ArrayList<>());
List<BatchFilter> allowListFilterWithNull = (List<BatchFilter>)getAllowedFilters().invoke(null, pluginFilters, null);
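    // Both an empty allowlist and a null allowlist reject every discovered filter.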
assertEquals(0, allowListFilter.size());
assertEquals(0, allowListFilterWithNull.size());
}
}
| 4,292 | 36.330435 | 126 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestPluginFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.filter.BatchFilter;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
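/**
 * Verifies how a configured plugin filter is combined with a SearchArgument-based
 * filter by FilterFactory.createBatchFilter, including the cases where the plugin
 * is disabled or the file path falls outside the configured scope.
 */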
public class TestPluginFilters extends ATestFilter {
@Test
public void testPluginFilterWithSArg() {
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"a", "B", "c", "dE", "e", "f"});
// Define the plugin filter
Configuration conf = new Configuration();
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(conf, true);
conf.set("my.filter.name", "my_str_i_eq");
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "de");
conf.set("my.filter.scope", "file://db/table1/.*");
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 2L, 4L, 6L)
.build();
// Setup Options
Reader.Options opts = new Reader.Options(conf)
.searchArgument(sArg, new String[] {"f1"})
.allowSARGToFilter(true);
BatchFilter f = FilterFactory.createBatchFilter(opts,
schema,
false,
OrcFile.Version.CURRENT,
false,
"file://db/table1/file1",
conf);
assertTrue(f instanceof BatchFilterFactory.AndBatchFilterImpl,
"Filter should be an AND Batch filter");
assertArrayEquals(new String[] {"f1", "f2"}, f.getColumnNames());
f.accept(fc.setBatch(batch));
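    // Only row 3 has f1 in (2, 4, 6) and f2 equal to "de" ignoring case.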
validateSelected(3);
}
@Test
public void testPluginSelectsNone() {
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"a", "B", "c", "dE", "e", "f"});
// Define the plugin filter
Configuration conf = new Configuration();
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(conf, true);
conf.set("my.filter.name", "my_str_i_eq");
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "g");
conf.set("my.filter.scope", "file://db/table1/.*");
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 2L, 4L, 6L)
.build();
// Setup Options
Reader.Options opts = new Reader.Options(conf)
.searchArgument(sArg, new String[] {"f1"})
.allowSARGToFilter(true);
BatchFilter f = FilterFactory.createBatchFilter(opts,
schema,
false,
OrcFile.Version.CURRENT,
false,
"file://db/table1/file1",
conf);
assertTrue(f instanceof BatchFilterFactory.AndBatchFilterImpl,
"Filter should be an AND Batch filter");
f.accept(fc.setBatch(batch));
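    // No f2 value equals "g" (ignoring case), so the combined filter selects nothing.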
validateNoneSelected();
}
@Test
public void testPluginDisabled() {
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"a", "B", "c", "dE", "e", "f"});
// Define the plugin filter
Configuration conf = new Configuration();
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(conf, false);
conf.set("my.filter.name", "my_str_i_eq");
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "g");
conf.set("my.filter.scope", "file://db/table1/.*");
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 2L, 4L, 6L)
.build();
// Setup Options
Reader.Options opts = new Reader.Options(conf)
.searchArgument(sArg, new String[] {"f1"})
.allowSARGToFilter(true);
BatchFilter f = FilterFactory.createBatchFilter(opts,
schema,
false,
OrcFile.Version.CURRENT,
false,
"file://db/table1/file1",
conf);
assertFalse(f instanceof BatchFilterFactory.AndBatchFilterImpl,
"Filter should not be an AND Batch filter");
f.accept(fc.setBatch(batch));
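    // With the plugin disabled, only the SArg on f1 applies: rows 1, 3 and 5 hold 2, 4 and 6.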
validateSelected(1, 3, 5);
}
@Test
public void testPluginNonMatchingPath() {
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"a", "B", "c", "dE", "e", "f"});
// Define the plugin filter
Configuration conf = new Configuration();
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(conf, true);
conf.set("my.filter.name", "my_str_i_eq");
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "g");
conf.set("my.filter.scope", "file://db/table1/.*");
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 2L, 4L, 6L)
.build();
// Setup Options
Reader.Options opts = new Reader.Options(conf)
.searchArgument(sArg, new String[] {"f1"})
.allowSARGToFilter(true);
BatchFilter f = FilterFactory.createBatchFilter(opts,
schema,
false,
OrcFile.Version.CURRENT,
false,
"file://db/table2/file1",
conf);
assertFalse(f instanceof BatchFilterFactory.AndBatchFilterImpl,
"Filter should not be an AND Batch filter");
f.accept(fc.setBatch(batch));
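    // file://db/table2/file1 is outside the configured scope, so only the SArg on f1 applies.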
validateSelected(1, 3, 5);
}
@Test
public void testPluginSelectsAll() {
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"abcdef", "Abcdef", "aBcdef", null, "abcDef", "abcdEf"});
// Define the plugin filter
Configuration conf = new Configuration();
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(conf, true);
conf.set("my.filter.name", "my_str_i_eq");
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "abcdef");
conf.set("my.filter.scope", "file://db/table1/.*");
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 2L, 4L, 6L)
.build();
// Setup Options
Reader.Options opts = new Reader.Options(conf)
.searchArgument(sArg, new String[] {"f1"})
.allowSARGToFilter(true);
BatchFilter f = FilterFactory.createBatchFilter(opts,
schema,
false,
OrcFile.Version.CURRENT,
false,
"file://db/table1/file1",
conf);
assertTrue(f instanceof BatchFilterFactory.AndBatchFilterImpl,
"Filter should be an AND Batch filter");
f.accept(fc.setBatch(batch));
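    // The plugin matches every non-null f2, so the SArg selection (rows 1, 3, 5) drops only the null row 3.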
validateSelected(1, 5);
}
@Test
public void testPluginSameColumn() {
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"abcdef", "Abcdef", "aBcdef", null, "abcDef", "abcdEf"});
// Define the plugin filter
Configuration conf = new Configuration();
OrcConf.ALLOW_PLUGIN_FILTER.setBoolean(conf, true);
conf.set("my.filter.name", "my_str_i_eq");
conf.set("my.filter.col.name", "f2");
conf.set("my.filter.col.value", "abcdef");
conf.set("my.filter.scope", "file://db/table1/.*");
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f2", PredicateLeaf.Type.STRING, "Abcdef", "abcdEf")
.build();
// Setup Options
Reader.Options opts = new Reader.Options(conf)
.searchArgument(sArg, new String[] {"f2"})
.allowSARGToFilter(true);
BatchFilter f = FilterFactory.createBatchFilter(opts,
schema,
false,
OrcFile.Version.CURRENT,
false,
"file://db/table1/file1",
conf);
assertTrue(f instanceof BatchFilterFactory.AndBatchFilterImpl,
"Filter should be an AND Batch filter");
assertArrayEquals(new String[] {"f2"}, f.getColumnNames());
f.accept(fc.setBatch(batch));
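    // Both filters read only f2; rows 1 and 5 satisfy the IN predicate and the case-insensitive match.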
validateSelected(1, 5);
}
}
| 10,402 | 40.446215 | 84 | java |