repo (stringlengths 1-191, ⌀) | file (stringlengths 23-351) | code (stringlengths 0-5.32M) | file_length (int64 0-5.32M) | avg_line_length (float64 0-2.9k) | max_line_length (int64 0-288k) | extension_type (stringclasses 1 value)
---|---|---|---|---|---|---|
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/TestSelected.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestSelected {
private final Selected src = new Selected();
private final Selected tgt = new Selected();
@Test
public void testUnionBothEmpty() {
// Both are empty
src.sel = new int[10];
tgt.sel = new int[10];
tgt.unionDisjoint(src);
assertArrayEquals(new int[10], tgt.sel);
assertEquals(0, tgt.selSize);
}
@Test
public void testUnionTgtEmpty() {
// tgt has no selection
src.sel = new int[] {1, 3, 7, 0, 0};
src.selSize = 3;
tgt.sel = new int[5];
tgt.selSize = 0;
tgt.unionDisjoint(src);
assertEquals(src.selSize, tgt.selSize);
assertArrayEquals(src.sel, tgt.sel);
}
@Test
public void testUnionSrcEmpty() {
// src has no selection
src.sel = new int[5];
src.selSize = 0;
tgt.sel = new int[] {1, 3, 7, 0, 0};
tgt.selSize = 3;
tgt.unionDisjoint(src);
validate(tgt, 1, 3, 7);
}
@Test
public void testUnionCurrSmallerThanAdd() {
// current (tgt) values are smaller than the added (src) value
src.sel = new int[] {7, 0, 0, 0, 0};
src.selSize = 1;
tgt.sel = new int[] {1, 3, 0, 0, 0};
tgt.selSize = 2;
tgt.unionDisjoint(src);
validate(tgt, 1, 3, 7);
}
@Test
public void testUnionAddSmallerThanCurr() {
// added (src) values start below the current (tgt) value
src.sel = new int[] {1, 7, 0, 0, 0};
src.selSize = 2;
tgt.sel = new int[] {3, 0, 0, 0, 0};
tgt.selSize = 1;
tgt.unionDisjoint(src);
validate(tgt, 1, 3, 7);
}
@Test
public void testUnionNoChange() {
// src is empty, so tgt is unchanged
src.sel = new int[] {0, 0, 0, 0, 0};
src.selSize = 0;
tgt.sel = new int[] {1, 3, 7, 0, 0};
tgt.selSize = 3;
tgt.unionDisjoint(src);
validate(tgt, 1, 3, 7);
}
@Test
public void testUnionNewEnclosed() {
// new (src) values enclose the current (tgt) values
src.sel = new int[] {1, 7, 0, 0, 0};
src.selSize = 2;
tgt.sel = new int[] {3, 4, 0, 0, 0};
tgt.selSize = 2;
tgt.unionDisjoint(src);
validate(tgt, 1, 3, 4, 7);
}
@Test
public void testUnionPrevEnclosed() {
// current (tgt) values enclose the new (src) values
src.sel = new int[] {3, 4, 0, 0, 0};
src.selSize = 2;
tgt.sel = new int[] {1, 7, 0, 0, 0};
tgt.selSize = 2;
tgt.unionDisjoint(src);
validate(tgt, 1, 3, 4, 7);
}
@Test
public void testMinus() {
src.sel = new int[] {3, 4, 0, 0, 0};
src.selSize = 2;
tgt.sel = new int[] {1, 7, 0, 0, 0};
tgt.selSize = 2;
tgt.minus(src);
validate(tgt, 1, 7);
}
@Test
public void testMinusAllElements() {
src.sel = new int[] {1, 7, 0, 0, 0};
src.selSize = 2;
tgt.sel = new int[] {1, 7, 0, 0, 0};
tgt.selSize = 2;
tgt.minus(src);
assertEquals(0, tgt.selSize);
}
@Test
public void testMinusInterleavedElements() {
src.sel = new int[] {1, 5, 9, 0, 0};
src.selSize = 3;
tgt.sel = new int[] {1, 3, 5, 7, 9};
tgt.selSize = 5;
tgt.minus(src);
validate(tgt, 3, 7);
}
@Test
public void testMinusEmpty() {
src.sel = new int[] {1, 5, 9, 0, 0};
src.selSize = 0;
tgt.sel = new int[] {1, 3, 5, 7, 9};
tgt.selSize = 5;
tgt.minus(src);
validate(tgt, 1, 3, 5, 7, 9);
}
@Test
public void testMinusSrcLarger() {
src.sel = new int[] {10, 50, 90, 0, 0};
src.selSize = 3;
tgt.sel = new int[] {1, 3, 5, 7, 9};
tgt.selSize = 5;
tgt.minus(src);
validate(tgt, 1, 3, 5, 7, 9);
}
@Test
public void testMinusSrcSmaller() {
tgt.sel = new int[] {10, 50, 90, 0, 0};
tgt.selSize = 3;
src.sel = new int[] {1, 3, 5, 7, 9};
src.selSize = 5;
tgt.minus(src);
validate(tgt, 10, 50, 90);
}
private void validate(Selected tgt, int... expected) {
assertArrayEquals(expected, Arrays.copyOf(tgt.sel, tgt.selSize));
}
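  // Illustrative sketch, not part of the original suite: it relies only on the
  // behavior the assertions above already demonstrate, namely that
  // unionDisjoint merges the disjoint, sorted positions of src into tgt in
  // sorted order and that minus drops the matching positions of src from tgt.
  @Test
  public void testUnionThenMinus() {
    src.sel = new int[] {2, 6, 0, 0, 0};
    src.selSize = 2;
    tgt.sel = new int[] {1, 7, 0, 0, 0};
    tgt.selSize = 2;
    tgt.unionDisjoint(src);
    validate(tgt, 1, 2, 6, 7);
    tgt.minus(src);
    validate(tgt, 1, 7);
  }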
} | 4,734 | 24.874317 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/ATestLeafFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.impl.filter.ATestFilter;
import org.junit.jupiter.api.BeforeEach;
import java.sql.Timestamp;
import java.util.stream.IntStream;
import static java.nio.charset.StandardCharsets.UTF_8;
public class ATestLeafFilter extends ATestFilter {
static final int lowIdx = 2;
static final int highIdx = 4;
static final int size = 6;
@BeforeEach
public void setup() {
HiveDecimalWritable[] decValues = new HiveDecimalWritable[] {
new HiveDecimalWritable(Long.MIN_VALUE + "100.01"),
new HiveDecimalWritable(0),
new HiveDecimalWritable(Long.MAX_VALUE + "100.01"),
new HiveDecimalWritable(Long.MAX_VALUE + "101.00"),
new HiveDecimalWritable(Long.MAX_VALUE + "101.01"),
null};
Timestamp[] tsValues = new Timestamp[] {
createTimestamp(-100000, 55),
createTimestamp(0, 0),
createTimestamp(0, 1),
createTimestamp(0, 2),
createTimestamp(123456, 1),
null
};
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, null},
new String[] {"a", "b", "c", "d", "e", null},
decValues,
new Double[] {1.01, 2.0, 2.1, 3.55, 4.0, null},
tsValues);
}
private Timestamp createTimestamp(long time, int nano) {
Timestamp result = new Timestamp(0);
result.setTime(time);
result.setNanos(nano);
return result;
}
protected Object getPredicateValue(PredicateLeaf.Type type, int idx) {
switch (type) {
case LONG:
return ((LongColumnVector) batch.cols[0]).vector[idx];
case STRING:
BytesColumnVector bv = (BytesColumnVector) batch.cols[1];
return new String(bv.vector[idx], bv.start[idx], bv.length[idx], UTF_8);
case DECIMAL:
return ((DecimalColumnVector) batch.cols[2]).vector[idx];
case FLOAT:
return ((DoubleColumnVector) batch.cols[3]).vector[idx];
case TIMESTAMP:
TimestampColumnVector tv = (TimestampColumnVector) batch.cols[4];
Timestamp value = new Timestamp(0);
value.setTime(tv.time[idx]);
value.setNanos(tv.nanos[idx]);
return value;
default:
throw new IllegalArgumentException(String.format("Type: %s is unsupported", type));
}
}
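  // For reference (as used by the subclasses): the batch built in setup() maps
  // f1 -> LONG, f2 -> STRING, f3 -> DECIMAL, f4 -> FLOAT (double vector) and
  // f5 -> TIMESTAMP, with the last row (index 5) holding nulls in every column.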
protected void validateSelected(PredicateLeaf.Operator op, boolean not) {
// Except for IS_NULL, restrict the range to size - 1 because the last element is null
switch (op) {
case EQUALS:
validateSelected(IntStream.range(0, size - 1)
.filter(i -> not ^ (i == lowIdx))
.toArray());
break;
case LESS_THAN:
validateSelected(IntStream.range(0, size - 1)
.filter(i -> not ^ (i < lowIdx))
.toArray());
break;
case LESS_THAN_EQUALS:
validateSelected(IntStream.range(0, size - 1)
.filter(i -> not ^ (i <= lowIdx))
.toArray());
break;
case IN:
validateSelected(IntStream.range(0, size - 1)
.filter(i -> not ^ (i == lowIdx || i == highIdx))
.toArray());
break;
case BETWEEN:
validateSelected(IntStream.range(0, size - 1)
.filter(i -> not ^ (i >= lowIdx && i <= highIdx))
.toArray());
break;
case IS_NULL:
validateSelected(IntStream.range(0, size)
.filter(i -> not ^ (i == 5))
.toArray());
break;
default:
throw new IllegalArgumentException();
}
}
}
| 4,934 | 36.386364 | 91 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestDecimalFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.impl.filter.FilterUtils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
public class TestDecimalFilters extends ATestLeafFilter {
@Test
public void testEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f3", PredicateLeaf.Type.DECIMAL, getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx))
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, false);
}
@Test
public void testNotEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.equals("f3", PredicateLeaf.Type.DECIMAL, getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx))
.end()
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, true);
}
@Test
public void testLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThan("f3", PredicateLeaf.Type.DECIMAL, getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx))
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, false);
}
@Test
public void testNotLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("f3", PredicateLeaf.Type.DECIMAL, getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx))
.end()
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, true);
}
@Test
public void testLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThanEquals("f3", PredicateLeaf.Type.DECIMAL, getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx))
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, false);
}
@Test
public void testNotLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThanEquals("f3", PredicateLeaf.Type.DECIMAL, getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx))
.end()
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, true);
}
@Test
public void testBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.between("f3", PredicateLeaf.Type.DECIMAL,
getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx),
getPredicateValue(PredicateLeaf.Type.DECIMAL, highIdx))
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, false);
}
@Test
public void testNotBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.between("f3", PredicateLeaf.Type.DECIMAL,
getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx),
getPredicateValue(PredicateLeaf.Type.DECIMAL, highIdx))
.end()
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, true);
}
@Test
public void testIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f3", PredicateLeaf.Type.DECIMAL,
getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx),
getPredicateValue(PredicateLeaf.Type.DECIMAL, highIdx))
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, false);
}
@Test
public void testNotIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f3", PredicateLeaf.Type.DECIMAL,
getPredicateValue(PredicateLeaf.Type.DECIMAL, lowIdx),
getPredicateValue(PredicateLeaf.Type.DECIMAL, highIdx))
.end()
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, true);
}
@Test
public void testIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.isNull("f3", PredicateLeaf.Type.DECIMAL)
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, false);
}
@Test
public void testNotIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("f3", PredicateLeaf.Type.DECIMAL)
.end()
.build();
Assertions.assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, true);
}
}
| 6,405 | 34.005464 | 110 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestEquals.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.orc.impl.filter.ATestFilter;
import org.apache.orc.impl.filter.VectorFilter;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestEquals extends ATestFilter {
@Test
public void testFoundMatching() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
VectorFilter f = new LongFilters.LongEquals("f1", 3L, false);
assertFalse(fc.isSelectedInUse());
filter(f);
validateSelected(2);
}
@Test
public void testNothingFound() {
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, null},
new String[] {"a", "b", "c", "d", "e", "f"});
VectorFilter f = new LongFilters.LongEquals("f1", 8L, false);
assertFalse(fc.isSelectedInUse());
filter(f);
validateNoneSelected();
}
@Test
public void testRepeatingVector() {
setBatch(new Long[] {1L, null, null, null, null, null},
new String[] {"a", "b", "c", "d", "e", "f"});
fc.getCols()[0].isRepeating = true;
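    // With isRepeating set, only entry 0 of the vector is consulted, so every
    // row is treated as 1L and the whole batch is expected to match.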
VectorFilter f = new LongFilters.LongEquals("f1", 1L, false);
filter(f);
validateAllSelected(6);
}
@Test
public void testRepeatingNull() {
setBatch(new Long[] {null, null, null, null, null, null},
new String[] {"a", "b", "c", "d", "e", "f"});
fc.getCols()[0].isRepeating = true;
VectorFilter f = new LongFilters.LongEquals("f1", 1L, false);
filter(f);
validateNoneSelected();
}
} | 2,338 | 32.414286 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFilterContext;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.filter.BatchFilter;
import org.apache.orc.impl.filter.ATestFilter;
import org.apache.orc.impl.filter.AndFilter;
import org.apache.orc.impl.filter.FilterFactory;
import org.apache.orc.impl.filter.FilterUtils;
import org.apache.orc.impl.filter.OrFilter;
import org.apache.orc.impl.filter.VectorFilter;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.function.Consumer;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
public class TestFilters extends ATestFilter {
public static BatchFilter createBatchFilter(SearchArgument sArg,
TypeDescription readSchema) {
return createBatchFilter(sArg, readSchema, OrcFile.Version.UNSTABLE_PRE_2_0);
}
public static BatchFilter createBatchFilter(SearchArgument sArg,
TypeDescription readSchema,
OrcFile.Version version) {
return createBatchFilter(sArg, readSchema, version, false);
}
public static BatchFilter createBatchFilter(SearchArgument sArg,
TypeDescription readSchema,
OrcFile.Version version,
boolean normalize) {
Reader.Options options = new Reader.Options().allowSARGToFilter(true);
options.searchArgument(sArg, new String[0]);
return FilterFactory.createBatchFilter(options, readSchema, false,
version, normalize, null, null);
}
@Test
public void testAndOfOr() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.startOr()
.in("f1", PredicateLeaf.Type.LONG, 1L, 6L)
.in("f1", PredicateLeaf.Type.LONG, 3L, 4L)
.end()
.startOr()
.in("f1", PredicateLeaf.Type.LONG, 1L, 6L)
.in("f2", PredicateLeaf.Type.STRING, "c", "e")
.end()
.end()
.build();
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
BatchFilter filter = FilterUtils.createVectorFilter(sArg, schema);
filter.accept(fc);
assertArrayEquals(new String[] {"f1", "f2"}, filter.getColumnNames());
validateSelected(0, 2, 5);
}
@Test
public void testOrOfAnd() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.startAnd()
.in("f1", PredicateLeaf.Type.LONG, 1L, 6L)
.in("f2", PredicateLeaf.Type.STRING, "a", "c")
.end()
.startAnd()
.in("f1", PredicateLeaf.Type.LONG, 3L, 4L)
.in("f2", PredicateLeaf.Type.STRING, "c", "e")
.end()
.end()
.build();
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
FilterUtils.createVectorFilter(sArg, schema).accept(fc.setBatch(batch));
validateSelected(0, 2);
}
@Test
public void testOrOfAndNative() {
VectorFilter f = new OrFilter(
new VectorFilter[] {
new AndFilter(new VectorFilter[] {
new LongFilters.LongIn("f1",
Arrays.asList(1L, 6L), false),
new StringFilters.StringIn("f2",
Arrays.asList("a", "c"), false)
}),
new AndFilter(new VectorFilter[] {
new LongFilters.LongIn("f1",
Arrays.asList(3L, 4L), false),
new StringFilters.StringIn("f2",
Arrays.asList("c", "e"), false)
})
}
);
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
filter(f);
assertEquals(2, fc.getSelectedSize());
assertArrayEquals(new int[] {0, 2},
Arrays.copyOf(fc.getSelected(), fc.getSelectedSize()));
}
@Test
public void testAndNotNot() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.startNot()
.in("f1", PredicateLeaf.Type.LONG, 7L)
.end()
.startNot()
.isNull("f2", PredicateLeaf.Type.STRING)
.end()
.end()
.build();
setBatch(new Long[] {1L, 2L, 3L, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
Consumer<OrcFilterContext> filter = createBatchFilter(sArg, schema);
filter.accept(fc.setBatch(batch));
assertEquals(6, fc.getSelectedSize());
assertArrayEquals(new int[] {0, 1, 2, 3, 4, 5},
Arrays.copyOf(fc.getSelected(), fc.getSelectedSize()));
}
@Test
public void testUnSupportedSArg() {
SearchArgument sarg = SearchArgumentFactory.newBuilder()
.nullSafeEquals("f1", PredicateLeaf.Type.LONG, 0L)
.build();
assertNull(FilterUtils.createVectorFilter(sarg, schema));
}
@Test
public void testRepeatedProtected() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.in("f2", PredicateLeaf.Type.STRING, "a", "d")
.lessThan("f1", PredicateLeaf.Type.LONG, 6L)
.end()
.build();
setBatch(new Long[] {1L, 1L, 1L, 1L, 1L, 1L},
new String[] {"a", "b", "c", "d", "e", "f"});
batch.cols[0].isRepeating = true;
FilterUtils.createVectorFilter(sArg, schema).accept(fc.setBatch(batch));
validateAllSelected(6);
}
@Test
public void testNullProtected() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startOr()
.in("f2", PredicateLeaf.Type.STRING, "a", "d")
.lessThan("f1", PredicateLeaf.Type.LONG, 4L)
.end()
.build();
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
FilterUtils.createVectorFilter(sArg, schema).accept(fc.setBatch(batch));
validateSelected(0, 1, 3);
}
@Test
public void testUnsupportedNotLeaf() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.nullSafeEquals("f1", PredicateLeaf.Type.LONG, 2L)
.end()
.build();
assertNull(FilterUtils.createVectorFilter(sArg, schema));
}
@Test
public void testAndOrAnd() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startAnd()
.startOr()
.lessThan("f1", PredicateLeaf.Type.LONG, 3L)
.startAnd()
.equals("f2", PredicateLeaf.Type.STRING, "a")
.equals("f1", PredicateLeaf.Type.LONG, 5L)
.end()
.end()
.in("f2", PredicateLeaf.Type.STRING, "a", "c")
.end()
.build();
setBatch(new Long[] {1L, 2L, null, 4L, 5L, 6L},
new String[] {"a", "b", "c", "d", "e", "f"});
FilterUtils.createVectorFilter(sArg, schema).accept(fc.setBatch(batch));
validateSelected(0);
}
}
| 8,058 | 33.148305 | 81 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestFloatFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.impl.filter.FilterUtils;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestFloatFilters extends ATestLeafFilter {
@Test
public void testEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f4", PredicateLeaf.Type.FLOAT, getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, false);
}
@Test
public void testNotEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.equals("f4", PredicateLeaf.Type.FLOAT, getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, true);
}
@Test
public void testLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThan("f4", PredicateLeaf.Type.FLOAT, getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, false);
}
@Test
public void testNotLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("f4", PredicateLeaf.Type.FLOAT, getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, true);
}
@Test
public void testLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThanEquals("f4", PredicateLeaf.Type.FLOAT, getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, false);
}
@Test
public void testNotLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThanEquals("f4", PredicateLeaf.Type.FLOAT, getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, true);
}
@Test
public void testBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.between("f4", PredicateLeaf.Type.FLOAT,
getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx),
getPredicateValue(PredicateLeaf.Type.FLOAT, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, false);
}
@Test
public void testNotBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.between("f4", PredicateLeaf.Type.FLOAT,
getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx),
getPredicateValue(PredicateLeaf.Type.FLOAT, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, true);
}
@Test
public void testIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f4", PredicateLeaf.Type.FLOAT,
getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx),
getPredicateValue(PredicateLeaf.Type.FLOAT, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, false);
}
@Test
public void testNotIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f4", PredicateLeaf.Type.FLOAT,
getPredicateValue(PredicateLeaf.Type.FLOAT, lowIdx),
getPredicateValue(PredicateLeaf.Type.FLOAT, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, true);
}
@Test
public void testIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.isNull("f4", PredicateLeaf.Type.FLOAT)
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, false);
}
@Test
public void testNotIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("f4", PredicateLeaf.Type.FLOAT)
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, true);
}
}
| 6,239 | 32.913043 | 106 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestLongFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.impl.filter.FilterUtils;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestLongFilters extends ATestLeafFilter {
@Test
public void testEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f1", PredicateLeaf.Type.LONG, getPredicateValue(PredicateLeaf.Type.LONG, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, false);
}
@Test
public void testNotEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.equals("f1", PredicateLeaf.Type.LONG, getPredicateValue(PredicateLeaf.Type.LONG, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, true);
}
@Test
public void testLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThan("f1", PredicateLeaf.Type.LONG, getPredicateValue(PredicateLeaf.Type.LONG, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, false);
}
@Test
public void testNotLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("f1", PredicateLeaf.Type.LONG, getPredicateValue(PredicateLeaf.Type.LONG, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, true);
}
@Test
public void testLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThanEquals("f1", PredicateLeaf.Type.LONG, getPredicateValue(PredicateLeaf.Type.LONG, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, false);
}
@Test
public void testNotLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThanEquals("f1", PredicateLeaf.Type.LONG, getPredicateValue(PredicateLeaf.Type.LONG, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, true);
}
@Test
public void testBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.between("f1", PredicateLeaf.Type.LONG,
getPredicateValue(PredicateLeaf.Type.LONG, lowIdx),
getPredicateValue(PredicateLeaf.Type.LONG, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, false);
}
@Test
public void testNotBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.between("f1", PredicateLeaf.Type.LONG,
getPredicateValue(PredicateLeaf.Type.LONG, lowIdx),
getPredicateValue(PredicateLeaf.Type.LONG, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, true);
}
@Test
public void testIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG,
getPredicateValue(PredicateLeaf.Type.LONG, lowIdx),
getPredicateValue(PredicateLeaf.Type.LONG, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, false);
}
@Test
public void testNotIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f1", PredicateLeaf.Type.LONG,
getPredicateValue(PredicateLeaf.Type.LONG, lowIdx),
getPredicateValue(PredicateLeaf.Type.LONG, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, true);
}
@Test
public void testIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.isNull("f1", PredicateLeaf.Type.LONG)
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, false);
}
@Test
public void testNotIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("f1", PredicateLeaf.Type.LONG)
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, true);
}
}
| 6,212 | 32.766304 | 104 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestStringFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.impl.filter.FilterUtils;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestStringFilters extends ATestLeafFilter {
@Test
public void testEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f2", PredicateLeaf.Type.STRING, getPredicateValue(PredicateLeaf.Type.STRING, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, false);
}
@Test
public void testNotEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.equals("f2", PredicateLeaf.Type.STRING, getPredicateValue(PredicateLeaf.Type.STRING, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, true);
}
@Test
public void testLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThan("f2", PredicateLeaf.Type.STRING, getPredicateValue(PredicateLeaf.Type.STRING, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, false);
}
@Test
public void testNotLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("f2", PredicateLeaf.Type.STRING, getPredicateValue(PredicateLeaf.Type.STRING, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, true);
}
@Test
public void testLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThanEquals("f2", PredicateLeaf.Type.STRING, getPredicateValue(PredicateLeaf.Type.STRING, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, false);
}
@Test
public void testNotLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThanEquals("f2", PredicateLeaf.Type.STRING, getPredicateValue(PredicateLeaf.Type.STRING, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, true);
}
@Test
public void testBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.between("f2", PredicateLeaf.Type.STRING,
getPredicateValue(PredicateLeaf.Type.STRING, lowIdx),
getPredicateValue(PredicateLeaf.Type.STRING, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, false);
}
@Test
public void testNotBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.between("f2", PredicateLeaf.Type.STRING,
getPredicateValue(PredicateLeaf.Type.STRING, lowIdx),
getPredicateValue(PredicateLeaf.Type.STRING, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, true);
}
@Test
public void testIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f2", PredicateLeaf.Type.STRING,
getPredicateValue(PredicateLeaf.Type.STRING, lowIdx),
getPredicateValue(PredicateLeaf.Type.STRING, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, false);
}
@Test
public void testNotIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f2", PredicateLeaf.Type.STRING,
getPredicateValue(PredicateLeaf.Type.STRING, lowIdx),
getPredicateValue(PredicateLeaf.Type.STRING, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, true);
}
@Test
public void testIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.isNull("f2", PredicateLeaf.Type.STRING)
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, false);
}
@Test
public void testNotIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("f2", PredicateLeaf.Type.STRING)
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, true);
}
}
| 6,266 | 33.059783 | 108 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/filter/leaf/TestTimestampFilters.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.filter.leaf;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.orc.impl.filter.FilterUtils;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertFalse;
public class TestTimestampFilters extends ATestLeafFilter {
@Test
public void testEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.equals("f5", PredicateLeaf.Type.TIMESTAMP, getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, false);
}
@Test
public void testNotEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.equals("f5", PredicateLeaf.Type.TIMESTAMP, getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.EQUALS, true);
}
@Test
public void testLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThan("f5", PredicateLeaf.Type.TIMESTAMP, getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, false);
}
@Test
public void testNotLessThan() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThan("f5", PredicateLeaf.Type.TIMESTAMP, getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN, true);
}
@Test
public void testLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.lessThanEquals("f5", PredicateLeaf.Type.TIMESTAMP, getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, false);
}
@Test
public void testNotLessThanEquals() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.lessThanEquals("f5", PredicateLeaf.Type.TIMESTAMP, getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.LESS_THAN_EQUALS, true);
}
@Test
public void testBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.between("f5", PredicateLeaf.Type.TIMESTAMP,
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx),
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, false);
}
@Test
public void testNotBetween() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.between("f5", PredicateLeaf.Type.TIMESTAMP,
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx),
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.BETWEEN, true);
}
@Test
public void testIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.in("f5", PredicateLeaf.Type.TIMESTAMP,
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx),
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, highIdx))
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, false);
}
@Test
public void testNotIn() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.in("f5", PredicateLeaf.Type.TIMESTAMP,
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, lowIdx),
getPredicateValue(PredicateLeaf.Type.TIMESTAMP, highIdx))
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IN, true);
}
@Test
public void testIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.isNull("f5", PredicateLeaf.Type.TIMESTAMP)
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, false);
}
@Test
public void testNotIsNull() {
SearchArgument sArg = SearchArgumentFactory.newBuilder()
.startNot()
.isNull("f5", PredicateLeaf.Type.TIMESTAMP)
.end()
.build();
assertFalse(fc.isSelectedInUse());
FilterUtils.createVectorFilter(sArg, schema).accept(fc);
validateSelected(PredicateLeaf.Operator.IS_NULL, true);
}
}
| 6,347 | 33.5 | 114 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/mask/TestDataMask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.orc.DataMask;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestDataMask {
@Test
public void testNullFactory() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:int>");
// take the first column's type
DataMask mask = DataMask.Factory.build(DataMask.Standard.NULLIFY.getDescription(),
        schema.findSubtype(1), (type) -> null);
assertEquals(NullifyMask.class.toString(), mask.getClass().toString());
LongColumnVector cv = (LongColumnVector) schema.createRowBatch().cols[0];
LongColumnVector masked = (LongColumnVector) schema.createRowBatch().cols[0];
cv.vector[0] = 10;
cv.vector[1] = 20;
mask.maskData(cv, masked, 0, 2);
assertTrue(masked.isRepeating);
assertFalse(masked.noNulls);
assertTrue(masked.isNull[0]);
}
@Test
public void testRedactFactory() throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<s:struct<x:int,y:string>>");
DataMask mask = DataMask.Factory.build(DataMask.Standard.REDACT.getDescription(),
schema.findSubtype(1), (type) -> null);
assertEquals(StructIdentity.class.toString(), mask.getClass().toString());
StructColumnVector cv = (StructColumnVector)schema.createRowBatch().cols[0];
StructColumnVector masked = (StructColumnVector)schema.createRowBatch().cols[0];
LongColumnVector x = (LongColumnVector) cv.fields[0];
BytesColumnVector y = (BytesColumnVector) cv.fields[1];
x.vector[0] = 123;
y.setVal(0, "Owen".getBytes(StandardCharsets.UTF_8));
x.vector[1] = 456789;
y.setVal(1, "ORC".getBytes(StandardCharsets.UTF_8));
mask.maskData(cv, masked, 0, 2);
x = (LongColumnVector) masked.fields[0];
y = (BytesColumnVector) masked.fields[1];
assertEquals(999, x.vector[0]);
assertEquals(999999, x.vector[1]);
assertEquals("Xxxx", y.toString(0));
assertEquals("XXX", y.toString(1));
}
@Test
public void testIdentityRedact() throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<s:struct<a:decimal(18,6),b:double," +
"c:array<int>,d:map<timestamp,date>,e:uniontype<int,binary>,f:string>>");
DataMask nullify =
DataMask.Factory.build(DataMask.Standard.NULLIFY.getDescription(),
schema.findSubtype(1), (type) -> null);
// create a redact mask that passes everything through
DataMask identity =
DataMask.Factory.build(DataMask.Standard.REDACT
.getDescription("__________", "_ _ _ _ _ _"),
schema.findSubtype(1), (type) -> null);
// allow easier access to fields
StructColumnVector cv = (StructColumnVector)schema.createRowBatch().cols[0];
StructColumnVector masked = (StructColumnVector)schema.createRowBatch().cols[0];
DecimalColumnVector a = (DecimalColumnVector) cv.fields[0];
DoubleColumnVector b = (DoubleColumnVector) cv.fields[1];
ListColumnVector c = (ListColumnVector) cv.fields[2];
LongColumnVector ce = (LongColumnVector) c.child;
MapColumnVector d = (MapColumnVector) cv.fields[3];
TimestampColumnVector dk = (TimestampColumnVector) d.keys;
LongColumnVector dv = (LongColumnVector) d.values;
UnionColumnVector e = (UnionColumnVector) cv.fields[4];
LongColumnVector e1 = (LongColumnVector) e.fields[0];
BytesColumnVector e2 = (BytesColumnVector) e.fields[1];
BytesColumnVector f = (BytesColumnVector) cv.fields[5];
// set up the input data
for(int i=0; i < 3; ++i) {
a.set(i, new HiveDecimalWritable((i + 1) + "." + (i + 1)));
b.vector[i] = 1.25 * (i + 1);
// layout c normally
c.offsets[i] = i == 0 ? 0 : c.offsets[i-1] + c.lengths[i-1];
c.lengths[i] = 2 * i;
// layout d backward
d.offsets[i] = 2 * (2 - i);
d.lengths[i] = 2;
e.tags[i] = i % 2;
e1.vector[i] = i * 10;
f.setVal(i, Integer.toHexString(0x123 * i).getBytes(StandardCharsets.UTF_8));
}
e2.setVal(1, "Foobar".getBytes(StandardCharsets.UTF_8));
for(int i=0; i < 6; ++i) {
ce.vector[i] = i;
dk.time[i] = 1111 * i;
dk.nanos[i] = 0;
dv.vector[i] = i * 11;
}
// send it through the nullify mask
nullify.maskData(cv, masked, 0, 3);
assertFalse(masked.noNulls);
assertTrue(masked.isRepeating);
assertTrue(masked.isNull[0]);
// send it through our identity mask
identity.maskData(cv, masked, 0 , 3);
assertTrue(masked.noNulls);
assertFalse(masked.isRepeating);
// point accessors to masked values
a = (DecimalColumnVector) masked.fields[0];
b = (DoubleColumnVector) masked.fields[1];
c = (ListColumnVector) masked.fields[2];
ce = (LongColumnVector) c.child;
d = (MapColumnVector) masked.fields[3];
dk = (TimestampColumnVector) d.keys;
dv = (LongColumnVector) d.values;
e = (UnionColumnVector) masked.fields[4];
e1 = (LongColumnVector) e.fields[0];
e2 = (BytesColumnVector) e.fields[1];
f = (BytesColumnVector) masked.fields[5];
// check the outputs
for(int i=0; i < 3; ++i) {
String msg = "iter " + i;
assertEquals((i + 1) + "." + (i + 1), a.vector[i].toString(), msg);
assertEquals(1.25 * (i + 1), b.vector[i], 0.0001, msg);
assertEquals(i == 0 ? 0 : c.offsets[i-1] + c.lengths[i-1], c.offsets[i], msg);
assertEquals(2 * i, c.lengths[i], msg);
assertEquals(i == 0 ? 4 : d.offsets[i-1] - d.lengths[i], d.offsets[i], msg);
assertEquals(2, d.lengths[i], msg);
assertEquals(i % 2, e.tags[i], msg);
assertEquals(Integer.toHexString(0x123 * i), f.toString(i), msg);
}
// check the subvalues for the list and map
for(int i=0; i < 6; ++i) {
String msg = "iter " + i;
assertEquals(i, ce.vector[i], msg);
assertEquals(i * 1111, dk.time[i], msg);
assertEquals(i * 11, dv.vector[i], msg);
}
assertEquals(0, e1.vector[0]);
assertEquals(20, e1.vector[2]);
// the redact mask always replaces binary with null
assertFalse(e2.noNulls);
assertTrue(e2.isRepeating);
assertTrue(e2.isNull[0]);
}
}
| 7,869 | 41.311828 | 86 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/mask/TestRedactMask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.sql.Timestamp;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestRedactMask {
@Test
public void testSimpleReplaceLongDigits() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory("Xx7");
assertEquals(7, mask.maskLong(0));
assertEquals(7, mask.maskLong(9));
assertEquals(-7, mask.maskLong(-9));
assertEquals(-7, mask.maskLong(-1));
assertEquals(77, mask.maskLong(10));
assertEquals(-77, mask.maskLong(-10));
assertEquals(7_777_777_777_777_777_777L,
mask.maskLong(Long.MAX_VALUE));
assertEquals(-7_777_777_777_777_777_777L,
mask.maskLong(Long.MIN_VALUE + 1));
assertEquals(-7_777_777_777_777_777_777L,
mask.maskLong(Long.MIN_VALUE));
}
@Test
public void testPow10ReplaceLongDigits() throws Exception {
for(int digit=0; digit < 10; ++digit) {
RedactMaskFactory mask = new RedactMaskFactory("Xx" + digit);
long expected = digit;
long input = 1;
for(int i=0; i < 19; ++i) {
// 9_999_999_999_999_999_999 is bigger than 2**63, so it overflows.
// The routine uses one less digit for that case.
if (i == 18 && digit == 9) {
expected = 999_999_999_999_999_999L;
}
assertEquals(expected, mask.maskLong(input),
"digit " + digit + " value " + input);
assertEquals(expected, mask.maskLong(5 * input),
"digit " + digit + " value " + (5 * input));
assertEquals(expected, mask.maskLong(9 * input),
"digit " + digit + " value " + (9 * input));
expected = expected * 10 + digit;
input *= 10;
}
}
}
@Test
public void testSimpleReplaceDoubleDigits() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory("Xx7");
assertEquals(7.77777, mask.maskDouble(0.0), 0.000001);
assertEquals(7.77777, mask.maskDouble(9.9), 0.000001);
assertEquals(-7.77777, mask.maskDouble(-9.9), 0.000001);
assertEquals(-7.77777, mask.maskDouble(-1.0), 0.000001);
assertEquals(77.7777, mask.maskDouble(10.0), 0.000001);
assertEquals(-77.7777, mask.maskDouble(-10.0), 0.000001);
assertEquals(7_777_770_000_000_000_000.0,
mask.maskDouble(Long.MAX_VALUE), 0.000001);
assertEquals(-7_777_770_000_000_000_000.0,
mask.maskDouble(Long.MIN_VALUE), 0.000001);
assertEquals(7.77777e-308,
mask.maskDouble(Double.MIN_NORMAL), 1e-310);
assertEquals(7.77777e307,
mask.maskDouble(Double.MAX_VALUE), 1e299);
// change to mask of 1
mask = new RedactMaskFactory("Xx1");
assertEquals(-1.11111e-308,
mask.maskDouble(-Double.MIN_NORMAL), 1e-310);
// change to mask of 9
mask = new RedactMaskFactory();
assertEquals(-9.99999e307,
mask.maskDouble(-Double.MAX_VALUE), 1e299);
}
@Test
public void testSimpleMaskTimestamp() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory();
Timestamp ts = Timestamp.valueOf("2011-10-02 18:48:05.123456");
assertEquals("2011-01-01 00:00:00.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
ts = Timestamp.valueOf("2012-02-28 01:23:45");
assertEquals("2012-01-01 00:00:00.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
ts = Timestamp.valueOf("2017-05-18 01:23:45");
assertEquals("2017-01-01 00:00:00.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
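    // The second argument is a "year month day hour minute second" replacement
    // pattern; "_" keeps the original field value.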
mask = new RedactMaskFactory("", "2000 _ _ 15 0 _");
assertEquals("2000-05-18 15:00:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "2000 _ _ 15 0 _");
assertEquals("2000-05-18 15:00:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "2007 _ _ _ _ _");
assertEquals("2007-05-18 01:23:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "_ 7 _ _ _ _");
assertEquals("2017-07-18 01:23:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "_ _ 7 _ _ _");
assertEquals("2017-05-07 01:23:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "_ _ _ 7 _ _");
assertEquals("2017-05-18 07:23:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "_ _ _ _ 7 _");
assertEquals("2017-05-18 01:07:45.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
mask = new RedactMaskFactory("", "_ _ _ _ _ 7");
assertEquals("2017-05-18 01:23:07.0",
new Timestamp(mask.maskTime(ts.getTime())).toString());
}
@Test
public void testSimpleMaskDate() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory();
DateWritable date = new DateWritable(Date.valueOf("1965-03-12"));
assertEquals("1965-01-01",
new DateWritable(mask.maskDate(date.getDays())).toString());
mask = new RedactMaskFactory("", "2000 _ _");
assertEquals("2000-03-12",
new DateWritable(mask.maskDate(date.getDays())).toString());
mask = new RedactMaskFactory(new String[]{"", "_ 7 _"});
assertEquals("1965-07-12",
new DateWritable(mask.maskDate(date.getDays())).toString());
mask = new RedactMaskFactory("", "_ _ 7");
assertEquals("1965-03-07",
new DateWritable(mask.maskDate(date.getDays())).toString());
date = new DateWritable(Date.valueOf("2017-09-20"));
assertEquals("2017-09-07",
new DateWritable(mask.maskDate(date.getDays())).toString());
}
@Test
public void testSimpleMaskDecimal() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory("Xx7");
assertEquals(new HiveDecimalWritable("777.777"),
mask.maskDecimal(new HiveDecimalWritable("123.456")));
// test removal of leading and trailing zeros.
assertEquals(new HiveDecimalWritable("777777777777777777.7777"),
mask.maskDecimal(new HiveDecimalWritable("0123456789123456789.01230")));
}
@Test
public void testReplacements() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory("1234567890");
assertEquals("1".codePointAt(0), mask.getReplacement("X".codePointAt(0)));
assertEquals("2".codePointAt(0), mask.getReplacement("x".codePointAt(0)));
assertEquals("3".codePointAt(0), mask.getReplacement("0".codePointAt(0)));
assertEquals("4".codePointAt(0), mask.getReplacement("$".codePointAt(0)));
assertEquals("5".codePointAt(0), mask.getReplacement(".".codePointAt(0)));
assertEquals("6".codePointAt(0), mask.getReplacement(" ".codePointAt(0)));
assertEquals("7".codePointAt(0), mask.getReplacement("ה".codePointAt(0)));
assertEquals("8".codePointAt(0), mask.getReplacement("ी".codePointAt(0)));
assertEquals("9".codePointAt(0), mask.getReplacement("ↂ".codePointAt(0)));
assertEquals("0".codePointAt(0), mask.getReplacement("\u06DD".codePointAt(0)));
mask = new RedactMaskFactory();
assertEquals("_".codePointAt(0), mask.getReplacement(" ".codePointAt(0)));
}
@Test
public void testStringMasking() throws Exception {
RedactMaskFactory mask = new RedactMaskFactory();
BytesColumnVector source = new BytesColumnVector();
BytesColumnVector target = new BytesColumnVector();
target.reset();
byte[] input = "Mary had 1 little lamb!!".getBytes(StandardCharsets.UTF_8);
source.setRef(0, input, 0, input.length);
    // Set a 4-byte Chinese character (U+2070E), which is in the "letter other" category
input = "\uD841\uDF0E".getBytes(StandardCharsets.UTF_8);
source.setRef(1, input, 0, input.length);
for(int r=0; r < 2; ++r) {
mask.maskString(source, r, target);
}
assertEquals("Xxxx xxx 9 xxxxxx xxxx..", new String(target.vector[0],
target.start[0], target.length[0], StandardCharsets.UTF_8));
assertEquals("ª", new String(target.vector[1],
target.start[1], target.length[1], StandardCharsets.UTF_8));
}
@Test
public void testStringMaskBufferOverflow() throws Exception {
// set upper and lower letters to replace with 4 byte replacements
// (U+267CC and U+28CCA)
RedactMaskFactory mask = new RedactMaskFactory("\uD859\uDFCC\uD863\uDCCA");
BytesColumnVector source = new BytesColumnVector();
BytesColumnVector target = new BytesColumnVector();
target.reset();
// Set the input to 1024 copies of the input string.
// input is 14 bytes * 1024 = 14336 bytes
// output is (4 * 12 + 1 * 2) * 1024 = 51200 bytes
byte[] input = "text overflow."
.getBytes(StandardCharsets.UTF_8);
for(int r=0; r < 1024; ++r) {
source.setRef(r, input, 0, input.length);
}
for(int r=0; r < 1024; ++r) {
mask.maskString(source, r, target);
}
// should have doubled twice to 64k
assertEquals(64*1024, target.getValPreallocatedBytes().length);
// Make sure all of the translations are correct
String expected ="\uD863\uDCCA\uD863\uDCCA\uD863\uDCCA\uD863\uDCCA" +
" \uD863\uDCCA\uD863\uDCCA\uD863\uDCCA\uD863\uDCCA" +
"\uD863\uDCCA\uD863\uDCCA\uD863\uDCCA\uD863\uDCCA.";
for(int r=0; r < 1024; ++r) {
assertEquals(expected,
new String(target.vector[r], target.start[r], target.length[r],
StandardCharsets.UTF_8), "r = " + r);
}
// Make sure that the target keeps the larger output buffer.
target.reset();
assertEquals(64*1024, target.getValPreallocatedBytes().length);
}
}
| 10,650 | 41.947581 | 83 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/mask/TestSHA256Mask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.apache.orc.impl.mask;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestSHA256Mask {
final byte[] inputLong = (
"Lorem ipsum dolor sit amet, consectetur adipiscing "
+ "elit. Curabitur quis vehicula ligula. In hac habitasse platea dictumst."
+ " Curabitur mollis finibus erat fringilla vestibulum. In eu leo eget"
+ " massa luctus convallis nec vitae ligula. Donec vitae diam convallis,"
+ " efficitur orci in, imperdiet turpis. In quis semper ex. Duis faucibus "
+ "tellus vitae molestie convallis. Fusce fermentum vestibulum lacus "
+ "vel malesuada. Pellentesque viverra odio a justo aliquet tempus.")
.getBytes(StandardCharsets.UTF_8);
final byte[] input32 = "Every flight begins with a fall."
.getBytes(StandardCharsets.UTF_8);
final byte[] inputShort = "\uD841\uDF0E".getBytes(StandardCharsets.UTF_8);
final MessageDigest md;
  final byte[] expectedHash32;
  final byte[] expectedHashShort;
  final byte[] expectedHashLong;
  final byte[] expectedHash32_hex;
  final byte[] expectedHashShort_hex;
  final byte[] expectedHashLong_hex;
public TestSHA256Mask() {
super();
try {
md = MessageDigest.getInstance("SHA-256");
expectedHash32 = md.digest(input32);
expectedHashShort = md.digest(inputShort);
expectedHashLong = md.digest(inputLong);
expectedHash32_hex = SHA256MaskFactory.printHexBinary(expectedHash32).getBytes(StandardCharsets.UTF_8);
expectedHashShort_hex = SHA256MaskFactory.printHexBinary(expectedHashShort).getBytes(StandardCharsets.UTF_8);
expectedHashLong_hex = SHA256MaskFactory.printHexBinary(expectedHashLong).getBytes(StandardCharsets.UTF_8);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
/**
   * Test to make sure that the output is always 64 bytes (the length of the
   * hex-encoded SHA-256 hash). <br>
   * This is because the string type does not bound the value length.
*
* @throws Exception
*/
@Test
public void testStringSHA256Masking() throws Exception {
final SHA256MaskFactory sha256Mask = new SHA256MaskFactory();
final BytesColumnVector source = new BytesColumnVector();
final BytesColumnVector target = new BytesColumnVector();
target.reset();
source.setRef(0, input32, 0, input32.length);
source.setRef(1, inputShort, 0, inputShort.length);
for (int r = 0; r < 2; ++r) {
sha256Mask.maskString(source, r, target, TypeDescription.createString());
}
    /* Make sure the mask length is 64, the length of the hex-encoded SHA-256 hash */
assertEquals(64, target.length[0]);
assertEquals(64, target.length[1]);
/* gather the results into an array to compare */
    byte[] resultInput32 = new byte[target.length[0]];
    System.arraycopy(target.vector[0], target.start[0], resultInput32, 0, target.length[0]);
    byte[] resultInputShort = new byte[target.length[1]];
    System.arraycopy(target.vector[1], target.start[1], resultInputShort, 0, target.length[1]);
/* prepare the expected byte[] to compare */
final byte[] expected1 = new byte[target.length[0]];
System.arraycopy(expectedHash32_hex, 0, expected1, 0, target.length[0]);
final byte[] expected2 = new byte[target.length[1]];
System.arraycopy(expectedHashShort_hex, 0, expected2, 0, target.length[1]);
    /* Test the actual output for the unbounded string type */
    assertArrayEquals(expected1, resultInput32);
    assertArrayEquals(expected2, resultInputShort);
}
/**
   * Test to make sure that the output length equals the char type's maximum
   * length, which is set to the input length here. <br>
   * If that length is shorter than the hash (64 hex characters), the hash is
   * truncated; if it is longer, the hash is padded with blank space. <br>
*
* @throws Exception
*/
@Test
public void testChar256Masking() throws Exception {
final SHA256MaskFactory sha256Mask = new SHA256MaskFactory();
final BytesColumnVector source = new BytesColumnVector();
final BytesColumnVector target = new BytesColumnVector();
target.reset();
int[] length = new int[3];
length[0] = input32.length;
source.setRef(0, input32, 0, input32.length);
length[1] = inputShort.length;
source.setRef(1, inputShort, 0, inputShort.length);
length[2] = inputLong.length;
source.setRef(2, inputLong, 0, inputLong.length);
for (int r = 0; r < 3; ++r) {
sha256Mask.maskString(source, r, target,
TypeDescription.createChar().withMaxLength(length[r]));
}
    /* Make sure each masked value has the same length as its input */
assertEquals(length[0], target.length[0]);
assertEquals(length[1], target.length[1]);
assertEquals(length[2], target.length[2]);
}
@Test
public void testVarChar256Masking() throws Exception {
final SHA256MaskFactory sha256Mask = new SHA256MaskFactory();
final BytesColumnVector source = new BytesColumnVector();
final BytesColumnVector target = new BytesColumnVector();
target.reset();
source.setRef(0, input32, 0, input32.length);
source.setRef(1, inputShort, 0, inputShort.length);
source.setRef(2, inputLong, 0, inputLong.length);
for (int r = 0; r < 3; ++r) {
sha256Mask.maskString(source, r, target,
TypeDescription.createVarchar().withMaxLength(32));
}
/* gather the results into an array to compare */
    byte[] resultInput32 = new byte[target.length[0]];
    System.arraycopy(target.vector[0], target.start[0], resultInput32, 0, target.length[0]);
    byte[] resultInputShort = new byte[target.length[1]];
    System.arraycopy(target.vector[1], target.start[1], resultInputShort, 0, target.length[1]);
    byte[] resultInputLong = new byte[target.length[2]];
    System.arraycopy(target.vector[2], target.start[2], resultInputLong, 0, target.length[2]);
/* prepare the expected byte[] to compare */
final byte[] expected1 = new byte[target.length[0]];
System.arraycopy(expectedHash32_hex, 0, expected1, 0, target.length[0]);
final byte[] expected2 = new byte[target.length[1]];
System.arraycopy(expectedHashShort_hex, 0, expected2, 0, target.length[1]);
final byte[] expected3 = new byte[target.length[2]];
System.arraycopy(expectedHashLong_hex, 0, expected3, 0, target.length[2]);
    // The hex hash is 64 characters, longer than the max length of 32, so the
    // output is truncated to 32
assertEquals(32, target.length[0]);
assertEquals(32, target.length[1]);
assertEquals(32, target.length[2]);
/* Test the actual output. NOTE: our varchar has max length 32 */
    assertArrayEquals(expected1, resultInput32);
    assertArrayEquals(expected2, resultInputShort);
    assertArrayEquals(expected3, resultInputLong);
for (int r = 0; r < 3; ++r) {
sha256Mask.maskString(source, r, target,
TypeDescription.createVarchar().withMaxLength(100));
}
/* gather the results into an array to compare */
    resultInput32 = new byte[target.length[0]];
    System.arraycopy(target.vector[0], target.start[0], resultInput32, 0, target.length[0]);
    resultInputShort = new byte[target.length[1]];
    System.arraycopy(target.vector[1], target.start[1], resultInputShort, 0, target.length[1]);
    resultInputLong = new byte[target.length[2]];
    System.arraycopy(target.vector[2], target.start[2], resultInputLong, 0, target.length[2]);
    /* The hex hash is 64 characters, less than the max length of 100, so the output will always be 64 */
assertEquals(64, target.length[0]);
assertEquals(64, target.length[1]);
assertEquals(64, target.length[2]);
/* Test the actual output */
    assertArrayEquals(expectedHash32_hex, resultInput32);
    assertArrayEquals(expectedHashShort_hex, resultInputShort);
    assertArrayEquals(expectedHashLong_hex, resultInputLong);
}
@Test
public void testBinary() {
final SHA256MaskFactory sha256Mask = new SHA256MaskFactory();
final BytesColumnVector source = new BytesColumnVector();
final BytesColumnVector target = new BytesColumnVector();
target.reset();
source.setRef(0, input32, 0, input32.length);
source.setRef(1, inputShort, 0, inputShort.length);
source.setRef(2, inputLong, 0, inputLong.length);
for (int r = 0; r < 3; ++r) {
sha256Mask.maskBinary(source, r, target);
}
    final byte[] resultInput32 = ByteBuffer.wrap(target.vector[0], target.start[0], target.length[0]).array();
    final byte[] resultInputShort = ByteBuffer.wrap(target.vector[1], target.start[1], target.length[1]).array();
    final byte[] resultInputLong = ByteBuffer.wrap(target.vector[2], target.start[2], target.length[2]).array();
    /* The raw SHA-256 hash is 32 bytes, so the masked binary output is always 32 bytes */
assertEquals(32, target.length[0]);
assertEquals(32, target.length[1]);
assertEquals(32, target.length[2]);
    assertArrayEquals(expectedHash32, resultInput32);
    assertArrayEquals(expectedHashShort, resultInputShort);
    assertArrayEquals(expectedHashLong, resultInputLong);
}
}
| 10,247 | 38.114504 | 115 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/mask/TestUnmaskRange.java | package org.apache.orc.impl.mask;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Test Unmask option
*/
public class TestUnmaskRange {
public TestUnmaskRange() {
super();
}
/* Test for Long */
@Test
public void testSimpleLongRangeMask() {
RedactMaskFactory mask = new RedactMaskFactory("9", "", "0:2");
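    // The unmask range "0:2" keeps character positions 0 through 2 of the value
    // (the sign occupies a position for negative numbers); the remaining digits
    // become the mask digit 9.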
long result = mask.maskLong(123456);
assertEquals(123_999, result);
assertEquals(-129_999, mask.maskLong(-123456));
// negative index
mask = new RedactMaskFactory("9", "", "-3:-1");
result = mask.maskLong(123456);
assertEquals(999_456, result);
assertEquals(-999_456, mask.maskLong(-123456));
// out of range mask, return the original mask
mask = new RedactMaskFactory("9", "", "7:10");
result = mask.maskLong(123456);
assertEquals(999999, result);
// if the masked value overflows, we get the overflow answer
result = mask.maskLong(1_234_567_890_123_456_789L);
assertEquals(999_999_999_999_999_999L, result);
}
@Test
public void testDefaultRangeMask() {
RedactMaskFactory mask = new RedactMaskFactory("9", "", "");
long result = mask.maskLong(123456);
assertEquals(999999, result);
mask = new RedactMaskFactory("9");
result = mask.maskLong(123456);
assertEquals(999999, result);
}
@Test
public void testCCRangeMask() {
long cc = 4716885592186382L;
long maskedCC = 4716_77777777_6382L;
// Range unmask for first 4 and last 4 of credit card number
final RedactMaskFactory mask = new RedactMaskFactory("Xx7", "", "0:3,-4:-1");
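    // In the range list, negative indices count from the end of the value, so
    // -4:-1 addresses the last four digit positions.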
long result = mask.maskLong(cc);
assertEquals(String.valueOf(cc).length(), String.valueOf(result).length());
    assertEquals(maskedCC, result);
}
/* Tests for Double */
@Test
public void testSimpleDoubleRangeMask() {
RedactMaskFactory mask = new RedactMaskFactory("Xx7", "", "0:2");
assertEquals(1237.77, mask.maskDouble(1234.99), 0.000001);
assertEquals(12377.7, mask.maskDouble(12345.9), 0.000001);
mask = new RedactMaskFactory("Xx7", "", "-3:-1");
assertEquals(7774.9, mask.maskDouble(1234.9), 0.000001);
}
/* test for String */
@Test
public void testStringRangeMask() {
BytesColumnVector source = new BytesColumnVector();
BytesColumnVector target = new BytesColumnVector();
target.reset();
byte[] input = "Mary had 1 little lamb!!".getBytes(StandardCharsets.UTF_8);
source.setRef(0, input, 0, input.length);
    // Set a 4-byte Chinese character (U+2070E), which is in the "letter other" category
input = "\uD841\uDF0E".getBytes(StandardCharsets.UTF_8);
source.setRef(1, input, 0, input.length);
RedactMaskFactory mask = new RedactMaskFactory("", "", "0:3, -5:-1");
for(int r=0; r < 2; ++r) {
mask.maskString(source, r, target);
}
assertEquals("Mary xxx 9 xxxxxx xamb!!", new String(target.vector[0],
target.start[0], target.length[0], StandardCharsets.UTF_8));
assertEquals("\uD841\uDF0E", new String(target.vector[1],
target.start[1], target.length[1], StandardCharsets.UTF_8));
// test defaults, no-unmask range
mask = new RedactMaskFactory();
for(int r=0; r < 2; ++r) {
mask.maskString(source, r, target);
}
assertEquals("Xxxx xxx 9 xxxxxx xxxx..", new String(target.vector[0],
target.start[0], target.length[0], StandardCharsets.UTF_8));
assertEquals("ª", new String(target.vector[1],
target.start[1], target.length[1], StandardCharsets.UTF_8));
// test out of range string mask
mask = new RedactMaskFactory("", "", "-1:-5");
for(int r=0; r < 2; ++r) {
mask.maskString(source, r, target);
}
assertEquals("Xxxx xxx 9 xxxxxx xxxx..", new String(target.vector[0],
target.start[0], target.length[0], StandardCharsets.UTF_8));
assertEquals("ª", new String(target.vector[1],
target.start[1], target.length[1], StandardCharsets.UTF_8));
}
/* test for Decimal */
@Test
public void testDecimalRangeMask() {
RedactMaskFactory mask = new RedactMaskFactory("Xx7", "", "0:3");
assertEquals(new HiveDecimalWritable("123477.777"),
mask.maskDecimal(new HiveDecimalWritable("123456.789")));
// try with a reverse index
mask = new RedactMaskFactory("Xx7", "", "-3:-1, 0:3");
assertEquals(new HiveDecimalWritable("123477777.777654"),
mask.maskDecimal(new HiveDecimalWritable("123456789.987654")));
}
}
| 5,460 | 32.503067 | 81 | java |
null | orc-main/java/core/src/test/org/apache/orc/impl/reader/TestReaderEncryptionVariant.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl.reader;
import org.apache.orc.OrcProto;
import org.apache.orc.StripeInformation;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertNull;
public class TestReaderEncryptionVariant {
@Test
public void testInvalidKeyProvider() throws IOException {
OrcProto.EncryptionAlgorithm algorithm = OrcProto.EncryptionAlgorithm.AES_CTR_256;
ReaderEncryptionKey key =
new ReaderEncryptionKey(OrcProto.EncryptionKey.newBuilder().setAlgorithm(algorithm).build());
List<StripeInformation> strips = new ArrayList<>();
ReaderEncryptionVariant readerEncryptionVariant =
new ReaderEncryptionVariant(key, 0, null, null, strips, 0L, null, null);
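    // Without a key provider the variant cannot materialize any keys, so both
    // lookups are expected to return null.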
assertNull(readerEncryptionVariant.getFileFooterKey());
assertNull(readerEncryptionVariant.getStripeKey(0L));
}
}
| 1,740 | 36.847826 | 101 | java |
null | orc-main/java/core/src/test/org/apache/orc/util/CuckooSetBytesTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class CuckooSetBytesTest {
// maximum table size
private static final int MAX_SIZE = 65437;
@Test
public void testSetBytes() {
String[] strings = {"foo", "bar", "baz", "a", "", "x1341", "Z"};
String[] negativeStrings = {"not", "in", "the", "set", "foobar"};
byte[][] values = getByteArrays(strings);
byte[][] negatives = getByteArrays(negativeStrings);
// load set
CuckooSetBytes s = new CuckooSetBytes(strings.length);
for(byte[] v : values) {
s.insert(v);
}
// test that the values we added are there
for(byte[] v : values) {
assertTrue(s.lookup(v, 0, v.length));
}
// test that values that we know are missing are shown to be absent
for (byte[] v : negatives) {
assertFalse(s.lookup(v, 0, v.length));
}
// Test that we can search correctly using a buffer and pulling
// a sequence of bytes out of the middle of it. In this case it
// is the 3 letter sequence "foo".
byte[] buf = getUTF8Bytes("thewordfooisinhere");
assertTrue(s.lookup(buf, 7, 3));
}
@Test
public void testSetBytesLargeRandom() {
byte[][] values;
Random gen = new Random(98763537);
for(int i = 0; i < 200;) {
// Make a random array of byte arrays
int size = gen.nextInt() % MAX_SIZE;
if (size <= 0) { // ensure size is >= 1, otherwise try again
continue;
}
i++;
values = new byte[size][];
loadRandomBytes(values, gen);
// load them into a set
CuckooSetBytes s = new CuckooSetBytes(size);
loadSet(s, values);
// look them up to make sure they are all there
for (int j = 0; j != size; j++) {
assertTrue(s.lookup(values[j], 0, values[j].length));
}
}
}
public void loadRandomBytes(byte[][] values, Random gen) {
for (int i = 0; i != values.length; i++) {
values[i] = getUTF8Bytes(Integer.toString(gen.nextInt()));
}
}
private byte[] getUTF8Bytes(String s) {
byte[] v = null;
try {
v = s.getBytes(StandardCharsets.UTF_8);
} catch (Exception e) {
; // won't happen
}
return v;
}
// Get an array of UTF-8 byte arrays from an array of strings
private byte[][] getByteArrays(String[] strings) {
byte[][] values = new byte[strings.length][];
for(int i = 0; i != strings.length; i++) {
try {
values[i] = strings[i].getBytes(StandardCharsets.UTF_8);
} catch (Exception e) {
; // can't happen
}
}
return values;
}
private void loadSet(CuckooSetBytes s, byte[][] values) {
for (byte[] v: values) {
s.insert(v);
}
}
}
| 3,708 | 27.976563 | 75 | java |
null | orc-main/java/core/src/test/org/apache/orc/util/TestBloomFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import com.google.protobuf.ByteString;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for BloomFilter
*/
public class TestBloomFilter {
@Test
public void testBitset() {
BloomFilter.BitSet bitset = new BloomFilter.BitSet(128);
// set every 9th bit for a rotating pattern
for(int l=0; l < 8; ++l) {
bitset.set(l*9);
}
// set every non-9th bit
for(int l=8; l < 16; ++l) {
for(int b=0; b < 8; ++b) {
if (b != l - 8) {
bitset.set(l*8+b);
}
}
}
for(int b=0; b < 64; ++b) {
assertEquals(b % 9 == 0, bitset.get(b));
}
for(int b=64; b < 128; ++b) {
assertEquals((b % 8) != (b - 64) / 8, bitset.get(b));
}
// test that the longs are mapped correctly
long[] longs = bitset.getData();
assertEquals(2, longs.length);
assertEquals(0x8040201008040201L, longs[0]);
assertEquals(~0x8040201008040201L, longs[1]);
}
/**
* Same test as TestBloomFilter_testLongHash in C++ codes. Make sure the hash values
* are consistent between the Java client and C++ client.
* TODO(ORC-1025): Add exhaustive test on all numbers.
*/
@Test
public void testLongHash() {
assertEquals(0, BloomFilter.getLongHash(0));
assertEquals(6614246905173314819L, BloomFilter.getLongHash(-1));
assertEquals(-5218250166726157773L, BloomFilter.getLongHash(-2));
assertEquals(1396019780946710816L, BloomFilter.getLongHash(-3));
assertEquals(3691278333958578070L, BloomFilter.getLongHash(-9223372036854775805L));
assertEquals(-1192099642781211952L, BloomFilter.getLongHash(-9223372036854775806L));
assertEquals(-9102499068535824902L, BloomFilter.getLongHash(-9223372036854775807L));
assertEquals(1499534499340523007L, BloomFilter.getLongHash(790302201));
assertEquals(-5108695154500810163L, BloomFilter.getLongHash(790302202));
assertEquals(-2450623810987162260L, BloomFilter.getLongHash(790302203));
assertEquals(-1097054448615658549L, BloomFilter.getLongHash(18000000000L));
assertEquals(-4986173376161118712L, BloomFilter.getLongHash(9223372036064673413L));
assertEquals(3785699328822078862L, BloomFilter.getLongHash(9223372036064673414L));
assertEquals(294188322706112357L, BloomFilter.getLongHash(9223372036064673415L));
}
private void checkBitSet(BloomFilter bf, int[] pos) {
for (int i : pos) {
assertTrue(bf.testBitSetPos(i));
}
}
/**
* Same test as TestBloomFilter_testBloomFilterBasicOperations in C++ codes. We also
* verifies the bitSet positions that are set, to make sure both the Java and C++ codes
* hash the same value into the same position.
*/
@Test
public void testBasicOperations() {
BloomFilter bloomFilter = new BloomFilterUtf8(128, BloomFilter.DEFAULT_FPP);
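    // 128 expected entries at the library's default false positive probability.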
// test integers
bloomFilter.reset();
assertFalse(bloomFilter.testLong(1));
assertFalse(bloomFilter.testLong(11));
assertFalse(bloomFilter.testLong(111));
assertFalse(bloomFilter.testLong(1111));
assertFalse(bloomFilter.testLong(0));
assertFalse(bloomFilter.testLong(-1));
assertFalse(bloomFilter.testLong(-11));
assertFalse(bloomFilter.testLong(-111));
assertFalse(bloomFilter.testLong(-1111));
bloomFilter.addLong(1);
checkBitSet(bloomFilter, new int[]{567, 288, 246, 306, 228});
bloomFilter.addLong(11);
checkBitSet(bloomFilter, new int[]{228, 285, 342, 399, 456});
bloomFilter.addLong(111);
checkBitSet(bloomFilter, new int[]{802, 630, 458, 545, 717});
bloomFilter.addLong(1111);
checkBitSet(bloomFilter, new int[]{826, 526, 40, 480, 86});
bloomFilter.addLong(0);
checkBitSet(bloomFilter, new int[]{0, 0, 0, 0, 0});
bloomFilter.addLong(-1);
checkBitSet(bloomFilter, new int[]{120, 308, 335, 108, 535});
bloomFilter.addLong(-11);
checkBitSet(bloomFilter, new int[]{323, 685, 215, 577, 107});
bloomFilter.addLong(-111);
checkBitSet(bloomFilter, new int[]{357, 318, 279, 15, 54});
bloomFilter.addLong(-1111);
checkBitSet(bloomFilter, new int[]{572, 680, 818, 434, 232});
assertTrue(bloomFilter.testLong(1));
assertTrue(bloomFilter.testLong(11));
assertTrue(bloomFilter.testLong(111));
assertTrue(bloomFilter.testLong(1111));
assertTrue(bloomFilter.testLong(0));
assertTrue(bloomFilter.testLong(-1));
assertTrue(bloomFilter.testLong(-11));
assertTrue(bloomFilter.testLong(-111));
assertTrue(bloomFilter.testLong(-1111));
// test doubles
bloomFilter.reset();
assertFalse(bloomFilter.testDouble(1.1));
assertFalse(bloomFilter.testDouble(11.11));
assertFalse(bloomFilter.testDouble(111.111));
assertFalse(bloomFilter.testDouble(1111.1111));
assertFalse(bloomFilter.testDouble(0.0));
assertFalse(bloomFilter.testDouble(-1.1));
assertFalse(bloomFilter.testDouble(-11.11));
assertFalse(bloomFilter.testDouble(-111.111));
assertFalse(bloomFilter.testDouble(-1111.1111));
bloomFilter.addDouble(1.1);
checkBitSet(bloomFilter, new int[]{522, 692, 12, 370, 753});
bloomFilter.addDouble(11.11);
checkBitSet(bloomFilter, new int[]{210, 188, 89, 720, 389});
bloomFilter.addDouble(111.111);
checkBitSet(bloomFilter, new int[]{831, 252, 583, 500, 335});
bloomFilter.addDouble(1111.1111);
checkBitSet(bloomFilter, new int[]{725, 175, 374, 92, 642});
bloomFilter.addDouble(0.0);
checkBitSet(bloomFilter, new int[]{0, 0, 0, 0, 0});
bloomFilter.addDouble(-1.1);
checkBitSet(bloomFilter, new int[]{636, 163, 565, 206, 679});
bloomFilter.addDouble(-11.11);
checkBitSet(bloomFilter, new int[]{473, 192, 743, 462, 181});
bloomFilter.addDouble(-111.111);
checkBitSet(bloomFilter, new int[]{167, 152, 472, 295, 24});
bloomFilter.addDouble(-1111.1111);
checkBitSet(bloomFilter, new int[]{308, 346, 384, 422, 371});
assertTrue(bloomFilter.testDouble(1.1));
assertTrue(bloomFilter.testDouble(11.11));
assertTrue(bloomFilter.testDouble(111.111));
assertTrue(bloomFilter.testDouble(1111.1111));
assertTrue(bloomFilter.testDouble(0.0));
assertTrue(bloomFilter.testDouble(-1.1));
assertTrue(bloomFilter.testDouble(-11.11));
assertTrue(bloomFilter.testDouble(-111.111));
assertTrue(bloomFilter.testDouble(-1111.1111));
// test strings
bloomFilter.reset();
String emptyStr = "";
String enStr = "english";
String cnStr = "中国字";
assertFalse(bloomFilter.testString(emptyStr));
assertFalse(bloomFilter.testString(enStr));
assertFalse(bloomFilter.testString(cnStr));
bloomFilter.addString(emptyStr);
checkBitSet(bloomFilter, new int[]{656, 807, 480, 151, 304});
bloomFilter.addString(enStr);
checkBitSet(bloomFilter, new int[]{576, 221, 68, 729, 392});
bloomFilter.addString(cnStr);
checkBitSet(bloomFilter, new int[]{602, 636, 44, 362, 318});
assertTrue(bloomFilter.testString(emptyStr));
assertTrue(bloomFilter.testString(enStr));
assertTrue(bloomFilter.testString(cnStr));
}
@Test
public void testBloomFilterSerialize() {
long[] bits = new long[]{0x8040201008040201L, ~0x8040201008040201L};
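    // 0x8040201008040201L sets bit i within byte i, so its little-endian
    // serialization is the byte sequence 0x01, 0x02, 0x04, ..., 0x80.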
BloomFilter bloom = new BloomFilterUtf8(bits, 1);
OrcProto.BloomFilter.Builder builder = OrcProto.BloomFilter.newBuilder();
BloomFilterIO.serialize(builder, bloom);
OrcProto.BloomFilter proto = builder.build();
assertEquals(1, proto.getNumHashFunctions());
assertEquals(0, proto.getBitsetCount());
ByteString bs = proto.getUtf8Bitset();
byte[] expected = new byte[]{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
(byte) 0x80, ~0x01, ~0x02, ~0x04, ~0x08, ~0x10, ~0x20, ~0x40,
(byte) ~0x80};
OrcProto.ColumnEncoding.Builder encoding =
OrcProto.ColumnEncoding.newBuilder();
encoding.setKind(OrcProto.ColumnEncoding.Kind.DIRECT)
.setBloomEncoding(BloomFilterIO.Encoding.UTF8_UTC.getId());
assertArrayEquals(expected, bs.toByteArray());
BloomFilter rebuilt = BloomFilterIO.deserialize(
OrcProto.Stream.Kind.BLOOM_FILTER_UTF8,
encoding.build(),
OrcFile.WriterVersion.ORC_135,
TypeDescription.Category.INT,
proto);
assertEquals(bloom, rebuilt);
}
@Test
public void testBloomFilterEquals() {
long[] bits = new long[]{0x8040201008040201L, ~0x8040201008040201L};
BloomFilter bloom = new BloomFilterUtf8(bits, 1);
BloomFilter other = new BloomFilterUtf8(new long[]{0,0}, 1);
assertFalse(bloom.equals(other));
}
}
| 9,660 | 38.432653 | 89 | java |
null | orc-main/java/core/src/test/org/apache/orc/util/TestMurmur3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import org.junit.jupiter.api.Test;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Tests for Murmur3 variants.
*/
public class TestMurmur3 {
@Test
public void testHashCodesM3_32_string() {
String key = "test";
int seed = 123;
HashFunction hf = Hashing.murmur3_32(seed);
int hc1 = hf.hashBytes(key.getBytes(StandardCharsets.UTF_8)).asInt();
int hc2 = Murmur3.hash32(key.getBytes(StandardCharsets.UTF_8),
key.getBytes(StandardCharsets.UTF_8).length, seed);
assertEquals(hc1, hc2);
key = "testkey";
hc1 = hf.hashBytes(key.getBytes(StandardCharsets.UTF_8)).asInt();
hc2 = Murmur3.hash32(key.getBytes(StandardCharsets.UTF_8),
key.getBytes(StandardCharsets.UTF_8).length, seed);
assertEquals(hc1, hc2);
}
@Test
public void testHashCodesM3_32_ints() {
int seed = 123;
Random rand = new Random(seed);
HashFunction hf = Hashing.murmur3_32(seed);
for (int i = 0; i < 1000; i++) {
int val = rand.nextInt();
byte[] data = ByteBuffer.allocate(4).putInt(val).array();
int hc1 = hf.hashBytes(data).asInt();
int hc2 = Murmur3.hash32(data, data.length, seed);
assertEquals(hc1, hc2);
}
}
@Test
public void testHashCodesM3_32_longs() {
int seed = 123;
Random rand = new Random(seed);
HashFunction hf = Hashing.murmur3_32(seed);
for (int i = 0; i < 1000; i++) {
long val = rand.nextLong();
byte[] data = ByteBuffer.allocate(8).putLong(val).array();
int hc1 = hf.hashBytes(data).asInt();
int hc2 = Murmur3.hash32(data, data.length, seed);
assertEquals(hc1, hc2);
}
}
@Test
public void testHashCodesM3_32_double() {
int seed = 123;
Random rand = new Random(seed);
HashFunction hf = Hashing.murmur3_32(seed);
for (int i = 0; i < 1000; i++) {
double val = rand.nextDouble();
byte[] data = ByteBuffer.allocate(8).putDouble(val).array();
int hc1 = hf.hashBytes(data).asInt();
int hc2 = Murmur3.hash32(data, data.length, seed);
assertEquals(hc1, hc2);
}
}
@Test
public void testHashCodesM3_128_string() {
String key = "test";
int seed = 123;
HashFunction hf = Hashing.murmur3_128(seed);
// guava stores the hashcodes in little endian order
ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
buf.put(hf.hashBytes(key.getBytes(StandardCharsets.UTF_8)).asBytes());
buf.flip();
long gl1 = buf.getLong();
long gl2 = buf.getLong(8);
long[] hc = Murmur3.hash128(key.getBytes(StandardCharsets.UTF_8), 0,
key.getBytes(StandardCharsets.UTF_8).length, seed);
long m1 = hc[0];
long m2 = hc[1];
assertEquals(gl1, m1);
assertEquals(gl2, m2);
key = "testkey128_testkey128";
buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
buf.put(hf.hashBytes(key.getBytes(StandardCharsets.UTF_8)).asBytes());
buf.flip();
gl1 = buf.getLong();
gl2 = buf.getLong(8);
byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
hc = Murmur3.hash128(keyBytes, 0, keyBytes.length, seed);
m1 = hc[0];
m2 = hc[1];
assertEquals(gl1, m1);
assertEquals(gl2, m2);
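    // Hashing the same key from a non-zero offset inside a padded buffer
    // must produce the same 128-bit hash.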
byte[] offsetKeyBytes = new byte[keyBytes.length + 35];
Arrays.fill(offsetKeyBytes, (byte) -1);
System.arraycopy(keyBytes, 0, offsetKeyBytes, 35, keyBytes.length);
hc = Murmur3.hash128(offsetKeyBytes, 35, keyBytes.length, seed);
assertEquals(gl1, hc[0]);
assertEquals(gl2, hc[1]);
}
@Test
public void testHashCodeM3_64() {
byte[] origin = ("It was the best of times, it was the worst of times," +
" it was the age of wisdom, it was the age of foolishness," +
" it was the epoch of belief, it was the epoch of incredulity," +
" it was the season of Light, it was the season of Darkness," +
" it was the spring of hope, it was the winter of despair," +
" we had everything before us, we had nothing before us," +
" we were all going direct to Heaven," +
" we were all going direct the other way.").getBytes(StandardCharsets.UTF_8);
long hash = Murmur3.hash64(origin, 0, origin.length);
assertEquals(305830725663368540L, hash);
byte[] originOffset = new byte[origin.length + 150];
Arrays.fill(originOffset, (byte) 123);
System.arraycopy(origin, 0, originOffset, 150, origin.length);
hash = Murmur3.hash64(originOffset, 150, origin.length);
assertEquals(305830725663368540L, hash);
}
@Test
public void testHashCodesM3_128_ints() {
int seed = 123;
Random rand = new Random(seed);
HashFunction hf = Hashing.murmur3_128(seed);
for (int i = 0; i < 1000; i++) {
int val = rand.nextInt();
byte[] data = ByteBuffer.allocate(4).putInt(val).array();
// guava stores the hashcodes in little endian order
ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
buf.put(hf.hashBytes(data).asBytes());
buf.flip();
long gl1 = buf.getLong();
long gl2 = buf.getLong(8);
long[] hc = Murmur3.hash128(data, 0, data.length, seed);
long m1 = hc[0];
long m2 = hc[1];
assertEquals(gl1, m1);
assertEquals(gl2, m2);
byte[] offsetData = new byte[data.length + 50];
System.arraycopy(data, 0, offsetData, 50, data.length);
hc = Murmur3.hash128(offsetData, 50, data.length, seed);
assertEquals(gl1, hc[0]);
assertEquals(gl2, hc[1]);
}
}
@Test
public void testHashCodesM3_128_longs() {
int seed = 123;
Random rand = new Random(seed);
HashFunction hf = Hashing.murmur3_128(seed);
for (int i = 0; i < 1000; i++) {
long val = rand.nextLong();
byte[] data = ByteBuffer.allocate(8).putLong(val).array();
// guava stores the hashcodes in little endian order
ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
buf.put(hf.hashBytes(data).asBytes());
buf.flip();
long gl1 = buf.getLong();
long gl2 = buf.getLong(8);
long[] hc = Murmur3.hash128(data, 0, data.length, seed);
long m1 = hc[0];
long m2 = hc[1];
assertEquals(gl1, m1);
assertEquals(gl2, m2);
}
}
@Test
public void testHashCodesM3_128_double() {
int seed = 123;
Random rand = new Random(seed);
HashFunction hf = Hashing.murmur3_128(seed);
for (int i = 0; i < 1000; i++) {
double val = rand.nextDouble();
byte[] data = ByteBuffer.allocate(8).putDouble(val).array();
// guava stores the hashcodes in little endian order
ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN);
buf.put(hf.hashBytes(data).asBytes());
buf.flip();
long gl1 = buf.getLong();
long gl2 = buf.getLong(8);
long[] hc = Murmur3.hash128(data, 0, data.length, seed);
long m1 = hc[0];
long m2 = hc[1];
assertEquals(gl1, m1);
assertEquals(gl2, m2);
}
}
}
| 8,034 | 34.241228 | 85 | java |
null | orc-main/java/core/src/test/org/apache/orc/util/TestOrcUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import org.apache.orc.OrcUtils;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
/**
* Tests for OrcUtils.
*/
public class TestOrcUtils {
@Test
public void testBloomFilterIncludeColumns() {
TypeDescription schema = TypeDescription.createStruct()
.addField("msisdn", TypeDescription.createString())
.addField("imsi", TypeDescription.createVarchar())
.addField("imei", TypeDescription.createInt());
boolean[] includeColumns = new boolean[3+1];
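    // ORC assigns column ids in a pre-order walk of the schema: 0 is the root
    // struct, so msisdn=1, imsi=2, and imei=3.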
includeColumns[1] = true;
includeColumns[3] = true;
assertArrayEquals(includeColumns, OrcUtils.includeColumns("msisdn, imei", schema));
}
@Test
public void testBloomFilterIncludeColumns_ACID() {
TypeDescription rowSchema = TypeDescription.createStruct()
.addField("msisdn", TypeDescription.createString())
.addField("imei", TypeDescription.createInt());
TypeDescription schema = TypeDescription.createStruct()
.addField("operation", TypeDescription.createString())
.addField("originalTransaction", TypeDescription.createInt())
.addField("bucket", TypeDescription.createInt())
.addField("rowId", TypeDescription.createInt())
.addField("currentTransaction", TypeDescription.createInt())
.addField("row", rowSchema);
boolean[] includeColumns = new boolean[8+1];
includeColumns[7] = true;
assertArrayEquals(includeColumns, OrcUtils.includeColumns("msisdn", schema));
}
@Test
public void testBloomFilterIncludeColumns_Nested() {
TypeDescription rowSchema = TypeDescription.createStruct()
.addField("msisdn", TypeDescription.createString())
.addField("imei", TypeDescription.createInt());
TypeDescription schema = TypeDescription.createStruct()
.addField("row", rowSchema);
boolean[] includeColumns = new boolean[3+1];
includeColumns[2] = true;
assertArrayEquals(includeColumns, OrcUtils.includeColumns("row.msisdn", schema));
}
@Test
public void testBloomFilterIncludeColumns_NonExisting() {
TypeDescription rowSchema = TypeDescription.createStruct()
.addField("msisdn", TypeDescription.createString())
.addField("imei", TypeDescription.createInt());
TypeDescription schema = TypeDescription.createStruct()
.addField("row", rowSchema);
boolean[] includeColumns = new boolean[3+1];
assertArrayEquals(includeColumns, OrcUtils.includeColumns("msisdn, row.msisdn2", schema));
}
}
| 3,397 | 34.768421 | 94 | java |
null | orc-main/java/core/src/test/org/apache/orc/util/TestStreamWrapperFileSystem.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.util;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TestVectorOrcFile;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for StreamWrapperFileSystem.
*/
public class TestStreamWrapperFileSystem {
@Test
public void testWrapper() throws IOException {
Configuration conf = new Configuration();
Path realFilename = new Path(TestVectorOrcFile.getFileFromClasspath(
"orc-file-11-format.orc"));
FileSystem local = FileSystem.getLocal(conf);
FSDataInputStream stream = local.open(realFilename);
long fileSize = local.getFileStatus(realFilename).getLen();
FileSystem fs = new StreamWrapperFileSystem(stream, new Path("foo"),
fileSize, conf);
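    // StreamWrapperFileSystem exposes the already-open stream as a one-file
    // FileSystem, so the reader can address it through the synthetic path "foo".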
assertSame(stream, fs.open(new Path("foo")));
TypeDescription readerSchema =
TypeDescription.fromString("struct<boolean1:boolean>");
try (Reader reader = OrcFile.createReader(new Path("foo"),
OrcFile.readerOptions(conf).filesystem(fs));
RecordReader rows = reader.rows(reader.options().schema(readerSchema))) {
// make sure that the metadata is what we expect
assertEquals(7500, reader.getNumberOfRows());
assertEquals("struct<boolean1:boolean,byte1:tinyint,short1:smallint," +
"int1:int,long1:bigint," +"float1:float,double1:double," +
"bytes1:binary,string1:string,middle:struct<list:array<struct<" +
"int1:int,string1:string>>>,list:array<struct<int1:int," +
"string1:string>>,map:map<string,struct<int1:int," +
"string1:string>>,ts:timestamp,decimal1:decimal(38,10)>",
reader.getSchema().toString());
// read the boolean1 column and check the data
VectorizedRowBatch batch = readerSchema.createRowBatchV2();
LongColumnVector boolean1 = (LongColumnVector) batch.cols[0];
int current = 0;
for(int r=0; r < 7500; ++r) {
if (current >= batch.size) {
assertTrue(rows.nextBatch(batch), "row " + r);
current = 0;
}
assertEquals(r % 2, boolean1.vector[current++], "row " + r);
}
}
}
}
| 3,531 | 40.069767 | 81 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/AdvancedReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
/**
* This example shows how to read compound data types in ORC.
*/
public class AdvancedReader {
public static void main(Configuration conf, String[] args) throws IOException {
// Get the information from the file footer
Reader reader = OrcFile.createReader(new Path("advanced-example.orc"),
OrcFile.readerOptions(conf));
System.out.println("File schema: " + reader.getSchema());
System.out.println("Row count: " + reader.getNumberOfRows());
// Pick the schema we want to read using schema evolution
TypeDescription readSchema =
TypeDescription.fromString("struct<first:int,second:int,third:map<string,int>>");
// Read the row data
VectorizedRowBatch batch = readSchema.createRowBatch();
RecordReader rowIterator = reader.rows(reader.options()
.schema(readSchema));
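    // Supplying a read schema lets ORC map the file's columns onto the
    // requested schema; here it matches the schema the file was written with.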
LongColumnVector x = (LongColumnVector) batch.cols[0];
LongColumnVector y = (LongColumnVector) batch.cols[1];
MapColumnVector z = (MapColumnVector) batch.cols[2];
    /*
     * Because the batch maximum size is 1024, the writer started a new batch at
     * row 1024 (the 1025th row counting from 0), so the map child values reset there.
     * The final row is 1499, and its map values run from 2375 to 2379.
     */
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
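        // A repeating vector stores its single value in row 0, so index 0 is
        // used for every row when isRepeating is set.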
int xRow = x.isRepeating ? 0 : row;
int yRow = y.isRepeating ? 0 : row;
int zRow = z.isRepeating ? 0 : row;
System.out.println("x: " +
(x.noNulls || !x.isNull[xRow] ? x.vector[xRow] : null));
System.out.println("y: " + (y.noNulls || !y.isNull[yRow] ? y.vector[yRow] : null));
System.out.print("z: [");
long index = z.offsets[zRow];
for (long i = 0; i < z.lengths[zRow]; i++) {
final BytesColumnVector keys = (BytesColumnVector) z.keys;
final LongColumnVector values = (LongColumnVector) z.values;
String key = keys.toString((int) (index + i));
final long value = values.vector[(int) (index + i)];
System.out.print(key + ":" + value);
System.out.print(" ");
}
System.out.println("]");
}
}
rowIterator.close();
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 3,636 | 38.532609 | 91 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/AdvancedWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
/**
* This example shows how to write compound data types in ORC.
*
*/
public class AdvancedWriter {
public static void main(Configuration conf, String[] args) throws IOException {
Path testFilePath = new Path("advanced-example.orc");
TypeDescription schema =
TypeDescription.fromString("struct<first:int," +
"second:int,third:map<string,int>>");
Writer writer =
OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(schema));
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector first = (LongColumnVector) batch.cols[0];
LongColumnVector second = (LongColumnVector) batch.cols[1];
    // Define the map column. You also need to cast the key and value child vectors.
MapColumnVector map = (MapColumnVector) batch.cols[2];
BytesColumnVector mapKey = (BytesColumnVector) map.keys;
LongColumnVector mapValue = (LongColumnVector) map.values;
// Each map has 5 elements
final int MAP_SIZE = 5;
final int BATCH_SIZE = batch.getMaxSize();
// Ensure the map is big enough
mapKey.ensureSize(BATCH_SIZE * MAP_SIZE, false);
mapValue.ensureSize(BATCH_SIZE * MAP_SIZE, false);
// add 1500 rows to file
for(int r=0; r < 1500; ++r) {
int row = batch.size++;
first.vector[row] = r;
second.vector[row] = r * 3;
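      // offsets[row] marks where this row's entries begin in the shared
      // key/value child vectors, and lengths[row] says how many it has.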
map.offsets[row] = map.childCount;
map.lengths[row] = MAP_SIZE;
map.childCount += MAP_SIZE;
for (int mapElem = (int) map.offsets[row];
mapElem < map.offsets[row] + MAP_SIZE; ++mapElem) {
String key = "row " + r + "." + (mapElem - map.offsets[row]);
mapKey.setVal(mapElem, key.getBytes(StandardCharsets.UTF_8));
mapValue.vector[mapElem] = mapElem;
}
if (row == BATCH_SIZE - 1) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
batch.reset();
}
    writer.close();
  }
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 3,434 | 33.69697 | 81 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/CompressionWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFile.WriterOptions;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
import static org.apache.orc.CompressionKind.SNAPPY;
public class CompressionWriter {
public static void main(Configuration conf, String[] args) throws IOException {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:string>");
    // Set the compression kind to SNAPPY. Other supported compression
    // methods include ZLIB, LZO, LZ4, and ZSTD.
WriterOptions options = OrcFile.writerOptions(conf).setSchema(schema).compress(SNAPPY);
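    // (Illustrative) another codec is just a different argument to compress(),
    // e.g. compress(CompressionKind.ZSTD); the rest of the example is unchanged.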
Writer snappyWriter = OrcFile.createWriter(new Path("compressed.orc"), options);
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector x = (LongColumnVector) batch.cols[0];
BytesColumnVector y = (BytesColumnVector) batch.cols[1];
for (int r = 0; r < 10000; ++r) {
int row = batch.size++;
x.vector[row] = r;
byte[] buffer = ("byte-" + r).getBytes();
y.setRef(row, buffer, 0, buffer.length);
// If the batch is full, write it out and start over.
if (batch.size == batch.getMaxSize()) {
snappyWriter.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
snappyWriter.addRowBatch(batch);
}
snappyWriter.close();
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 2,596 | 37.761194 | 91 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/CoreReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
public class CoreReader {
public static void main(Configuration conf, String[] args) throws IOException {
// Get the information from the file footer
Reader reader = OrcFile.createReader(new Path("my-file.orc"),
OrcFile.readerOptions(conf));
System.out.println("File schema: " + reader.getSchema());
System.out.println("Row count: " + reader.getNumberOfRows());
// Pick the schema we want to read using schema evolution
TypeDescription readSchema =
TypeDescription.fromString("struct<z:int,y:string,x:bigint>");
// Read the row data
VectorizedRowBatch batch = readSchema.createRowBatch();
RecordReader rowIterator = reader.rows(reader.options()
.schema(readSchema));
LongColumnVector z = (LongColumnVector) batch.cols[0];
BytesColumnVector y = (BytesColumnVector) batch.cols[1];
LongColumnVector x = (LongColumnVector) batch.cols[2];
while (rowIterator.nextBatch(batch)) {
for(int row=0; row < batch.size; ++row) {
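        // An isRepeating vector holds a single value for the whole batch,
        // so entry 0 is the one to read for every row.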
int zRow = z.isRepeating ? 0: row;
int xRow = x.isRepeating ? 0: row;
System.out.println("z: " +
(z.noNulls || !z.isNull[zRow] ? z.vector[zRow] : null));
System.out.println("y: " + y.toString(row));
System.out.println("x: " +
(x.noNulls || !x.isNull[xRow] ? x.vector[xRow] : null));
}
}
rowIterator.close();
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 2,844 | 40.231884 | 81 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/CoreWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
public class CoreWriter {
public static void main(Configuration conf, String[] args) throws IOException {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:string>");
Writer writer = OrcFile.createWriter(new Path("my-file.orc"),
OrcFile.writerOptions(conf)
.setSchema(schema));
VectorizedRowBatch batch = schema.createRowBatch();
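    // createRowBatch() with no argument uses the default batch size
    // (VectorizedRowBatch.DEFAULT_SIZE, 1024 rows).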
LongColumnVector x = (LongColumnVector) batch.cols[0];
BytesColumnVector y = (BytesColumnVector) batch.cols[1];
for(int r=0; r < 10000; ++r) {
int row = batch.size++;
x.vector[row] = r;
byte[] buffer = ("Last-" + (r * 3)).getBytes(StandardCharsets.UTF_8);
y.setRef(row, buffer, 0, buffer.length);
// If the batch is full, write it out and start over.
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 2,436 | 37.68254 | 82 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/Driver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
/**
* Driver program for the java ORC examples.
*/
public class Driver {
@SuppressWarnings("static-access")
static Options createOptions() {
Options result = new Options();
result.addOption("h", "help", false, "Print help message");
result.addOption("D", "define", true, "Set a configuration property");
return result;
}
static class DriverOptions {
final CommandLine genericOptions;
final String command;
final String[] commandArgs;
DriverOptions(String[] args) throws ParseException {
genericOptions = new DefaultParser().parse(createOptions(), args, true);
String[] unprocessed = genericOptions.getArgs();
if (unprocessed.length == 0) {
command = null;
commandArgs = new String[0];
} else {
command = unprocessed[0];
if (genericOptions.hasOption('h')) {
commandArgs = new String[]{"-h"};
} else {
commandArgs = new String[unprocessed.length - 1];
System.arraycopy(unprocessed, 1, commandArgs, 0, commandArgs.length);
}
}
}
}
public static void main(String[] args) throws Exception {
DriverOptions options = new DriverOptions(args);
if (options.command == null) {
System.err.println("ORC Java Examples");
System.err.println();
System.err.println("usage: java -jar orc-examples-*.jar [--help]" +
" [--define X=Y] <command> <args>");
System.err.println();
System.err.println("Commands:");
System.err.println(" write - write a sample ORC file");
System.err.println(" read - read a sample ORC file");
System.err.println(" write2 - write a sample ORC file with a map");
System.err.println(" read2 - read a sample ORC file with a map");
System.err.println(" compressWriter - write a ORC file with snappy compression");
System.err.println(" inMemoryEncryptionWriter - write a ORC file with encryption");
System.err.println(" inMemoryEncryptionReader - read a ORC file with encryption");
System.err.println();
System.err.println("To get more help, provide -h to the command");
System.exit(1);
}
Configuration conf = new Configuration();
String[] confSettings = options.genericOptions.getOptionValues("D");
if (confSettings != null) {
for (String param : confSettings) {
String[] parts = param.split("=", 2);
conf.set(parts[0], parts[1]);
}
}
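    // Illustrative invocation (values are examples only):
    //   java -jar orc-examples-*.jar --define orc.compress=ZSTD write
    // Every -D/--define pair is applied to the Configuration before the
    // subcommand runs.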
if ("read".equals(options.command)) {
CoreReader.main(conf, options.commandArgs);
} else if ("write".equals(options.command)) {
CoreWriter.main(conf, options.commandArgs);
} else if ("write2".equals(options.command)) {
AdvancedWriter.main(conf, options.commandArgs);
} else if ("read2".equals(options.command)) {
AdvancedReader.main(conf, options.commandArgs);
} else if ("compressWriter".equals(options.command)) {
CompressionWriter.main(conf, options.commandArgs);
} else if ("inMemoryEncryptionWriter".equals(options.command)) {
InMemoryEncryptionWriter.main(conf, options.commandArgs);
} else if ("inMemoryEncryptionReader".equals(options.command)) {
InMemoryEncryptionReader.main(conf, options.commandArgs);
} else {
System.err.println("Unknown subcommand: " + options.command);
System.exit(1);
}
}
}
| 4,434 | 38.247788 | 91 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/InMemoryEncryptionReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.InMemoryKeystore;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFile.ReaderOptions;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import static org.apache.orc.EncryptionAlgorithm.AES_CTR_128;
public class InMemoryEncryptionReader {
public static void main(Configuration conf, String[] args) throws IOException {
byte[] kmsKey = "secret123".getBytes(StandardCharsets.UTF_8);
    // InMemoryKeystore supplies the key needed to read the encrypted data.
InMemoryKeystore keyProvider = new InMemoryKeystore().addKey("pii", AES_CTR_128, kmsKey);
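    // The key name ("pii") and key material must match what was used by
    // InMemoryEncryptionWriter for the encrypted columns to be decrypted.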
ReaderOptions readerOptions = OrcFile.readerOptions(conf).setKeyProvider(keyProvider);
Reader reader = OrcFile.createReader(new Path("encrypted.orc"), readerOptions);
System.out.println("File schema: " + reader.getSchema());
System.out.println("Row count: " + reader.getNumberOfRows());
// Pick the schema we want to read using schema evolution
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:string>");
    // Read the encrypted data
VectorizedRowBatch batch = schema.createRowBatch();
RecordReader rowIterator = reader.rows(reader.options().schema(schema));
LongColumnVector x = (LongColumnVector) batch.cols[0];
BytesColumnVector y = (BytesColumnVector) batch.cols[1];
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
System.out.println("x: " + x.vector[row]);
System.out.println("y: " + y.toString(row));
}
}
rowIterator.close();
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 2,906 | 41.130435 | 93 | java |
null | orc-main/java/examples/src/java/org/apache/orc/examples/InMemoryEncryptionWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.examples;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.InMemoryKeystore;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcFile.WriterOptions;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import static org.apache.orc.EncryptionAlgorithm.AES_CTR_128;
public class InMemoryEncryptionWriter {
public static void main(Configuration conf, String[] args) throws IOException {
TypeDescription schema = TypeDescription.fromString("struct<x:int,y:string>");
byte[] kmsKey = "secret123".getBytes(StandardCharsets.UTF_8);
    // The primary use of InMemoryKeystore is for users who don't have a
    // Hadoop KMS.
InMemoryKeystore provider = new InMemoryKeystore().addKey("pii", AES_CTR_128, kmsKey);
String encryption = "pii:x,y";
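    // The encryption spec is a key name followed by the columns it protects:
    // here master key "pii" encrypts columns x and y.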
WriterOptions writerOptions =
OrcFile.writerOptions(conf).setSchema(schema).setKeyProvider(provider).encrypt(encryption);
Writer writer = OrcFile.createWriter(new Path("encrypted.orc"), writerOptions);
VectorizedRowBatch batch = schema.createRowBatch();
LongColumnVector x = (LongColumnVector) batch.cols[0];
BytesColumnVector y = (BytesColumnVector) batch.cols[1];
for (int r = 0; r < 10000; ++r) {
int row = batch.size++;
x.vector[row] = r;
byte[] buffer = ("byte-" + r).getBytes();
y.setRef(row, buffer, 0, buffer.length);
// If the batch is full, write it out and start over.
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 2,885 | 38.534247 | 99 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentImpl;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
/**
* A MapReduce/Hive input format for ORC files.
*/
public class OrcInputFormat<V extends WritableComparable>
extends FileInputFormat<NullWritable, V> {
private static final int KRYO_SARG_MAX_BUFFER = 16777216;
/**
* Convert a string with a comma separated list of column ids into the
* array of boolean that match the schemas.
* @param schema the schema for the reader
* @param columnsStr the comma separated list of column ids
* @return a boolean array
*/
public static boolean[] parseInclude(TypeDescription schema,
String columnsStr) {
if (columnsStr == null ||
schema.getCategory() != TypeDescription.Category.STRUCT) {
return null;
}
boolean[] result = new boolean[schema.getMaximumId() + 1];
result[0] = true;
if (StringUtils.isBlank(columnsStr)) {
return result;
}
List<TypeDescription> types = schema.getChildren();
for(String idString: columnsStr.split(",")) {
TypeDescription type = types.get(Integer.parseInt(idString));
for(int c=type.getId(); c <= type.getMaximumId(); ++c) {
result[c] = true;
}
}
return result;
}
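  // Illustrative example (sketch, not part of the original docs): given
  //   struct<name:string,address:struct<street:string,city:string>>
  // the string refers to 0-based top-level field positions, so "1" selects the
  // second field (address) together with all of its nested children:
  //   boolean[] include = parseInclude(schema, "1");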
/**
* Put the given SearchArgument into the configuration for an OrcInputFormat.
* @param conf the configuration to modify
* @param sarg the SearchArgument to put in the configuration
* @param columnNames the list of column names for the SearchArgument
*/
public static void setSearchArgument(Configuration conf,
SearchArgument sarg,
String[] columnNames) {
int bufferSize = (int)OrcConf.KRYO_SARG_BUFFER.getLong(conf);
Output out = new Output(bufferSize, KRYO_SARG_MAX_BUFFER);
new Kryo().writeObject(out, sarg);
OrcConf.KRYO_SARG.setString(conf, Base64.getMimeEncoder().encodeToString(out.toBytes()));
StringBuilder buffer = new StringBuilder();
for (int i = 0; i < columnNames.length; ++i) {
if (i != 0) {
buffer.append(',');
}
buffer.append(columnNames[i]);
}
OrcConf.SARG_COLUMNS.setString(conf, buffer.toString());
}
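  // Illustrative usage (a sketch assuming the standard Hive
  // SearchArgumentFactory builder API):
  //   SearchArgument sarg = SearchArgumentFactory.newBuilder()
  //       .startAnd().lessThan("x", PredicateLeaf.Type.LONG, 100L).end()
  //       .build();
  //   OrcInputFormat.setSearchArgument(conf, sarg, new String[]{"x"});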
/**
* Build the Reader.Options object based on the JobConf and the range of
* bytes.
   * @param conf the job configuration
* @param reader the file footer reader
* @param start the byte offset to start reader
* @param length the number of bytes to read
* @return the options to read with
*/
public static Reader.Options buildOptions(Configuration conf,
Reader reader,
long start,
long length) {
TypeDescription schema =
TypeDescription.fromString(OrcConf.MAPRED_INPUT_SCHEMA.getString(conf));
Reader.Options options = reader.options()
.range(start, length)
.useZeroCopy(OrcConf.USE_ZEROCOPY.getBoolean(conf))
.skipCorruptRecords(OrcConf.SKIP_CORRUPT_DATA.getBoolean(conf))
.tolerateMissingSchema(OrcConf.TOLERATE_MISSING_SCHEMA.getBoolean(conf));
if (schema != null) {
options.schema(schema);
} else {
schema = reader.getSchema();
}
options.include(parseInclude(schema,
OrcConf.INCLUDE_COLUMNS.getString(conf)));
String kryoSarg = OrcConf.KRYO_SARG.getString(conf);
String sargColumns = OrcConf.SARG_COLUMNS.getString(conf);
if (kryoSarg != null && sargColumns != null) {
byte[] sargBytes = Base64.getMimeDecoder().decode(kryoSarg);
SearchArgument sarg =
new Kryo().readObject(new Input(sargBytes), SearchArgumentImpl.class);
options.searchArgument(sarg, sargColumns.split(","));
}
return options;
}
@Override
public RecordReader<NullWritable, V> getRecordReader(InputSplit inputSplit,
JobConf conf,
Reporter reporter) throws IOException {
FileSplit split = (FileSplit) inputSplit;
Reader file = OrcFile.createReader(split.getPath(),
OrcFile.readerOptions(conf)
.maxLength(OrcConf.MAX_FILE_LENGTH.getLong(conf)));
    // MapReduce record readers support the selected vector.
Reader.Options options = buildOptions(conf, file, split.getStart(), split.getLength())
.useSelected(true);
return new OrcMapredRecordReader<>(file, options);
}
/**
* Filter out the 0 byte files, so that we don't generate splits for the
* empty ORC files.
* @param job the job configuration
* @return a list of files that need to be read
* @throws IOException
*/
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
FileStatus[] result = super.listStatus(job);
List<FileStatus> ok = new ArrayList<>(result.length);
for(FileStatus stat: result) {
if (stat.getLen() != 0) {
ok.add(stat);
}
}
if (ok.size() == result.length) {
return result;
} else {
return ok.toArray(new FileStatus[0]);
}
}
}
| 6,891 | 36.661202 | 94 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* This type provides a wrapper for OrcStruct so that it can be sent through
* the MapReduce shuffle as a key.
* <p>
 * The user should set the JobConf property named by
 * OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA to the type string of the key's schema.
*/
public final class OrcKey
implements WritableComparable<OrcKey>, JobConfigurable {
public WritableComparable key;
public OrcKey(WritableComparable key) {
this.key = key;
}
public OrcKey() {
key = null;
}
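  // Illustrative configuration (sketch): configure() below reads the shuffle
  // key schema, which could be set with something like
  //   OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.setString(conf, "struct<i:int,s:string>");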
@Override
public void write(DataOutput dataOutput) throws IOException {
key.write(dataOutput);
}
@Override
public void readFields(DataInput dataInput) throws IOException {
key.readFields(dataInput);
}
@Override
public void configure(JobConf conf) {
if (key == null) {
TypeDescription schema =
TypeDescription.fromString(OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA
.getString(conf));
key = OrcStruct.createValue(schema);
}
}
@Override
public int compareTo(OrcKey o) {
return key.compareTo(o.key);
}
@Override
public boolean equals(Object o) {
if (o == null || key == null) {
return false;
} else if (o.getClass() != getClass()) {
return false;
} else {
return key.equals(((OrcKey) o).key);
}
}
@Override
public int hashCode() {
return key.hashCode();
}
}
| 2,462 | 26.065934 | 76 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.TypeDescription;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
/**
* An ArrayList implementation that implements Writable.
* @param <E> the element type, which must be Writable
*/
public class OrcList<E extends WritableComparable>
extends ArrayList<E> implements WritableComparable<OrcList<E>> {
private final TypeDescription childSchema;
public OrcList(TypeDescription schema) {
childSchema = schema.getChildren().get(0);
}
public OrcList(TypeDescription schema, int initialCapacity) {
super(initialCapacity);
childSchema = schema.getChildren().get(0);
}
@Override
public void write(DataOutput output) throws IOException {
Iterator<E> itr = iterator();
output.writeInt(size());
while (itr.hasNext()) {
E obj = itr.next();
output.writeBoolean(obj != null);
if (obj != null) {
obj.write(output);
}
}
}
@Override
public void readFields(DataInput input) throws IOException {
clear();
int size = input.readInt();
ensureCapacity(size);
for(int i=0; i < size; ++i) {
if (input.readBoolean()) {
E obj = (E) OrcStruct.createValue(childSchema);
obj.readFields(input);
add(obj);
} else {
add(null);
}
}
}
@Override
public int compareTo(OrcList<E> other) {
if (other == null) {
return -1;
}
int result = childSchema.compareTo(other.childSchema);
if (result != 0) {
return result;
}
int ourSize = size();
int otherSize = other.size();
for(int e=0; e < ourSize && e < otherSize; ++e) {
E ours = get(e);
E theirs = other.get(e);
if (ours == null) {
if (theirs != null) {
return 1;
}
} else if (theirs == null) {
return -1;
} else {
int val = ours.compareTo(theirs);
if (val != 0) {
return val;
}
}
}
return ourSize - otherSize;
}
@Override
public boolean equals(Object other) {
return other != null && other.getClass() == getClass() &&
compareTo((OrcList<E>) other) == 0;
}
@Override
public int hashCode() {
return super.hashCode();
}
}
| 3,179 | 26.413793 | 75 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.TypeDescription;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
/**
* A TreeMap implementation that implements Writable.
* @param <K> the key type, which must be WritableComparable
* @param <V> the value type, which must be WritableComparable
*/
public final class OrcMap<K extends WritableComparable,
V extends WritableComparable>
extends TreeMap<K, V> implements WritableComparable<OrcMap<K,V>> {
private final TypeDescription keySchema;
private final TypeDescription valueSchema;
public OrcMap(TypeDescription schema) {
keySchema = schema.getChildren().get(0);
valueSchema = schema.getChildren().get(1);
}
@Override
public void write(DataOutput output) throws IOException {
output.writeInt(size());
for(Map.Entry<K,V> entry: entrySet()) {
K key = entry.getKey();
V value = entry.getValue();
output.writeByte((key == null ? 0 : 2) | (value == null ? 0 : 1));
if (key != null) {
key.write(output);
}
if (value != null) {
value.write(output);
}
}
}
@Override
public void readFields(DataInput input) throws IOException {
clear();
int size = input.readInt();
for(int i=0; i < size; ++i) {
byte flag = input.readByte();
K key;
V value;
if ((flag & 2) != 0) {
key = (K) OrcStruct.createValue(keySchema);
key.readFields(input);
} else {
key = null;
}
if ((flag & 1) != 0) {
value = (V) OrcStruct.createValue(valueSchema);
value.readFields(input);
} else {
value = null;
}
put(key, value);
}
}
@Override
public int compareTo(OrcMap<K,V> other) {
if (other == null) {
return -1;
}
int result = keySchema.compareTo(other.keySchema);
if (result != 0) {
return result;
}
result = valueSchema.compareTo(other.valueSchema);
if (result != 0) {
return result;
}
Iterator<Map.Entry<K,V>> ourItr = entrySet().iterator();
Iterator<Map.Entry<K,V>> theirItr = other.entrySet().iterator();
while (ourItr.hasNext() && theirItr.hasNext()) {
Map.Entry<K,V> ourItem = ourItr.next();
Map.Entry<K,V> theirItem = theirItr.next();
K ourKey = ourItem.getKey();
K theirKey = theirItem.getKey();
int val = ourKey.compareTo(theirKey);
if (val != 0) {
return val;
}
Comparable<V> ourValue = ourItem.getValue();
V theirValue = theirItem.getValue();
if (ourValue == null) {
if (theirValue != null) {
return 1;
}
} else if (theirValue == null) {
return -1;
} else {
val = ourItem.getValue().compareTo(theirItem.getValue());
if (val != 0) {
return val;
}
}
}
if (ourItr.hasNext()) {
return 1;
} else if (theirItr.hasNext()) {
return -1;
} else {
return 0;
}
}
@Override
public boolean equals(Object other) {
return other != null && other.getClass() == getClass() &&
compareTo((OrcMap<K,V>) other) == 0;
}
@Override
public int hashCode() {
return super.hashCode();
}
}
| 4,200 | 27.972414 | 75 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcMapredRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* This record reader implements the org.apache.hadoop.mapred API.
* @param <V> the root type of the file
*/
public class OrcMapredRecordReader<V extends WritableComparable>
implements org.apache.hadoop.mapred.RecordReader<NullWritable, V> {
private final TypeDescription schema;
private final RecordReader batchReader;
private final VectorizedRowBatch batch;
private int rowInBatch;
public OrcMapredRecordReader(RecordReader reader,
TypeDescription schema) throws IOException {
this.batchReader = reader;
this.batch = schema.createRowBatch();
this.schema = schema;
rowInBatch = 0;
}
protected OrcMapredRecordReader(Reader fileReader,
Reader.Options options) throws IOException {
this(fileReader, options, options.getRowBatchSize());
}
protected OrcMapredRecordReader(Reader fileReader,
Reader.Options options,
int rowBatchSize) throws IOException {
this.batchReader = fileReader.rows(options);
if (options.getSchema() == null) {
schema = fileReader.getSchema();
} else {
schema = options.getSchema();
}
this.batch = schema.createRowBatch(rowBatchSize);
rowInBatch = 0;
}
/**
* If the current batch is empty, get a new one.
* @return true if we have rows available.
* @throws IOException
*/
boolean ensureBatch() throws IOException {
if (rowInBatch >= batch.size) {
rowInBatch = 0;
return batchReader.nextBatch(batch);
}
return true;
}
@Override
public boolean next(NullWritable key, V value) throws IOException {
if (!ensureBatch()) {
return false;
}
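    // When row-level filtering is in effect the batch carries a selected[]
    // index array; map the logical position onto the physical row it refers to.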
int rowIdx = batch.selectedInUse ? batch.selected[rowInBatch] : rowInBatch;
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
OrcStruct result = (OrcStruct) value;
List<TypeDescription> children = schema.getChildren();
int numberOfChildren = children.size();
for(int i=0; i < numberOfChildren; ++i) {
result.setFieldValue(i, nextValue(batch.cols[i], rowIdx,
children.get(i), result.getFieldValue(i)));
}
} else {
nextValue(batch.cols[0], rowIdx, schema, value);
}
rowInBatch += 1;
return true;
}
@Override
public NullWritable createKey() {
return NullWritable.get();
}
@Override
public V createValue() {
return (V) OrcStruct.createValue(schema);
}
@Override
public long getPos() throws IOException {
return 0;
}
@Override
public void close() throws IOException {
batchReader.close();
}
@Override
public float getProgress() throws IOException {
return 0;
}
static BooleanWritable nextBoolean(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
BooleanWritable result;
if (previous == null || previous.getClass() != BooleanWritable.class) {
result = new BooleanWritable();
} else {
result = (BooleanWritable) previous;
}
result.set(((LongColumnVector) vector).vector[row] != 0);
return result;
} else {
return null;
}
}
static ByteWritable nextByte(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
ByteWritable result;
if (previous == null || previous.getClass() != ByteWritable.class) {
result = new ByteWritable();
} else {
result = (ByteWritable) previous;
}
result.set((byte) ((LongColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static ShortWritable nextShort(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
ShortWritable result;
if (previous == null || previous.getClass() != ShortWritable.class) {
result = new ShortWritable();
} else {
result = (ShortWritable) previous;
}
result.set((short) ((LongColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static IntWritable nextInt(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
IntWritable result;
if (previous == null || previous.getClass() != IntWritable.class) {
result = new IntWritable();
} else {
result = (IntWritable) previous;
}
result.set((int) ((LongColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static LongWritable nextLong(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
LongWritable result;
if (previous == null || previous.getClass() != LongWritable.class) {
result = new LongWritable();
} else {
result = (LongWritable) previous;
}
result.set(((LongColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static FloatWritable nextFloat(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
FloatWritable result;
if (previous == null || previous.getClass() != FloatWritable.class) {
result = new FloatWritable();
} else {
result = (FloatWritable) previous;
}
result.set((float) ((DoubleColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static DoubleWritable nextDouble(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
DoubleWritable result;
if (previous == null || previous.getClass() != DoubleWritable.class) {
result = new DoubleWritable();
} else {
result = (DoubleWritable) previous;
}
result.set(((DoubleColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static Text nextString(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
Text result;
if (previous == null || previous.getClass() != Text.class) {
result = new Text();
} else {
result = (Text) previous;
}
BytesColumnVector bytes = (BytesColumnVector) vector;
result.set(bytes.vector[row], bytes.start[row], bytes.length[row]);
return result;
} else {
return null;
}
}
static BytesWritable nextBinary(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
BytesWritable result;
if (previous == null || previous.getClass() != BytesWritable.class) {
result = new BytesWritable();
} else {
result = (BytesWritable) previous;
}
BytesColumnVector bytes = (BytesColumnVector) vector;
result.set(bytes.vector[row], bytes.start[row], bytes.length[row]);
return result;
} else {
return null;
}
}
static HiveDecimalWritable nextDecimal(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
HiveDecimalWritable result;
if (previous == null || previous.getClass() != HiveDecimalWritable.class) {
result = new HiveDecimalWritable();
} else {
result = (HiveDecimalWritable) previous;
}
result.set(((DecimalColumnVector) vector).vector[row]);
return result;
} else {
return null;
}
}
static DateWritable nextDate(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
DateWritable result;
if (previous == null || previous.getClass() != DateWritable.class) {
result = new DateWritable();
} else {
result = (DateWritable) previous;
}
int date = (int) ((LongColumnVector) vector).vector[row];
result.set(date);
return result;
} else {
return null;
}
}
static OrcTimestamp nextTimestamp(ColumnVector vector,
int row,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
OrcTimestamp result;
if (previous == null || previous.getClass() != OrcTimestamp.class) {
result = new OrcTimestamp();
} else {
result = (OrcTimestamp) previous;
}
TimestampColumnVector tcv = (TimestampColumnVector) vector;
result.setTime(tcv.time[row]);
result.setNanos(tcv.nanos[row]);
return result;
} else {
return null;
}
}
static OrcStruct nextStruct(ColumnVector vector,
int row,
TypeDescription schema,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
OrcStruct result;
List<TypeDescription> childrenTypes = schema.getChildren();
int numChildren = childrenTypes.size();
if (isReusable(previous, schema)) {
result = (OrcStruct) previous;
} else {
result = new OrcStruct(schema);
}
StructColumnVector struct = (StructColumnVector) vector;
for(int f=0; f < numChildren; ++f) {
result.setFieldValue(f, nextValue(struct.fields[f], row,
childrenTypes.get(f), result.getFieldValue(f)));
}
return result;
} else {
return null;
}
}
/**
* Determine if a OrcStruct object is reusable.
*/
private static boolean isReusable(Object previous, TypeDescription schema) {
if (previous == null || previous.getClass() != OrcStruct.class) {
return false;
}
return ((OrcStruct) previous).getSchema().equals(schema);
}
static OrcUnion nextUnion(ColumnVector vector,
int row,
TypeDescription schema,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
OrcUnion result;
List<TypeDescription> childrenTypes = schema.getChildren();
if (previous == null || previous.getClass() != OrcUnion.class) {
result = new OrcUnion(schema);
} else {
result = (OrcUnion) previous;
}
UnionColumnVector union = (UnionColumnVector) vector;
byte tag = (byte) union.tags[row];
result.set(tag, nextValue(union.fields[tag], row, childrenTypes.get(tag),
result.getObject()));
return result;
} else {
return null;
}
}
static OrcList nextList(ColumnVector vector,
int row,
TypeDescription schema,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
OrcList result;
List<TypeDescription> childrenTypes = schema.getChildren();
TypeDescription valueType = childrenTypes.get(0);
if (previous == null ||
previous.getClass() != ArrayList.class) {
result = new OrcList(schema);
} else {
result = (OrcList) previous;
}
ListColumnVector list = (ListColumnVector) vector;
int length = (int) list.lengths[row];
int offset = (int) list.offsets[row];
result.ensureCapacity(length);
int oldLength = result.size();
int idx = 0;
while (idx < length && idx < oldLength) {
result.set(idx, nextValue(list.child, offset + idx, valueType,
result.get(idx)));
idx += 1;
}
if (length < oldLength) {
for(int i= oldLength - 1; i >= length; --i) {
result.remove(i);
}
} else if (oldLength < length) {
while (idx < length) {
result.add(nextValue(list.child, offset + idx, valueType, null));
idx += 1;
}
}
return result;
} else {
return null;
}
}
static OrcMap nextMap(ColumnVector vector,
int row,
TypeDescription schema,
Object previous) {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
MapColumnVector map = (MapColumnVector) vector;
int length = (int) map.lengths[row];
int offset = (int) map.offsets[row];
OrcMap result;
List<TypeDescription> childrenTypes = schema.getChildren();
TypeDescription keyType = childrenTypes.get(0);
TypeDescription valueType = childrenTypes.get(1);
if (previous == null ||
previous.getClass() != OrcMap.class) {
result = new OrcMap(schema);
} else {
result = (OrcMap) previous;
// I couldn't think of a good way to reuse the keys and value objects
// without even more allocations, so take the easy and safe approach.
result.clear();
}
for(int e=0; e < length; ++e) {
result.put(nextValue(map.keys, e + offset, keyType, null),
nextValue(map.values, e + offset, valueType, null));
}
return result;
} else {
return null;
}
}
public static WritableComparable nextValue(ColumnVector vector,
int row,
TypeDescription schema,
Object previous) {
switch (schema.getCategory()) {
case BOOLEAN:
return nextBoolean(vector, row, previous);
case BYTE:
return nextByte(vector, row, previous);
case SHORT:
return nextShort(vector, row, previous);
case INT:
return nextInt(vector, row, previous);
case LONG:
return nextLong(vector, row, previous);
case FLOAT:
return nextFloat(vector, row, previous);
case DOUBLE:
return nextDouble(vector, row, previous);
case STRING:
case CHAR:
case VARCHAR:
return nextString(vector, row, previous);
case BINARY:
return nextBinary(vector, row, previous);
case DECIMAL:
return nextDecimal(vector, row, previous);
case DATE:
return nextDate(vector, row, previous);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return nextTimestamp(vector, row, previous);
case STRUCT:
return nextStruct(vector, row, schema, previous);
case UNION:
return nextUnion(vector, row, schema, previous);
case LIST:
return nextList(vector, row, schema, previous);
case MAP:
return nextMap(vector, row, schema, previous);
default:
throw new IllegalArgumentException("Unknown type " + schema);
}
}
}
| 18,446 | 30.860104 | 81 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcMapredRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MultiValuedColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
public class OrcMapredRecordWriter<V extends Writable>
implements RecordWriter<NullWritable, V> {
// The factor that we grow lists and maps by when they are too small.
private static final int GROWTH_FACTOR = 3;
private final Writer writer;
private final VectorizedRowBatch batch;
private final TypeDescription schema;
private final boolean isTopStruct;
private final List<MultiValuedColumnVector> variableLengthColumns =
new ArrayList<>();
private final int maxChildLength;
public OrcMapredRecordWriter(Writer writer) {
this(writer, VectorizedRowBatch.DEFAULT_SIZE);
}
public OrcMapredRecordWriter(Writer writer,
int rowBatchSize) {
this(writer, rowBatchSize,
(Integer) OrcConf.ROW_BATCH_CHILD_LIMIT.getDefaultValue());
}
public OrcMapredRecordWriter(Writer writer,
int rowBatchSize,
int maxChildLength) {
this.writer = writer;
schema = writer.getSchema();
this.batch = schema.createRowBatch(rowBatchSize);
addVariableLengthColumns(variableLengthColumns, batch);
isTopStruct = schema.getCategory() == TypeDescription.Category.STRUCT;
this.maxChildLength = maxChildLength;
}
/**
* Find variable length columns and add them to the list.
* @param result the list to be appended to
* @param vector the column vector to scan
*/
private static void addVariableLengthColumns(List<MultiValuedColumnVector> result,
ColumnVector vector) {
switch (vector.type) {
case LIST: {
ListColumnVector cv = (ListColumnVector) vector;
result.add(cv);
addVariableLengthColumns(result, cv.child);
break;
}
case MAP: {
MapColumnVector cv = (MapColumnVector) vector;
result.add(cv);
addVariableLengthColumns(result, cv.keys);
addVariableLengthColumns(result, cv.values);
break;
}
case STRUCT: {
for(ColumnVector child: ((StructColumnVector) vector).fields) {
addVariableLengthColumns(result, child);
}
break;
}
case UNION: {
for(ColumnVector child: ((UnionColumnVector) vector).fields) {
addVariableLengthColumns(result, child);
}
break;
}
default:
break;
}
}
/**
* Find variable length columns and add them to the list.
* @param result the list to be appended to
* @param batch the batch to scan
*/
public static void addVariableLengthColumns(List<MultiValuedColumnVector> result,
VectorizedRowBatch batch) {
for(ColumnVector cv: batch.cols) {
addVariableLengthColumns(result, cv);
}
}
static void setLongValue(ColumnVector vector, int row, long value) {
((LongColumnVector) vector).vector[row] = value;
}
static void setDoubleValue(ColumnVector vector, int row, double value) {
((DoubleColumnVector) vector).vector[row] = value;
}
static void setBinaryValue(ColumnVector vector, int row,
BinaryComparable value) {
((BytesColumnVector) vector).setVal(row, value.getBytes(), 0,
value.getLength());
}
static void setBinaryValue(ColumnVector vector, int row,
BinaryComparable value, int maxLength) {
((BytesColumnVector) vector).setVal(row, value.getBytes(), 0,
Math.min(maxLength, value.getLength()));
}
private static final ThreadLocal<byte[]> SPACE_BUFFER =
new ThreadLocal<byte[]>() {
@Override
protected byte[] initialValue() {
byte[] result = new byte[100];
Arrays.fill(result, (byte) ' ');
return result;
}
};
static void setCharValue(BytesColumnVector vector,
int row,
Text value,
int length) {
// we need to trim or pad the string with spaces to required length
int actualLength = value.getLength();
if (actualLength >= length) {
setBinaryValue(vector, row, value, length);
} else {
byte[] spaces = SPACE_BUFFER.get();
if (length - actualLength > spaces.length) {
spaces = new byte[length - actualLength];
Arrays.fill(spaces, (byte)' ');
SPACE_BUFFER.set(spaces);
}
vector.setConcat(row, value.getBytes(), 0, actualLength, spaces, 0,
length - actualLength);
}
}
static void setStructValue(TypeDescription schema,
StructColumnVector vector,
int row,
OrcStruct value) {
List<TypeDescription> children = schema.getChildren();
for(int c=0; c < value.getNumFields(); ++c) {
setColumn(children.get(c), vector.fields[c], row, value.getFieldValue(c));
}
}
static void setUnionValue(TypeDescription schema,
UnionColumnVector vector,
int row,
OrcUnion value) {
List<TypeDescription> children = schema.getChildren();
int tag = value.getTag() & 0xff;
vector.tags[row] = tag;
setColumn(children.get(tag), vector.fields[tag], row, value.getObject());
}
static void setListValue(TypeDescription schema,
ListColumnVector vector,
int row,
OrcList value) {
TypeDescription elemType = schema.getChildren().get(0);
vector.offsets[row] = vector.childCount;
vector.lengths[row] = value.size();
vector.childCount += vector.lengths[row];
if (vector.child.isNull.length < vector.childCount) {
vector.child.ensureSize(vector.childCount * GROWTH_FACTOR,
vector.offsets[row] != 0);
}
for(int e=0; e < vector.lengths[row]; ++e) {
setColumn(elemType, vector.child, (int) vector.offsets[row] + e,
(Writable) value.get(e));
}
}
static void setMapValue(TypeDescription schema,
MapColumnVector vector,
int row,
OrcMap<?,?> value) {
TypeDescription keyType = schema.getChildren().get(0);
TypeDescription valueType = schema.getChildren().get(1);
vector.offsets[row] = vector.childCount;
vector.lengths[row] = value.size();
vector.childCount += vector.lengths[row];
if (vector.keys.isNull.length < vector.childCount) {
vector.keys.ensureSize(vector.childCount * GROWTH_FACTOR,
vector.offsets[row] != 0);
}
if (vector.values.isNull.length < vector.childCount) {
vector.values.ensureSize(vector.childCount * GROWTH_FACTOR,
vector.offsets[row] != 0);
}
int e = 0;
for(Map.Entry<?,?> entry: value.entrySet()) {
setColumn(keyType, vector.keys, (int) vector.offsets[row] + e,
(Writable) entry.getKey());
setColumn(valueType, vector.values, (int) vector.offsets[row] + e,
(Writable) entry.getValue());
e += 1;
}
}
public static void setColumn(TypeDescription schema,
ColumnVector vector,
int row,
Writable value) {
if (value == null) {
vector.noNulls = false;
vector.isNull[row] = true;
} else {
switch (schema.getCategory()) {
case BOOLEAN:
setLongValue(vector, row, ((BooleanWritable) value).get() ? 1 : 0);
break;
case BYTE:
setLongValue(vector, row, ((ByteWritable) value).get());
break;
case SHORT:
setLongValue(vector, row, ((ShortWritable) value).get());
break;
case INT:
setLongValue(vector, row, ((IntWritable) value).get());
break;
case LONG:
setLongValue(vector, row, ((LongWritable) value).get());
break;
case FLOAT:
setDoubleValue(vector, row, ((FloatWritable) value).get());
break;
case DOUBLE:
setDoubleValue(vector, row, ((DoubleWritable) value).get());
break;
case STRING:
setBinaryValue(vector, row, (Text) value);
break;
case CHAR:
setCharValue((BytesColumnVector) vector, row, (Text) value,
schema.getMaxLength());
break;
case VARCHAR:
setBinaryValue(vector, row, (Text) value, schema.getMaxLength());
break;
case BINARY:
setBinaryValue(vector, row, (BytesWritable) value);
break;
case DATE:
setLongValue(vector, row, ((DateWritable) value).getDays());
break;
case TIMESTAMP:
case TIMESTAMP_INSTANT:
((TimestampColumnVector) vector).set(row, (OrcTimestamp) value);
break;
case DECIMAL:
((DecimalColumnVector) vector).set(row, (HiveDecimalWritable) value);
break;
case STRUCT:
setStructValue(schema, (StructColumnVector) vector, row,
(OrcStruct) value);
break;
case UNION:
setUnionValue(schema, (UnionColumnVector) vector, row,
(OrcUnion) value);
break;
case LIST:
setListValue(schema, (ListColumnVector) vector, row, (OrcList) value);
break;
case MAP:
setMapValue(schema, (MapColumnVector) vector, row, (OrcMap) value);
break;
default:
throw new IllegalArgumentException("Unknown type " + schema);
}
}
}
/**
* Get the longest child length among the given variable-length column vectors.
* @param columns the variable-length column vectors to check
* @return the length of the longest sub-column
*/
public static int getMaxChildLength(List<MultiValuedColumnVector> columns) {
int result = 0;
for(MultiValuedColumnVector cv: columns) {
result = Math.max(result, cv.childCount);
}
return result;
}
@Override
public void write(NullWritable nullWritable, V v) throws IOException {
// if the batch is full, write it out.
if (batch.size == batch.getMaxSize() ||
getMaxChildLength(variableLengthColumns) >= maxChildLength) {
writer.addRowBatch(batch);
batch.reset();
}
// add the new row
int row = batch.size++;
// skip over the OrcKey or OrcValue
if (v instanceof OrcKey) {
v = (V)((OrcKey) v).key;
} else if (v instanceof OrcValue) {
v = (V)((OrcValue) v).value;
}
if (isTopStruct) {
for(int f=0; f < schema.getChildren().size(); ++f) {
setColumn(schema.getChildren().get(f), batch.cols[f], row,
((OrcStruct) v).getFieldValue(f));
}
} else {
setColumn(schema, batch.cols[0], row, v);
}
}
@Override
public void close(Reporter reporter) throws IOException {
if (batch.size != 0) {
writer.addRowBatch(batch);
batch.reset();
}
writer.close();
}
}
| 13,510 | 34.555263 | 84 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.util.Progressable;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import java.io.IOException;
/**
* An ORC output format that satisfies the org.apache.hadoop.mapred API.
*/
public class OrcOutputFormat<V extends Writable>
extends FileOutputFormat<NullWritable, V> {
/**
* This function builds the options for the ORC Writer based on the JobConf.
* @param conf the job configuration
* @return a new options object
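*
* <p>A minimal sketch of using these options directly to create a writer
* (the path and {@code fs} file system below are illustrative, not part of
* this class):</p>
* <pre>{@code
* Writer writer = OrcFile.createWriter(new Path("part-00000.orc"),
*     buildOptions(conf).fileSystem(fs));
* }</pre>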
*/
public static OrcFile.WriterOptions buildOptions(Configuration conf) {
return OrcFile.writerOptions(conf)
.version(OrcFile.Version.byName(OrcConf.WRITE_FORMAT.getString(conf)))
.setSchema(TypeDescription.fromString(OrcConf.MAPRED_OUTPUT_SCHEMA
.getString(conf)))
.compress(CompressionKind.valueOf(OrcConf.COMPRESS.getString(conf)))
.encodingStrategy(OrcFile.EncodingStrategy.valueOf
(OrcConf.ENCODING_STRATEGY.getString(conf)))
.bloomFilterColumns(OrcConf.BLOOM_FILTER_COLUMNS.getString(conf))
.bloomFilterFpp(OrcConf.BLOOM_FILTER_FPP.getDouble(conf))
.blockSize(OrcConf.BLOCK_SIZE.getLong(conf))
.blockPadding(OrcConf.BLOCK_PADDING.getBoolean(conf))
.stripeSize(OrcConf.STRIPE_SIZE.getLong(conf))
.rowIndexStride((int) OrcConf.ROW_INDEX_STRIDE.getLong(conf))
.bufferSize((int) OrcConf.BUFFER_SIZE.getLong(conf))
.paddingTolerance(OrcConf.BLOCK_PADDING_TOLERANCE.getDouble(conf))
.encrypt(OrcConf.ENCRYPTION.getString(conf))
.masks(OrcConf.DATA_MASK.getString(conf));
}
@Override
public RecordWriter<NullWritable, V> getRecordWriter(FileSystem fileSystem,
JobConf conf,
String name,
Progressable progressable
) throws IOException {
Path path = getTaskOutputPath(conf, name);
Writer writer = OrcFile.createWriter(path,
buildOptions(conf).fileSystem(fileSystem));
return new OrcMapredRecordWriter<>(writer,
OrcConf.ROW_BATCH_SIZE.getInt(conf),
OrcConf.ROW_BATCH_CHILD_LIMIT.getInt(conf));
}
}
| 3,560 | 41.903614 | 80 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcStruct.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.TypeDescription;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
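/**
 * An in-memory representation of a struct row that implements
 * WritableComparable, holding one WritableComparable value per field of its
 * schema. A minimal sketch of building a row (the schema string here is
 * illustrative):
 * <pre>{@code
 * OrcStruct row = new OrcStruct(
 *     TypeDescription.fromString("struct<x:int,y:string>"));
 * row.setAllFields(new IntWritable(1), new Text("foo"));
 * }</pre>
 */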
public final class OrcStruct implements WritableComparable<OrcStruct> {
private WritableComparable[] fields;
private final TypeDescription schema;
public OrcStruct(TypeDescription schema) {
this.schema = schema;
fields = new WritableComparable[schema.getChildren().size()];
}
public WritableComparable getFieldValue(int fieldIndex) {
return fields[fieldIndex];
}
public void setFieldValue(int fieldIndex, WritableComparable value) {
fields[fieldIndex] = value;
}
public int getNumFields() {
return fields.length;
}
@Override
public void write(DataOutput output) throws IOException {
for(WritableComparable field: fields) {
output.writeBoolean(field != null);
if (field != null) {
field.write(output);
}
}
}
@Override
public void readFields(DataInput input) throws IOException {
for(int f=0; f < fields.length; ++f) {
if (input.readBoolean()) {
if (fields[f] == null) {
fields[f] = createValue(schema.getChildren().get(f));
}
fields[f].readFields(input);
} else {
fields[f] = null;
}
}
}
/**
* Get the schema for this object.
* @return the schema object
*/
public TypeDescription getSchema() {
return schema;
}
/**
* Set all of the fields in the struct
* @param values the list of values for each of the fields.
*/
public void setAllFields(WritableComparable... values) {
if (fields.length != values.length) {
throw new IllegalArgumentException("Wrong number (" + values.length +
") of fields for " + schema);
}
for (int col = 0; col < fields.length && col < values.length; ++col) {
fields[col] = values[col];
}
}
public void setFieldValue(String fieldName, WritableComparable value) {
int fieldIdx = schema.getFieldNames().indexOf(fieldName);
if (fieldIdx == -1) {
throw new IllegalArgumentException("Field " + fieldName +
" not found in " + schema);
}
fields[fieldIdx] = value;
}
public WritableComparable getFieldValue(String fieldName) {
int fieldIdx = schema.getFieldNames().indexOf(fieldName);
if (fieldIdx == -1) {
throw new IllegalArgumentException("Field " + fieldName +
" not found in " + schema);
}
return fields[fieldIdx];
}
@Override
public boolean equals(Object other) {
if (other == null || other.getClass() != OrcStruct.class) {
return false;
} else {
OrcStruct oth = (OrcStruct) other;
if (fields.length != oth.fields.length) {
return false;
}
for(int i=0; i < fields.length; ++i) {
if (fields[i] == null) {
if (oth.fields[i] != null) {
return false;
}
} else {
if (!fields[i].equals(oth.fields[i])) {
return false;
}
}
}
return true;
}
}
@Override
public int hashCode() {
return Arrays.hashCode(fields);
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("{");
for(int i=0; i < fields.length; ++i) {
if (i != 0) {
buffer.append(", ");
}
buffer.append(fields[i]);
}
buffer.append("}");
return buffer.toString();
}
/* Routines for stubbing into Writables */
public static WritableComparable createValue(TypeDescription type) {
switch (type.getCategory()) {
case BOOLEAN: return new BooleanWritable();
case BYTE: return new ByteWritable();
case SHORT: return new ShortWritable();
case INT: return new IntWritable();
case LONG: return new LongWritable();
case FLOAT: return new FloatWritable();
case DOUBLE: return new DoubleWritable();
case BINARY: return new BytesWritable();
case CHAR:
case VARCHAR:
case STRING:
return new Text();
case DATE:
return new DateWritable();
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new OrcTimestamp();
case DECIMAL:
return new HiveDecimalWritable();
case STRUCT: {
OrcStruct result = new OrcStruct(type);
int c = 0;
for(TypeDescription child: type.getChildren()) {
result.setFieldValue(c++, createValue(child));
}
return result;
}
case UNION: return new OrcUnion(type);
case LIST: return new OrcList(type);
case MAP: return new OrcMap(type);
default:
throw new IllegalArgumentException("Unknown type " + type);
}
}
@Override
public int compareTo(OrcStruct other) {
if (other == null) {
return -1;
}
int result = schema.compareTo(other.schema);
if (result != 0) {
return result;
}
for(int c = 0; c < fields.length && c < other.fields.length; ++c) {
if (fields[c] == null) {
if (other.fields[c] != null) {
return 1;
}
} else if (other.fields[c] == null) {
return -1;
} else {
int val = fields[c].compareTo(other.fields[c]);
if (val != 0) {
return val;
}
}
}
return fields.length - other.fields.length;
}
}
| 6,735 | 27.909871 | 75 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcTimestamp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.Date;
/**
* A Timestamp implementation that implements Writable.
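* <p>
* For example, a timestamp can be built from a JDBC-format literal (the
* value below is illustrative):
* <pre>{@code
* OrcTimestamp ts = new OrcTimestamp("2000-03-12 15:00:00");
* }</pre>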
*/
public class OrcTimestamp extends Timestamp implements WritableComparable<Date> {
public OrcTimestamp() {
super(0);
}
public OrcTimestamp(long time) {
super(time);
}
public OrcTimestamp(String timeStr) {
super(0);
Timestamp t = Timestamp.valueOf(timeStr);
setTime(t.getTime());
setNanos(t.getNanos());
}
@Override
public void write(DataOutput output) throws IOException {
output.writeLong(getTime());
output.writeInt(getNanos());
}
@Override
public void readFields(DataInput input) throws IOException {
setTime(input.readLong());
setNanos(input.readInt());
}
public void set(String timeStr) {
Timestamp t = Timestamp.valueOf(timeStr);
setTime(t.getTime());
setNanos(t.getNanos());
}
}
| 1,854 | 27.106061 | 81 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.orc.TypeDescription;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* An in-memory representation of a union type.
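* <p>
* A minimal sketch of populating a union value (the schema string and tag
* below are illustrative):
* <pre>{@code
* OrcUnion union = new OrcUnion(
*     TypeDescription.fromString("uniontype<int,string>"));
* union.set(0, new IntWritable(42));
* }</pre>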
*/
public final class OrcUnion implements WritableComparable<OrcUnion> {
private byte tag;
private WritableComparable object;
private final TypeDescription schema;
public OrcUnion(TypeDescription schema) {
this.schema = schema;
}
public void set(int tag, WritableComparable object) {
this.tag = (byte) tag;
this.object = object;
}
public byte getTag() {
return tag;
}
public Writable getObject() {
return object;
}
@Override
public boolean equals(Object other) {
if (other == null || other.getClass() != OrcUnion.class) {
return false;
}
OrcUnion oth = (OrcUnion) other;
if (tag != oth.tag) {
return false;
} else if (object == null) {
return oth.object == null;
} else {
return object.equals(oth.object);
}
}
@Override
public int hashCode() {
int result = tag;
if (object != null) {
result ^= object.hashCode();
}
return result;
}
@Override
public String toString() {
return "uniontype(" + Integer.toString(tag & 0xff) + ", " + object + ")";
}
@Override
public void write(DataOutput output) throws IOException {
output.writeByte(tag);
output.writeBoolean(object != null);
if (object != null) {
object.write(output);
}
}
@Override
public void readFields(DataInput input) throws IOException {
byte oldTag = tag;
tag = input.readByte();
if (input.readBoolean()) {
if (oldTag != tag || object == null) {
object = OrcStruct.createValue(schema.getChildren().get(tag));
}
object.readFields(input);
} else {
object = null;
}
}
@Override
public int compareTo(OrcUnion other) {
if (other == null) {
return -1;
}
int result = schema.compareTo(other.schema);
if (result != 0) {
return result;
}
if (tag != other.tag) {
return tag - other.tag;
}
if (object == null) {
return other.object == null ? 0 : 1;
}
return object.compareTo(other.object);
}
}
| 3,141 | 24.544715 | 77 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/OrcValue.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* This type provides a wrapper for OrcStruct so that it can be sent through
* the MapReduce shuffle as a value.
* <p>
* The user should set the OrcConf.MAPRED_SHUFFLE_VALUE_SCHEMA attribute in
* the JobConf to the type string of the value; configure() uses it to create
* the value object.
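* <p>
* A minimal sketch of wrapping a row for the shuffle (the schema string is
* illustrative):
* <pre>{@code
* TypeDescription schema = TypeDescription.fromString("struct<x:int>");
* OrcValue shuffleValue = new OrcValue(OrcStruct.createValue(schema));
* }</pre>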
*/
public final class OrcValue implements Writable, JobConfigurable {
public WritableComparable value;
public OrcValue(WritableComparable value) {
this.value = value;
}
public OrcValue() {
value = null;
}
@Override
public void write(DataOutput dataOutput) throws IOException {
value.write(dataOutput);
}
@Override
public void readFields(DataInput dataInput) throws IOException {
value.readFields(dataInput);
}
@Override
public void configure(JobConf conf) {
if (value == null) {
TypeDescription schema =
TypeDescription.fromString(OrcConf.MAPRED_SHUFFLE_VALUE_SCHEMA
.getString(conf));
value = OrcStruct.createValue(schema);
}
}
}
| 2,117 | 29.257143 | 76 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapred/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* <p>
* This package provides convenient access to ORC files using Hadoop's
* MapReduce InputFormat and OutputFormat.
* </p>
*
* <p>
* For reading, set the InputFormat to OrcInputFormat and your map will
* receive a stream of OrcStruct objects for each row. (Note that ORC files
* may have any type as the root object instead of structs and then the
* object type will be the appropriate one.)
* </p>
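*
* <p>A minimal reading job configuration might look like this (a sketch;
* the input path below is illustrative):</p>
*<pre>{@code
*JobConf conf = new JobConf();
*conf.setInputFormat(OrcInputFormat.class);
*FileInputFormat.addInputPath(conf, new Path("/data/in"));
*}</pre>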
*
* <p>The mapping of types is:</p>
* <table summary="Mapping of ORC types to Writable types"
* border="1">
* <thead>
* <tr><th>ORC Type</th><th>Writable Type</th></tr>
* </thead>
* <tbody>
* <tr><td>array</td><td>OrcList</td></tr>
* <tr><td>binary</td><td>BytesWritable</td></tr>
* <tr><td>bigint</td><td>LongWritable</td></tr>
* <tr><td>boolean</td><td>BooleanWritable</td></tr>
* <tr><td>char</td><td>Text</td></tr>
* <tr><td>date</td><td>DateWritable</td></tr>
* <tr><td>decimal</td><td>HiveDecimalWritable</td></tr>
* <tr><td>double</td><td>DoubleWritable</td></tr>
* <tr><td>float</td><td>FloatWritable</td></tr>
* <tr><td>int</td><td>IntWritable</td></tr>
* <tr><td>map</td><td>OrcMap</td></tr>
* <tr><td>smallint</td><td>ShortWritable</td></tr>
* <tr><td>string</td><td>Text</td></tr>
* <tr><td>struct</td><td>OrcStruct</td></tr>
* <tr><td>timestamp</td><td>OrcTimestamp</td></tr>
* <tr><td>tinyint</td><td>ByteWritable</td></tr>
* <tr><td>uniontype</td><td>OrcUnion</td></tr>
* <tr><td>varchar</td><td>Text</td></tr>
* </tbody>
* </table>
*
* <p>
* For writing, set the OutputFormat to OrcOutputFormat and define the
* property "orc.schema" in your configuration. The property defines the
* type of the file and uses the Hive type strings, such as
* "struct<x:int,y:string,z:timestamp>" for a row with an integer,
* string, and timestamp. You can create an example object using:</p>
*<pre>{@code
*String typeStr = "struct<x:int,y:string,z:timestamp>";
*OrcStruct row = (OrcStruct) OrcStruct.createValue(
* TypeDescription.fromString(typeStr));
*}</pre>
*
* <p>
* Please look at the OrcConf class for the configuration knobs that are
* available.
* </p>
*/
package org.apache.orc.mapred;
| 3,065 | 38.307692 | 75 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapreduce/OrcInputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* An ORC input format that satisfies the org.apache.hadoop.mapreduce API.
*/
public class OrcInputFormat<V extends WritableComparable>
extends FileInputFormat<NullWritable, V> {
/**
* Put the given SearchArgument into the configuration for an OrcInputFormat.
* @param conf the configuration to modify
* @param sarg the SearchArgument to put in the configuration
* @param columnNames the list of column names for the SearchArgument
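*
* <p>A minimal sketch of pushing down a predicate (the column name
* {@code "x"} below is illustrative):</p>
* <pre>{@code
* OrcInputFormat.setSearchArgument(conf,
*     SearchArgumentFactory.newBuilder()
*         .lessThan("x", PredicateLeaf.Type.LONG, 10L)
*         .build(),
*     new String[] {"x"});
* }</pre>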
*/
public static void setSearchArgument(Configuration conf,
SearchArgument sarg,
String[] columnNames) {
org.apache.orc.mapred.OrcInputFormat.setSearchArgument(conf, sarg,
columnNames);
}
@Override
public RecordReader<NullWritable, V>
createRecordReader(InputSplit inputSplit,
TaskAttemptContext taskAttemptContext
) throws IOException, InterruptedException {
FileSplit split = (FileSplit) inputSplit;
Configuration conf = taskAttemptContext.getConfiguration();
Reader file = OrcFile.createReader(split.getPath(),
OrcFile.readerOptions(conf)
.maxLength(OrcConf.MAX_FILE_LENGTH.getLong(conf)));
// The mapreduce record reader supports batches that use the selected vector
Reader.Options options = org.apache.orc.mapred.OrcInputFormat.buildOptions(
conf, file, split.getStart(), split.getLength())
.useSelected(true);
return new OrcMapreduceRecordReader<>(file, options);
}
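/**
 * List the input files for the job, filtering out zero-length files since
 * they contain no rows to read.
 */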
@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
List<FileStatus> complete = super.listStatus(job);
List<FileStatus> result = new ArrayList<>(complete.size());
for(FileStatus stat: complete) {
if (stat.getLen() != 0) {
result.add(stat);
}
}
return result;
}
}
| 3,414 | 37.806818 | 79 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapreduce/OrcMapreduceRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcMapredRecordReader;
import org.apache.orc.mapred.OrcStruct;
import java.io.IOException;
import java.util.List;
/**
* This record reader implements the org.apache.hadoop.mapreduce API.
* It is in the org.apache.orc.mapred package to share implementation with
* the mapred API record reader.
* @param <V> the root type of the file
*/
public class OrcMapreduceRecordReader<V extends WritableComparable>
extends org.apache.hadoop.mapreduce.RecordReader<NullWritable, V> {
private final TypeDescription schema;
private final RecordReader batchReader;
private final VectorizedRowBatch batch;
private int rowInBatch;
private final V row;
public OrcMapreduceRecordReader(RecordReader reader,
TypeDescription schema) throws IOException {
this.batchReader = reader;
this.batch = schema.createRowBatch();
this.schema = schema;
rowInBatch = 0;
this.row = (V) OrcStruct.createValue(schema);
}
public OrcMapreduceRecordReader(Reader fileReader,
Reader.Options options) throws IOException {
this(fileReader, options, options.getRowBatchSize());
}
public OrcMapreduceRecordReader(Reader fileReader,
Reader.Options options,
int rowBatchSize) throws IOException {
this.batchReader = fileReader.rows(options);
if (options.getSchema() == null) {
schema = fileReader.getSchema();
} else {
schema = options.getSchema();
}
this.batch = schema.createRowBatch(rowBatchSize);
rowInBatch = 0;
this.row = (V) OrcStruct.createValue(schema);
}
/**
* If the current batch has been consumed, get a new one.
* @return true if we have rows available.
* @throws IOException if reading the next batch fails
*/
boolean ensureBatch() throws IOException {
if (rowInBatch >= batch.size) {
rowInBatch = 0;
return batchReader.nextBatch(batch);
}
return true;
}
@Override
public void close() throws IOException {
batchReader.close();
}
@Override
public void initialize(InputSplit inputSplit,
TaskAttemptContext taskAttemptContext) {
// nothing required
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!ensureBatch()) {
return false;
}
int rowIdx = batch.selectedInUse ? batch.selected[rowInBatch] : rowInBatch;
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
OrcStruct result = (OrcStruct) row;
List<TypeDescription> children = schema.getChildren();
int numberOfChildren = children.size();
for(int i=0; i < numberOfChildren; ++i) {
result.setFieldValue(i, OrcMapredRecordReader.nextValue(batch.cols[i], rowIdx,
children.get(i), result.getFieldValue(i)));
}
} else {
OrcMapredRecordReader.nextValue(batch.cols[0], rowIdx, schema, row);
}
rowInBatch += 1;
return true;
}
@Override
public NullWritable getCurrentKey() throws IOException, InterruptedException {
return NullWritable.get();
}
@Override
public V getCurrentValue() throws IOException, InterruptedException {
return row;
}
@Override
public float getProgress() throws IOException {
return batchReader.getProgress();
}
}
| 4,544 | 32.419118 | 86 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapreduce/OrcMapreduceRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.hive.ql.exec.vector.MultiValuedColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.orc.OrcConf;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcMapredRecordWriter;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcValue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
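/**
 * An ORC record writer that satisfies the org.apache.hadoop.mapreduce API.
 * Rows are buffered into a VectorizedRowBatch, which is flushed to the
 * Writer when it fills up or when its variable-length children exceed the
 * configured limit.
 */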
public class OrcMapreduceRecordWriter<V extends Writable>
extends RecordWriter<NullWritable, V> {
private final Writer writer;
private final VectorizedRowBatch batch;
private final TypeDescription schema;
private final boolean isTopStruct;
private final List<MultiValuedColumnVector> variableLengthColumns =
new ArrayList<>();
private final int maxChildLength;
public OrcMapreduceRecordWriter(Writer writer) {
this(writer, VectorizedRowBatch.DEFAULT_SIZE);
}
public OrcMapreduceRecordWriter(Writer writer,
int rowBatchSize) {
this(writer, rowBatchSize,
(Integer) OrcConf.ROW_BATCH_CHILD_LIMIT.getDefaultValue());
}
public OrcMapreduceRecordWriter(Writer writer,
int rowBatchSize,
int maxChildLength) {
this.writer = writer;
schema = writer.getSchema();
this.batch = schema.createRowBatch(rowBatchSize);
isTopStruct = schema.getCategory() == TypeDescription.Category.STRUCT;
OrcMapredRecordWriter.addVariableLengthColumns(variableLengthColumns, batch);
this.maxChildLength = maxChildLength;
}
@Override
public void write(NullWritable nullWritable, V v) throws IOException {
// if the batch is full, write it out.
if (batch.size == batch.getMaxSize() ||
OrcMapredRecordWriter.getMaxChildLength(variableLengthColumns) >= maxChildLength) {
writer.addRowBatch(batch);
batch.reset();
}
// add the new row
int row = batch.size++;
// skip over the OrcKey or OrcValue
if (v instanceof OrcKey) {
v = (V)((OrcKey) v).key;
} else if (v instanceof OrcValue) {
v = (V)((OrcValue) v).value;
}
if (isTopStruct) {
for(int f=0; f < schema.getChildren().size(); ++f) {
OrcMapredRecordWriter.setColumn(schema.getChildren().get(f),
batch.cols[f], row, ((OrcStruct) v).getFieldValue(f));
}
} else {
OrcMapredRecordWriter.setColumn(schema, batch.cols[0], row, v);
}
}
@Override
public void close(TaskAttemptContext taskAttemptContext) throws IOException {
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
}
}
| 3,746 | 34.349057 | 91 | java |
null | orc-main/java/mapreduce/src/java/org/apache/orc/mapreduce/OrcOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Writer;
import java.io.IOException;
/**
* An ORC output format that satisfies the org.apache.hadoop.mapreduce API.
*/
public class OrcOutputFormat<V extends Writable>
extends FileOutputFormat<NullWritable, V> {
private static final String EXTENSION = ".orc";
// This is useful for unit tests or local runs where you don't need the
// output committer.
public static final String SKIP_TEMP_DIRECTORY =
"orc.mapreduce.output.skip-temporary-directory";
@Override
public RecordWriter<NullWritable, V>
getRecordWriter(TaskAttemptContext taskAttemptContext
) throws IOException {
Configuration conf = taskAttemptContext.getConfiguration();
Path filename = getDefaultWorkFile(taskAttemptContext, EXTENSION);
Writer writer = OrcFile.createWriter(filename,
org.apache.orc.mapred.OrcOutputFormat.buildOptions(conf));
return new OrcMapreduceRecordWriter<V>(writer,
OrcConf.ROW_BATCH_SIZE.getInt(conf),
OrcConf.ROW_BATCH_CHILD_LIMIT.getInt(conf));
}
@Override
public Path getDefaultWorkFile(TaskAttemptContext context,
String extension) throws IOException {
if (context.getConfiguration().getBoolean(SKIP_TEMP_DIRECTORY, false)) {
return new Path(getOutputPath(context),
getUniqueFile(context, getOutputName(context), extension));
} else {
return super.getDefaultWorkFile(context, extension);
}
}
}
| 2,713 | 38.333333 | 76 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestMapRedFiltering.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.orc.OrcConf;
import org.apache.orc.mapreduce.FilterTestUtil;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import static org.apache.orc.mapreduce.FilterTestUtil.RowCount;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestMapRedFiltering {
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static Configuration conf;
private static FileSystem fs;
private static final Path filePath = new Path(workDir, "mapred_skip_file.orc");
@BeforeAll
public static void setup() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
FilterTestUtil.createFile(conf, fs, filePath);
}
@Test
public void readWithSArg() throws IOException, InterruptedException {
OrcConf.ALLOW_SARG_TO_FILTER.setBoolean(conf, false);
OrcConf.INCLUDE_COLUMNS.setString(conf, "0,1,2,3,4");
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 0L)
.build(),
new String[] {"f1"});
FileSplit split = new FileSplit(filePath,
0, fs.getFileStatus(filePath).getLen(),
new String[0]);
FilterTestUtil.readStart();
RecordReader<NullWritable, OrcStruct> r = new OrcInputFormat<OrcStruct>()
.getRecordReader(split, new JobConf(conf), null);
long rowCount = validateFilteredRecordReader(r);
double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
fs.getFileStatus(filePath).getLen());
assertEquals(FilterTestUtil.RowCount, rowCount);
assertTrue(p >= 100);
}
@Test
public void readWithSArgAsFilter() throws IOException {
OrcConf.ALLOW_SARG_TO_FILTER.setBoolean(conf, true);
OrcConf.INCLUDE_COLUMNS.setString(conf, "0,1,2,3,4");
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 0L)
.build(),
new String[] {"f1"});
FileSplit split = new FileSplit(filePath,
0, fs.getFileStatus(filePath).getLen(),
new String[0]);
FilterTestUtil.readStart();
RecordReader<NullWritable, OrcStruct> r = new OrcInputFormat<OrcStruct>()
.getRecordReader(split, new JobConf(conf), null);
long rowCount = validateFilteredRecordReader(r);
double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
fs.getFileStatus(filePath).getLen());
assertEquals(0, rowCount);
assertTrue(p < 30);
}
@Test
public void readSingleRowWFilter() throws IOException, InterruptedException {
int cnt = 100;
Random r = new Random(cnt);
long ridx = 0;
while (cnt > 0) {
ridx = r.nextInt((int) RowCount);
readSingleRowWfilter(ridx);
cnt -= 1;
}
}
private static long validateFilteredRecordReader(RecordReader<NullWritable, OrcStruct> rr)
throws IOException {
OrcStruct row = new OrcStruct(FilterTestUtil.schema);
long rowCount = 0;
while (rr.next(NullWritable.get(), row)) {
FilterTestUtil.validateRow(row);
rowCount += 1;
}
return rowCount;
}
private void readSingleRowWfilter(long idx) throws IOException, InterruptedException {
OrcConf.ALLOW_SARG_TO_FILTER.setBoolean(conf, true);
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.LONG, idx)
.build(),
new String[] {"ridx"});
OrcConf.INCLUDE_COLUMNS.setString(conf, "0,1,2,4");
FileSplit split = new FileSplit(filePath,
0, fs.getFileStatus(filePath).getLen(),
new String[0]);
FilterTestUtil.readStart();
RecordReader<NullWritable, OrcStruct> r = new OrcInputFormat<OrcStruct>()
.getRecordReader(split, new JobConf(conf), null);
OrcStruct row = new OrcStruct(FilterTestUtil.schema);
long rowCount = 0;
while (r.next(NullWritable.get(), row)) {
FilterTestUtil.validateLimitedRow(row, idx);
rowCount += 1;
}
assertEquals(1, rowCount);
r.close();
}
}
| 6,256 | 39.895425 | 100 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcFileEvolution.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.Reporter;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.TypeDescription.Category;
import org.apache.orc.Writer;
import org.apache.orc.impl.SchemaEvolution;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
/**
* Test the behavior of ORC's schema evolution
*/
public class TestOrcFileEvolution {
// These utility methods are just to make writing tests easier. The values
// created here will not feed directly to the ORC writers, but are converted
// within checkEvolution().
private List<Object> struct(Object... fields) {
return list(fields);
}
private List<Object> list(Object... elements) {
return Arrays.asList(elements);
}
private Map<Object, Object> map(Object... kvs) {
if (kvs.length != 2) {
throw new IllegalArgumentException(
"Map must be provided an even number of arguments");
}
Map<Object, Object> result = new HashMap<>();
for (int i = 0; i < kvs.length; i += 2) {
result.put(kvs[i], kvs[i + 1]);
}
return result;
}
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem(TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestOrcFile." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testAddFieldToEnd(boolean addSarg) {
checkEvolution("struct<a:int,b:string>", "struct<a:int,b:string,c:double>",
struct(11, "foo"),
addSarg ? struct(0, "", 0.0) : struct(11, "foo", null),
addSarg);
}
@Test
public void testAddFieldToEndWithSarg() {
SearchArgument sArg = SearchArgumentFactory
.newBuilder()
.lessThan("c", PredicateLeaf.Type.LONG, 10L)
.build();
String[] sCols = new String[]{null, null, "c"};
checkEvolution("struct<a:int,b:string>", "struct<a:int,b:string,c:int>",
struct(1, "foo"),
struct(1, "foo", null),
(boolean) OrcConf.TOLERATE_MISSING_SCHEMA.getDefaultValue(),
sArg, sCols, false);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testAddFieldBeforeEnd(boolean addSarg) {
checkEvolution("struct<a:int,b:string>", "struct<a:int,c:double,b:string>",
struct(1, "foo"),
struct(1, null, "foo"),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testRemoveLastField(boolean addSarg) {
checkEvolution("struct<a:int,b:string,c:double>", "struct<a:int,b:string>",
struct(1, "foo", 3.14),
struct(1, "foo"),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testRemoveFieldBeforeEnd(boolean addSarg) {
checkEvolution("struct<a:int,b:string,c:double>", "struct<a:int,c:double>",
struct(1, "foo", 3.14),
struct(1, 3.14),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testRemoveAndAddField(boolean addSarg) {
checkEvolution("struct<a:int,b:string>", "struct<a:int,c:double>",
struct(1, "foo"), struct(1, null),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testReorderFields(boolean addSarg) {
checkEvolution("struct<a:int,b:string>", "struct<b:string,a:int>",
struct(1, "foo"), struct("foo", 1),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testAddFieldEndOfStruct(boolean addSarg) {
checkEvolution("struct<a:struct<b:int>,c:string>",
"struct<a:struct<b:int,d:double>,c:string>",
struct(struct(2), "foo"), struct(struct(2, null), "foo"),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testAddFieldBeforeEndOfStruct(boolean addSarg) {
checkEvolution("struct<a:struct<b:int>,c:string>",
"struct<a:struct<d:double,b:int>,c:string>",
struct(struct(2), "foo"), struct(struct(null, 2), "foo"),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testAddSimilarField(boolean addSarg) {
checkEvolution("struct<a:struct<b:int>>",
"struct<a:struct<b:int>,c:struct<b:int>>", struct(struct(2)),
struct(struct(2), null),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testConvergentEvolution(boolean addSarg) {
checkEvolution("struct<a:struct<a:int,b:string>,c:struct<a:int>>",
"struct<a:struct<a:int,b:string>,c:struct<a:int,b:string>>",
struct(struct(2, "foo"), struct(3)),
struct(struct(2, "foo"), struct(3, null)),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testMapKeyEvolution(boolean addSarg) {
checkEvolution("struct<a:map<struct<a:int>,int>>",
"struct<a:map<struct<a:int,b:string>,int>>",
struct(map(struct(1), 2)),
struct(map(struct(1, null), 2)),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testMapValueEvolution(boolean addSarg) {
checkEvolution("struct<a:map<int,struct<a:int>>>",
"struct<a:map<int,struct<a:int,b:string>>>",
struct(map(2, struct(1))),
struct(map(2, struct(1, null))),
addSarg);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testListEvolution(boolean addSarg) {
checkEvolution("struct<a:array<struct<b:int>>>",
"struct<a:array<struct<b:int,c:string>>>",
struct(list(struct(1), struct(2))),
struct(list(struct(1, null), struct(2, null))),
addSarg);
}
@Test
public void testMissingColumnFromReaderSchema() {
// If a column is part of the SArg but missing from the reader schema, it
// will be ignored (consistent with 1.6 release behaviour)
checkEvolution("struct<b:int,c:string>",
"struct<b:int,c:string>",
struct(1, "foo"),
struct(1, "foo", null),
true, true, false);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPreHive4243CheckEqual(boolean addSarg) {
// Expect success on equal schemas
checkEvolutionPosn("struct<_col0:int,_col1:string>",
"struct<_col0:int,_col1:string>",
struct(1, "foo"),
struct(1, "foo", null), false, addSarg, false);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPreHive4243Check(boolean addSarg) {
// Expect exception on strict compatibility check
Exception e = assertThrows(RuntimeException.class, () -> {
checkEvolutionPosn("struct<_col0:int,_col1:string>",
"struct<_col0:int,_col1:string,_col2:double>",
struct(1, "foo"),
struct(1, "foo", null), false, addSarg, false);
});
assertTrue(e.getMessage().contains("HIVE-4243"));
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPreHive4243AddColumn(boolean addSarg) {
checkEvolutionPosn("struct<_col0:int,_col1:string>",
"struct<_col0:int,_col1:string,_col2:double>",
struct(1, "foo"),
struct(1, "foo", null), true, addSarg, false);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPreHive4243AddColumnMiddle(boolean addSarg) {
// Expect exception on type mismatch
assertThrows(SchemaEvolution.IllegalEvolutionException.class, () -> {
checkEvolutionPosn("struct<_col0:int,_col1:double>",
"struct<_col0:int,_col1:date,_col2:double>",
struct(1, 1.0),
null, true, addSarg, false);
});
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPreHive4243AddColumnWithFix(boolean addSarg) {
checkEvolution("struct<_col0:int,_col1:string>",
"struct<a:int,b:string,c:double>",
struct(1, "foo"),
struct(1, "foo", null), true, addSarg, false);
}
@Test
public void testPreHive4243AddColumnMiddleWithFix() {
// Expect exception on type mismatch
assertThrows(SchemaEvolution.IllegalEvolutionException.class, () -> {
checkEvolution("struct<_col0:int,_col1:double>",
"struct<a:int,b:date,c:double>",
struct(1, 1.0),
null, true);
});
}
/**
* Test positional schema evolution.
* With the sarg, the row is filtered out, so we get default values instead.
*/
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPositional(boolean addSarg) {
checkEvolution("struct<x:int,y:int,z:int>", "struct<a:int,b:int,c:int,d:int>",
struct(11, 2, 3),
// if the sarg works, we get the default value
addSarg ? struct(0, 0, 0, 0) : struct(11, 2, 3, null),
false, addSarg, true);
}
/**
* Make the sarg use a column past the end of the file schema; since it
* evaluates against null, the predicate doesn't hit.
*/
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testPositional2(boolean addSarg) {
checkEvolution("struct<x:int,y:int,z:int>", "struct<b:int,c:int,d:int,a:int>",
struct(11, 2, 3),
struct(11, 2, 3, null),
false, addSarg, true);
}
private void checkEvolutionPosn(String writerType, String readerType,
Object inputRow, Object expectedOutput,
boolean tolerateSchema, boolean addSarg,
boolean positional) {
SearchArgument sArg = null;
String[] sCols = null;
if (addSarg) {
sArg = SearchArgumentFactory
.newBuilder()
.lessThan("_col0", PredicateLeaf.Type.LONG, 10L)
.build();
sCols = new String[]{null, "_col0", null};
}
checkEvolution(writerType, readerType,
inputRow, expectedOutput,
tolerateSchema,
sArg, sCols, positional);
}
private void checkEvolution(String writerType, String readerType,
Object inputRow, Object expectedOutput,
boolean tolerateSchema, boolean addSarg,
boolean positional) {
SearchArgument sArg = null;
String[] sCols = null;
if (addSarg) {
sArg = SearchArgumentFactory
.newBuilder()
.lessThan("a", PredicateLeaf.Type.LONG, 10L)
.build();
sCols = new String[]{null, "a", null};
}
checkEvolution(writerType, readerType,
inputRow, expectedOutput,
tolerateSchema,
sArg, sCols, positional);
}
private void checkEvolution(String writerType, String readerType,
Object inputRow, Object expectedOutput,
boolean addSarg) {
checkEvolution(writerType, readerType,
inputRow, expectedOutput,
(boolean) OrcConf.TOLERATE_MISSING_SCHEMA.getDefaultValue(),
addSarg, false);
}
private void checkEvolution(String writerType, String readerType,
Object inputRow, Object expectedOutput,
boolean tolerateSchema, SearchArgument sArg,
String[] sCols, boolean positional) {
TypeDescription readTypeDescr = TypeDescription.fromString(readerType);
TypeDescription writerTypeDescr = TypeDescription.fromString(writerType);
OrcStruct inputStruct = assembleStruct(writerTypeDescr, inputRow);
OrcStruct expectedStruct = assembleStruct(readTypeDescr, expectedOutput);
try {
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(writerTypeDescr)
.stripeSize(100000).bufferSize(10000)
.version(OrcFile.Version.CURRENT));
OrcMapredRecordWriter<OrcStruct> recordWriter =
new OrcMapredRecordWriter<OrcStruct>(writer);
recordWriter.write(NullWritable.get(), inputStruct);
recordWriter.close(mock(Reporter.class));
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
Reader.Options options = reader.options().schema(readTypeDescr);
if (sArg != null && sCols != null) {
options.searchArgument(sArg, sCols).allowSARGToFilter(false);
}
OrcMapredRecordReader<OrcStruct> recordReader =
new OrcMapredRecordReader<>(reader,
options.tolerateMissingSchema(tolerateSchema)
.forcePositionalEvolution(positional));
OrcStruct result = recordReader.createValue();
recordReader.next(recordReader.createKey(), result);
assertEquals(expectedStruct, result);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private OrcStruct assembleStruct(TypeDescription type, Object row) {
Preconditions.checkArgument(
type.getCategory() == Category.STRUCT, "Top level type must be STRUCT");
return (OrcStruct) assembleRecord(type, row);
}
private WritableComparable assembleRecord(TypeDescription type, Object row) {
if (row == null) {
return null;
}
switch (type.getCategory()) {
case STRUCT:
OrcStruct structResult = new OrcStruct(type);
for (int i = 0; i < structResult.getNumFields(); i++) {
List<TypeDescription> childTypes = type.getChildren();
structResult.setFieldValue(i,
assembleRecord(childTypes.get(i), ((List<Object>) row).get(i)));
}
return structResult;
case LIST:
OrcList<WritableComparable> listResult = new OrcList<>(type);
TypeDescription elemType = type.getChildren().get(0);
List<Object> elems = (List<Object>) row;
for (int i = 0; i < elems.size(); i++) {
listResult.add(assembleRecord(elemType, elems.get(i)));
}
return listResult;
case MAP:
OrcMap<WritableComparable, WritableComparable> mapResult =
new OrcMap<>(type);
TypeDescription keyType = type.getChildren().get(0);
TypeDescription valueType = type.getChildren().get(1);
for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) row)
.entrySet()) {
mapResult.put(assembleRecord(keyType, entry.getKey()),
assembleRecord(valueType, entry.getValue()));
}
return mapResult;
case INT:
return new IntWritable((Integer) row);
case DOUBLE:
return new DoubleWritable((Double) row);
case STRING:
return new Text((String) row);
default:
throw new UnsupportedOperationException(String
.format("Not expecting to have a field of type %s in unit tests",
type.getCategory()));
}
}
}
| 17,328 | 34.952282 | 87 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcList.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
public class TestOrcList {
static void cloneWritable(Writable source,
Writable destination) throws IOException {
DataOutputBuffer out = new DataOutputBuffer(1024);
source.write(out);
out.flush();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
destination.readFields(in);
}
@Test
public void testRead() throws IOException {
TypeDescription type =
TypeDescription.createList(TypeDescription.createInt());
OrcList<IntWritable> expected = new OrcList<>(type);
OrcList<IntWritable> actual = new OrcList<>(type);
expected.add(new IntWritable(123));
expected.add(new IntWritable(456));
expected.add(new IntWritable(789));
assertNotEquals(expected, actual);
cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.clear();
cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.add(null);
expected.add(new IntWritable(500));
cloneWritable(expected, actual);
assertEquals(expected, actual);
}
@Test
public void testCompare() {
TypeDescription schema = TypeDescription.fromString("array<string>");
OrcList<Text> left = new OrcList<>(schema);
assertEquals(-1, left.compareTo(null));
OrcList<Text> right = new OrcList<>(schema);
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
right.add(new Text("aa"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
left.add(new Text("aa"));
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
left.add(new Text("bb"));
right.add(new Text("cc"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
left.clear();
right.clear();
left.add(null);
right.add(null);
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
right.clear();
right.add(new Text("ddd"));
assertEquals(1, left.compareTo(right));
assertEquals(-1, right.compareTo(left));
}
@Test
public void testSchemaInCompare() {
TypeDescription leftType = TypeDescription.fromString("array<int>");
TypeDescription rightType = TypeDescription.fromString("array<string>");
OrcList leftList = new OrcList(leftType);
OrcList rightList = new OrcList(rightType);
assertEquals(-4, leftList.compareTo(rightList));
assertEquals(4, rightList.compareTo(leftList));
leftList.add(new IntWritable(123));
rightList.add(new Text("123"));
assertEquals(-4, leftList.compareTo(rightList));
assertEquals(4, rightList.compareTo(leftList));
}
}
| 3,992 | 34.972973 | 76 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
public class TestOrcMap {
@Test
public void testRead() throws IOException {
TypeDescription type =
TypeDescription.createMap(TypeDescription.createInt(),
TypeDescription.createLong());
OrcMap<IntWritable, LongWritable> expected = new OrcMap<>(type);
OrcMap<IntWritable, LongWritable> actual = new OrcMap<>(type);
expected.put(new IntWritable(999), new LongWritable(1111));
expected.put(new IntWritable(888), new LongWritable(2222));
expected.put(new IntWritable(777), new LongWritable(3333));
assertNotEquals(expected, actual);
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.clear();
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
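    // Null values should also survive the serialization round trip.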
expected.put(new IntWritable(666), null);
expected.put(new IntWritable(1), new LongWritable(777));
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
}
@Test
public void testCompare() {
TypeDescription schema = TypeDescription.fromString("map<string,string>");
OrcMap<Text,Text> left = new OrcMap<>(schema);
    assertEquals(-1, left.compareTo(null));
OrcMap<Text,Text> right = new OrcMap<>(schema);
// empty maps
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
// {} vs {"aa" -> null}
right.put(new Text("aa"), null);
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
// {"aa" -> null} vs {"aa" -> null}
left.put(new Text("aa"), null);
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
// {"aa" -> "bb"} vs {"aa" -> "bb"}
left.put(new Text("aa"), new Text("bb"));
right.put(new Text("aa"), new Text("bb"));
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
// {"aa" -> "bb"} vs {"aa" -> "cc"}
right.put(new Text("aa"), new Text("cc"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
// {"aa" -> "bb"} vs {"a" -> "zzz", "aa" -> "cc"}
right.put(new Text("a"), new Text("zzz"));
assertEquals(1, left.compareTo(right));
assertEquals(-1, right.compareTo(left));
// {"aa" -> null} vs {"aa" -> "bb"}
left.put(new Text("aa"), null);
right.remove(new Text("a"));
right.put(new Text("aa"), new Text("cc"));
assertEquals(1, left.compareTo(right));
assertEquals(-1, right.compareTo(left));
// {"aa" -> null, "bb" -> "cc"} vs {"aa" -> null, "bb" -> "dd"}
left.put(new Text("aa"), null);
left.put(new Text("bb"), new Text("cc"));
right.put(new Text("aa"), null);
right.put(new Text("bb"), new Text("dd"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
}
@Test
public void testStructKeys() {
TypeDescription schema = TypeDescription.fromString("map<struct<i:int>,string>");
OrcMap<OrcStruct, Text> map = new OrcMap<>(schema);
OrcStruct struct = new OrcStruct(schema.getChildren().get(0));
struct.setFieldValue(0, new IntWritable(12));
map.put(struct, new Text("a"));
assertEquals("a", map.get(struct).toString());
struct = new OrcStruct(schema.getChildren().get(0));
struct.setFieldValue(0, new IntWritable(14));
map.put(struct, new Text("b"));
assertEquals(2, map.size());
}
@Test
public void testListKeys() {
TypeDescription schema = TypeDescription.fromString("map<array<int>,string>");
OrcMap<OrcList, Text> map = new OrcMap<>(schema);
OrcList<IntWritable> list = new OrcList<>(schema.getChildren().get(0));
list.add(new IntWritable(123));
map.put(list, new Text("a"));
assertEquals("a", map.get(list).toString());
list = new OrcList<>(schema.getChildren().get(0));
list.add(new IntWritable(333));
map.put(list, new Text("b"));
assertEquals(2, map.size());
assertEquals("b", map.get(list).toString());
}
@Test
public void testUnionKeys() {
TypeDescription schema = TypeDescription.fromString("map<uniontype<int,string>,string>");
OrcMap<OrcUnion, Text> map = new OrcMap<>(schema);
OrcUnion un = new OrcUnion(schema.getChildren().get(0));
un.set(0, new IntWritable(123));
map.put(un, new Text("hi"));
un = new OrcUnion(schema.getChildren().get(0));
un.set(1, new Text("aaaa"));
map.put(un, new Text("google"));
assertEquals(2, map.size());
assertEquals("google", map.get(un).toString());
}
@Test
public void testMapKeys() {
TypeDescription schema = TypeDescription.fromString("map<map<string,string>,string>");
OrcMap<OrcMap<Text,Text>, Text> left = new OrcMap<>(schema);
assertEquals(-1, left.compareTo(null));
OrcMap<OrcMap<Text,Text>, Text> right = new OrcMap<>(schema);
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
OrcMap<Text,Text> item = new OrcMap<>(schema.getChildren().get(0));
item.put(new Text("aa"), new Text("bb"));
left.put(item, new Text("cc"));
assertEquals(1, left.compareTo(right));
assertEquals(-1, right.compareTo(left));
item = new OrcMap<>(schema.getChildren().get(0));
item.put(new Text("aa"), new Text("dd"));
right.put(item, new Text("bb"));
assertEquals(-2, left.compareTo(right));
assertEquals(2, right.compareTo(left));
}
@Test
public void testSchemaInCompare() {
TypeDescription leftType = TypeDescription.fromString("map<string,int>");
TypeDescription rightType = TypeDescription.fromString("map<string,string>");
OrcMap left = new OrcMap(leftType);
OrcMap right = new OrcMap(rightType);
assertEquals(-4, left.compareTo(right));
assertEquals(4, right.compareTo(left));
left.put(new Text("123"), new IntWritable(123));
right.put(new Text("123"), new Text("123"));
assertEquals(-4, left.compareTo(right));
assertEquals(4, right.compareTo(left));
}
}
| 7,140 | 36.584211 | 93 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.ByteWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.util.Progressable;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestOrcOutputFormat {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
JobConf conf = new JobConf();
FileSystem fs;
{
try {
fs = FileSystem.getLocal(conf).getRaw();
fs.delete(workDir, true);
fs.mkdirs(workDir);
} catch (IOException e) {
throw new IllegalStateException("bad fs init", e);
}
}
static class NullOutputCommitter extends OutputCommitter {
@Override
public void setupJob(JobContext jobContext) {
// PASS
}
@Override
public void setupTask(TaskAttemptContext taskAttemptContext) {
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskAttemptContext) {
return false;
}
@Override
public void commitTask(TaskAttemptContext taskAttemptContext) {
// PASS
}
@Override
public void abortTask(TaskAttemptContext taskAttemptContext) {
// PASS
}
}
@Test
public void testAllTypes() throws Exception {
conf.set("mapreduce.task.attempt.id", "attempt_20160101_0001_m_000001_0");
conf.setOutputCommitter(NullOutputCommitter.class);
final String typeStr = "struct<b1:binary,b2:boolean,b3:tinyint," +
"c:char(10),d1:date,d2:decimal(20,5),d3:double,fff:float,int:int," +
"l:array<bigint>,map:map<smallint,string>," +
"str:struct<u:uniontype<timestamp,varchar(100)>>,ts:timestamp>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, typeStr);
FileOutputFormat.setOutputPath(conf, workDir);
TypeDescription type = TypeDescription.fromString(typeStr);
// build a row object
OrcStruct row = (OrcStruct) OrcStruct.createValue(type);
((BytesWritable) row.getFieldValue(0)).set(new byte[]{1,2,3,4}, 0, 4);
((BooleanWritable) row.getFieldValue(1)).set(true);
((ByteWritable) row.getFieldValue(2)).set((byte) 23);
((Text) row.getFieldValue(3)).set("aaabbbcccddd");
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd");
((DateWritable) row.getFieldValue(4)).set(DateWritable.millisToDays
(format.parse("2016-04-01").getTime()));
((HiveDecimalWritable) row.getFieldValue(5)).set(new HiveDecimalWritable("1.23"));
((DoubleWritable) row.getFieldValue(6)).set(1.5);
((FloatWritable) row.getFieldValue(7)).set(4.5f);
((IntWritable) row.getFieldValue(8)).set(31415);
OrcList<LongWritable> longList = (OrcList<LongWritable>) row.getFieldValue(9);
longList.add(new LongWritable(123));
longList.add(new LongWritable(456));
OrcMap<ShortWritable,Text> map = (OrcMap<ShortWritable,Text>) row.getFieldValue(10);
map.put(new ShortWritable((short) 1000), new Text("aaaa"));
map.put(new ShortWritable((short) 123), new Text("bbbb"));
OrcStruct struct = (OrcStruct) row.getFieldValue(11);
OrcUnion union = (OrcUnion) struct.getFieldValue(0);
union.set((byte) 1, new Text("abcde"));
((OrcTimestamp) row.getFieldValue(12)).set("1996-12-11 15:00:00");
NullWritable nada = NullWritable.get();
RecordWriter<NullWritable, OrcStruct> writer =
new OrcOutputFormat<OrcStruct>().getRecordWriter(fs, conf, "all.orc",
Reporter.NULL);
for(int r=0; r < 10; ++r) {
row.setFieldValue(8, new IntWritable(r * 10));
writer.write(nada, row);
}
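    // Switch the union to its timestamp branch for the next ten rows.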
union.set((byte) 0, new OrcTimestamp("2011-12-25 12:34:56"));
for(int r=0; r < 10; ++r) {
row.setFieldValue(8, new IntWritable(r * 10 + 100));
writer.write(nada, row);
}
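    // Add an all-null row followed by one more row with the int column set to 210.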
OrcStruct row2 = new OrcStruct(type);
writer.write(nada, row2);
row.setFieldValue(8, new IntWritable(210));
writer.write(nada, row);
writer.close(Reporter.NULL);
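    // Read everything back; row 20 should be the all-null row.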
FileSplit split = new FileSplit(new Path(workDir, "all.orc"), 0, 100000,
new String[0]);
RecordReader<NullWritable, OrcStruct> reader =
new OrcInputFormat<OrcStruct>().getRecordReader(split, conf,
Reporter.NULL);
nada = reader.createKey();
row = reader.createValue();
for(int r=0; r < 22; ++r) {
assertTrue(reader.next(nada, row));
if (r == 20) {
for(int c=0; c < 12; ++c) {
assertNull(row.getFieldValue(c));
}
} else {
assertEquals(new BytesWritable(new byte[]{1, 2, 3, 4}), row.getFieldValue(0));
assertEquals(new BooleanWritable(true), row.getFieldValue(1));
assertEquals(new ByteWritable((byte) 23), row.getFieldValue(2));
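        // The char(10) column truncates the original "aaabbbcccddd" to ten characters.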
assertEquals(new Text("aaabbbcccd"), row.getFieldValue(3));
assertEquals(new DateWritable(DateWritable.millisToDays
(format.parse("2016-04-01").getTime())), row.getFieldValue(4));
assertEquals(new HiveDecimalWritable("1.23"), row.getFieldValue(5));
assertEquals(new DoubleWritable(1.5), row.getFieldValue(6));
assertEquals(new FloatWritable(4.5f), row.getFieldValue(7));
assertEquals(new IntWritable(r * 10), row.getFieldValue(8));
assertEquals(longList, row.getFieldValue(9));
assertEquals(map, row.getFieldValue(10));
if (r < 10) {
union.set((byte) 1, new Text("abcde"));
} else {
union.set((byte) 0, new OrcTimestamp("2011-12-25 12:34:56"));
}
assertEquals(struct, row.getFieldValue(11), "row " + r);
assertEquals(new OrcTimestamp("1996-12-11 15:00:00"),
row.getFieldValue(12), "row " + r);
}
}
assertFalse(reader.next(nada, row));
}
/**
* Test the case where the top level isn't a struct, but a long.
*/
@Test
public void testLongRoot() throws Exception {
conf.set("mapreduce.task.attempt.id", "attempt_20160101_0001_m_000001_0");
conf.setOutputCommitter(NullOutputCommitter.class);
conf.set(OrcConf.COMPRESS.getAttribute(), "SNAPPY");
conf.setInt(OrcConf.ROW_INDEX_STRIDE.getAttribute(), 1000);
conf.setInt(OrcConf.BUFFER_SIZE.getAttribute(), 64 * 1024);
conf.set(OrcConf.WRITE_FORMAT.getAttribute(), "0.11");
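    // These writer settings (compression, stride, buffer size, file version)
    // are verified against the file metadata after writing.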
final String typeStr = "bigint";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, typeStr);
FileOutputFormat.setOutputPath(conf, workDir);
LongWritable value = new LongWritable();
NullWritable nada = NullWritable.get();
RecordWriter<NullWritable, LongWritable> writer =
new OrcOutputFormat<LongWritable>().getRecordWriter(fs, conf,
"long.orc", Reporter.NULL);
for(long lo=0; lo < 2000; ++lo) {
value.set(lo);
writer.write(nada, value);
}
writer.close(Reporter.NULL);
Path path = new Path(workDir, "long.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(CompressionKind.SNAPPY, file.getCompressionKind());
assertEquals(2000, file.getNumberOfRows());
assertEquals(1000, file.getRowIndexStride());
assertEquals(64 * 1024, file.getCompressionSize());
assertEquals(OrcFile.Version.V_0_11, file.getFileVersion());
FileSplit split = new FileSplit(path, 0, 100000,
new String[0]);
RecordReader<NullWritable, LongWritable> reader =
new OrcInputFormat<LongWritable>().getRecordReader(split, conf,
Reporter.NULL);
nada = reader.createKey();
value = reader.createValue();
for(long lo=0; lo < 2000; ++lo) {
assertTrue(reader.next(nada, value));
assertEquals(lo, value.get());
}
assertFalse(reader.next(nada, value));
}
/**
* Make sure that the writer ignores the OrcKey
* @throws Exception
*/
@Test
public void testOrcKey() throws Exception {
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
conf.set("mapreduce.task.attempt.id", "attempt_jt0_0_m_0_0");
String TYPE_STRING = "struct<i:int,s:string>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, TYPE_STRING);
conf.setOutputCommitter(NullOutputCommitter.class);
TypeDescription schema = TypeDescription.fromString(TYPE_STRING);
OrcKey key = new OrcKey(new OrcStruct(schema));
RecordWriter<NullWritable, Writable> writer =
new OrcOutputFormat<>().getRecordWriter(fs, conf, "key.orc",
Reporter.NULL);
NullWritable nada = NullWritable.get();
for(int r=0; r < 2000; ++r) {
((OrcStruct) key.key).setAllFields(new IntWritable(r),
new Text(Integer.toString(r)));
writer.write(nada, key);
}
writer.close(Reporter.NULL);
Path path = new Path(workDir, "key.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(2000, file.getNumberOfRows());
assertEquals(TYPE_STRING, file.getSchema().toString());
}
/**
* Make sure that the writer ignores the OrcValue
* @throws Exception
*/
@Test
public void testOrcValue() throws Exception {
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
conf.set("mapreduce.task.attempt.id", "attempt_jt0_0_m_0_0");
String TYPE_STRING = "struct<i:int>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, TYPE_STRING);
conf.setOutputCommitter(NullOutputCommitter.class);
TypeDescription schema = TypeDescription.fromString(TYPE_STRING);
OrcValue value = new OrcValue(new OrcStruct(schema));
RecordWriter<NullWritable, Writable> writer =
new OrcOutputFormat<>().getRecordWriter(fs, conf, "value.orc",
Reporter.NULL);
NullWritable nada = NullWritable.get();
for(int r=0; r < 3000; ++r) {
((OrcStruct) value.value).setAllFields(new IntWritable(r));
writer.write(nada, value);
}
writer.close(Reporter.NULL);
Path path = new Path(workDir, "value.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(OrcConf.ROW_BATCH_SIZE.getDefaultValue(), file.options().getRowBatchSize());
assertEquals(3000, file.getNumberOfRows());
assertEquals(TYPE_STRING, file.getSchema().toString());
}
/**
* Make sure that the ORC writer is initialized with a configured row batch size
* @throws Exception
*/
@Test
public void testOrcOutputFormatWithRowBatchSize() throws Exception {
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
conf.set("mapreduce.task.attempt.id", "attempt_jt0_0_m_0_0");
OrcConf.ROW_BATCH_SIZE.setInt(conf, 128);
String TYPE_STRING = "struct<i:int,s:string>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, TYPE_STRING);
conf.setOutputCommitter(NullOutputCommitter.class);
TypeDescription schema = TypeDescription.fromString(TYPE_STRING);
OrcKey key = new OrcKey(new OrcStruct(schema));
RecordWriter<NullWritable, Writable> writer =
new OrcOutputFormat<>().getRecordWriter(fs, conf, "key.orc",
Reporter.NULL);
NullWritable nada = NullWritable.get();
for(int r=0; r < 2000; ++r) {
((OrcStruct) key.key).setAllFields(new IntWritable(r),
new Text(Integer.toString(r)));
writer.write(nada, key);
}
writer.close(Reporter.NULL);
Path path = new Path(workDir, "key.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(128, file.options().getRowBatchSize());
assertEquals(2000, file.getNumberOfRows());
assertEquals(TYPE_STRING, file.getSchema().toString());
}
}
| 13,743 | 39.904762 | 93 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MultiValuedColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.io.IntWritable;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.util.ArrayList;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.times;
public class TestOrcRecordWriter {
/**
* Test finding the multi-value columns.
*/
@Test
public void testFindingMultiValueColumns() {
// Make sure that we find all the multi-value columns from a batch.
TypeDescription schema = TypeDescription.fromString("struct<x:struct<" +
"x:uniontype<int,array<array<int>>,map<array<int>,array<int>>>>>");
VectorizedRowBatch batch = schema.createRowBatchV2();
List<MultiValuedColumnVector> result = new ArrayList<>();
OrcMapredRecordWriter.addVariableLengthColumns(result, batch);
assertEquals(5, result.size());
assertEquals(ColumnVector.Type.LIST, result.get(0).type);
assertEquals(ColumnVector.Type.LIST, result.get(1).type);
assertEquals(ColumnVector.Type.MAP, result.get(2).type);
assertEquals(ColumnVector.Type.LIST, result.get(3).type);
assertEquals(ColumnVector.Type.LIST, result.get(4).type);
}
/**
* Test the child element limit flushes the writer.
*/
@Test
public void testChildElementLimit() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:array<int>>");
Writer mockWriter = Mockito.mock(Writer.class);
Mockito.when(mockWriter.getSchema()).thenReturn(schema);
OrcMapredRecordWriter<OrcStruct> recordWriter =
new OrcMapredRecordWriter<>(mockWriter, 1024, 10);
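    // A child-element limit of 10 with two list entries per row should make
    // the writer flush after every five rows.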
OrcStruct record = new OrcStruct(schema);
OrcList list = new OrcList(schema.getChildren().get(0));
record.setFieldValue(0, list);
list.add(new IntWritable(1));
list.add(new IntWritable(2));
Mockito.verify(mockWriter, times(0)).addRowBatch(any());
for(int i=0; i < 11; i++) {
recordWriter.write(null, record);
}
// We've written 11 rows with 2 integers each, so we should have written
// 2 batches of 5 rows.
Mockito.verify(mockWriter, times(2)).addRowBatch(any());
}
}
| 3,276 | 38.963415 | 80 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcStruct.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import com.google.common.io.Files;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class TestOrcStruct {
@Test
public void testRead() throws IOException {
TypeDescription type =
TypeDescription.createStruct()
.addField("f1", TypeDescription.createInt())
.addField("f2", TypeDescription.createLong())
.addField("f3", TypeDescription.createString());
OrcStruct expected = new OrcStruct(type);
OrcStruct actual = new OrcStruct(type);
assertEquals(3, expected.getNumFields());
expected.setFieldValue(0, new IntWritable(1));
expected.setFieldValue(1, new LongWritable(2));
expected.setFieldValue(2, new Text("wow"));
assertEquals(178524, expected.hashCode());
assertNotEquals(expected, actual);
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.setFieldValue(0, null);
expected.setFieldValue(1, null);
expected.setFieldValue(2, null);
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
assertEquals(29791, expected.hashCode());
expected.setFieldValue(1, new LongWritable(111));
assertEquals(111, ((LongWritable) expected.getFieldValue(1)).get());
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
}
@Test
public void testMapredRead() throws Exception {
TypeDescription internalStruct_0 = TypeDescription.createStruct()
.addField("field0", TypeDescription.createString())
.addField("field1", TypeDescription.createBoolean());
TypeDescription internalStruct_1 = TypeDescription.createStruct();
TypeDescription internalStruct_2 = TypeDescription.createStruct().addField("f0", TypeDescription.createInt());
TypeDescription unionWithMultipleStruct = TypeDescription.createUnion()
.addUnionChild(internalStruct_0)
.addUnionChild(internalStruct_1)
.addUnionChild(internalStruct_2);
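    // The union offers three struct alternatives: a two-field struct,
    // an empty struct, and a single-int struct.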
OrcStruct o1 = new OrcStruct(internalStruct_0);
o1.setFieldValue("field0", new Text("key"));
o1.setFieldValue("field1", new BooleanWritable(true));
OrcStruct o2 = new OrcStruct(internalStruct_0);
o2.setFieldValue("field0", new Text("key_1"));
o2.setFieldValue("field1", new BooleanWritable(false));
OrcStruct o3 = new OrcStruct(TypeDescription.createStruct());
OrcStruct o4 = new OrcStruct(internalStruct_2);
o4.setFieldValue("f0", new IntWritable(1));
OrcUnion u1 = new OrcUnion(unionWithMultipleStruct);
u1.set(0, o1);
OrcUnion u2 = new OrcUnion(unionWithMultipleStruct);
u2.set(0, o2);
OrcUnion u3 = new OrcUnion(unionWithMultipleStruct);
u3.set(1, o3);
OrcUnion u4 = new OrcUnion(unionWithMultipleStruct);
u4.set(2, o4);
File testFolder = Files.createTempDir();
testFolder.deleteOnExit();
Path testFilePath = new Path(testFolder.getAbsolutePath(), "testFile");
Configuration conf = new Configuration();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf).setSchema(unionWithMultipleStruct)
.stripeSize(100000).bufferSize(10000)
.version(OrcFile.Version.CURRENT));
OrcMapredRecordWriter<OrcUnion> recordWriter =
new OrcMapredRecordWriter<>(writer);
recordWriter.write(NullWritable.get(), u1);
recordWriter.write(NullWritable.get(), u2);
recordWriter.write(NullWritable.get(), u3);
recordWriter.write(NullWritable.get(), u4);
recordWriter.close(null);
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(FileSystem.getLocal(conf)));
Reader.Options options = reader.options().schema(unionWithMultipleStruct);
OrcMapredRecordReader<OrcUnion> recordReader = new OrcMapredRecordReader<>(reader,options);
OrcUnion result = recordReader.createValue();
recordReader.next(recordReader.createKey(), result);
assertEquals(result, u1);
recordReader.next(recordReader.createKey(), result);
assertEquals(result, u2);
recordReader.next(recordReader.createKey(), result);
assertEquals(result, u3);
recordReader.next(recordReader.createKey(), result);
assertEquals(result, u4);
}
@Test
public void testFieldAccess() {
OrcStruct struct = new OrcStruct(TypeDescription.fromString
("struct<i:int,j:double,k:string>"));
struct.setFieldValue("j", new DoubleWritable(1.5));
struct.setFieldValue("k", new Text("Moria"));
struct.setFieldValue(0, new IntWritable(42));
assertEquals(new IntWritable(42), struct.getFieldValue("i"));
assertEquals(new DoubleWritable(1.5), struct.getFieldValue(1));
assertEquals(new Text("Moria"), struct.getFieldValue("k"));
struct.setAllFields(new IntWritable(123), new DoubleWritable(4.5),
new Text("ok"));
assertEquals("123", struct.getFieldValue(0).toString());
assertEquals("4.5", struct.getFieldValue(1).toString());
assertEquals("ok", struct.getFieldValue(2).toString());
}
@Test
public void testBadFieldRead() {
OrcStruct struct = new OrcStruct(TypeDescription.fromString
("struct<i:int,j:double,k:string>"));
assertThrows(IllegalArgumentException.class, () -> {
struct.getFieldValue("bad");
});
}
@Test
public void testBadFieldWrite() {
OrcStruct struct = new OrcStruct(TypeDescription.fromString
("struct<i:int,j:double,k:string>"));
assertThrows(IllegalArgumentException.class, () -> {
struct.setFieldValue("bad", new Text("foobar"));
});
}
@Test
public void testCompare() {
OrcStruct left = new OrcStruct(TypeDescription.fromString
("struct<i:int,j:string>"));
    assertEquals(-1, left.compareTo(null));
OrcStruct right = new OrcStruct(TypeDescription.fromString
("struct<i:int,j:string,k:int>"));
left.setFieldValue(0, new IntWritable(10));
right.setFieldValue(0, new IntWritable(12));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
right.setFieldValue(0, new IntWritable(10));
left.setFieldValue(1, new Text("a"));
right.setFieldValue(1, new Text("b"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
right.setFieldValue(1, new Text("a"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
right = new OrcStruct(TypeDescription.fromString
("struct<i:int,j:string>"));
left.setFieldValue(0, null);
left.setFieldValue(1, null);
assertEquals(0, left.compareTo(right));
assertEquals(0, right.compareTo(left));
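    // A null field sorts after a non-null value.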
right.setFieldValue(0, new IntWritable(12));
    assertEquals(1, left.compareTo(right));
assertEquals(-1, right.compareTo(left));
}
@Test
public void testSchemaInCompare() {
TypeDescription leftType = TypeDescription.fromString("struct<s:string,i:int>");
TypeDescription rightType = TypeDescription.fromString("struct<s:string,j:bigint>");
OrcStruct left = new OrcStruct(leftType);
OrcStruct right = new OrcStruct(rightType);
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
left.setAllFields(new Text("123"), new IntWritable(123));
right.setAllFields(new Text("123"), new LongWritable(456));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
}
}
| 8,929 | 39.044843 | 122 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcTimestamp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
public class TestOrcTimestamp {
@Test
public void testRead() throws IOException {
OrcTimestamp expected = new OrcTimestamp("2016-04-01 12:34:56.9");
OrcTimestamp actual = new OrcTimestamp();
assertNotEquals(expected, actual);
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
assertEquals("2016-04-01 12:34:56.9", actual.toString());
}
}
| 1,426 | 34.675 | 75 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapred/TestOrcUnion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapred;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
public class TestOrcUnion {
@Test
public void testRead() throws IOException {
TypeDescription type =
TypeDescription.fromString("uniontype<int,bigint,string>");
OrcUnion expected = new OrcUnion(type);
OrcUnion actual = new OrcUnion(type);
expected.set((byte) 2, new Text("foo"));
assertEquals(131367, expected.hashCode());
assertNotEquals(expected, actual);
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.set((byte) 0, new IntWritable(111));
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.set((byte)1, new LongWritable(4567));
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.set((byte) 1, new LongWritable(12345));
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
expected.set((byte) 1, null);
TestOrcList.cloneWritable(expected, actual);
assertEquals(expected, actual);
}
@Test
public void testCompare() {
TypeDescription schema =
TypeDescription.fromString("uniontype<int,string,bigint>");
OrcUnion left = new OrcUnion(schema);
OrcUnion right = new OrcUnion(schema);
    assertEquals(-1, left.compareTo(null));
assertEquals(0, left.compareTo(right));
left.set(1, new IntWritable(10));
right.set(1, new IntWritable(12));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
right.set(2, new Text("a"));
assertEquals(-1, left.compareTo(right));
assertEquals(1, right.compareTo(left));
}
@Test
public void testSchemaInCompare() {
TypeDescription leftType = TypeDescription.fromString("uniontype<string,tinyint>");
TypeDescription rightType = TypeDescription.fromString("uniontype<string,bigint>");
OrcUnion left = new OrcUnion(leftType);
OrcUnion right = new OrcUnion(rightType);
assertEquals(-3, left.compareTo(right));
assertEquals(3, right.compareTo(left));
left.set(0, new Text("123"));
right.set(0, new Text("1"));
assertEquals(-3, left.compareTo(right));
assertEquals(3, right.compareTo(left));
}
}
| 3,365 | 35.586957 | 87 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapreduce/FilterTestUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.mapred.OrcStruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class FilterTestUtil {
private final static Logger LOG = LoggerFactory.getLogger(FilterTestUtil.class);
public static final TypeDescription schema = TypeDescription.createStruct()
.addField("f1", TypeDescription.createLong())
.addField("f2", TypeDescription.createDecimal().withPrecision(20).withScale(6))
.addField("f3", TypeDescription.createLong())
.addField("f4", TypeDescription.createString())
.addField("ridx", TypeDescription.createLong());
public static final long RowCount = 4000000L;
private static final int scale = 3;
public static void createFile(Configuration conf, FileSystem fs, Path filePath)
throws IOException {
if (fs.exists(filePath)) {
return;
}
LOG.info("Creating file {} with schema {}", filePath, schema);
try (Writer writer = OrcFile.createWriter(filePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.overwrite(true)
.rowIndexStride(8192)
.setSchema(schema))) {
Random rnd = new Random(1024);
VectorizedRowBatch b = schema.createRowBatch();
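      // Each row carries the same random long in f1-f4 (as long, decimal,
      // long and string) and the row index in ridx.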
for (int rowIdx = 0; rowIdx < RowCount; rowIdx++) {
long v = rnd.nextLong();
for (int colIdx = 0; colIdx < schema.getChildren().size() - 1; colIdx++) {
switch (schema.getChildren().get(colIdx).getCategory()) {
case LONG:
((LongColumnVector) b.cols[colIdx]).vector[b.size] = v;
break;
case DECIMAL:
HiveDecimalWritable d = new HiveDecimalWritable();
d.setFromLongAndScale(v, scale);
((DecimalColumnVector) b.cols[colIdx]).vector[b.size] = d;
break;
case STRING:
((BytesColumnVector) b.cols[colIdx]).setVal(b.size,
String.valueOf(v)
.getBytes(StandardCharsets.UTF_8));
break;
default:
throw new IllegalArgumentException();
}
}
// Populate the rowIdx
((LongColumnVector) b.cols[4]).vector[b.size] = rowIdx;
b.size += 1;
if (b.size == b.getMaxSize()) {
writer.addRowBatch(b);
b.reset();
}
}
if (b.size > 0) {
writer.addRowBatch(b);
b.reset();
}
}
LOG.info("Created file {}", filePath);
}
public static void validateRow(OrcStruct row, long expId) {
HiveDecimalWritable d = new HiveDecimalWritable();
if (expId > 0) {
assertEquals(expId, ((LongWritable) row.getFieldValue(4)).get());
}
for (int i = 0; i < row.getNumFields(); i++) {
long expValue = ((LongWritable) row.getFieldValue(0)).get();
d.setFromLongAndScale(expValue, scale);
assertEquals(d, row.getFieldValue(1));
assertEquals(expValue, ((LongWritable) row.getFieldValue(2)).get());
assertEquals(String.valueOf(expValue),
row.getFieldValue(3).toString());
}
}
public static void validateLimitedRow(OrcStruct row, long expId) {
HiveDecimalWritable d = new HiveDecimalWritable();
if (expId > 0) {
assertEquals(expId, ((LongWritable) row.getFieldValue(4)).get());
}
for (int i = 0; i < row.getNumFields(); i++) {
long expValue = ((LongWritable) row.getFieldValue(0)).get();
d.setFromLongAndScale(expValue, scale);
assertEquals(d, row.getFieldValue(1));
assertEquals(expValue, ((LongWritable) row.getFieldValue(2)).get());
}
}
public static void validateRow(OrcStruct row) {
validateRow(row, -1);
}
public static double readPercentage(FileSystem.Statistics stats, long fileSize) {
double p = stats.getBytesRead() * 100.0 / fileSize;
LOG.info(String.format("FileSize: %d%nReadSize: %d%nRead %%: %.2f",
fileSize,
stats.getBytesRead(),
p));
return p;
}
public static void readStart() {
FileSystem.clearStatistics();
}
public static FileSystem.Statistics readEnd() {
return FileSystem.getAllStatistics().get(0);
}
}
| 6,039 | 37.227848 | 95 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapreduce/TestMapReduceFiltering.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.orc.OrcConf;
import org.apache.orc.mapred.OrcStruct;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import static org.apache.orc.mapreduce.FilterTestUtil.RowCount;
import static org.apache.orc.mapreduce.FilterTestUtil.validateLimitedRow;
import static org.apache.orc.mapreduce.FilterTestUtil.validateRow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestMapReduceFiltering {
private static final Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test"
+ File.separator + "tmp"));
private static Configuration conf;
private static FileSystem fs;
private static final Path filePath = new Path(workDir, "mapreduce_skip_file.orc");
@BeforeAll
public static void setup() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
FilterTestUtil.createFile(conf, fs, filePath);
}
@Test
public void readWithSArg() throws IOException, InterruptedException {
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
OrcConf.ALLOW_SARG_TO_FILTER.setBoolean(conf, false);
OrcConf.INCLUDE_COLUMNS.setString(conf, "0,1,2,3,4");
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 0L)
.build(),
new String[] {"f1"});
FileSplit split = new FileSplit(filePath,
0, fs.getFileStatus(filePath).getLen(),
new String[0]);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
FilterTestUtil.readStart();
org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> r =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
long rowCount = validateFilteredRecordReader(r);
double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
fs.getFileStatus(filePath).getLen());
assertEquals(FilterTestUtil.RowCount, rowCount);
assertTrue(p >= 100);
}
@Test
public void readWithSArgAsFilter() throws IOException, InterruptedException {
TaskAttemptID id = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
OrcConf.ALLOW_SARG_TO_FILTER.setBoolean(conf, true);
OrcConf.INCLUDE_COLUMNS.setString(conf, "0,1,2,3,4");
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
.in("f1", PredicateLeaf.Type.LONG, 0L)
.build(),
new String[] {"f1"});
FileSplit split = new FileSplit(filePath,
0, fs.getFileStatus(filePath).getLen(),
new String[0]);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
FilterTestUtil.readStart();
org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> r =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
long rowCount = validateFilteredRecordReader(r);
double p = FilterTestUtil.readPercentage(FilterTestUtil.readEnd(),
fs.getFileStatus(filePath).getLen());
assertEquals(0, rowCount);
assertTrue(p < 30);
}
@Test
public void readSingleRowWFilter() throws IOException, InterruptedException {
int cnt = 100;
Random r = new Random(cnt);
long ridx = 0;
while (cnt > 0) {
ridx = r.nextInt((int) RowCount);
testSingleRowWfilter(ridx);
cnt -= 1;
}
}
private void testSingleRowWfilter(long idx) throws IOException, InterruptedException {
TaskAttemptID id = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
OrcConf.ALLOW_SARG_TO_FILTER.setBoolean(conf, true);
OrcConf.INCLUDE_COLUMNS.setString(conf, "0,1,2,4");
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
.in("ridx", PredicateLeaf.Type.LONG, idx)
.build(),
new String[] {"ridx"});
FileSplit split = new FileSplit(filePath,
0, fs.getFileStatus(filePath).getLen(),
new String[0]);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
FilterTestUtil.readStart();
org.apache.hadoop.mapreduce.RecordReader<NullWritable, OrcStruct> r =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
long rowCount = 0;
while (r.nextKeyValue()) {
validateLimitedRow(r.getCurrentValue(), idx);
rowCount += 1;
}
r.close();
assertEquals(1, rowCount);
}
private static long validateFilteredRecordReader(org.apache.hadoop.mapreduce.RecordReader<NullWritable
, OrcStruct> rr)
throws IOException, InterruptedException {
long rowCount = 0;
while (rr.nextKeyValue()) {
validateRow(rr.getCurrentValue());
rowCount += 1;
}
return rowCount;
}
}
| 7,153 | 42.357576 | 104 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapreduce/TestMapreduceOrcOutputFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcValue;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestMapreduceOrcOutputFormat {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
JobConf conf = new JobConf();
FileSystem fs;
{
try {
fs = FileSystem.getLocal(conf).getRaw();
fs.delete(workDir, true);
fs.mkdirs(workDir);
} catch (IOException e) {
throw new IllegalStateException("bad fs init", e);
}
}
@Test
public void testPredicatePushdown() throws Exception {
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
final String typeStr = "struct<i:int,s:string>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, typeStr);
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
conf.setInt(OrcConf.ROW_INDEX_STRIDE.getAttribute(), 1000);
conf.setBoolean(OrcConf.ALLOW_SARG_TO_FILTER.getAttribute(), false);
conf.setBoolean(OrcOutputFormat.SKIP_TEMP_DIRECTORY, true);
OutputFormat<NullWritable, OrcStruct> outputFormat =
new OrcOutputFormat<OrcStruct>();
RecordWriter<NullWritable, OrcStruct> writer =
outputFormat.getRecordWriter(attemptContext);
// write 4000 rows with the integer and the binary string
TypeDescription type = TypeDescription.fromString(typeStr);
OrcStruct row = (OrcStruct) OrcStruct.createValue(type);
NullWritable nada = NullWritable.get();
for(int r=0; r < 4000; ++r) {
row.setFieldValue(0, new IntWritable(r));
row.setFieldValue(1, new Text(Integer.toBinaryString(r)));
writer.write(nada, row);
}
writer.close(attemptContext);
OrcInputFormat.setSearchArgument(conf,
SearchArgumentFactory.newBuilder()
            .between("i", PredicateLeaf.Type.LONG, 1500L, 1999L)
.build(), new String[]{null, "i", "s"});
FileSplit split = new FileSplit(new Path(workDir, "part-m-00000.orc"),
0, 1000000, new String[0]);
RecordReader<NullWritable, OrcStruct> reader =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
    // the sarg covers rows 1500-1999, but with a 1000-row index stride the
    // reader returns the whole row group 1000-1999
for(int r=1000; r < 2000; ++r) {
assertTrue(reader.nextKeyValue());
row = reader.getCurrentValue();
assertEquals(r, ((IntWritable) row.getFieldValue(0)).get());
assertEquals(Integer.toBinaryString(r), row.getFieldValue(1).toString());
}
assertFalse(reader.nextKeyValue());
}
@Test
public void testColumnSelection() throws Exception {
String typeStr = "struct<i:int,j:int,k:int>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, typeStr);
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
conf.setInt(OrcConf.ROW_INDEX_STRIDE.getAttribute(), 1000);
conf.setBoolean(OrcOutputFormat.SKIP_TEMP_DIRECTORY, true);
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 1);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
OutputFormat<NullWritable, OrcStruct> outputFormat =
new OrcOutputFormat<OrcStruct>();
RecordWriter<NullWritable, OrcStruct> writer =
outputFormat.getRecordWriter(attemptContext);
    // write 3000 rows with three integer columns
TypeDescription type = TypeDescription.fromString(typeStr);
OrcStruct row = (OrcStruct) OrcStruct.createValue(type);
NullWritable nada = NullWritable.get();
for(int r=0; r < 3000; ++r) {
row.setFieldValue(0, new IntWritable(r));
row.setFieldValue(1, new IntWritable(r * 2));
row.setFieldValue(2, new IntWritable(r * 3));
writer.write(nada, row);
}
writer.close(attemptContext);
conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute(), "0,2");
FileSplit split = new FileSplit(new Path(workDir, "part-m-00000.orc"),
0, 1000000, new String[0]);
RecordReader<NullWritable, OrcStruct> reader =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
    // all 3000 rows come back, but only the selected columns 0 and 2 should be populated
for(int r=0; r < 3000; ++r) {
assertTrue(reader.nextKeyValue());
row = reader.getCurrentValue();
assertEquals(r, ((IntWritable) row.getFieldValue(0)).get());
assertNull(row.getFieldValue(1));
assertEquals(r * 3, ((IntWritable) row.getFieldValue(2)).get());
}
assertFalse(reader.nextKeyValue());
}
@Test
public void testAcidSelectionNoSchema() throws IOException, InterruptedException {
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 1);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
// struct<operation:int,originalTransaction:bigint,bucket:int,rowId:bigint,currentTransaction:bigint,
// row:struct<i:int,j:int,k:int>>
conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute(), "5");
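    // Column 5 is the nested "row" struct in the ACID schema described above.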
// Do not set OrcConf.MAPRED_INPUT_SCHEMA (reader should use file schema instead)
FileSplit split = new FileSplit(new Path(getClass().getClassLoader().
getSystemResource("acid5k.orc").getPath()),
0, 1000000, new String[0]);
RecordReader<NullWritable, OrcStruct> reader =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
// Make sure we can read all rows
OrcStruct row;
for (int r=0; r < 5000; ++r) {
assertTrue(reader.nextKeyValue());
row = reader.getCurrentValue();
assertEquals(6, row.getNumFields());
OrcStruct innerRow = (OrcStruct) row.getFieldValue(5);
assertEquals(3,innerRow.getNumFields());
assertTrue(((IntWritable)innerRow.getFieldValue(0)).get() >= 0);
assertTrue(((IntWritable)innerRow.getFieldValue(1)).get() >= 0);
assertTrue(((IntWritable)innerRow.getFieldValue(2)).get() >= 0);
}
}
@Test
public void testColumnSelectionBlank() throws Exception {
String typeStr = "struct<i:int,j:int,k:int>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, typeStr);
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
conf.setInt(OrcConf.ROW_INDEX_STRIDE.getAttribute(), 1000);
conf.setBoolean(OrcOutputFormat.SKIP_TEMP_DIRECTORY, true);
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 1);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
OutputFormat<NullWritable, OrcStruct> outputFormat =
new OrcOutputFormat<OrcStruct>();
RecordWriter<NullWritable, OrcStruct> writer =
outputFormat.getRecordWriter(attemptContext);
    // write 3000 rows with three integer columns
TypeDescription type = TypeDescription.fromString(typeStr);
OrcStruct row = (OrcStruct) OrcStruct.createValue(type);
NullWritable nada = NullWritable.get();
for (int r = 0; r < 3000; ++r) {
row.setFieldValue(0, new IntWritable(r));
row.setFieldValue(1, new IntWritable(r * 2));
row.setFieldValue(2, new IntWritable(r * 3));
writer.write(nada, row);
}
writer.close(attemptContext);
conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute(), "");
FileSplit split = new FileSplit(new Path(workDir, "part-m-00000.orc"),
0, 1000000, new String[0]);
RecordReader<NullWritable, OrcStruct> reader =
new OrcInputFormat<OrcStruct>().createRecordReader(split,
attemptContext);
    // all 3000 rows come back, but with no columns selected every field should be null
for (int r = 0; r < 3000; ++r) {
assertTrue(reader.nextKeyValue());
row = reader.getCurrentValue();
assertNull(row.getFieldValue(0));
assertNull(row.getFieldValue(1));
assertNull(row.getFieldValue(2));
}
assertFalse(reader.nextKeyValue());
}
/**
* Make sure that the writer ignores the OrcKey
* @throws Exception
*/
@Test
public void testOrcKey() throws Exception {
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
String TYPE_STRING = "struct<i:int,s:string>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, TYPE_STRING);
conf.setBoolean(OrcOutputFormat.SKIP_TEMP_DIRECTORY, true);
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 1);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
TypeDescription schema = TypeDescription.fromString(TYPE_STRING);
OrcKey key = new OrcKey(new OrcStruct(schema));
RecordWriter<NullWritable, Writable> writer =
new OrcOutputFormat<>().getRecordWriter(attemptContext);
NullWritable nada = NullWritable.get();
for(int r=0; r < 2000; ++r) {
((OrcStruct) key.key).setAllFields(new IntWritable(r),
new Text(Integer.toString(r)));
writer.write(nada, key);
}
writer.close(attemptContext);
Path path = new Path(workDir, "part-m-00000.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(2000, file.getNumberOfRows());
assertEquals(TYPE_STRING, file.getSchema().toString());
}
/**
* Make sure that the writer ignores the OrcValue
* @throws Exception
*/
@Test
public void testOrcValue() throws Exception {
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
String TYPE_STRING = "struct<i:int>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, TYPE_STRING);
conf.setBoolean(OrcOutputFormat.SKIP_TEMP_DIRECTORY, true);
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 1);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
TypeDescription schema = TypeDescription.fromString(TYPE_STRING);
OrcValue value = new OrcValue(new OrcStruct(schema));
RecordWriter<NullWritable, Writable> writer =
new OrcOutputFormat<>().getRecordWriter(attemptContext);
NullWritable nada = NullWritable.get();
for(int r=0; r < 3000; ++r) {
((OrcStruct) value.value).setAllFields(new IntWritable(r));
writer.write(nada, value);
}
writer.close(attemptContext);
Path path = new Path(workDir, "part-m-00000.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(OrcConf.ROW_BATCH_SIZE.getDefaultValue(), file.options().getRowBatchSize());
assertEquals(3000, file.getNumberOfRows());
assertEquals(TYPE_STRING, file.getSchema().toString());
}
/**
* Make sure that the ORC writer is initialized with a configured row batch size
* @throws Exception
*/
@Test
public void testOrcOutputFormatWithRowBatchSize() throws Exception {
conf.set("mapreduce.output.fileoutputformat.outputdir", workDir.toString());
OrcConf.ROW_BATCH_SIZE.setInt(conf, 128);
String TYPE_STRING = "struct<i:int,s:string>";
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, TYPE_STRING);
conf.setBoolean(OrcOutputFormat.SKIP_TEMP_DIRECTORY, true);
TaskAttemptID id = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 1);
TaskAttemptContext attemptContext = new TaskAttemptContextImpl(conf, id);
TypeDescription schema = TypeDescription.fromString(TYPE_STRING);
OrcKey key = new OrcKey(new OrcStruct(schema));
RecordWriter<NullWritable, Writable> writer =
new OrcOutputFormat<>().getRecordWriter(attemptContext);
NullWritable nada = NullWritable.get();
for(int r=0; r < 2000; ++r) {
((OrcStruct) key.key).setAllFields(new IntWritable(r),
new Text(Integer.toString(r)));
writer.write(nada, key);
}
writer.close(attemptContext);
Path path = new Path(workDir, "part-m-00000.orc");
Reader file = OrcFile.createReader(path, OrcFile.readerOptions(conf));
assertEquals(128, file.options().getRowBatchSize());
assertEquals(2000, file.getNumberOfRows());
assertEquals(TYPE_STRING, file.getSchema().toString());
}
}
| 14,181 | 42.907121 | 105 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapreduce/TestMrUnit.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapred.OrcValue;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestMrUnit {
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "TestMapReduce-mapreduce");
private static FileSystem FS;
private static final JobConf CONF = new JobConf();
private static final TypeDescription INPUT_SCHEMA = TypeDescription
.fromString("struct<one:struct<x:int,y:int>,two:struct<z:string>>");
private static final TypeDescription OUT_SCHEMA = TypeDescription
.fromString("struct<first:struct<x:int,y:int>,second:struct<z:string>>");
static {
OrcConf.MAPRED_SHUFFLE_KEY_SCHEMA.setString(CONF, "struct<x:int,y:int>");
OrcConf.MAPRED_SHUFFLE_VALUE_SCHEMA.setString(CONF, "struct<z:string>");
OrcConf.MAPRED_OUTPUT_SCHEMA.setString(CONF, OUT_SCHEMA.toString());
// This is required due to ORC-964
CONF.set("mapreduce.job.output.key.comparator.class", OrcKeyComparator.class.getName());
try {
FS = FileSystem.getLocal(CONF);
} catch (IOException ioe) {
FS = null;
}
}
public static class OrcKeyComparator implements RawComparator<OrcKey>, JobConfigurable {
private JobConf jobConf;
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
DataInputBuffer buffer1 = new DataInputBuffer();
DataInputBuffer buffer2 = new DataInputBuffer();
try {
buffer1.reset(b1, s1, l1);
buffer2.reset(b2, s2, l2);
OrcKey orcKey1 = new OrcKey();
orcKey1.configure(this.jobConf);
orcKey1.readFields(buffer1);
OrcKey orcKey2 = new OrcKey();
orcKey2.configure(this.jobConf);
orcKey2.readFields(buffer2);
return orcKey1.compareTo(orcKey2);
} catch (IOException e) {
throw new RuntimeException("compare orcKey fail", e);
}
}
@Override
public int compare(OrcKey o1, OrcKey o2) {
return o1.compareTo(o2);
}
@Override
public void configure(JobConf jobConf) {
this.jobConf = jobConf;
}
}
/**
* Split the input struct into its two parts.
*/
public static class MyMapper
extends Mapper<NullWritable, OrcStruct, OrcKey, OrcValue> {
private final OrcKey keyWrapper = new OrcKey();
private final OrcValue valueWrapper = new OrcValue();
@Override
protected void map(NullWritable key,
OrcStruct value,
Context context
) throws IOException, InterruptedException {
keyWrapper.key = value.getFieldValue(0);
valueWrapper.value = value.getFieldValue(1);
context.write(keyWrapper, valueWrapper);
}
}
/**
* Glue the key and values back together.
*/
public static class MyReducer
extends Reducer<OrcKey, OrcValue, NullWritable, OrcStruct> {
private final OrcStruct output = new OrcStruct(OUT_SCHEMA);
private final NullWritable nada = NullWritable.get();
@Override
protected void reduce(OrcKey key,
Iterable<OrcValue> values,
Context context
) throws IOException, InterruptedException {
output.setFieldValue(0, key.key);
for(OrcValue value: values) {
output.setFieldValue(1, value.value);
context.write(nada, output);
}
}
}
public void writeInputFile(Path inputPath) throws IOException {
Writer writer = OrcFile.createWriter(inputPath,
OrcFile.writerOptions(CONF).setSchema(INPUT_SCHEMA).overwrite(true));
OrcMapreduceRecordWriter<OrcStruct> recordWriter = new OrcMapreduceRecordWriter<>(writer);
NullWritable nada = NullWritable.get();
OrcStruct input = (OrcStruct) OrcStruct.createValue(INPUT_SCHEMA);
IntWritable x =
(IntWritable) ((OrcStruct) input.getFieldValue(0)).getFieldValue(0);
IntWritable y =
(IntWritable) ((OrcStruct) input.getFieldValue(0)).getFieldValue(1);
Text z = (Text) ((OrcStruct) input.getFieldValue(1)).getFieldValue(0);
for(int r = 0; r < 20; ++r) {
x.set(100 - (r / 4));
y.set(r * 2);
z.set(Integer.toHexString(r));
recordWriter.write(nada, input);
}
recordWriter.close(null);
}
private void readOutputFile(Path output) throws IOException, InterruptedException {
Reader reader = OrcFile.createReader(output, OrcFile.readerOptions(CONF));
OrcMapreduceRecordReader<OrcStruct> recordReader = new OrcMapreduceRecordReader<>(reader,
org.apache.orc.mapred.OrcInputFormat.buildOptions(CONF, reader, 0, 20));
int[] expectedX = new int[20];
int[] expectedY = new int[20];
String[] expectedZ = new String[20];
int count = 0;
for(int g = 4; g >= 0; --g) {
for(int i = 0; i < 4; ++i) {
expectedX[count] = 100 - g;
int r = g * 4 + i;
expectedY[count] = r * 2;
expectedZ[count ++] = Integer.toHexString(r);
}
}
int row = 0;
while (recordReader.nextKeyValue()) {
OrcStruct value = recordReader.getCurrentValue();
IntWritable x =
(IntWritable) ((OrcStruct) value.getFieldValue(0)).getFieldValue(0);
IntWritable y =
(IntWritable) ((OrcStruct) value.getFieldValue(0)).getFieldValue(1);
Text z = (Text) ((OrcStruct) value.getFieldValue(1)).getFieldValue(0);
assertEquals(expectedX[row], x.get());
assertEquals(expectedY[row], y.get());
assertEquals(expectedZ[row], z.toString());
row ++;
}
recordReader.close();
}
@Test
public void testMapReduce() throws IOException, InterruptedException, ClassNotFoundException {
Path testDir = new Path(TEST_DIR.getAbsolutePath());
Path input = new Path(testDir, "input");
Path output = new Path(testDir, "output");
FS.delete(input, true);
FS.delete(output, true);
writeInputFile(new Path(input, "input.orc"));
Job job = Job.getInstance(CONF);
job.setMapperClass(MyMapper.class);
job.setInputFormatClass(OrcInputFormat.class);
FileInputFormat.setInputPaths(job, input);
FileOutputFormat.setOutputPath(job, output);
job.setOutputKeyClass(OrcKey.class);
job.setOutputValueClass(OrcValue.class);
job.setOutputFormatClass(OrcOutputFormat.class);
job.setReducerClass(MyReducer.class);
job.setNumReduceTasks(1);
job.waitForCompletion(true);
FileStatus[] fileStatuses = output.getFileSystem(CONF)
.listStatus(output, path -> path.getName().endsWith(".orc"));
    assertEquals(1, fileStatuses.length);
Path path = fileStatuses[0].getPath();
readOutputFile(path);
}
}
| 8,560 | 33.800813 | 96 | java |
null | orc-main/java/mapreduce/src/test/org/apache/orc/mapreduce/TestOrcRecordWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.mapreduce;
import org.apache.hadoop.io.IntWritable;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.mapred.OrcList;
import org.apache.orc.mapred.OrcStruct;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.times;
public class TestOrcRecordWriter {
/**
* Test the child element limit flushes the writer.
*/
@Test
public void testChildElementLimit() throws Exception {
TypeDescription schema = TypeDescription.fromString("struct<x:array<int>>");
Writer mockWriter = Mockito.mock(Writer.class);
Mockito.when(mockWriter.getSchema()).thenReturn(schema);
OrcMapreduceRecordWriter<OrcStruct> recordWriter =
new OrcMapreduceRecordWriter<>(mockWriter, 1024, 10);
OrcStruct record = new OrcStruct(schema);
OrcList list = new OrcList(schema.getChildren().get(0));
record.setFieldValue(0, list);
list.add(new IntWritable(1));
list.add(new IntWritable(2));
Mockito.verify(mockWriter, times(0)).addRowBatch(any());
for(int i=0; i < 11; i++) {
recordWriter.write(null, record);
}
// We've written 11 rows with 2 integers each, so we should have written
// 2 batches of 5 rows.
Mockito.verify(mockWriter, times(2)).addRowBatch(any());
}
}
| 2,182 | 37.298246 | 80 | java |
null | orc-main/java/shims/src/java/org/apache/orc/EncryptionAlgorithm.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc;
import javax.crypto.Cipher;
import javax.crypto.NoSuchPaddingException;
import java.security.NoSuchAlgorithmException;
/**
* The encryption algorithms supported by ORC.
* <p>
* This class can't reference any of the newer Hadoop classes.
*/
public enum EncryptionAlgorithm {
AES_CTR_128("AES", "CTR/NoPadding", 16, 1),
AES_CTR_256("AES", "CTR/NoPadding", 32, 2);
private final String algorithm;
private final String mode;
private final int keyLength;
private final int serialization;
private final byte[] zero;
EncryptionAlgorithm(String algorithm, String mode, int keyLength,
int serialization) {
this.algorithm = algorithm;
this.mode = mode;
this.keyLength = keyLength;
this.serialization = serialization;
zero = new byte[keyLength];
}
public String getAlgorithm() {
return algorithm;
}
public int getIvLength() {
return 16;
}
public Cipher createCipher() {
try {
return Cipher.getInstance(algorithm + "/" + mode);
} catch (NoSuchAlgorithmException e) {
throw new IllegalArgumentException("Bad algorithm " + algorithm);
} catch (NoSuchPaddingException e) {
throw new IllegalArgumentException("Bad padding " + mode);
}
}
public int keyLength() {
return keyLength;
}
public byte[] getZeroKey() {
return zero;
}
/**
* Get the serialization code for this enumeration.
* @return the serialization value
*/
public int getSerialization() {
return serialization;
}
/**
   * Find the encryption algorithm for a given serialization code.
   * @param serialization the serialization value
   * @return the matching algorithm
*/
public static EncryptionAlgorithm fromSerialization(int serialization) {
for(EncryptionAlgorithm algorithm: values()) {
if (algorithm.serialization == serialization) {
return algorithm;
}
}
throw new IllegalArgumentException("Unknown code in encryption algorithm " +
serialization);
}
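  // Illustrative sketch (assumption, not part of this enum's API): how a caller
  // might initialize the Cipher returned by createCipher() for AES/CTR. The key
  // and IV lengths are expected to match keyLength() and getIvLength().
  static Cipher exampleInitCipher(EncryptionAlgorithm algorithm,
                                  byte[] keyBytes,
                                  byte[] iv) throws java.security.GeneralSecurityException {
    Cipher cipher = algorithm.createCipher();
    // CTR is symmetric, so ENCRYPT_MODE also produces the keystream used to decrypt.
    cipher.init(Cipher.ENCRYPT_MODE,
        new javax.crypto.spec.SecretKeySpec(keyBytes, algorithm.getAlgorithm()),
        new javax.crypto.spec.IvParameterSpec(iv));
    return cipher;
  }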
@Override
public String toString() {
return algorithm + (keyLength * 8);
}
}
| 2,873 | 27.176471 | 80 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/HadoopShims.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.orc.EncryptionAlgorithm;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Random;
public interface HadoopShims {
enum DirectCompressionType {
NONE,
ZLIB_NOHEADER,
ZLIB,
SNAPPY,
}
interface DirectDecompressor {
void decompress(ByteBuffer var1, ByteBuffer var2) throws IOException;
void reset();
void end();
}
/**
* Get a direct decompressor codec, if it is available
* @param codec the kind of decompressor that we need
* @return a direct decompressor or null, if it isn't available
*/
DirectDecompressor getDirectDecompressor(DirectCompressionType codec);
/**
* a hadoop.io ByteBufferPool shim.
*/
interface ByteBufferPoolShim {
/**
* Get a new ByteBuffer from the pool. The pool can provide this from
* removing a buffer from its internal cache, or by allocating a
* new buffer.
*
* @param direct Whether the buffer should be direct.
* @param length The minimum length the buffer will have.
* @return A new ByteBuffer. Its capacity can be less
* than what was requested, but must be at
* least 1 byte.
*/
ByteBuffer getBuffer(boolean direct, int length);
/**
* Release a buffer back to the pool.
* The pool may choose to put this buffer into its cache/free it.
*
* @param buffer a direct bytebuffer
*/
void putBuffer(ByteBuffer buffer);
}
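  // Illustrative sketch (assumption, not part of the ORC API): a minimal
  // ByteBufferPoolShim that allocates a fresh buffer per request and simply
  // drops released buffers. A real pool would cache buffers in putBuffer.
  class SimpleBufferPoolSketch implements ByteBufferPoolShim {
    @Override
    public ByteBuffer getBuffer(boolean direct, int length) {
      // Allocate at least the requested length, direct or on-heap as asked.
      return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
    }
    @Override
    public void putBuffer(ByteBuffer buffer) {
      // No caching in this sketch; the released buffer becomes garbage.
    }
  }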
/**
* Provides an HDFS ZeroCopyReader shim.
* @param in FSDataInputStream to read from (where the cached/mmap buffers are
* tied to)
* @param pool ByteBufferPoolShim to allocate fallback buffers with
*
   * @return a zero-copy reader shim, or null if it is not supported
*/
ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in,
ByteBufferPoolShim pool
) throws IOException;
interface ZeroCopyReaderShim extends Closeable {
/**
     * Get a ByteBuffer from the FSDataInputStream - this can be either a
     * HeapByteBuffer or a MappedByteBuffer. Also advances the underlying
     * stream by the number of bytes read. The data read can be smaller than
     * maxLength.
     *
     * @return the ByteBuffer read from the stream
*/
ByteBuffer readBuffer(int maxLength,
boolean verifyChecksums) throws IOException;
/**
* Release a ByteBuffer obtained from a readBuffer on this
* ZeroCopyReaderShim.
*/
void releaseBuffer(ByteBuffer buffer);
/**
* Close the underlying stream.
*/
@Override
void close() throws IOException;
}
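  // Illustrative sketch (assumption, not part of the ORC API): typical use of a
  // ZeroCopyReaderShim -- read a buffer, consume it, then release it back.
  static void exampleZeroCopyRead(ZeroCopyReaderShim reader) throws IOException {
    ByteBuffer buffer = reader.readBuffer(1 << 20, false);
    try {
      // consume up to buffer.remaining() bytes; it may be fewer than requested
    } finally {
      reader.releaseBuffer(buffer);
    }
  }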
/**
* End the OutputStream's current block at the current location.
* This is only available on HDFS on Hadoop ≥ 2.7, but will return false
* otherwise.
* @return was a variable length block created?
*/
boolean endVariableLengthBlock(OutputStream output) throws IOException;
/**
* The known KeyProviders for column encryption.
* These are identical to OrcProto.KeyProviderKind.
*/
enum KeyProviderKind {
UNKNOWN(0),
HADOOP(1),
AWS(2),
GCP(3),
AZURE(4);
private final int value;
KeyProviderKind(int value) {
this.value = value;
}
public int getValue() {
return value;
}
}
/**
* Information about a crypto key including the key name, version, and the
* algorithm.
*/
class KeyMetadata {
private final String keyName;
private final int version;
private final EncryptionAlgorithm algorithm;
public KeyMetadata(String key, int version, EncryptionAlgorithm algorithm) {
this.keyName = key;
this.version = version;
this.algorithm = algorithm;
}
/**
* Get the name of the key.
*/
public String getKeyName() {
return keyName;
}
/**
* Get the encryption algorithm for this key.
* @return the algorithm
*/
public EncryptionAlgorithm getAlgorithm() {
return algorithm;
}
/**
* Get the version of this key.
* @return the version
*/
public int getVersion() {
return version;
}
@Override
public String toString() {
return keyName + '@' + version + ' ' + algorithm;
}
}
/**
* Create a Hadoop KeyProvider to get encryption keys.
* @param conf the configuration
* @param random a secure random number generator
* @return a key provider or null if none was provided
*/
KeyProvider getHadoopKeyProvider(Configuration conf,
Random random) throws IOException;
}
| 5,614 | 27.075 | 80 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/HadoopShimsCurrent.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.orc.EncryptionAlgorithm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
/**
* Shims for recent versions of Hadoop
* <p>
* Adds support for:
* <ul>
* <li>Variable length HDFS blocks</li>
* </ul>
*/
public class HadoopShimsCurrent implements HadoopShims {
private static final Logger LOG = LoggerFactory.getLogger(HadoopShimsCurrent.class);
static DirectDecompressor getDecompressor(DirectCompressionType codec) {
switch (codec) {
case ZLIB:
return new ZlibDirectDecompressWrapper(
new ZlibDecompressor.ZlibDirectDecompressor());
case ZLIB_NOHEADER:
return new ZlibDirectDecompressWrapper(
new ZlibDecompressor.ZlibDirectDecompressor(
ZlibDecompressor.CompressionHeader.NO_HEADER, 0));
case SNAPPY:
return new SnappyDirectDecompressWrapper(
new SnappyDecompressor.SnappyDirectDecompressor());
default:
return null;
}
}
/**
* Find the correct algorithm based on the key's metadata.
*
* @param meta the key's metadata
* @return the correct algorithm
*/
static EncryptionAlgorithm findAlgorithm(KeyProviderCryptoExtension.Metadata meta) {
String cipher = meta.getCipher();
if (cipher.startsWith("AES/")) {
int bitLength = meta.getBitLength();
if (bitLength == 128) {
return EncryptionAlgorithm.AES_CTR_128;
} else {
if (bitLength != 256) {
LOG.info("ORC column encryption does not support " + bitLength +
" bit keys. Using 256 bits instead.");
}
return EncryptionAlgorithm.AES_CTR_256;
}
}
throw new IllegalArgumentException("ORC column encryption only supports" +
" AES and not " + cipher);
}
static String buildKeyVersionName(KeyMetadata key) {
return key.getKeyName() + "@" + key.getVersion();
}
static KeyProvider createKeyProvider(Configuration conf,
Random random) throws IOException {
List<org.apache.hadoop.crypto.key.KeyProvider> result =
KeyProviderFactory.getProviders(conf);
if (result.size() == 0) {
LOG.info("Can't get KeyProvider for ORC encryption from" +
" hadoop.security.key.provider.path.");
return new NullKeyProvider();
} else {
return new KeyProviderImpl(result.get(0), random);
}
}
@Override
public DirectDecompressor getDirectDecompressor(DirectCompressionType codec) {
return getDecompressor(codec);
}
@Override
public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in,
ByteBufferPoolShim pool
) throws IOException {
return ZeroCopyShims.getZeroCopyReader(in, pool);
}
@Override
public boolean endVariableLengthBlock(OutputStream output) throws IOException {
if (output instanceof HdfsDataOutputStream) {
HdfsDataOutputStream hdfs = (HdfsDataOutputStream) output;
hdfs.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.END_BLOCK));
return true;
}
return false;
}
@Override
public KeyProvider getHadoopKeyProvider(Configuration conf,
Random random) throws IOException {
return createKeyProvider(conf, random);
}
}
| 4,655 | 33.488889 | 86 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/HadoopShimsPre2_3.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Random;
/**
* Shims for versions of Hadoop up to and including 2.2.x
*/
public class HadoopShimsPre2_3 implements HadoopShims {
HadoopShimsPre2_3() {
}
@Override
public DirectDecompressor getDirectDecompressor(
DirectCompressionType codec) {
return null;
}
@Override
public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in,
ByteBufferPoolShim pool
) throws IOException {
/* not supported */
return null;
}
@Override
public boolean endVariableLengthBlock(OutputStream output) {
return false;
}
@Override
public KeyProvider getHadoopKeyProvider(Configuration conf, Random random) {
return new NullKeyProvider();
}
}
| 1,782 | 28.229508 | 78 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/HadoopShimsPre2_6.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Random;
/**
* Shims for versions of Hadoop less than 2.6
* <p>
* Adds support for:
* <ul>
* <li>Direct buffer decompression</li>
* <li>Zero copy</li>
* </ul>
*/
public class HadoopShimsPre2_6 implements HadoopShims {
@Override
public DirectDecompressor getDirectDecompressor(DirectCompressionType codec) {
return HadoopShimsCurrent.getDecompressor(codec);
}
@Override
public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in,
ByteBufferPoolShim pool
) throws IOException {
return ZeroCopyShims.getZeroCopyReader(in, pool);
}
@Override
public boolean endVariableLengthBlock(OutputStream output) {
return false;
}
@Override
public KeyProvider getHadoopKeyProvider(Configuration conf, Random random) {
return new NullKeyProvider();
}
}
| 1,894 | 30.065574 | 80 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/HadoopShimsPre2_7.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Random;
/**
* Shims for versions of Hadoop less than 2.7.
* <p>
* Adds support for:
* <ul>
* <li>Crypto</li>
* </ul>
*/
public class HadoopShimsPre2_7 implements HadoopShims {
private static final Logger LOG =
LoggerFactory.getLogger(HadoopShimsPre2_7.class);
@Override
public DirectDecompressor getDirectDecompressor(DirectCompressionType codec) {
return HadoopShimsCurrent.getDecompressor(codec);
}
@Override
public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in,
ByteBufferPoolShim pool
) throws IOException {
return ZeroCopyShims.getZeroCopyReader(in, pool);
}
@Override
public boolean endVariableLengthBlock(OutputStream output) {
return false;
}
@Override
public KeyProvider getHadoopKeyProvider(Configuration conf,
Random random) throws IOException {
return HadoopShimsCurrent.createKeyProvider(conf, random);
}
}
| 2,091 | 30.223881 | 80 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/KeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.security.Key;
import java.util.List;
import java.util.Random;
/**
* A source of crypto keys. This is usually backed by a Ranger KMS.
*/
public interface KeyProvider {
/**
* Get the list of key names from the key provider.
* @return a list of key names
*/
List<String> getKeyNames() throws IOException;
/**
* Get the current metadata for a given key. This is used when encrypting
* new data.
*
* @param keyName the name of a key
* @return metadata for the current version of the key
* @throws IllegalArgumentException if the key is unknown
*/
HadoopShims.KeyMetadata getCurrentKeyVersion(String keyName) throws IOException;
/**
* Create a local key for the given key version. This local key will be
* randomly generated and encrypted with the given version of the master
* key. The encryption and decryption is done with the local key and the
* user process never has access to the master key, because it stays on the
* Ranger KMS.
*
* @param key the master key version
* @return the local key's material both encrypted and unencrypted
*/
LocalKey createLocalKey(HadoopShims.KeyMetadata key) throws IOException;
/**
* Decrypt a local key for reading a file.
*
* @param key the master key version
* @param encryptedKey the encrypted key
* @return the decrypted local key's material or null if the key is not
* available
*/
Key decryptLocalKey(HadoopShims.KeyMetadata key, byte[] encryptedKey) throws IOException;
/**
* Get the kind of this provider.
*/
HadoopShims.KeyProviderKind getKind();
/**
* A service loader factory interface.
*/
interface Factory {
KeyProvider create(String kind,
Configuration conf,
Random random) throws IOException;
}
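  // Illustrative sketch (assumption, not part of the ORC API): the round trip a
  // writer and a reader make against a KeyProvider implementation.
  static Key exampleRoundTrip(KeyProvider provider, String keyName) throws IOException {
    // Writer side: look up the current master key version and derive a local key.
    HadoopShims.KeyMetadata master = provider.getCurrentKeyVersion(keyName);
    LocalKey localKey = provider.createLocalKey(master);
    byte[] storedInFileFooter = localKey.getEncryptedKey();
    // Reader side: recover the local key from the encrypted bytes in the footer.
    return provider.decryptLocalKey(master, storedInFileFooter);
  }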
}
| 2,744 | 31.294118 | 91 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/KeyProviderImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.orc.EncryptionAlgorithm;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.Key;
import java.util.List;
import java.util.Random;
/**
* Shim implementation for ORC's KeyProvider API that uses Hadoop's
* KeyProvider API and implementations. Most users use a Hadoop or Ranger
* KMS and thus should use this default implementation.
* <p>
* The main two methods of ORC's KeyProvider are createLocalKey and
* decryptLocalKey. These are very similar to Hadoop's
* <pre>
* EncryptedKeyVersion generateEncryptedKey(String keyVersionName);
* KeyVersion decryptEncryptedKey(EncryptedKeyVersion encrypted)
* </pre>
* but there are some important differences.
* <ul>
* <li>Hadoop's generateEncryptedKey doesn't return the decrypted key, so it
* would require two round trips (generateEncryptedKey and then
 * decryptEncryptedKey) to the KMS.</li>
* <li>Hadoop's methods require storing both the IV and the encrypted key, so
* for AES256, it is 48 random bytes.</li>
* </ul>
* <p>
* However, since the encryption in the KMS is using AES/CTR we know that the
* flow is:
*
* <pre>
* tmpKey = aes(masterKey, iv);
* cypher = xor(tmpKey, plain);
* </pre>
* <p>
* which means that encryption and decryption are symmetric. Therefore, if we
* use the KMS' decryptEncryptedKey, and feed in a random iv and the right
* number of 0's as the encrypted key, we get the right length of a tmpKey.
* Since it is symmetric, we can use it for both encryption and decryption
* and we only need to store the random iv. Since the iv is 16 bytes, it is
* only a third the size of the other solution, and only requires one trip to
* the KMS.
* <p>
* So the flow looks like:
* <pre>
* encryptedKey = securely random 16 or 32 bytes
 *   iv = first 16 bytes of encryptedKey
* --- on KMS ---
* tmpKey0 = aes(masterKey, iv)
* tmpKey1 = aes(masterKey, iv+1)
* decryptedKey0 = xor(tmpKey0, encryptedKey0)
* decryptedKey1 = xor(tmpKey1, encryptedKey1)
* </pre>
* <p>
* In the long term, we should probably fix Hadoop's generateEncryptedKey
* to either take the random key or pass it back.
*/
class KeyProviderImpl implements KeyProvider {
private final org.apache.hadoop.crypto.key.KeyProvider provider;
private final Random random;
KeyProviderImpl(org.apache.hadoop.crypto.key.KeyProvider provider,
Random random) {
this.provider = provider;
this.random = random;
}
@Override
public List<String> getKeyNames() throws IOException {
return provider.getKeys();
}
@Override
public HadoopShims.KeyMetadata getCurrentKeyVersion(String keyName) throws IOException {
org.apache.hadoop.crypto.key.KeyProvider.Metadata meta =
provider.getMetadata(keyName);
return new HadoopShims.KeyMetadata(keyName, meta.getVersions() - 1,
HadoopShimsCurrent.findAlgorithm(meta));
}
/**
* The Ranger/Hadoop KMS mangles the IV by bit flipping it in a misguided
* attempt to improve security. By bit flipping it here, we undo the
   * silliness so that the KMS ends up using the IV that we actually intend.
*
* @param input the input array to copy from
* @param output the output array to write to
*/
private static void unmangleIv(byte[] input, byte[] output) {
for (int i = 0; i < output.length && i < input.length; ++i) {
output[i] = (byte) (0xff ^ input[i]);
}
}
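  // Illustrative sketch (assumption, not used by ORC): shows the AES/CTR symmetry
  // the class comment relies on -- running the cipher twice with the same key and
  // IV returns the original bytes, which is why a single decryptEncryptedKey style
  // operation can serve for both encryption and decryption of the local key.
  static byte[] ctrSymmetryDemo(byte[] keyMaterial, byte[] iv,
                                byte[] data) throws GeneralSecurityException {
    javax.crypto.Cipher cipher =
        javax.crypto.Cipher.getInstance("AES/CTR/NoPadding");
    SecretKeySpec key = new SecretKeySpec(keyMaterial, "AES");
    javax.crypto.spec.IvParameterSpec param =
        new javax.crypto.spec.IvParameterSpec(iv);
    cipher.init(javax.crypto.Cipher.ENCRYPT_MODE, key, param);
    byte[] once = cipher.doFinal(data);
    cipher.init(javax.crypto.Cipher.ENCRYPT_MODE, key, param);
    return cipher.doFinal(once); // equal to the original data
  }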
@Override
public LocalKey createLocalKey(HadoopShims.KeyMetadata key) throws IOException {
EncryptionAlgorithm algorithm = key.getAlgorithm();
byte[] encryptedKey = new byte[algorithm.keyLength()];
random.nextBytes(encryptedKey);
byte[] iv = new byte[algorithm.getIvLength()];
unmangleIv(encryptedKey, iv);
EncryptedKeyVersion param = EncryptedKeyVersion.createForDecryption(
key.getKeyName(), HadoopShimsCurrent.buildKeyVersionName(key), iv, encryptedKey);
try {
KeyProviderCryptoExtension.KeyVersion decryptedKey;
if (provider instanceof KeyProviderCryptoExtension) {
decryptedKey = ((KeyProviderCryptoExtension) provider).decryptEncryptedKey(param);
} else if (provider instanceof CryptoExtension) {
decryptedKey = ((CryptoExtension) provider).decryptEncryptedKey(param);
} else {
throw new UnsupportedOperationException(
provider.getClass().getCanonicalName() + " is not supported.");
}
return new LocalKey(algorithm, decryptedKey.getMaterial(),
encryptedKey);
} catch (GeneralSecurityException e) {
throw new IOException("Can't create local encryption key for " + key, e);
}
}
@Override
public Key decryptLocalKey(HadoopShims.KeyMetadata key,
byte[] encryptedKey) throws IOException {
EncryptionAlgorithm algorithm = key.getAlgorithm();
byte[] iv = new byte[algorithm.getIvLength()];
unmangleIv(encryptedKey, iv);
EncryptedKeyVersion param = EncryptedKeyVersion.createForDecryption(
key.getKeyName(), HadoopShimsCurrent.buildKeyVersionName(key), iv, encryptedKey);
try {
KeyProviderCryptoExtension.KeyVersion decryptedKey;
if (provider instanceof KeyProviderCryptoExtension) {
decryptedKey = ((KeyProviderCryptoExtension) provider).decryptEncryptedKey(param);
} else if (provider instanceof CryptoExtension) {
decryptedKey = ((CryptoExtension) provider).decryptEncryptedKey(param);
} else {
throw new UnsupportedOperationException(
provider.getClass().getCanonicalName() + " is not supported.");
}
return new SecretKeySpec(decryptedKey.getMaterial(),
algorithm.getAlgorithm());
} catch (GeneralSecurityException e) {
return null;
}
}
@Override
public HadoopShims.KeyProviderKind getKind() {
return HadoopShims.KeyProviderKind.HADOOP;
}
}
| 6,966 | 38.585227 | 90 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/LocalKey.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.orc.EncryptionAlgorithm;
import javax.crypto.spec.SecretKeySpec;
import java.security.Key;
/**
* Local keys are random keys that are generated for each file and column.
* The file's metadata includes the encryptedKey and the reader needs to
* use the KeyProvider to get the decryptedKey.
*/
public class LocalKey {
private final byte[] encryptedKey;
private Key decryptedKey;
public LocalKey(EncryptionAlgorithm algorithm,
byte[] decryptedKey,
byte[] encryptedKey) {
this.encryptedKey = encryptedKey;
if (decryptedKey != null) {
setDecryptedKey(new SecretKeySpec(decryptedKey, algorithm.getAlgorithm()));
}
}
public void setDecryptedKey(Key key) {
decryptedKey = key;
}
public Key getDecryptedKey() {
return decryptedKey;
}
public byte[] getEncryptedKey() {
return encryptedKey;
}
}
| 1,734 | 29.982143 | 81 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/NullKeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import java.security.Key;
import java.util.ArrayList;
import java.util.List;
class NullKeyProvider implements KeyProvider {
@Override
public List<String> getKeyNames() {
return new ArrayList<>();
}
@Override
public HadoopShims.KeyMetadata getCurrentKeyVersion(String keyName) {
throw new IllegalArgumentException("Unknown key " + keyName);
}
@Override
public LocalKey createLocalKey(HadoopShims.KeyMetadata key) {
throw new IllegalArgumentException("Unknown key " + key);
}
@Override
public Key decryptLocalKey(HadoopShims.KeyMetadata key, byte[] encryptedKey) {
return null;
}
@Override
public HadoopShims.KeyProviderKind getKind() {
return null;
}
}
| 1,545 | 28.730769 | 80 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/SnappyDirectDecompressWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import java.io.IOException;
import java.nio.ByteBuffer;
class SnappyDirectDecompressWrapper implements HadoopShims.DirectDecompressor {
private final SnappyDecompressor.SnappyDirectDecompressor root;
private boolean isFirstCall = true;
SnappyDirectDecompressWrapper(SnappyDecompressor.SnappyDirectDecompressor root) {
this.root = root;
}
@Override
public void decompress(ByteBuffer input, ByteBuffer output) throws IOException {
if (!isFirstCall) {
root.reset();
} else {
isFirstCall = false;
}
root.decompress(input, output);
}
@Override
public void reset() {
root.reset();
}
@Override
public void end() {
root.end();
}
}
| 1,596 | 28.574074 | 83 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/ZeroCopyShims.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
class ZeroCopyShims {
private static final class ByteBufferPoolAdapter implements ByteBufferPool {
private HadoopShims.ByteBufferPoolShim pool;
ByteBufferPoolAdapter(HadoopShims.ByteBufferPoolShim pool) {
this.pool = pool;
}
@Override
public ByteBuffer getBuffer(boolean direct, int length) {
return this.pool.getBuffer(direct, length);
}
@Override
public void putBuffer(ByteBuffer buffer) {
this.pool.putBuffer(buffer);
}
}
private static final class ZeroCopyAdapter implements HadoopShims.ZeroCopyReaderShim {
private final FSDataInputStream in;
private final ByteBufferPoolAdapter pool;
private static final EnumSet<ReadOption> CHECK_SUM = EnumSet
.noneOf(ReadOption.class);
private static final EnumSet<ReadOption> NO_CHECK_SUM = EnumSet
.of(ReadOption.SKIP_CHECKSUMS);
ZeroCopyAdapter(FSDataInputStream in,
HadoopShims.ByteBufferPoolShim poolshim) {
this.in = in;
if (poolshim != null) {
pool = new ByteBufferPoolAdapter(poolshim);
} else {
pool = null;
}
}
@Override
public ByteBuffer readBuffer(int maxLength, boolean verifyChecksums)
throws IOException {
EnumSet<ReadOption> options = NO_CHECK_SUM;
if (verifyChecksums) {
options = CHECK_SUM;
}
return this.in.read(this.pool, maxLength, options);
}
@Override
public void releaseBuffer(ByteBuffer buffer) {
this.in.releaseBuffer(buffer);
}
@Override
public void close() throws IOException {
this.in.close();
}
}
public static HadoopShims.ZeroCopyReaderShim getZeroCopyReader(
FSDataInputStream in, HadoopShims.ByteBufferPoolShim pool) throws IOException {
return new ZeroCopyAdapter(in, pool);
}
}
| 2,887 | 30.391304 | 88 | java |
null | orc-main/java/shims/src/java/org/apache/orc/impl/ZlibDirectDecompressWrapper.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import java.io.IOException;
import java.nio.ByteBuffer;
class ZlibDirectDecompressWrapper implements HadoopShims.DirectDecompressor {
private final ZlibDecompressor.ZlibDirectDecompressor root;
private boolean isFirstCall = true;
ZlibDirectDecompressWrapper(ZlibDecompressor.ZlibDirectDecompressor root) {
this.root = root;
}
@Override
public void decompress(ByteBuffer input, ByteBuffer output) throws IOException {
if (!isFirstCall) {
root.reset();
} else {
isFirstCall = false;
}
root.decompress(input, output);
}
@Override
public void reset() {
root.reset();
}
@Override
public void end() {
root.end();
}
}
| 1,580 | 28.277778 | 82 | java |
null | orc-main/java/shims/src/test/org/apache/orc/impl/TestHadoopShimsPre2_7.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.orc.EncryptionAlgorithm;
import org.junit.jupiter.api.Test;
import java.sql.Date;
import java.util.HashMap;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class TestHadoopShimsPre2_7 {
@Test
public void testFindingUnknownEncryption() {
assertThrows(IllegalArgumentException.class, () -> {
KeyProvider.Metadata meta = new KMSClientProvider.KMSMetadata(
"XXX/CTR/NoPadding", 128, "", new HashMap<String, String>(),
new Date(0), 1);
HadoopShimsCurrent.findAlgorithm(meta);
});
}
@Test
public void testFindingAesEncryption() {
KeyProvider.Metadata meta = new KMSClientProvider.KMSMetadata(
"AES/CTR/NoPadding", 128, "", new HashMap<String, String>(),
new Date(0), 1);
assertEquals(EncryptionAlgorithm.AES_CTR_128,
HadoopShimsCurrent.findAlgorithm(meta));
meta = new KMSClientProvider.KMSMetadata(
"AES/CTR/NoPadding", 256, "", new HashMap<String, String>(),
new Date(0), 1);
assertEquals(EncryptionAlgorithm.AES_CTR_256,
HadoopShimsCurrent.findAlgorithm(meta));
meta = new KMSClientProvider.KMSMetadata(
"AES/CTR/NoPadding", 512, "", new HashMap<String, String>(),
new Date(0), 1);
assertEquals(EncryptionAlgorithm.AES_CTR_256,
HadoopShimsCurrent.findAlgorithm(meta));
}
}
| 2,368 | 36.603175 | 75 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/ColumnSizes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
/**
* Given a set of paths, finds all of the "*.orc" files under them and
* prints the sizes of each column, both as a percentage and the number of
 * bytes per row.
*/
public class ColumnSizes {
final Configuration conf;
final TypeDescription schema;
final long[] columnSizes;
int goodFiles = 0;
long rows = 0;
long padding = 0;
long totalSize = 0;
long stripeFooterSize = 0;
long fileFooterSize = 0;
long stripeIndex = 0;
// data bytes that aren't assigned to a specific column
long stripeData = 0;
public ColumnSizes(Configuration conf,
LocatedFileStatus file) throws IOException {
this.conf = conf;
try (Reader reader = OrcFile.createReader(file.getPath(),
OrcFile.readerOptions(conf))) {
this.schema = reader.getSchema();
columnSizes = new long[schema.getMaximumId() + 1];
addReader(file, reader);
}
}
private void checkStripes(LocatedFileStatus file,
Reader reader) {
// Count the magic as file overhead
long offset = OrcFile.MAGIC.length();
fileFooterSize += offset;
for (StripeInformation stripe: reader.getStripes()) {
padding += stripe.getOffset() - offset;
stripeIndex += stripe.getIndexLength();
stripeData += stripe.getDataLength();
stripeFooterSize += stripe.getFooterLength();
offset = stripe.getOffset() + stripe.getLength();
}
// Add everything else as the file footer
fileFooterSize += file.getLen() - offset;
}
private boolean addReader(LocatedFileStatus file,
Reader reader) {
// Validate that the schemas are the same
TypeDescription newSchema = reader.getSchema();
if (schema.equals(newSchema)) {
goodFiles += 1;
rows += reader.getNumberOfRows();
totalSize += file.getLen();
checkStripes(file, reader);
ColumnStatistics[] colStats = reader.getStatistics();
for (int c = 0; c < colStats.length && c < columnSizes.length; c++) {
columnSizes[c] += colStats[c].getBytesOnDisk();
// Don't double count. Either count the bytes as stripe data or as
// part of a column.
stripeData -= colStats[c].getBytesOnDisk();
}
} else {
System.err.println("Ignoring " + file.getPath()
+ " because of schema mismatch: " + newSchema);
return false;
}
return true;
}
public boolean addFile(LocatedFileStatus file) throws IOException {
try (Reader reader = OrcFile.createReader(file.getPath(),
OrcFile.readerOptions(conf))) {
return addReader(file, reader);
}
}
private static class StringLongPair {
final String name;
final long size;
StringLongPair(String name, long size) {
this.name = name;
this.size = size;
}
}
private void printResults(PrintStream out) {
List<StringLongPair> sizes = new ArrayList<>(columnSizes.length + 5);
for(int column = 0; column < columnSizes.length; ++column) {
if (columnSizes[column] > 0) {
sizes.add(new StringLongPair(
schema.findSubtype(column).getFullFieldName(),
columnSizes[column]));
}
}
if (padding > 0) {
sizes.add(new StringLongPair("_padding", padding));
}
if (stripeFooterSize > 0) {
sizes.add(new StringLongPair("_stripe_footer", stripeFooterSize));
}
if (fileFooterSize > 0) {
sizes.add(new StringLongPair("_file_footer", fileFooterSize));
}
if (stripeIndex > 0) {
sizes.add(new StringLongPair("_index", stripeIndex));
}
if (stripeData > 0) {
sizes.add(new StringLongPair("_data", stripeData));
}
// sort by descending size, ascending name
sizes.sort((x, y) -> x.size != y.size ?
Long.compare(y.size, x.size) : x.name.compareTo(y.name));
out.println("Percent Bytes/Row Name");
for (StringLongPair item: sizes) {
out.println(String.format(" %-5.2f %-9.2f %s",
100.0 * item.size / totalSize, (double) item.size / rows, item.name));
}
}
public static void main(Configuration conf, String[] args) throws IOException {
ColumnSizes result = null;
int badFiles = 0;
for(String root: args) {
Path rootPath = new Path(root);
FileSystem fs = rootPath.getFileSystem(conf);
for(RemoteIterator<LocatedFileStatus> itr = fs.listFiles(rootPath, true); itr.hasNext(); ) {
LocatedFileStatus status = itr.next();
if (status.isFile() && status.getPath().getName().endsWith(".orc")) {
try {
if (result == null) {
result = new ColumnSizes(conf, status);
} else {
if (!result.addFile(status)) {
badFiles += 1;
}
}
} catch (IOException err) {
badFiles += 1;
System.err.println("Failed to read " + status.getPath());
}
}
}
}
if (result == null) {
System.err.println("No files found");
} else {
result.printResults(System.out);
}
if (badFiles > 0) {
System.err.println(badFiles + " bad ORC files found.");
System.exit(1);
}
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
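// Typical usage through the ORC tools driver (a sketch; the path is hypothetical):
//
//   java -jar orc-tools-*.jar sizes /warehouse/my_table
//
// This recursively scans *.orc files under the given roots and prints the
// "Percent  Bytes/Row  Name" table produced by printResults above.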
| 6,667 | 32.847716 | 98 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/Driver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.tools.convert.ConvertTool;
import org.apache.orc.tools.json.JsonSchemaFinder;
import java.util.Map;
import java.util.Properties;
/**
* Driver program for the java ORC utilities.
*/
public class Driver {
@SuppressWarnings("static-access")
static Options createOptions() {
Options result = new Options();
result.addOption(Option.builder("h")
.longOpt("help")
.desc("Print help message")
.build());
result.addOption(Option.builder("D")
.longOpt("define")
.desc("Set a configuration property")
.numberOfArgs(2)
.valueSeparator()
.build());
return result;
}
static class DriverOptions {
final CommandLine genericOptions;
final String command;
final String[] commandArgs;
DriverOptions(String[] args) throws ParseException {
genericOptions = new DefaultParser().parse(createOptions(), args, true);
String[] unprocessed = genericOptions.getArgs();
if (unprocessed.length == 0) {
command = null;
commandArgs = new String[0];
} else {
command = unprocessed[0];
if (genericOptions.hasOption('h')) {
commandArgs = new String[]{"-h"};
} else {
commandArgs = new String[unprocessed.length - 1];
System.arraycopy(unprocessed, 1, commandArgs, 0, commandArgs.length);
}
}
}
}
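  // A small worked example of the split above (hypothetical arguments): given
  // {"-D", "orc.compress=ZSTD", "meta", "-j", "file.orc"}, parsing stops at the first
  // non-option token, so command becomes "meta" and commandArgs becomes {"-j", "file.orc"};
  // the -D definition is later copied into the Configuration in main().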
public static void main(String[] args) throws Exception {
DriverOptions options = new DriverOptions(args);
if (options.command == null) {
System.err.println("ORC Java Tools");
System.err.println();
System.err.println("usage: java -jar orc-tools-*.jar [--help]" +
" [--define X=Y] <command> <args>");
System.err.println();
System.err.println("Commands:");
System.err.println(" convert - convert CSV and JSON files to ORC");
System.err.println(" count - recursively find *.orc and print the number of rows");
System.err.println(" data - print the data from the ORC file");
System.err.println(" json-schema - scan JSON files to determine their schema");
System.err.println(" key - print information about the keys");
System.err.println(" meta - print the metadata about the ORC file");
System.err.println(" scan - scan the ORC file");
System.err.println(" sizes - list size on disk of each column");
System.err.println(" version - print the version of this ORC tool");
System.err.println();
System.err.println("To get more help, provide -h to the command");
System.exit(1);
}
Configuration conf = new Configuration();
Properties confSettings = options.genericOptions.getOptionProperties("D");
for(Map.Entry pair: confSettings.entrySet()) {
conf.set(pair.getKey().toString(), pair.getValue().toString());
}
switch (options.command) {
case "convert":
ConvertTool.main(conf, options.commandArgs);
break;
case "count":
RowCount.main(conf, options.commandArgs);
break;
case "data":
PrintData.main(conf, options.commandArgs);
break;
case "json-schema":
JsonSchemaFinder.main(conf, options.commandArgs);
break;
case "key":
KeyTool.main(conf, options.commandArgs);
break;
case "meta":
FileDump.main(conf, options.commandArgs);
break;
case "scan":
ScanData.main(conf, options.commandArgs);
break;
case "sizes":
ColumnSizes.main(conf, options.commandArgs);
break;
case "version":
PrintVersion.main(conf, options.commandArgs);
break;
default:
System.err.println("Unknown subcommand: " + options.command);
System.exit(1);
}
}
}
| 4,920 | 33.900709 | 91 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/FileDump.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.Reader;
import org.apache.orc.StripeInformation;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.impl.ColumnStatisticsImpl;
import org.apache.orc.impl.OrcAcidUtils;
import org.apache.orc.impl.OrcIndex;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.impl.RecordReaderImpl;
import org.apache.orc.util.BloomFilter;
import org.apache.orc.util.BloomFilterIO;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* A tool for printing out the file structure of ORC files.
*/
public final class FileDump {
public static final String UNKNOWN = "UNKNOWN";
public static final String SEPARATOR = StringUtils.repeat("_", 120) + "\n";
public static final String RECOVER_READ_SIZE = "orc.recover.read.size"; // only for testing
public static final int DEFAULT_BLOCK_SIZE = 256 * 1024 * 1024;
public static final String DEFAULT_BACKUP_PATH = System.getProperty("java.io.tmpdir");
public static final PathFilter HIDDEN_AND_SIDE_FILE_FILTER = new PathFilter() {
@Override
public boolean accept(Path p) {
String name = p.getName();
return !name.startsWith("_") && !name.startsWith(".") && !name.endsWith(
OrcAcidUtils.DELTA_SIDE_FILE_SUFFIX);
}
};
// not used
private FileDump() {
}
public static void main(Configuration conf, String[] args) throws Exception {
List<Integer> rowIndexCols = new ArrayList<Integer>(0);
Options opts = createOptions();
CommandLine cli = new DefaultParser().parse(opts, args);
if (cli.hasOption('h')) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("orcfiledump", opts);
return;
}
boolean dumpData = cli.hasOption('d');
boolean recover = cli.hasOption("recover");
boolean skipDump = cli.hasOption("skip-dump");
String backupPath = DEFAULT_BACKUP_PATH;
if (cli.hasOption("backup-path")) {
backupPath = cli.getOptionValue("backup-path");
}
if (cli.hasOption("r")) {
String val = cli.getOptionValue("r");
if (val != null && val.trim().equals("*")) {
rowIndexCols = null; // All the columns
} else {
String[] colStrs = cli.getOptionValue("r").split(",");
rowIndexCols = new ArrayList<Integer>(colStrs.length);
for (String colStr : colStrs) {
rowIndexCols.add(Integer.parseInt(colStr));
}
}
}
boolean printTimeZone = cli.hasOption('t');
boolean jsonFormat = cli.hasOption('j');
String[] files = cli.getArgs();
if (files.length == 0) {
System.err.println("Error : ORC files are not specified");
return;
}
// if the specified path is directory, iterate through all files and print the file dump
List<String> filesInPath = new ArrayList<>();
for (String filename : files) {
Path path = new Path(filename);
filesInPath.addAll(getAllFilesInPath(path, conf));
}
if (dumpData) {
PrintData.main(conf, filesInPath.toArray(new String[filesInPath.size()]));
} else if (recover && skipDump) {
recoverFiles(filesInPath, conf, backupPath);
} else {
if (jsonFormat) {
boolean prettyPrint = cli.hasOption('p');
JsonFileDump.printJsonMetaData(filesInPath, conf, rowIndexCols, prettyPrint, printTimeZone);
} else {
printMetaData(filesInPath, conf, rowIndexCols, printTimeZone, recover, backupPath);
}
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
main(conf, args);
}
/**
   * This method returns an ORC reader object if the specified file is readable. If the specified
   * file has a side file (_flush_length), then the max footer offset will be read from the side
   * file and the ORC reader will be created from that offset. Since both the data file and the
   * side file use hflush() for flushing the data, there could be some inconsistencies and the two
   * files could be out of sync. The following are the cases under which null will be returned:
*
* 1) If the file specified by path or its side file is still open for writes
* 2) If *_flush_length file does not return any footer offset
* 3) If *_flush_length returns a valid footer offset but the data file is not readable at that
* position (incomplete data file)
   * 4) If the *_flush_length file length is not a multiple of 8, then the reader will be created
   *    from the previous valid footer. If there is no such footer (file length > 0 and < 8), then
   *    null will be returned
*
* Also, if this method detects any file corruption (mismatch between data file and side file)
* then it will add the corresponding file to the specified input list for corrupted files.
*
* In all other cases, where the file is readable this method will return a reader object.
*
* @param path - file to get reader for
* @param conf - configuration object
* @param corruptFiles - fills this list with all possible corrupted files
* @return - reader for the specified file or null
* @throws IOException
*/
static Reader getReader(final Path path, final Configuration conf,
final List<String> corruptFiles) throws IOException {
FileSystem fs = path.getFileSystem(conf);
long dataFileLen = fs.getFileStatus(path).getLen();
System.err.println("Processing data file " + path + " [length: " + dataFileLen + "]");
Path sideFile = OrcAcidUtils.getSideFile(path);
final boolean sideFileExists = fs.exists(sideFile);
boolean openDataFile = false;
boolean openSideFile = false;
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem) fs;
openDataFile = !dfs.isFileClosed(path);
openSideFile = sideFileExists && !dfs.isFileClosed(sideFile);
}
if (openDataFile || openSideFile) {
if (openDataFile && openSideFile) {
System.err.println("Unable to perform file dump as " + path + " and " + sideFile +
" are still open for writes.");
} else if (openSideFile) {
System.err.println("Unable to perform file dump as " + sideFile +
" is still open for writes.");
} else {
System.err.println("Unable to perform file dump as " + path +
" is still open for writes.");
}
return null;
}
Reader reader = null;
if (sideFileExists) {
final long maxLen = OrcAcidUtils.getLastFlushLength(fs, path);
final long sideFileLen = fs.getFileStatus(sideFile).getLen();
System.err.println("Found flush length file " + sideFile
+ " [length: " + sideFileLen + ", maxFooterOffset: " + maxLen + "]");
// no offsets read from side file
if (maxLen == -1) {
// if data file is larger than last flush length, then additional data could be recovered
if (dataFileLen > maxLen) {
System.err.println("Data file has more data than max footer offset:" + maxLen +
". Adding data file to recovery list.");
if (corruptFiles != null) {
corruptFiles.add(path.toUri().toString());
}
}
return null;
}
try {
reader = OrcFile.createReader(path, OrcFile.readerOptions(conf).maxLength(maxLen));
// if data file is larger than last flush length, then additional data could be recovered
if (dataFileLen > maxLen) {
System.err.println("Data file has more data than max footer offset:" + maxLen +
". Adding data file to recovery list.");
if (corruptFiles != null) {
corruptFiles.add(path.toUri().toString());
}
}
} catch (Exception e) {
if (corruptFiles != null) {
corruptFiles.add(path.toUri().toString());
}
System.err.println("Unable to read data from max footer offset." +
" Adding data file to recovery list.");
return null;
}
} else {
reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
}
return reader;
}
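  // An illustrative case for the logic above, with hypothetical lengths: if the data file
  // is 1000 bytes and the _flush_length side file reports a max footer offset of 900, a
  // reader is created with maxLength(900) and the file is also added to corruptFiles,
  // because the trailing 100 bytes may still be recoverable.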
public static Collection<String> getAllFilesInPath(final Path path,
final Configuration conf) throws IOException {
List<String> filesInPath = new ArrayList<>();
FileSystem fs = path.getFileSystem(conf);
FileStatus fileStatus = fs.getFileStatus(path);
if (fileStatus.isDirectory()) {
FileStatus[] fileStatuses = fs.listStatus(path, HIDDEN_AND_SIDE_FILE_FILTER);
for (FileStatus fileInPath : fileStatuses) {
if (fileInPath.isDirectory()) {
filesInPath.addAll(getAllFilesInPath(fileInPath.getPath(), conf));
} else {
filesInPath.add(fileInPath.getPath().toString());
}
}
} else {
filesInPath.add(path.toString());
}
return filesInPath;
}
private static void printMetaData(List<String> files, Configuration conf,
List<Integer> rowIndexCols, boolean printTimeZone, final boolean recover,
final String backupPath)
throws IOException {
List<String> corruptFiles = new ArrayList<>();
for (String filename : files) {
printMetaDataImpl(filename, conf, rowIndexCols, printTimeZone, corruptFiles);
System.out.println(SEPARATOR);
}
if (!corruptFiles.isEmpty()) {
if (recover) {
recoverFiles(corruptFiles, conf, backupPath);
} else {
System.err.println(corruptFiles.size() + " file(s) are corrupted." +
" Run the following command to recover corrupted files.\n");
StringBuilder buffer = new StringBuilder();
buffer.append("hive --orcfiledump --recover --skip-dump");
for(String file: corruptFiles) {
buffer.append(' ');
buffer.append(file);
}
System.err.println(buffer);
System.out.println(SEPARATOR);
}
}
}
static void printTypeAnnotations(TypeDescription type, String prefix) {
List<String> attributes = type.getAttributeNames();
if (attributes.size() > 0) {
System.out.println("Attributes on " + prefix);
for(String attr: attributes) {
System.out.println(" " + attr + ": " + type.getAttributeValue(attr));
}
}
List<TypeDescription> children = type.getChildren();
if (children != null) {
switch (type.getCategory()) {
case STRUCT:
List<String> fields = type.getFieldNames();
for(int c = 0; c < children.size(); ++c) {
printTypeAnnotations(children.get(c), prefix + "." + fields.get(c));
}
break;
case MAP:
printTypeAnnotations(children.get(0), prefix + "._key");
printTypeAnnotations(children.get(1), prefix + "._value");
break;
case LIST:
printTypeAnnotations(children.get(0), prefix + "._elem");
break;
case UNION:
for(int c = 0; c < children.size(); ++c) {
printTypeAnnotations(children.get(c), prefix + "._" + c);
}
break;
}
}
}
private static void printMetaDataImpl(final String filename,
final Configuration conf, List<Integer> rowIndexCols, final boolean printTimeZone,
final List<String> corruptFiles) throws IOException {
Path file = new Path(filename);
Reader reader = getReader(file, conf, corruptFiles);
    // if we can create a reader then the footer is not corrupt and the file will be readable
if (reader == null) {
return;
}
TypeDescription schema = reader.getSchema();
System.out.println("Structure for " + filename);
System.out.println("File Version: " + reader.getFileVersion().getName() +
" with " + reader.getWriterVersion() + " by " +
reader.getSoftwareVersion());
RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
System.out.println("Rows: " + reader.getNumberOfRows());
System.out.println("Compression: " + reader.getCompressionKind());
if (reader.getCompressionKind() != CompressionKind.NONE) {
System.out.println("Compression size: " + reader.getCompressionSize());
}
System.out.println("Calendar: " + (reader.writerUsedProlepticGregorian()
? "Proleptic Gregorian"
: "Julian/Gregorian"));
System.out.println("Type: " + reader.getSchema().toString());
printTypeAnnotations(reader.getSchema(), "root");
System.out.println("\nStripe Statistics:");
List<StripeStatistics> stripeStats = reader.getStripeStatistics();
for (int n = 0; n < stripeStats.size(); n++) {
System.out.println(" Stripe " + (n + 1) + ":");
StripeStatistics ss = stripeStats.get(n);
for (int i = 0; i < ss.getColumnStatistics().length; ++i) {
System.out.println(" Column " + i + ": " +
ss.getColumnStatistics()[i].toString());
}
}
ColumnStatistics[] stats = reader.getStatistics();
int colCount = stats.length;
if (rowIndexCols == null) {
rowIndexCols = new ArrayList<>(colCount);
for (int i = 0; i < colCount; ++i) {
rowIndexCols.add(i);
}
}
System.out.println("\nFile Statistics:");
for (int i = 0; i < stats.length; ++i) {
System.out.println(" Column " + i + ": " + stats[i].toString());
}
System.out.println("\nStripes:");
int stripeIx = -1;
for (StripeInformation stripe : reader.getStripes()) {
++stripeIx;
long stripeStart = stripe.getOffset();
OrcProto.StripeFooter footer = rows.readStripeFooter(stripe);
if (printTimeZone) {
String tz = footer.getWriterTimezone();
if (tz == null || tz.isEmpty()) {
tz = UNKNOWN;
}
System.out.println(" Stripe: " + stripe + " timezone: " + tz);
} else {
System.out.println(" Stripe: " + stripe);
}
long sectionStart = stripeStart;
for (OrcProto.Stream section : footer.getStreamsList()) {
String kind = section.hasKind() ? section.getKind().name() : UNKNOWN;
System.out.println(" Stream: column " + section.getColumn() +
" section " + kind + " start: " + sectionStart +
" length " + section.getLength());
sectionStart += section.getLength();
}
for (int i = 0; i < footer.getColumnsCount(); ++i) {
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
StringBuilder buf = new StringBuilder();
buf.append(" Encoding column ");
buf.append(i);
buf.append(": ");
buf.append(encoding.getKind());
if (encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
buf.append("[");
buf.append(encoding.getDictionarySize());
buf.append("]");
}
System.out.println(buf);
}
if (rowIndexCols != null && !rowIndexCols.isEmpty()) {
        // mark only the specified columns; the bloom filter is read only for columns
        // that are included
boolean[] sargColumns = new boolean[colCount];
for (int colIdx : rowIndexCols) {
sargColumns[colIdx] = true;
}
OrcIndex indices = rows.readRowIndex(stripeIx, null, sargColumns);
for (int col : rowIndexCols) {
StringBuilder buf = new StringBuilder();
String rowIdxString = getFormattedRowIndices(col,
indices.getRowGroupIndex(), schema, (ReaderImpl) reader);
buf.append(rowIdxString);
String bloomFilString = getFormattedBloomFilters(col, indices,
reader.getWriterVersion(),
reader.getSchema().findSubtype(col).getCategory(),
footer.getColumns(col));
buf.append(bloomFilString);
System.out.println(buf);
}
}
}
FileSystem fs = file.getFileSystem(conf);
long fileLen = fs.getFileStatus(file).getLen();
long paddedBytes = getTotalPaddingSize(reader);
double percentPadding = (fileLen == 0) ? 0.0d : 100.0d * paddedBytes / fileLen;
DecimalFormat format = new DecimalFormat("##.##");
System.out.println("\nFile length: " + fileLen + " bytes");
System.out.println("Padding length: " + paddedBytes + " bytes");
System.out.println("Padding ratio: " + format.format(percentPadding) + "%");
//print out any user metadata properties
List<String> keys = reader.getMetadataKeys();
for(int i = 0; i < keys.size(); i++) {
if(i == 0) {
System.out.println("\nUser Metadata:");
}
ByteBuffer byteBuffer = reader.getMetadataValue(keys.get(i));
System.out.println(" " + keys.get(i) + "="
+ StandardCharsets.UTF_8.decode(byteBuffer));
}
rows.close();
}
private static void recoverFiles(final List<String> corruptFiles, final Configuration conf,
final String backup)
throws IOException {
byte[] magicBytes = OrcFile.MAGIC.getBytes(StandardCharsets.UTF_8);
int magicLength = magicBytes.length;
int readSize = conf.getInt(RECOVER_READ_SIZE, DEFAULT_BLOCK_SIZE);
for (String corruptFile : corruptFiles) {
System.err.println("Recovering file " + corruptFile);
Path corruptPath = new Path(corruptFile);
FileSystem fs = corruptPath.getFileSystem(conf);
FSDataInputStream fdis = fs.open(corruptPath);
try {
long corruptFileLen = fs.getFileStatus(corruptPath).getLen();
long remaining = corruptFileLen;
List<Long> footerOffsets = new ArrayList<>();
        // start reading the data file from top to bottom and record the valid footers
while (remaining > 0 && corruptFileLen > (2L * magicLength)) {
int toRead = (int) Math.min(readSize, remaining);
long startPos = corruptFileLen - remaining;
byte[] data;
if (startPos == 0) {
data = new byte[toRead];
fdis.readFully(startPos, data, 0, toRead);
          } else {
            // For subsequent reads, move startPos back by magicLength bytes so that an
            // OrcFile.MAGIC spanning two adjacent reads is not missed
startPos = startPos - magicLength;
data = new byte[toRead + magicLength];
fdis.readFully(startPos, data, 0, toRead + magicLength);
}
// find all MAGIC string and see if the file is readable from there
int index = 0;
long nextFooterOffset;
while (index != -1) {
            // There are two reasons for searching from index + 1:
            // 1. to skip the OrcFile.MAGIC in the file header once the first match is made
            // 2. after a successful match, to advance past that position and search for
            //    the subsequent OrcFile.MAGIC
index = indexOf(data, magicBytes, index + 1);
if (index != -1) {
nextFooterOffset = startPos + index + magicBytes.length + 1;
if (isReadable(corruptPath, conf, nextFooterOffset)) {
footerOffsets.add(nextFooterOffset);
}
}
}
System.err.println("Scanning for valid footers - startPos: " + startPos +
" toRead: " + toRead + " remaining: " + remaining);
remaining = remaining - toRead;
}
System.err.println("Readable footerOffsets: " + footerOffsets);
recoverFile(corruptPath, fs, conf, footerOffsets, backup);
} catch (Exception e) {
Path recoveryFile = getRecoveryFile(corruptPath);
if (fs.exists(recoveryFile)) {
fs.delete(recoveryFile, false);
}
System.err.println("Unable to recover file " + corruptFile);
e.printStackTrace();
System.err.println(SEPARATOR);
continue;
} finally {
fdis.close();
}
System.err.println(corruptFile + " recovered successfully!");
System.err.println(SEPARATOR);
}
}
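  // A sketch of the overlapping scan above, with a hypothetical read size of 1000 bytes:
  // the first read covers [0, 1000); the second starts at 1000 - magicLength and reads
  // 1000 + magicLength bytes, so an OrcFile.MAGIC straddling the 1000-byte boundary is
  // still found by indexOf(), and every offset that proves readable is added to footerOffsets.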
private static void recoverFile(final Path corruptPath, final FileSystem fs,
final Configuration conf, final List<Long> footerOffsets, final String backup)
throws IOException {
    // first recover the data into a .recovered file and then, once successful,
    // rename it to the actual file
Path recoveredPath = getRecoveryFile(corruptPath);
// make sure that file does not exist
if (fs.exists(recoveredPath)) {
fs.delete(recoveredPath, false);
}
    // if there are no valid footers, the recovered file should still be readable, so create an
    // empty ORC file
if (footerOffsets == null || footerOffsets.isEmpty()) {
System.err.println("No readable footers found. Creating empty orc file.");
TypeDescription schema = TypeDescription.createStruct();
Writer writer = OrcFile.createWriter(recoveredPath,
OrcFile.writerOptions(conf).setSchema(schema));
writer.close();
} else {
FSDataInputStream fdis = fs.open(corruptPath);
FileStatus fileStatus = fs.getFileStatus(corruptPath);
// read corrupt file and copy it to recovered file until last valid footer
FSDataOutputStream fdos = fs.create(recoveredPath, true,
conf.getInt("io.file.buffer.size", 4096),
fileStatus.getReplication(),
fileStatus.getBlockSize());
try {
long fileLen = footerOffsets.get(footerOffsets.size() - 1);
long remaining = fileLen;
while (remaining > 0) {
int toRead = (int) Math.min(DEFAULT_BLOCK_SIZE, remaining);
byte[] data = new byte[toRead];
long startPos = fileLen - remaining;
fdis.readFully(startPos, data, 0, toRead);
fdos.write(data);
System.err.println("Copying data to recovery file - startPos: " + startPos +
" toRead: " + toRead + " remaining: " + remaining);
remaining = remaining - toRead;
}
} catch (Exception e) {
fs.delete(recoveredPath, false);
throw new IOException(e);
} finally {
fdis.close();
fdos.close();
}
}
// validate the recovered file once again and start moving corrupt files to backup folder
if (isReadable(recoveredPath, conf, Long.MAX_VALUE)) {
Path backupDataPath;
Path backupDirPath;
Path relativeCorruptPath;
String scheme = corruptPath.toUri().getScheme();
String authority = corruptPath.toUri().getAuthority();
// use the same filesystem as corrupt file if backup-path is not explicitly specified
if (backup.equals(DEFAULT_BACKUP_PATH)) {
backupDirPath = new Path(scheme, authority, DEFAULT_BACKUP_PATH);
} else {
backupDirPath = new Path(backup);
}
if (corruptPath.isUriPathAbsolute()) {
relativeCorruptPath = corruptPath;
} else {
relativeCorruptPath = Path.mergePaths(new Path(Path.SEPARATOR), corruptPath);
}
backupDataPath = Path.mergePaths(backupDirPath, relativeCorruptPath);
// Move data file to backup path
moveFiles(fs, corruptPath, backupDataPath);
// Move side file to backup path
Path sideFilePath = OrcAcidUtils.getSideFile(corruptPath);
if (fs.exists(sideFilePath)) {
Path backupSideFilePath = new Path(backupDataPath.getParent(), sideFilePath.getName());
moveFiles(fs, sideFilePath, backupSideFilePath);
}
// finally move recovered file to actual file
moveFiles(fs, recoveredPath, corruptPath);
// we are done recovering, backing up and validating
System.err.println("Validation of recovered file successful!");
}
}
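  // Backup layout sketch (hypothetical paths): with the default backup path, the corrupt
  // file's own path is appended under the temp directory, e.g. /warehouse/t/bucket_0 is
  // moved to /tmp/warehouse/t/bucket_0 on the same filesystem, its side file is moved next
  // to it, and only then is the recovered file renamed over the original path.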
private static void moveFiles(final FileSystem fs, final Path src, final Path dest)
throws IOException {
try {
// create the dest directory if not exist
if (!fs.exists(dest.getParent())) {
fs.mkdirs(dest.getParent());
}
// if the destination file exists for some reason delete it
fs.delete(dest, false);
if (fs.rename(src, dest)) {
System.err.println("Moved " + src + " to " + dest);
} else {
throw new IOException("Unable to move " + src + " to " + dest);
}
} catch (Exception e) {
throw new IOException("Unable to move " + src + " to " + dest, e);
}
}
private static Path getRecoveryFile(final Path corruptPath) {
return new Path(corruptPath.getParent(), corruptPath.getName() + ".recovered");
}
private static boolean isReadable(final Path corruptPath, final Configuration conf,
final long maxLen) {
try {
OrcFile.createReader(corruptPath, OrcFile.readerOptions(conf).maxLength(maxLen));
return true;
} catch (Exception e) {
// ignore this exception as maxLen is unreadable
return false;
}
}
// search for byte pattern in another byte array
public static int indexOf(final byte[] data, final byte[] pattern, final int index) {
if (data == null || data.length == 0 || pattern == null || pattern.length == 0 ||
index > data.length || index < 0) {
return -1;
}
for (int i = index; i < data.length - pattern.length + 1; i++) {
boolean found = true;
for (int j = 0; j < pattern.length; j++) {
if (data[i + j] != pattern[j]) {
found = false;
break;
}
}
if (found) return i;
}
return -1;
}
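  // For example, with hypothetical bytes: indexOf(new byte[]{1, 2, 3, 2, 3}, new byte[]{2, 3}, 2)
  // returns 3, the start of the first occurrence of the pattern at or after the given index.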
private static String getFormattedBloomFilters(int col, OrcIndex index,
OrcFile.WriterVersion version,
TypeDescription.Category type,
OrcProto.ColumnEncoding encoding) {
OrcProto.BloomFilterIndex[] bloomFilterIndex = index.getBloomFilterIndex();
StringBuilder buf = new StringBuilder();
BloomFilter stripeLevelBF = null;
if (bloomFilterIndex != null && bloomFilterIndex[col] != null) {
int idx = 0;
buf.append("\n Bloom filters for column ").append(col).append(":");
for (OrcProto.BloomFilter bf : bloomFilterIndex[col].getBloomFilterList()) {
BloomFilter toMerge = BloomFilterIO.deserialize(
index.getBloomFilterKinds()[col], encoding, version, type, bf);
buf.append("\n Entry ").append(idx++).append(":").append(getBloomFilterStats(toMerge));
if (stripeLevelBF == null) {
stripeLevelBF = toMerge;
} else {
stripeLevelBF.merge(toMerge);
}
}
String bloomFilterStats = getBloomFilterStats(stripeLevelBF);
buf.append("\n Stripe level merge:").append(bloomFilterStats);
}
return buf.toString();
}
private static String getBloomFilterStats(BloomFilter bf) {
StringBuilder sb = new StringBuilder();
int bitCount = bf.getBitSize();
int popCount = 0;
for (long l : bf.getBitSet()) {
popCount += Long.bitCount(l);
}
int k = bf.getNumHashFunctions();
float loadFactor = (float) popCount / (float) bitCount;
float expectedFpp = (float) Math.pow(loadFactor, k);
DecimalFormat df = new DecimalFormat("###.####");
sb.append(" numHashFunctions: ").append(k);
sb.append(" bitCount: ").append(bitCount);
sb.append(" popCount: ").append(popCount);
sb.append(" loadFactor: ").append(df.format(loadFactor));
sb.append(" expectedFpp: ").append(expectedFpp);
return sb.toString();
}
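  // A worked example with hypothetical values: for a bloom filter with bitCount = 10000,
  // popCount = 5000 and k = 3 hash functions, loadFactor = 5000 / 10000 = 0.5 and
  // expectedFpp = 0.5^3 = 0.125.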
private static String getFormattedRowIndices(int col,
OrcProto.RowIndex[] rowGroupIndex,
TypeDescription schema,
ReaderImpl reader) {
StringBuilder buf = new StringBuilder();
OrcProto.RowIndex index;
buf.append(" Row group indices for column ").append(col).append(":");
if (rowGroupIndex == null || (col >= rowGroupIndex.length) ||
((index = rowGroupIndex[col]) == null)) {
buf.append(" not found\n");
return buf.toString();
}
TypeDescription colSchema = schema.findSubtype(col);
for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) {
buf.append("\n Entry ").append(entryIx).append(": ");
OrcProto.RowIndexEntry entry = index.getEntry(entryIx);
if (entry == null) {
buf.append("unknown\n");
continue;
}
if (!entry.hasStatistics()) {
buf.append("no stats at ");
} else {
OrcProto.ColumnStatistics colStats = entry.getStatistics();
ColumnStatistics cs =
ColumnStatisticsImpl.deserialize(colSchema, colStats,
reader.writerUsedProlepticGregorian(),
reader.getConvertToProlepticGregorian());
buf.append(cs);
}
buf.append(" positions: ");
for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) {
if (posIx != 0) {
buf.append(",");
}
buf.append(entry.getPositions(posIx));
}
}
return buf.toString();
}
public static long getTotalPaddingSize(Reader reader) throws IOException {
long paddedBytes = 0;
List<StripeInformation> stripes = reader.getStripes();
for (int i = 1; i < stripes.size(); i++) {
long prevStripeOffset = stripes.get(i - 1).getOffset();
long prevStripeLen = stripes.get(i - 1).getLength();
paddedBytes += stripes.get(i).getOffset() - (prevStripeOffset + prevStripeLen);
}
return paddedBytes;
}
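  // A minimal example with hypothetical stripes: if stripe 0 starts at offset 3 with
  // length 100 (so it ends at 103) and stripe 1 starts at offset 110, then
  // 110 - 103 = 7 bytes are counted as padding.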
@SuppressWarnings("static-access")
static Options createOptions() {
Options result = new Options();
// add -d and --data to print the rows
result.addOption(Option.builder("d")
.longOpt("data")
.desc("Should the data be printed")
.build());
// to avoid breaking unit tests (when run in different time zones) for file dump, printing
// of timezone is made optional
result.addOption(Option.builder("t")
.longOpt("timezone")
.desc("Print writer's time zone")
.build());
result.addOption(Option.builder("h")
.longOpt("help")
.desc("Print help message")
.build());
result.addOption(Option.builder("r")
.longOpt("rowindex")
.argName("comma separated list of column ids for which row index should be printed")
.desc("Dump stats for column number(s)")
.hasArg()
.build());
result.addOption(Option.builder("j")
.longOpt("json")
.desc("Print metadata in JSON format")
.build());
result.addOption(Option.builder("p")
.longOpt("pretty")
.desc("Pretty print json metadata output")
.build());
result.addOption(Option.builder()
.longOpt("recover")
.desc("recover corrupted orc files generated by streaming")
.build());
result.addOption(Option.builder()
.longOpt("skip-dump")
.desc("used along with --recover to directly recover files without dumping")
.build());
result.addOption(Option.builder()
.longOpt("backup-path")
.desc("specify a backup path to store the corrupted files (default: /tmp)")
.hasArg()
.build());
return result;
}
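  // Typical invocations through the ORC tools driver (a sketch; the file name is hypothetical):
  //
  //   java -jar orc-tools-*.jar meta my_file.orc
  //   java -jar orc-tools-*.jar meta -j -p --rowindex 1,2,3 my_file.orc
  //
  // The first prints the plain-text structure; the second prints pretty JSON including the
  // row indexes (and bloom filters, if present) for columns 1, 2 and 3.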
}
| 32,912 | 38.182143 | 100 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/JsonFileDump.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import com.google.gson.stream.JsonWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.orc.BinaryColumnStatistics;
import org.apache.orc.BooleanColumnStatistics;
import org.apache.orc.CollectionColumnStatistics;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionKind;
import org.apache.orc.DateColumnStatistics;
import org.apache.orc.DecimalColumnStatistics;
import org.apache.orc.DoubleColumnStatistics;
import org.apache.orc.IntegerColumnStatistics;
import org.apache.orc.OrcFile;
import org.apache.orc.OrcProto;
import org.apache.orc.Reader;
import org.apache.orc.StringColumnStatistics;
import org.apache.orc.StripeInformation;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TimestampColumnStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.impl.AcidStats;
import org.apache.orc.impl.ColumnStatisticsImpl;
import org.apache.orc.impl.OrcAcidUtils;
import org.apache.orc.impl.OrcIndex;
import org.apache.orc.impl.ReaderImpl;
import org.apache.orc.impl.RecordReaderImpl;
import org.apache.orc.util.BloomFilter;
import org.apache.orc.util.BloomFilterIO;
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
/**
* File dump tool with json formatted output.
*/
public class JsonFileDump {
public static void printJsonMetaData(List<String> files,
Configuration conf,
List<Integer> rowIndexCols, boolean prettyPrint, boolean printTimeZone)
throws IOException {
if (files.isEmpty()) {
return;
}
StringWriter stringWriter = new StringWriter();
JsonWriter writer = new JsonWriter(stringWriter);
if (prettyPrint) {
writer.setIndent(" ");
}
boolean multiFile = files.size() > 1;
if (multiFile) {
writer.beginArray();
} else {
writer.beginObject();
}
for (String filename : files) {
try {
if (multiFile) {
writer.beginObject();
}
writer.name("fileName").value(filename);
Path path = new Path(filename);
Reader reader = FileDump.getReader(path, conf, null);
if (reader == null) {
writer.name("status").value("FAILED");
continue;
}
writer.name("fileVersion").value(reader.getFileVersion().getName());
writer.name("writerVersion").value(reader.getWriterVersion().toString());
writer.name("softwareVersion").value(reader.getSoftwareVersion());
RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
writer.name("numberOfRows").value(reader.getNumberOfRows());
writer.name("compression").value(reader.getCompressionKind().toString());
if (reader.getCompressionKind() != CompressionKind.NONE) {
writer.name("compressionBufferSize").value(reader.getCompressionSize());
}
writer.name("schemaString").value(reader.getSchema().toString());
writer.name("schema");
writeSchema(writer, reader.getSchema());
writer.name("calendar").value(reader.writerUsedProlepticGregorian()
? "proleptic Gregorian"
: "Julian/Gregorian");
writer.name("stripeStatistics").beginArray();
List<StripeStatistics> stripeStatistics = reader.getStripeStatistics();
for (int n = 0; n < stripeStatistics.size(); n++) {
writer.beginObject();
writer.name("stripeNumber").value(n + 1);
StripeStatistics ss = stripeStatistics.get(n);
writer.name("columnStatistics").beginArray();
for (int i = 0; i < ss.getColumnStatistics().length; i++) {
writer.beginObject();
writer.name("columnId").value(i);
writeColumnStatistics(writer, ss.getColumnStatistics()[i]);
writer.endObject();
}
writer.endArray();
writer.endObject();
}
writer.endArray();
ColumnStatistics[] stats = reader.getStatistics();
int colCount = stats.length;
if (rowIndexCols == null) {
rowIndexCols = new ArrayList<>(colCount);
for (int i = 0; i < colCount; ++i) {
rowIndexCols.add(i);
}
}
writer.name("fileStatistics").beginArray();
for (int i = 0; i < stats.length; ++i) {
writer.beginObject();
writer.name("columnId").value(i);
writeColumnStatistics(writer, stats[i]);
writer.endObject();
}
writer.endArray();
writer.name("stripes").beginArray();
int stripeIx = -1;
for (StripeInformation stripe : reader.getStripes()) {
++stripeIx;
long stripeStart = stripe.getOffset();
OrcProto.StripeFooter footer = rows.readStripeFooter(stripe);
writer.beginObject(); // start of stripe information
writer.name("stripeNumber").value(stripeIx + 1);
writer.name("stripeInformation");
writeStripeInformation(writer, stripe);
if (printTimeZone) {
writer.name("writerTimezone").value(
footer.hasWriterTimezone() ? footer.getWriterTimezone() : FileDump.UNKNOWN);
}
long sectionStart = stripeStart;
writer.name("streams").beginArray();
for (OrcProto.Stream section : footer.getStreamsList()) {
writer.beginObject();
String kind = section.hasKind() ? section.getKind().name() : FileDump.UNKNOWN;
writer.name("columnId").value(section.getColumn());
writer.name("section").value(kind);
writer.name("startOffset").value(sectionStart);
writer.name("length").value(section.getLength());
sectionStart += section.getLength();
writer.endObject();
}
writer.endArray();
writer.name("encodings").beginArray();
for (int i = 0; i < footer.getColumnsCount(); ++i) {
writer.beginObject();
OrcProto.ColumnEncoding encoding = footer.getColumns(i);
writer.name("columnId").value(i);
writer.name("kind").value(encoding.getKind().toString());
if (encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
writer.name("dictionarySize").value(encoding.getDictionarySize());
}
writer.endObject();
}
writer.endArray();
if (!rowIndexCols.isEmpty()) {
            // mark only the specified columns; the bloom filter is read only for columns
            // that are included
boolean[] sargColumns = new boolean[colCount];
for (int colIdx : rowIndexCols) {
sargColumns[colIdx] = true;
}
OrcIndex indices = rows.readRowIndex(stripeIx, null, sargColumns);
writer.name("indexes").beginArray();
for (int col : rowIndexCols) {
writer.beginObject();
writer.name("columnId").value(col);
writeRowGroupIndexes(writer, col, indices.getRowGroupIndex(),
reader.getSchema(), (ReaderImpl) reader);
writeBloomFilterIndexes(writer, col, indices,
reader.getWriterVersion(),
reader.getSchema().findSubtype(col).getCategory(),
footer.getColumns(col));
writer.endObject();
}
writer.endArray();
}
writer.endObject(); // end of stripe information
}
writer.endArray();
FileSystem fs = path.getFileSystem(conf);
long fileLen = fs.getContentSummary(path).getLength();
long paddedBytes = FileDump.getTotalPaddingSize(reader);
        // an empty ORC file is ~45 bytes, so the file length is assumed to always be > 0
double percentPadding = ((double) paddedBytes / (double) fileLen) * 100;
writer.name("fileLength").value(fileLen);
writer.name("paddingLength").value(paddedBytes);
writer.name("paddingRatio").value(percentPadding);
AcidStats acidStats = OrcAcidUtils.parseAcidStats(reader);
if (acidStats != null) {
writer.name("numInserts").value(acidStats.inserts);
writer.name("numDeletes").value(acidStats.deletes);
writer.name("numUpdates").value(acidStats.updates);
}
writer.name("status").value("OK");
rows.close();
writer.endObject();
} catch (Throwable e) {
writer.name("status").value("FAILED");
throw e;
}
}
if (multiFile) {
writer.endArray();
}
System.out.println(stringWriter);
}
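  // A rough sketch of the emitted shape for a single file (values are hypothetical and many
  // fields are omitted):
  //
  //   {"fileName": "...", "fileVersion": "0.12", "writerVersion": "...",
  //    "numberOfRows": 100, "compression": "ZLIB", "schema": {...},
  //    "stripeStatistics": [...], "fileStatistics": [...], "stripes": [...],
  //    "fileLength": 1234, "paddingLength": 0, "paddingRatio": 0.0, "status": "OK"}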
private static void writeSchema(JsonWriter writer, TypeDescription type)
throws IOException {
writer.beginObject();
writer.name("columnId").value(type.getId());
writer.name("columnType").value(type.getCategory().toString());
List<String> attributes = type.getAttributeNames();
if (attributes.size() > 0) {
writer.name("attributes").beginObject();
for (String name : attributes) {
writer.name(name).value(type.getAttributeValue(name));
}
writer.endObject();
}
switch (type.getCategory()) {
case DECIMAL:
writer.name("precision").value(type.getPrecision());
writer.name("scale").value(type.getScale());
break;
case VARCHAR:
case CHAR:
writer.name("maxLength").value(type.getMaxLength());
break;
default:
break;
}
List<TypeDescription> children = type.getChildren();
if (children != null) {
writer.name("children");
switch (type.getCategory()) {
case STRUCT:
writer.beginObject();
List<String> fields = type.getFieldNames();
for (int c = 0; c < fields.size(); ++c) {
writer.name(fields.get(c));
writeSchema(writer, children.get(c));
}
writer.endObject();
break;
case LIST:
writer.beginArray();
writeSchema(writer, children.get(0));
writer.endArray();
break;
case MAP:
writer.beginArray();
writeSchema(writer, children.get(0));
writeSchema(writer, children.get(1));
writer.endArray();
break;
case UNION:
writer.beginArray();
for (TypeDescription child : children) {
writeSchema(writer, child);
}
writer.endArray();
break;
default:
break;
}
}
writer.endObject();
}
private static void writeStripeInformation(JsonWriter writer, StripeInformation stripe)
throws IOException {
writer.beginObject();
writer.name("offset").value(stripe.getOffset());
writer.name("indexLength").value(stripe.getIndexLength());
writer.name("dataLength").value(stripe.getDataLength());
writer.name("footerLength").value(stripe.getFooterLength());
writer.name("rowCount").value(stripe.getNumberOfRows());
writer.endObject();
}
private static void writeColumnStatistics(JsonWriter writer, ColumnStatistics cs)
throws IOException {
if (cs != null) {
writer.name("count").value(cs.getNumberOfValues());
writer.name("hasNull").value(cs.hasNull());
if (cs.getBytesOnDisk() != 0) {
writer.name("bytesOnDisk").value(cs.getBytesOnDisk());
}
if (cs instanceof BinaryColumnStatistics) {
writer.name("totalLength").value(((BinaryColumnStatistics) cs).getSum());
writer.name("type").value(OrcProto.Type.Kind.BINARY.toString());
} else if (cs instanceof BooleanColumnStatistics) {
writer.name("trueCount").value(((BooleanColumnStatistics) cs).getTrueCount());
writer.name("falseCount").value(((BooleanColumnStatistics) cs).getFalseCount());
writer.name("type").value(OrcProto.Type.Kind.BOOLEAN.toString());
} else if (cs instanceof IntegerColumnStatistics) {
writer.name("min").value(((IntegerColumnStatistics) cs).getMinimum());
writer.name("max").value(((IntegerColumnStatistics) cs).getMaximum());
if (((IntegerColumnStatistics) cs).isSumDefined()) {
writer.name("sum").value(((IntegerColumnStatistics) cs).getSum());
}
writer.name("type").value(OrcProto.Type.Kind.LONG.toString());
} else if (cs instanceof DoubleColumnStatistics) {
writer.name("min").value(((DoubleColumnStatistics) cs).getMinimum());
writer.name("max").value(((DoubleColumnStatistics) cs).getMaximum());
writer.name("sum").value(((DoubleColumnStatistics) cs).getSum());
writer.name("type").value(OrcProto.Type.Kind.DOUBLE.toString());
} else if (cs instanceof StringColumnStatistics) {
String lower = ((StringColumnStatistics) cs).getLowerBound();
if (((StringColumnStatistics) cs).getMinimum() != null) {
writer.name("min").value(lower);
} else if (lower != null) {
writer.name("lowerBound").value(lower);
}
String upper = ((StringColumnStatistics) cs).getUpperBound();
if (((StringColumnStatistics) cs).getMaximum() != null) {
writer.name("max").value(upper);
} else if (upper != null) {
writer.name("upperBound").value(upper);
}
writer.name("totalLength").value(((StringColumnStatistics) cs).getSum());
writer.name("type").value(OrcProto.Type.Kind.STRING.toString());
} else if (cs instanceof DateColumnStatistics) {
if (((DateColumnStatistics) cs).getMaximumLocalDate() != null) {
writer.name("min").value(((DateColumnStatistics) cs).getMinimumLocalDate().toString());
writer.name("max").value(((DateColumnStatistics) cs).getMaximumLocalDate().toString());
}
writer.name("type").value(OrcProto.Type.Kind.DATE.toString());
} else if (cs instanceof TimestampColumnStatistics) {
if (((TimestampColumnStatistics) cs).getMaximum() != null) {
writer.name("min").value(((TimestampColumnStatistics) cs).getMinimum().toString());
writer.name("max").value(((TimestampColumnStatistics) cs).getMaximum().toString());
}
writer.name("type").value(OrcProto.Type.Kind.TIMESTAMP.toString());
} else if (cs instanceof DecimalColumnStatistics) {
if (((DecimalColumnStatistics) cs).getMaximum() != null) {
writer.name("min").value(((DecimalColumnStatistics) cs).getMinimum().toString());
writer.name("max").value(((DecimalColumnStatistics) cs).getMaximum().toString());
writer.name("sum").value(((DecimalColumnStatistics) cs).getSum().toString());
}
writer.name("type").value(OrcProto.Type.Kind.DECIMAL.toString());
} else if (cs instanceof CollectionColumnStatistics) {
writer.name("minChildren").value(((CollectionColumnStatistics) cs).getMinimumChildren());
writer.name("maxChildren").value(((CollectionColumnStatistics) cs).getMaximumChildren());
writer.name("totalChildren").value(((CollectionColumnStatistics) cs).getTotalChildren());
}
}
}
private static void writeBloomFilterIndexes(JsonWriter writer, int col,
OrcIndex index,
OrcFile.WriterVersion version,
TypeDescription.Category type,
OrcProto.ColumnEncoding encoding
) throws IOException {
BloomFilter stripeLevelBF = null;
OrcProto.BloomFilterIndex[] bloomFilterIndex = index.getBloomFilterIndex();
if (bloomFilterIndex != null && bloomFilterIndex[col] != null) {
int entryIx = 0;
writer.name("bloomFilterIndexes").beginArray();
for (OrcProto.BloomFilter bf : bloomFilterIndex[col].getBloomFilterList()) {
writer.beginObject();
writer.name("entryId").value(entryIx++);
BloomFilter toMerge = BloomFilterIO.deserialize(
index.getBloomFilterKinds()[col], encoding, version, type, bf);
writeBloomFilterStats(writer, toMerge);
if (stripeLevelBF == null) {
stripeLevelBF = toMerge;
} else {
stripeLevelBF.merge(toMerge);
}
writer.endObject();
}
writer.endArray();
}
if (stripeLevelBF != null) {
writer.name("stripeLevelBloomFilter");
writer.beginObject();
writeBloomFilterStats(writer, stripeLevelBF);
writer.endObject();
}
}
private static void writeBloomFilterStats(JsonWriter writer, BloomFilter bf)
throws IOException {
int bitCount = bf.getBitSize();
int popCount = 0;
for (long l : bf.getBitSet()) {
popCount += Long.bitCount(l);
}
int k = bf.getNumHashFunctions();
float loadFactor = (float) popCount / (float) bitCount;
float expectedFpp = (float) Math.pow(loadFactor, k);
writer.name("numHashFunctions").value(k);
writer.name("bitCount").value(bitCount);
writer.name("popCount").value(popCount);
writer.name("loadFactor").value(loadFactor);
writer.name("expectedFpp").value(expectedFpp);
}
private static void writeRowGroupIndexes(JsonWriter writer, int col,
OrcProto.RowIndex[] rowGroupIndex,
TypeDescription schema,
ReaderImpl reader) throws IOException {
OrcProto.RowIndex index;
if (rowGroupIndex == null || (col >= rowGroupIndex.length) ||
((index = rowGroupIndex[col]) == null)) {
return;
}
writer.name("rowGroupIndexes").beginArray();
for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) {
writer.beginObject();
writer.name("entryId").value(entryIx);
OrcProto.RowIndexEntry entry = index.getEntry(entryIx);
      if (entry == null || !entry.hasStatistics()) {
        // close the entry object before skipping so the emitted JSON stays balanced
        writer.endObject();
        continue;
      }
OrcProto.ColumnStatistics colStats = entry.getStatistics();
writeColumnStatistics(writer, ColumnStatisticsImpl.deserialize(
schema.findSubtype(col), colStats, reader.writerUsedProlepticGregorian(),
reader.getConvertToProlepticGregorian()));
writer.name("positions").beginArray();
for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) {
writer.value(entry.getPositions(posIx));
}
writer.endArray();
writer.endObject();
}
writer.endArray();
}
}
| 19,477 | 40.888172 | 101 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/KeyTool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import com.google.gson.stream.JsonWriter;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.orc.EncryptionAlgorithm;
import org.apache.orc.impl.CryptoUtils;
import org.apache.orc.impl.HadoopShims;
import org.apache.orc.impl.KeyProvider;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.security.SecureRandom;
/**
* Print the information about the encryption keys.
*/
public class KeyTool {
static void printKey(JsonWriter writer,
KeyProvider provider,
String keyName) throws IOException {
HadoopShims.KeyMetadata meta = provider.getCurrentKeyVersion(keyName);
writer.beginObject();
writer.name("name");
writer.value(keyName);
EncryptionAlgorithm algorithm = meta.getAlgorithm();
writer.name("algorithm");
writer.value(algorithm.getAlgorithm());
writer.name("keyLength");
writer.value(algorithm.keyLength());
writer.name("version");
writer.value(meta.getVersion());
byte[] iv = new byte[algorithm.getIvLength()];
byte[] key = provider.decryptLocalKey(meta, iv).getEncoded();
writer.name("key 0");
writer.value(new BytesWritable(key).toString());
writer.endObject();
}
private final OutputStreamWriter writer;
private final Configuration conf;
public KeyTool(Configuration conf,
String[] args) throws IOException, ParseException {
CommandLine opts = parseOptions(args);
PrintStream stream;
if (opts.hasOption('o')) {
stream = new PrintStream(opts.getOptionValue('o'), "UTF-8");
} else {
stream = System.out;
}
writer = new OutputStreamWriter(stream, "UTF-8");
this.conf = conf;
}
void run() throws IOException {
KeyProvider provider =
CryptoUtils.getKeyProvider(conf, new SecureRandom());
if (provider == null) {
System.err.println("No key provider available.");
System.exit(1);
}
for(String keyName: provider.getKeyNames()) {
JsonWriter writer = new JsonWriter(this.writer);
printKey(writer, provider, keyName);
this.writer.write('\n');
}
this.writer.close();
}
private static CommandLine parseOptions(String[] args) throws ParseException {
Options options = new Options();
options.addOption(
Option.builder("h").longOpt("help").desc("Provide help").build());
options.addOption(
Option.builder("o").longOpt("output").desc("Output filename")
.hasArg().build());
CommandLine cli = new DefaultParser().parse(options, args);
if (cli.hasOption('h')) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("key", options);
System.exit(1);
}
return cli;
}
public static void main(Configuration conf,
String[] args
) throws IOException, ParseException {
KeyTool tool = new KeyTool(conf, args);
tool.run();
}
}
| 4,122 | 33.358333 | 80 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/PrintData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import com.google.gson.stream.JsonWriter;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* Print the contents of an ORC file as JSON.
*/
public class PrintData {
private static void printMap(JsonWriter writer,
MapColumnVector vector,
TypeDescription schema,
int row) throws IOException {
writer.beginArray();
TypeDescription keyType = schema.getChildren().get(0);
TypeDescription valueType = schema.getChildren().get(1);
int offset = (int) vector.offsets[row];
for (int i = 0; i < vector.lengths[row]; ++i) {
writer.beginObject();
writer.name("_key");
printValue(writer, vector.keys, keyType, offset + i);
writer.name("_value");
printValue(writer, vector.values, valueType, offset + i);
writer.endObject();
}
writer.endArray();
}
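  // For example, with hypothetical data: a map<string,int> value {"a": 1, "b": 2} is printed
  // as [{"_key":"a","_value":1},{"_key":"b","_value":2}].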
private static void printList(JsonWriter writer,
ListColumnVector vector,
TypeDescription schema,
int row) throws IOException {
writer.beginArray();
int offset = (int) vector.offsets[row];
TypeDescription childType = schema.getChildren().get(0);
for (int i = 0; i < vector.lengths[row]; ++i) {
printValue(writer, vector.child, childType, offset + i);
}
writer.endArray();
}
private static void printUnion(JsonWriter writer,
UnionColumnVector vector,
TypeDescription schema,
int row) throws IOException {
int tag = vector.tags[row];
printValue(writer, vector.fields[tag], schema.getChildren().get(tag), row);
}
static void printStruct(JsonWriter writer,
StructColumnVector batch,
TypeDescription schema,
int row) throws IOException {
writer.beginObject();
List<String> fieldNames = schema.getFieldNames();
List<TypeDescription> fieldTypes = schema.getChildren();
for (int i = 0; i < fieldTypes.size(); ++i) {
writer.name(fieldNames.get(i));
printValue(writer, batch.fields[i], fieldTypes.get(i), row);
}
writer.endObject();
}
static void printBinary(JsonWriter writer, BytesColumnVector vector,
int row) throws IOException {
writer.beginArray();
int offset = vector.start[row];
for(int i=0; i < vector.length[row]; ++i) {
writer.value(0xff & (int) vector.vector[row][offset + i]);
}
writer.endArray();
}
static void printValue(JsonWriter writer, ColumnVector vector,
TypeDescription schema, int row) throws IOException {
if (vector.isRepeating) {
row = 0;
}
if (vector.noNulls || !vector.isNull[row]) {
switch (schema.getCategory()) {
case BOOLEAN:
writer.value(((LongColumnVector) vector).vector[row] != 0);
break;
case BYTE:
case SHORT:
case INT:
case LONG:
writer.value(((LongColumnVector) vector).vector[row]);
break;
case FLOAT:
case DOUBLE:
writer.value(((DoubleColumnVector) vector).vector[row]);
break;
case STRING:
case CHAR:
case VARCHAR:
writer.value(((BytesColumnVector) vector).toString(row));
break;
case BINARY:
printBinary(writer, (BytesColumnVector) vector, row);
break;
case DECIMAL:
writer.value(((DecimalColumnVector) vector).vector[row].toString());
break;
case DATE:
writer.value(new DateWritable(
(int) ((LongColumnVector) vector).vector[row]).toString());
break;
case TIMESTAMP:
case TIMESTAMP_INSTANT:
writer.value(((TimestampColumnVector) vector)
.asScratchTimestamp(row).toString());
break;
case LIST:
printList(writer, (ListColumnVector) vector, schema, row);
break;
case MAP:
printMap(writer, (MapColumnVector) vector, schema, row);
break;
case STRUCT:
printStruct(writer, (StructColumnVector) vector, schema, row);
break;
case UNION:
printUnion(writer, (UnionColumnVector) vector, schema, row);
break;
default:
throw new IllegalArgumentException("Unknown type " + schema);
}
} else {
writer.nullValue();
}
}
static void printRow(JsonWriter writer,
VectorizedRowBatch batch,
TypeDescription schema,
int row) throws IOException {
if (schema.getCategory() == TypeDescription.Category.STRUCT) {
List<TypeDescription> fieldTypes = schema.getChildren();
List<String> fieldNames = schema.getFieldNames();
writer.beginObject();
for (int c = 0; c < batch.cols.length; ++c) {
writer.name(fieldNames.get(c));
printValue(writer, batch.cols[c], fieldTypes.get(c), row);
}
writer.endObject();
} else {
printValue(writer, batch.cols[0], schema, row);
}
}
static void printJsonData(PrintStream printStream,
Reader reader, Optional<Integer> numberOfRows) throws IOException {
OutputStreamWriter out = new OutputStreamWriter(printStream, StandardCharsets.UTF_8);
RecordReader rows = reader.rows();
try {
TypeDescription schema = reader.getSchema();
VectorizedRowBatch batch = schema.createRowBatch();
      int counter = 0;
while (rows.nextBatch(batch)) {
if (numberOfRows.isPresent() && counter >= numberOfRows.get()){
break;
}
for (int r=0; r < batch.size; ++r) {
JsonWriter writer = new JsonWriter(out);
printRow(writer, batch, schema, r);
out.write("\n");
out.flush();
if (printStream.checkError()) {
throw new IOException("Error encountered when writing to stdout.");
}
if (numberOfRows.isPresent()) {
counter++;
if (counter >= numberOfRows.get()){
break;
}
}
}
}
} finally {
rows.close();
}
}
private static Options getOptions() {
Option help = Option.builder("h").longOpt("help")
.hasArg(false)
.desc("Provide help")
.build();
Option linesOpt = Option.builder("n").longOpt("lines")
.argName("LINES")
.hasArg()
.build();
Options options = new Options()
.addOption(help)
.addOption(linesOpt);
return options;
}
private static void printHelp(){
Options opts = getOptions();
PrintWriter pw = new PrintWriter(System.err);
new HelpFormatter().printHelp(pw, HelpFormatter.DEFAULT_WIDTH,
"java -jar orc-tools-*.jar data <orc file>*",
null,
opts,
HelpFormatter.DEFAULT_LEFT_PAD,
HelpFormatter.DEFAULT_DESC_PAD, null);
pw.flush();
}
static CommandLine parseCommandLine(String[] args) throws ParseException {
Options options = getOptions();
return new DefaultParser().parse(options, args);
}
static void main(Configuration conf, String[] args
) throws ParseException {
CommandLine cli = parseCommandLine(args);
if (cli.hasOption('h') || cli.getArgs().length == 0) {
printHelp();
System.exit(1);
} else {
Optional<Integer> lines = Optional.empty();
if(cli.hasOption("n")){
lines = Optional.of( Integer.parseInt(cli.getOptionValue("n")));
}
List<String> badFiles = new ArrayList<>();
for (String file : cli.getArgs()) {
try {
Path path = new Path(file);
Reader reader = FileDump.getReader(path, conf, badFiles);
if (reader == null) {
continue;
}
printJsonData(System.out, reader, lines);
System.out.println(FileDump.SEPARATOR);
} catch (Exception e) {
System.err.println("Unable to dump data for file: " + file);
e.printStackTrace();
}
}
}
}
}
| 10,535 | 34.474747 | 89 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/PrintVersion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.Properties;
/**
* Print the version of this ORC tool.
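 * <p>
 * The version is read from the Maven pom.properties packaged inside the
 * orc-tools jar, so the output is a single line such as {@code ORC 1.2.3}
 * (version number illustrative).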
*/
public class PrintVersion {
public static final String UNKNOWN = "UNKNOWN";
public static final String FILE_NAME = "META-INF/maven/org.apache.orc/orc-tools/pom.properties";
static void main(Configuration conf, String[] args) throws IOException {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
try (java.io.InputStream resourceStream = classLoader.getResourceAsStream(FILE_NAME)) {
if (resourceStream == null) {
throw new IOException("Could not find " + FILE_NAME);
}
Properties props = new Properties();
props.load(resourceStream);
System.out.println("ORC " + props.getProperty("version", UNKNOWN));
}
}
}
| 1,686 | 37.340909 | 98 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/RowCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import java.io.IOException;
/**
* Given a set of paths, finds all of the "*.orc" files under them and prints the number of rows in each file.
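 * <p>
 * A sketch of a run through the orc-tools driver (the subcommand name, paths,
 * and counts are illustrative):
 * <pre>{@code
 *   java -jar orc-tools-*.jar count /warehouse/events
 *   /warehouse/events/part-00000.orc 1048576
 *   /warehouse/events/part-00001.orc 917504
 * }</pre>
 * The exit status is 0 only if every file could be read.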
*/
public class RowCount {
public static void main(Configuration conf, String[] args) throws IOException {
int bad = 0;
for(String root: args) {
Path rootPath = new Path(root);
FileSystem fs = rootPath.getFileSystem(conf);
for(RemoteIterator<LocatedFileStatus> itr = fs.listFiles(rootPath, true); itr.hasNext(); ) {
LocatedFileStatus status = itr.next();
if (status.isFile() && status.getPath().getName().endsWith(".orc")) {
Path filename = status.getPath();
try (Reader reader = OrcFile.createReader(filename, OrcFile.readerOptions(conf))) {
System.out.println(String.format("%s %d",
filename.toString(), reader.getNumberOfRows()));
} catch (IOException ioe) {
bad += 1;
System.err.println("Failed to read " + filename);
}
}
}
}
System.exit(bad == 0 ? 0 : 1);
}
public static void main(String[] args) throws IOException {
main(new Configuration(), args);
}
}
| 2,284 | 36.459016 | 110 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/ScanData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.StripeInformation;
import org.apache.orc.TypeDescription;
import java.util.ArrayList;
import java.util.List;
/**
* Scan the contents of an ORC file.
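 * <p>
 * Every row of each input file is read; batches that cannot be read are
 * reported together with the position where reading can resume, and one
 * summary line is printed per file, e.g. (values illustrative):
 * <pre>{@code
 *   File: part-00000.orc, bad batches: 0, rows: 1048576/1048576
 * }</pre>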
*/
public class ScanData {
private static final Options OPTIONS = new Options()
.addOption("v", "verbose", false, "Print exceptions")
.addOption("s", "schema", false, "Print schema")
.addOption("h", "help", false, "Provide help");
static CommandLine parseCommandLine(String[] args) throws ParseException {
return new DefaultParser().parse(OPTIONS, args);
}
static int calculateBestVectorSize(int indexStride) {
if (indexStride == 0) {
return 1024;
}
// how many 1024 batches do we have in an index stride?
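    // e.g. an index stride of 10000 rows needs (10000 + 1023) / 1024 = 10
    // batches, so we return 10000 / 10 = 1000 rows per batch to stay aligned
    // with the row-index boundaries.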
int batchCount = (indexStride + 1023) / 1024;
return indexStride / batchCount;
}
static class LocationInfo {
final long firstRow;
final long followingRow;
final int stripeId;
final long row;
LocationInfo(long firstRow, long followingRow, int stripeId,
long row) {
this.firstRow = firstRow;
this.followingRow = followingRow;
this.stripeId = stripeId;
this.row = row;
}
public String toString() {
return String.format("row %d in stripe %d (rows %d-%d)",
row, stripeId, firstRow, followingRow);
}
}
/**
* Given a row, find the stripe that contains that row.
* @param reader the file reader
* @param row the global row number in the file
* @return the information about that row in the file
*/
static LocationInfo findStripeInfo(Reader reader, long row) {
long firstRow = 0;
int stripeId = 0;
for (StripeInformation stripe: reader.getStripes()) {
long lastRow = firstRow + stripe.getNumberOfRows();
if (firstRow <= row && row < lastRow) {
return new LocationInfo(firstRow, lastRow, stripeId, row);
}
firstRow = lastRow;
stripeId += 1;
}
return new LocationInfo(reader.getNumberOfRows(),
reader.getNumberOfRows(), reader.getStripes().size(), row);
}
/**
* Given a failure point, find the first place that the ORC reader can
* recover.
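   * <p>
   * For example (numbers illustrative): with a row-index stride of 10,000, a
   * stripe starting at row 20,000, and a failed batch of 1,000 rows at row
   * 23,500, reading resumes at row 30,000 (the next index boundary past the
   * failed batch), or at the start of the next stripe if that comes first.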
* @param reader the ORC reader
* @param current the position of the failure
* @param batchSize the size of the batch that we tried to read
* @return the location that we should recover to
*/
static LocationInfo findRecoveryPoint(Reader reader, LocationInfo current,
int batchSize) {
int stride = reader.getRowIndexStride();
long result;
// In the worst case, just move to the next stripe
if (stride == 0 ||
current.row + batchSize >= current.followingRow) {
result = current.followingRow;
} else {
long rowInStripe = current.row + batchSize - current.firstRow;
result = Math.min(current.followingRow,
current.firstRow + (rowInStripe + stride - 1) / stride * stride);
}
return findStripeInfo(reader, result);
}
static boolean findBadColumns(Reader reader, LocationInfo current, int batchSize,
TypeDescription column, boolean[] include) {
include[column.getId()] = true;
TypeDescription schema = reader.getSchema();
boolean result = false;
if (column.getChildren() == null) {
int row = 0;
try (RecordReader rows = reader.rows(reader.options().include(include))) {
rows.seekToRow(current.row);
VectorizedRowBatch batch = schema.createRowBatch(
TypeDescription.RowBatchVersion.USE_DECIMAL64, 1);
for(row=0; row < batchSize; ++row) {
rows.nextBatch(batch);
}
} catch (Throwable t) {
System.out.printf("Column %d failed at row %d%n", column.getId(),
current.row + row);
result = true;
}
} else {
for(TypeDescription child: column.getChildren()) {
result |= findBadColumns(reader, current, batchSize, child, include);
}
}
include[column.getId()] = false;
return result;
}
static void main(Configuration conf, String[] args) throws ParseException {
CommandLine cli = parseCommandLine(args);
if (cli.hasOption('h') || cli.getArgs().length == 0) {
new HelpFormatter().printHelp("java -jar orc-tools-*.jar scan",
OPTIONS);
System.exit(1);
} else {
final boolean printSchema = cli.hasOption('s');
final boolean printExceptions = cli.hasOption('v');
List<String> badFiles = new ArrayList<>();
for (String file : cli.getArgs()) {
try (Reader reader = FileDump.getReader(new Path(file), conf, badFiles)) {
if (reader != null) {
TypeDescription schema = reader.getSchema();
if (printSchema) {
System.out.println(schema.toJson());
}
VectorizedRowBatch batch = schema.createRowBatch(
TypeDescription.RowBatchVersion.USE_DECIMAL64,
calculateBestVectorSize(reader.getRowIndexStride()));
final int batchSize = batch.getMaxSize();
long badBatches = 0;
long currentRow = 0;
long goodRows = 0;
try (RecordReader rows = reader.rows()) {
while (currentRow < reader.getNumberOfRows()) {
currentRow = rows.getRowNumber();
try {
if (!rows.nextBatch(batch)) {
break;
}
goodRows += batch.size;
} catch (Exception e) {
badBatches += 1;
LocationInfo current = findStripeInfo(reader, currentRow);
LocationInfo recover = findRecoveryPoint(reader, current, batchSize);
System.out.println("Unable to read batch at " + current +
", recovery at " + recover);
if (printExceptions) {
e.printStackTrace();
}
findBadColumns(reader, current, batchSize, reader.getSchema(),
new boolean[reader.getSchema().getMaximumId() + 1]);
// If we are at the end of the file, get out
if (recover.row >= reader.getNumberOfRows()) {
break;
} else {
rows.seekToRow(recover.row);
}
}
}
}
if (badBatches != 0) {
badFiles.add(file);
}
System.out.printf("File: %s, bad batches: %d, rows: %d/%d%n", file,
badBatches, goodRows, reader.getNumberOfRows());
}
} catch (Exception e) {
badFiles.add(file);
System.err.println("Unable to open file: " + file);
if (printExceptions) {
e.printStackTrace();
}
}
}
System.exit(badFiles.size());
}
}
}
| 8,167 | 35.792793 | 87 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/convert/ConvertTool.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.convert;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.tools.json.JsonSchemaFinder;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.GZIPInputStream;
/**
* A conversion tool to convert CSV or JSON files into ORC files.
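 * <p>
 * A sketch of a typical invocation through the orc-tools driver (file names
 * hypothetical):
 * <pre>{@code
 *   java -jar orc-tools-*.jar convert -o out.orc -s "struct<x:int,y:string>" part1.json part2.csv.gz
 * }</pre>
 * The schema option may be omitted when the inputs are JSON or ORC files, in
 * which case the schema is inferred from the inputs; CSV inputs require it.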
*/
public class ConvertTool {
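  // The default pattern accepts '-' or '/' date separators, an optional 'T' or
  // space before the time, and an optional zone offset; e.g. it parses both
  // "2021-03-14 07:25:00" and "2021/03/14T07:25:00+01:00".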
static final String DEFAULT_TIMESTAMP_FORMAT =
"yyyy[[-][/]]MM[[-][/]]dd[['T'][ ]]HH:mm:ss[ ][XXX][X]";
private final List<FileInformation> fileList;
private final TypeDescription schema;
private final char csvSeparator;
private final char csvQuote;
private final char csvEscape;
private final int csvHeaderLines;
private final String csvNullString;
private final String timestampFormat;
private final String bloomFilterColumns;
private final String unionTag;
private final String unionValue;
private final Writer writer;
private final VectorizedRowBatch batch;
TypeDescription buildSchema(List<FileInformation> files,
Configuration conf) throws IOException {
JsonSchemaFinder schemaFinder = new JsonSchemaFinder();
int filesScanned = 0;
for(FileInformation file: files) {
if (file.format == Format.JSON) {
System.err.println("Scanning " + file.path + " for schema");
filesScanned += 1;
schemaFinder.addFile(file.getReader(file.filesystem.open(file.path)), file.path.getName());
} else if (file.format == Format.ORC) {
System.err.println("Merging schema from " + file.path);
filesScanned += 1;
Reader reader = OrcFile.createReader(file.path,
OrcFile.readerOptions(conf)
.filesystem(file.filesystem));
if (files.size() == 1) {
return reader.getSchema();
}
schemaFinder.addSchema(reader.getSchema());
}
}
if (filesScanned == 0) {
throw new IllegalArgumentException("Please specify a schema using" +
" --schema for converting CSV files.");
}
return schemaFinder.getSchema();
}
enum Compression {
NONE, GZIP
}
enum Format {
JSON, CSV, ORC
}
class FileInformation {
private final Compression compression;
private final Format format;
private final Path path;
private final FileSystem filesystem;
private final Configuration conf;
private final long size;
FileInformation(Path path, Configuration conf) throws IOException {
this.path = path;
this.conf = conf;
this.filesystem = path.getFileSystem(conf);
this.size = filesystem.getFileStatus(path).getLen();
String name = path.getName();
int lastDot = name.lastIndexOf(".");
if (lastDot >= 0 && ".gz".equals(name.substring(lastDot))) {
this.compression = Compression.GZIP;
name = name.substring(0, lastDot);
lastDot = name.lastIndexOf(".");
} else {
this.compression = Compression.NONE;
}
if (lastDot >= 0) {
String ext = name.substring(lastDot);
if (".json".equals(ext) || ".jsn".equals(ext)) {
format = Format.JSON;
} else if (".csv".equals(ext)) {
format = Format.CSV;
} else if (".orc".equals(ext)) {
format = Format.ORC;
} else {
throw new IllegalArgumentException("Unknown kind of file " + path);
}
} else {
throw new IllegalArgumentException("No extension on file " + path);
}
}
java.io.Reader getReader(InputStream input) throws IOException {
if (compression == Compression.GZIP) {
input = new GZIPInputStream(input);
}
return new InputStreamReader(input, StandardCharsets.UTF_8);
}
public RecordReader getRecordReader() throws IOException {
switch (format) {
case ORC: {
Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
return reader.rows(reader.options().schema(schema));
}
case JSON: {
FSDataInputStream underlying = filesystem.open(path);
return new JsonReader(getReader(underlying), underlying, size, schema, timestampFormat,
unionTag, unionValue);
}
case CSV: {
FSDataInputStream underlying = filesystem.open(path);
return new CsvReader(getReader(underlying), underlying, size, schema,
csvSeparator, csvQuote, csvEscape, csvHeaderLines, csvNullString, timestampFormat);
}
default:
throw new IllegalArgumentException("Unhandled format " + format +
" for " + path);
}
}
}
public static void main(Configuration conf,
String[] args) throws IOException, ParseException {
new ConvertTool(conf, args).run();
}
List<FileInformation> buildFileList(String[] files,
Configuration conf) throws IOException {
List<FileInformation> result = new ArrayList<>(files.length);
for(String fn: files) {
result.add(new FileInformation(new Path(fn), conf));
}
return result;
}
public ConvertTool(Configuration conf,
String[] args) throws IOException, ParseException {
CommandLine opts = parseOptions(args);
fileList = buildFileList(opts.getArgs(), conf);
if (opts.hasOption('s')) {
this.schema = TypeDescription.fromString(opts.getOptionValue('s'));
} else {
this.schema = buildSchema(fileList, conf);
}
this.csvQuote = getCharOption(opts, 'q', '"');
this.csvEscape = getCharOption(opts, 'e', '\\');
this.csvSeparator = getCharOption(opts, 'S', ',');
this.csvHeaderLines = getIntOption(opts, 'H', 0);
this.csvNullString = opts.getOptionValue('n', "");
this.timestampFormat = opts.getOptionValue("t", DEFAULT_TIMESTAMP_FORMAT);
this.bloomFilterColumns = opts.getOptionValue('b', null);
this.unionTag = opts.getOptionValue("union-tag", "tag");
this.unionValue = opts.getOptionValue("union-value", "value");
String outFilename = opts.hasOption('o')
? opts.getOptionValue('o') : "output.orc";
boolean overwrite = opts.hasOption('O');
OrcFile.WriterOptions writerOpts = OrcFile.writerOptions(conf)
.setSchema(schema)
.overwrite(overwrite);
if (this.bloomFilterColumns != null) {
writerOpts.bloomFilterColumns(this.bloomFilterColumns);
}
writer = OrcFile.createWriter(new Path(outFilename), writerOpts);
batch = schema.createRowBatch();
}
void run() throws IOException {
for (FileInformation file: fileList) {
System.err.println("Processing " + file.path);
RecordReader reader = file.getRecordReader();
while (reader.nextBatch(batch)) {
writer.addRowBatch(batch);
}
reader.close();
}
writer.close();
}
private static int getIntOption(CommandLine opts, char letter, int mydefault) {
if (opts.hasOption(letter)) {
return Integer.parseInt(opts.getOptionValue(letter));
} else {
return mydefault;
}
}
private static char getCharOption(CommandLine opts, char letter, char mydefault) {
if (opts.hasOption(letter)) {
return opts.getOptionValue(letter).charAt(0);
} else {
return mydefault;
}
}
private static CommandLine parseOptions(String[] args) throws ParseException {
Options options = new Options();
options.addOption(
Option.builder("h").longOpt("help").desc("Provide help").build());
options.addOption(
Option.builder("s").longOpt("schema").hasArg()
.desc("The schema to write in to the file").build());
options.addOption(
Option.builder("b").longOpt("bloomFilterColumns").hasArg()
.desc("Comma separated values of column names for which bloom filter is " +
"to be created").build());
options.addOption(
Option.builder("o").longOpt("output").desc("Output filename")
.hasArg().build());
options.addOption(
Option.builder("n").longOpt("null").desc("CSV null string")
.hasArg().build());
options.addOption(
Option.builder("q").longOpt("quote").desc("CSV quote character")
.hasArg().build());
options.addOption(
Option.builder("e").longOpt("escape").desc("CSV escape character")
.hasArg().build());
options.addOption(
Option.builder("S").longOpt("separator").desc("CSV separator character")
.hasArg().build());
options.addOption(
Option.builder("H").longOpt("header").desc("CSV header lines")
.hasArg().build());
options.addOption(
Option.builder("t").longOpt("timestampformat").desc("Timestamp Format")
.hasArg().build());
options.addOption(
Option.builder("O").longOpt("overwrite").desc("Overwrite an existing file")
.build()
);
options.addOption(
Option.builder().longOpt("union-tag")
.desc("JSON key name representing UNION tag. Default to \"tag\".")
.hasArg().build());
options.addOption(
Option.builder().longOpt("union-value")
.desc("JSON key name representing UNION value. Default to \"value\".")
.hasArg().build());
CommandLine cli = new DefaultParser().parse(options, args);
if (cli.hasOption('h') || cli.getArgs().length == 0) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("convert", options);
System.exit(1);
}
return cli;
}
}
| 11,060 | 35.993311 | 99 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/convert/CsvReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.convert;
import com.opencsv.CSVParser;
import com.opencsv.CSVParserBuilder;
import com.opencsv.CSVReader;
import com.opencsv.CSVReaderBuilder;
import com.opencsv.exceptions.CsvValidationException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
public class CsvReader implements RecordReader {
private long rowNumber = 0;
private final Converter converter;
private final int columns;
private final CSVReader reader;
private final String nullString;
private final FSDataInputStream underlying;
private final long totalSize;
private final DateTimeFormatter dateTimeFormatter;
/**
* Create a CSV reader
* @param reader the stream to read from
* @param input the underlying file that is only used for getting the
* position within the file
* @param size the number of bytes in the underlying stream
* @param schema the schema to read into
* @param separatorChar the character between fields
* @param quoteChar the quote character
* @param escapeChar the escape character
* @param headerLines the number of header lines
* @param nullString the string that is translated to null
* @param timestampFormat the timestamp format string
*/
public CsvReader(java.io.Reader reader,
FSDataInputStream input,
long size,
TypeDescription schema,
char separatorChar,
char quoteChar,
char escapeChar,
int headerLines,
String nullString,
String timestampFormat) {
this.underlying = input;
CSVParser parser = new CSVParserBuilder()
.withSeparator(separatorChar)
.withQuoteChar(quoteChar)
.withEscapeChar(escapeChar)
.build();
this.reader = new CSVReaderBuilder(reader)
.withSkipLines(headerLines)
.withCSVParser(parser)
.build();
this.nullString = nullString;
this.totalSize = size;
IntWritable nextColumn = new IntWritable(0);
this.converter = buildConverter(nextColumn, schema);
this.columns = nextColumn.get();
this.dateTimeFormatter = DateTimeFormatter.ofPattern(timestampFormat);
}
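  // CSV columns are mapped positionally onto the flattened leaf fields of the
  // schema (struct fields are flattened in declaration order); e.g. a schema of
  // struct<a:int,b:struct<c:string,d:double>> consumes three columns per line,
  // in order: a, b.c, b.d.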
interface Converter {
void convert(String[] values, VectorizedRowBatch batch, int row);
void convert(String[] values, ColumnVector column, int row);
}
@Override
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
batch.reset();
final int BATCH_SIZE = batch.getMaxSize();
String[] nextLine;
// Read the CSV rows and place them into the column vectors.
try {
while ((nextLine = reader.readNext()) != null) {
rowNumber++;
if (nextLine.length != columns &&
!(nextLine.length == columns + 1 && "".equals(nextLine[columns]))) {
throw new IllegalArgumentException("Too many columns on line " +
rowNumber + ". Expected " + columns + ", but got " +
nextLine.length + ".");
}
converter.convert(nextLine, batch, batch.size++);
if (batch.size == BATCH_SIZE) {
break;
}
}
} catch (CsvValidationException e) {
throw new IOException(e);
}
return batch.size != 0;
}
@Override
public long getRowNumber() throws IOException {
return rowNumber;
}
@Override
public float getProgress() throws IOException {
long pos = underlying.getPos();
return totalSize != 0 && pos < totalSize ? (float) pos / totalSize : 1;
}
@Override
public void close() throws IOException {
reader.close();
}
@Override
public void seekToRow(long rowCount) throws IOException {
throw new UnsupportedOperationException("Seeking not supported");
}
abstract class ConverterImpl implements Converter {
final int offset;
ConverterImpl(IntWritable offset) {
this.offset = offset.get();
offset.set(this.offset + 1);
}
@Override
public void convert(String[] values, VectorizedRowBatch batch, int row) {
convert(values, batch.cols[0], row);
}
}
class BooleanConverter extends ConverterImpl {
BooleanConverter(IntWritable offset) {
super(offset);
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
if (values[offset].equalsIgnoreCase("true") ||
values[offset].equalsIgnoreCase("t") ||
values[offset].equals("1")) {
((LongColumnVector) column).vector[row] = 1;
} else {
((LongColumnVector) column).vector[row] = 0;
}
}
}
}
class LongConverter extends ConverterImpl {
LongConverter(IntWritable offset) {
super(offset);
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
((LongColumnVector) column).vector[row] =
Long.parseLong(values[offset]);
}
}
}
class DoubleConverter extends ConverterImpl {
DoubleConverter(IntWritable offset) {
super(offset);
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
((DoubleColumnVector) column).vector[row] =
Double.parseDouble(values[offset]);
}
}
}
class DecimalConverter extends ConverterImpl {
DecimalConverter(IntWritable offset) {
super(offset);
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
((DecimalColumnVector) column).vector[row].set(
new HiveDecimalWritable(values[offset]));
}
}
}
class BytesConverter extends ConverterImpl {
BytesConverter(IntWritable offset) {
super(offset);
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
byte[] value = values[offset].getBytes(StandardCharsets.UTF_8);
((BytesColumnVector) column).setRef(row, value, 0, value.length);
}
}
}
class DateColumnConverter extends ConverterImpl {
DateColumnConverter(IntWritable offset) { super(offset); }
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
DateColumnVector vector = (DateColumnVector) column;
final LocalDate dt = LocalDate.parse(values[offset]);
if (dt != null) {
vector.vector[row] = dt.toEpochDay();
} else {
column.noNulls = false;
column.isNull[row] = true;
}
}
}
}
class TimestampConverter extends ConverterImpl {
TimestampConverter(IntWritable offset) {
super(offset);
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
if (values[offset] == null || nullString.equals(values[offset])) {
column.noNulls = false;
column.isNull[row] = true;
} else {
TimestampColumnVector vector = (TimestampColumnVector) column;
TemporalAccessor temporalAccessor =
dateTimeFormatter.parseBest(values[offset],
ZonedDateTime::from, OffsetDateTime::from, LocalDateTime::from);
if (temporalAccessor instanceof ZonedDateTime) {
ZonedDateTime zonedDateTime = ((ZonedDateTime) temporalAccessor);
Timestamp timestamp = Timestamp.from(zonedDateTime.toInstant());
vector.set(row, timestamp);
} else if (temporalAccessor instanceof OffsetDateTime) {
OffsetDateTime offsetDateTime = (OffsetDateTime) temporalAccessor;
Timestamp timestamp = Timestamp.from(offsetDateTime.toInstant());
vector.set(row, timestamp);
} else if (temporalAccessor instanceof LocalDateTime) {
Timestamp timestamp = Timestamp.valueOf((LocalDateTime) temporalAccessor);
vector.set(row, timestamp);
} else {
column.noNulls = false;
column.isNull[row] = true;
}
}
}
}
class StructConverter implements Converter {
final Converter[] children;
StructConverter(IntWritable offset, TypeDescription schema) {
children = new Converter[schema.getChildren().size()];
int c = 0;
for(TypeDescription child: schema.getChildren()) {
children[c++] = buildConverter(offset, child);
}
}
@Override
public void convert(String[] values, VectorizedRowBatch batch, int row) {
for(int c=0; c < children.length; ++c) {
children[c].convert(values, batch.cols[c], row);
}
}
@Override
public void convert(String[] values, ColumnVector column, int row) {
StructColumnVector cv = (StructColumnVector) column;
for(int c=0; c < children.length; ++c) {
children[c].convert(values, cv.fields[c], row);
}
}
}
Converter buildConverter(IntWritable startOffset, TypeDescription schema) {
switch (schema.getCategory()) {
case BOOLEAN:
return new BooleanConverter(startOffset);
case BYTE:
case SHORT:
case INT:
case LONG:
return new LongConverter(startOffset);
case FLOAT:
case DOUBLE:
return new DoubleConverter(startOffset);
case DECIMAL:
return new DecimalConverter(startOffset);
case BINARY:
case STRING:
case CHAR:
case VARCHAR:
return new BytesConverter(startOffset);
case DATE:
return new DateColumnConverter(startOffset);
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampConverter(startOffset);
case STRUCT:
return new StructConverter(startOffset, schema);
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
}
| 12,473 | 32.352941 | 84 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/convert/JsonReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.convert;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonStreamParser;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DateColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
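/**
 * Reads a stream of concatenated or whitespace-separated JSON documents (via
 * Gson's {@code JsonStreamParser}), one object per row, into ORC vectorized
 * row batches. The root of the schema must be a struct; fields are matched to
 * the schema by name, and a field missing from a document is written as null.
 */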
public class JsonReader implements RecordReader {
private final TypeDescription schema;
private final Iterator<JsonElement> parser;
private final JsonConverter[] converters;
private final long totalSize;
private final FSDataInputStream input;
private long rowNumber = 0;
private final DateTimeFormatter dateTimeFormatter;
private String unionTag = "tag";
private String unionValue = "value";
interface JsonConverter {
void convert(JsonElement value, ColumnVector vect, int row);
}
static class BooleanColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
LongColumnVector vector = (LongColumnVector) vect;
vector.vector[row] = value.getAsBoolean() ? 1 : 0;
}
}
}
static class LongColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
LongColumnVector vector = (LongColumnVector) vect;
vector.vector[row] = value.getAsLong();
}
}
}
static class DoubleColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DoubleColumnVector vector = (DoubleColumnVector) vect;
vector.vector[row] = value.getAsDouble();
}
}
}
static class StringColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
BytesColumnVector vector = (BytesColumnVector) vect;
byte[] bytes = value.getAsString().getBytes(StandardCharsets.UTF_8);
vector.setRef(row, bytes, 0, bytes.length);
}
}
}
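  // Binary values are expected as hex strings; e.g. "cafe01" decodes to the
  // three bytes 0xca, 0xfe, 0x01.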
static class BinaryColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
BytesColumnVector vector = (BytesColumnVector) vect;
String binStr = value.getAsString();
byte[] bytes = new byte[binStr.length()/2];
for(int i=0; i < bytes.length; ++i) {
bytes[i] = (byte) Integer.parseInt(binStr.substring(i*2, i*2+2), 16);
}
vector.setRef(row, bytes, 0, bytes.length);
}
}
}
static class DateColumnConverter implements JsonConverter {
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DateColumnVector vector = (DateColumnVector) vect;
final LocalDate dt = LocalDate.parse(value.getAsString());
if (dt != null) {
vector.vector[row] = dt.toEpochDay();
} else {
vect.noNulls = false;
vect.isNull[row] = true;
}
}
}
}
class TimestampColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
TimestampColumnVector vector = (TimestampColumnVector) vect;
TemporalAccessor temporalAccessor = dateTimeFormatter.parseBest(value.getAsString(),
ZonedDateTime::from, OffsetDateTime::from, LocalDateTime::from);
if (temporalAccessor instanceof ZonedDateTime) {
ZonedDateTime zonedDateTime = ((ZonedDateTime) temporalAccessor);
Timestamp timestamp = Timestamp.from(zonedDateTime.toInstant());
vector.set(row, timestamp);
} else if (temporalAccessor instanceof OffsetDateTime) {
OffsetDateTime offsetDateTime = (OffsetDateTime) temporalAccessor;
Timestamp timestamp = Timestamp.from(offsetDateTime.toInstant());
vector.set(row, timestamp);
} else if (temporalAccessor instanceof LocalDateTime) {
ZonedDateTime tz = ((LocalDateTime) temporalAccessor).atZone(ZoneId.systemDefault());
Timestamp timestamp = Timestamp.from(tz.toInstant());
vector.set(row, timestamp);
} else {
vect.noNulls = false;
vect.isNull[row] = true;
}
}
}
}
static class DecimalColumnConverter implements JsonConverter {
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
DecimalColumnVector vector = (DecimalColumnVector) vect;
vector.vector[row].set(HiveDecimal.create(value.getAsString()));
}
}
}
class StructColumnConverter implements JsonConverter {
private JsonConverter[] childrenConverters;
private List<String> fieldNames;
StructColumnConverter(TypeDescription schema) {
List<TypeDescription> kids = schema.getChildren();
childrenConverters = new JsonConverter[kids.size()];
for(int c=0; c < childrenConverters.length; ++c) {
childrenConverters[c] = createConverter(kids.get(c));
}
fieldNames = schema.getFieldNames();
}
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
StructColumnVector vector = (StructColumnVector) vect;
JsonObject obj = value.getAsJsonObject();
for(int c=0; c < childrenConverters.length; ++c) {
JsonElement elem = obj.get(fieldNames.get(c));
childrenConverters[c].convert(elem, vector.fields[c], row);
}
}
}
}
class ListColumnConverter implements JsonConverter {
private JsonConverter childrenConverter;
ListColumnConverter(TypeDescription schema) {
childrenConverter = createConverter(schema.getChildren().get(0));
}
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
ListColumnVector vector = (ListColumnVector) vect;
JsonArray obj = value.getAsJsonArray();
vector.lengths[row] = obj.size();
vector.offsets[row] = vector.childCount;
vector.childCount += vector.lengths[row];
vector.child.ensureSize(vector.childCount, true);
for(int c=0; c < obj.size(); ++c) {
childrenConverter.convert(obj.get(c), vector.child,
(int) vector.offsets[row] + c);
}
}
}
}
class MapColumnConverter implements JsonConverter {
private JsonConverter keyConverter;
private JsonConverter valueConverter;
MapColumnConverter(TypeDescription schema) {
TypeDescription keyType = schema.getChildren().get(0);
if (keyType.getCategory() != TypeDescription.Category.STRING) {
throw new IllegalArgumentException("JSON can only support MAP key in STRING type: " + schema);
}
keyConverter = createConverter(keyType);
valueConverter = createConverter(schema.getChildren().get(1));
}
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
MapColumnVector vector = (MapColumnVector) vect;
JsonObject obj = value.getAsJsonObject();
vector.lengths[row] = obj.entrySet().size();
vector.offsets[row] = vector.childCount;
vector.childCount += vector.lengths[row];
vector.keys.ensureSize(vector.childCount, true);
vector.values.ensureSize(vector.childCount, true);
int cnt = 0;
for (Map.Entry<String, JsonElement> entry : obj.entrySet()) {
int offset = (int) vector.offsets[row] + cnt++;
keyConverter.convert(new JsonPrimitive(entry.getKey()), vector.keys, offset);
valueConverter.convert(entry.getValue(), vector.values, offset);
}
}
}
}
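  // A union value is expected as an object carrying the branch index and the
  // payload under the configured key names; with the default "tag"/"value"
  // keys, e.g. {"tag": 1, "value": "hello"} selects union branch 1 and
  // converts "hello" with that branch's converter.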
class UnionColumnConverter implements JsonConverter {
private JsonConverter[] childConverter;
UnionColumnConverter(TypeDescription schema) {
int size = schema.getChildren().size();
childConverter = new JsonConverter[size];
for (int i = 0; i < size; i++) {
childConverter[i] = createConverter(schema.getChildren().get(i));
}
}
@Override
public void convert(JsonElement value, ColumnVector vect, int row) {
if (value == null || value.isJsonNull()) {
vect.noNulls = false;
vect.isNull[row] = true;
} else {
UnionColumnVector vector = (UnionColumnVector) vect;
JsonObject obj = value.getAsJsonObject();
int tag = obj.get(unionTag).getAsInt();
vector.tags[row] = tag;
childConverter[tag].convert(obj.get(unionValue), vector.fields[tag], row);
}
}
}
JsonConverter createConverter(TypeDescription schema) {
switch (schema.getCategory()) {
case BYTE:
case SHORT:
case INT:
case LONG:
return new LongColumnConverter();
case FLOAT:
case DOUBLE:
return new DoubleColumnConverter();
case CHAR:
case VARCHAR:
case STRING:
return new StringColumnConverter();
case DECIMAL:
return new DecimalColumnConverter();
case DATE:
return new DateColumnConverter();
case TIMESTAMP:
case TIMESTAMP_INSTANT:
return new TimestampColumnConverter();
case BINARY:
return new BinaryColumnConverter();
case BOOLEAN:
return new BooleanColumnConverter();
case STRUCT:
return new StructColumnConverter(schema);
case LIST:
return new ListColumnConverter(schema);
case MAP:
return new MapColumnConverter(schema);
case UNION:
return new UnionColumnConverter(schema);
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
public JsonReader(Reader reader,
FSDataInputStream underlying,
long size,
TypeDescription schema,
String timestampFormat,
String unionTag,
String unionValue) throws IOException {
this(new JsonStreamParser(reader), underlying, size, schema, timestampFormat);
this.unionTag = unionTag;
this.unionValue = unionValue;
}
public JsonReader(Reader reader,
FSDataInputStream underlying,
long size,
TypeDescription schema,
String timestampFormat) throws IOException {
this(new JsonStreamParser(reader), underlying, size, schema, timestampFormat);
}
public JsonReader(Iterator<JsonElement> parser,
FSDataInputStream underlying,
long size,
TypeDescription schema,
String timestampFormat) throws IOException {
this.schema = schema;
if (schema.getCategory() != TypeDescription.Category.STRUCT) {
throw new IllegalArgumentException("Root must be struct - " + schema);
}
this.input = underlying;
this.totalSize = size;
this.parser = parser;
this.dateTimeFormatter = DateTimeFormatter.ofPattern(timestampFormat);
List<TypeDescription> fieldTypes = schema.getChildren();
converters = new JsonConverter[fieldTypes.size()];
for(int c = 0; c < converters.length; ++c) {
converters[c] = createConverter(fieldTypes.get(c));
}
}
@Override
public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
batch.reset();
int maxSize = batch.getMaxSize();
List<String> fieldNames = schema.getFieldNames();
while (parser.hasNext() && batch.size < maxSize) {
JsonObject elem = parser.next().getAsJsonObject();
for(int c=0; c < converters.length; ++c) {
// look up each field to see if it is in the input, otherwise
// set it to null.
JsonElement field = elem.get(fieldNames.get(c));
if (field == null) {
batch.cols[c].noNulls = false;
batch.cols[c].isNull[batch.size] = true;
} else {
converters[c].convert(field, batch.cols[c], batch.size);
}
}
batch.size++;
}
rowNumber += batch.size;
return batch.size != 0;
}
@Override
public long getRowNumber() throws IOException {
return rowNumber;
}
@Override
public float getProgress() throws IOException {
long pos = input.getPos();
return totalSize != 0 && pos < totalSize ? (float) pos / totalSize : 1;
}
@Override
public void close() throws IOException {
input.close();
}
@Override
public void seekToRow(long rowCount) throws IOException {
throw new UnsupportedOperationException("Seek is not supported by JsonReader");
}
}
| 15,996 | 34.707589 | 102 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/BooleanType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
/**
* A type that represents true, false, and null.
*/
class BooleanType extends HiveType {
BooleanType() {
super(Kind.BOOLEAN);
}
@Override
public String toString() {
return "boolean";
}
@Override
public boolean subsumes(HiveType other) {
return other.kind == Kind.BOOLEAN || other.kind == Kind.NULL;
}
@Override
public void merge(HiveType other) {
// nothing to do to merge boolean types
}
@Override
public TypeDescription getSchema() {
return TypeDescription.createBoolean();
}
}
| 1,420 | 26.862745 | 75 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/HiveType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
import java.io.PrintStream;
/**
* The internal representation of what we have discovered about a given
* field's type.
*/
abstract class HiveType {
enum Kind {
NULL(0),
BOOLEAN(1),
BYTE(1), SHORT(2), INT(3), LONG(4), DECIMAL(5), FLOAT(6), DOUBLE(7),
BINARY(1), DATE(1), TIMESTAMP(1), TIMESTAMP_INSTANT(1), STRING(2),
STRUCT(1, false),
LIST(1, false),
UNION(8, false),
MAP(9, false);
// for types that subsume each other, establish a ranking.
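    // e.g. BYTE(1) < SHORT(2) < INT(3) < LONG(4) < DECIMAL(5) < FLOAT(6) <
    // DOUBLE(7), so a field seen as both INT and LONG widens to LONG.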
final int rank;
final boolean isPrimitive;
Kind(int rank, boolean isPrimitive) {
this.rank = rank;
this.isPrimitive = isPrimitive;
}
Kind(int rank) {
this(rank, true);
}
}
protected Kind kind;
HiveType(Kind kind) {
this.kind = kind;
}
@Override
public boolean equals(Object other) {
if (other == null || other.getClass() != getClass()) {
return false;
}
return ((HiveType) other).kind.equals(kind);
}
@Override
public int hashCode() {
return kind.hashCode();
}
/**
* Does this type include all of the values of the other type?
* @param other the other type to compare against
* @return true, if this type includes all of the values of the other type
*/
public abstract boolean subsumes(HiveType other);
/**
* Merge the other type into this one. It assumes that subsumes(other) is
* true.
   * @param other the type to merge into this one
*/
public abstract void merge(HiveType other);
/**
* Print this type into the stream using a flat structure given the
* prefix on each element.
* @param out the stream to print to
* @param prefix the prefix to add to each field name
*/
public void printFlat(PrintStream out, String prefix) {
out.println(prefix + ": " + this);
}
public abstract TypeDescription getSchema();
}
| 2,702 | 26.865979 | 76 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/JsonSchemaFinder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParseException;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonStreamParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.TypeDescription;
import org.apache.orc.TypeDescriptionPrettyPrint;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.GZIPInputStream;
/**
* This class determines the equivalent Hive schema for a group of JSON
* documents.
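 * <p>
 * For example (documents illustrative), the two documents {"x": 100} and
 * {"x": 100000, "y": "a"} merge to a schema equivalent to
 * {@code struct<x:int,y:string>}: 100 on its own would be a tinyint, but the
 * wider int wins, and fields missing from some documents simply stay nullable.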
*/
public class JsonSchemaFinder {
private static final Logger LOG = LoggerFactory.getLogger(JsonSchemaFinder.class);
private static final Pattern HEX_PATTERN =
Pattern.compile("^([0-9a-fA-F][0-9a-fA-F])+$");
private static final Pattern TIMESTAMP_PATTERN =
Pattern.compile("^[\"]?([0-9]{4}[-/][0-9]{2}[-/][0-9]{2})[T ]" +
"([0-9]{2}:[0-9]{2}:[0-9]{2})" +
"(( [-+]?|[-+])([0-9]{2}(:[0-9]{2})?)|Z)?[\"]?$");
private static final Pattern DECIMAL_PATTERN =
Pattern.compile("^-?(?<int>[0-9]+)([.](?<fraction>[0-9]+))?$");
private static final int INDENT = 2;
private static final int MAX_DECIMAL_DIGITS = 38;
static final BigInteger MIN_LONG = new BigInteger("-9223372036854775808");
static final BigInteger MAX_LONG = new BigInteger("9223372036854775807");
private HiveType mergedType = null;
private long records = 0;
static HiveType pickType(JsonElement json) {
if (json.isJsonPrimitive()) {
JsonPrimitive prim = (JsonPrimitive) json;
if (prim.isBoolean()) {
return new BooleanType();
} else if (prim.isNumber()) {
Matcher matcher = DECIMAL_PATTERN.matcher(prim.getAsString());
if (matcher.matches()) {
int intDigits = matcher.group("int").length();
String fraction = matcher.group("fraction");
int scale = fraction == null ? 0 : fraction.length();
if (scale == 0) {
if (intDigits < 19) {
long value = prim.getAsLong();
if (value >= -128 && value < 128) {
return new NumericType(HiveType.Kind.BYTE, intDigits, scale);
} else if (value >= -32768 && value < 32768) {
return new NumericType(HiveType.Kind.SHORT, intDigits, scale);
} else if (value >= -2147483648 && value < 2147483648L) {
return new NumericType(HiveType.Kind.INT, intDigits, scale);
} else {
return new NumericType(HiveType.Kind.LONG, intDigits, scale);
}
} else if (intDigits == 19) {
// at 19 digits, it may fit inside a long, but we need to check
BigInteger val = prim.getAsBigInteger();
if (val.compareTo(MIN_LONG) >= 0 && val.compareTo(MAX_LONG) <= 0) {
return new NumericType(HiveType.Kind.LONG, intDigits, scale);
}
}
}
if (intDigits + scale <= MAX_DECIMAL_DIGITS) {
return new NumericType(HiveType.Kind.DECIMAL, intDigits, scale);
}
}
      double value = prim.getAsDouble();
      // check whether the value fits in the range of a float
      if (value >= -Float.MAX_VALUE && value <= Float.MAX_VALUE) {
return new NumericType(HiveType.Kind.FLOAT, 0, 0);
} else {
return new NumericType(HiveType.Kind.DOUBLE, 0, 0);
}
} else {
String str = prim.getAsString();
if (TIMESTAMP_PATTERN.matcher(str).matches()) {
return new StringType(HiveType.Kind.TIMESTAMP);
} else if (HEX_PATTERN.matcher(str).matches()) {
return new StringType(HiveType.Kind.BINARY);
} else {
return new StringType(HiveType.Kind.STRING);
}
}
} else if (json.isJsonNull()) {
return new NullType();
} else if (json.isJsonArray()) {
ListType result = new ListType();
result.elementType = new NullType();
for(JsonElement child: ((JsonArray) json)) {
HiveType sub = pickType(child);
if (result.elementType.subsumes(sub)) {
result.elementType.merge(sub);
} else if (sub.subsumes(result.elementType)) {
sub.merge(result.elementType);
result.elementType = sub;
} else {
result.elementType = new UnionType(result.elementType, sub);
}
}
return result;
} else {
JsonObject obj = (JsonObject) json;
StructType result = new StructType();
for(Map.Entry<String,JsonElement> field: obj.entrySet()) {
String fieldName = field.getKey();
HiveType type = pickType(field.getValue());
result.fields.put(fieldName, type);
}
return result;
}
}
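  // A rough sketch of the result for one small document (field order comes
  // from StructType's comparator, which sorts plain names alphabetically):
  //
  //   pickType of {"name": "zebra", "count": 12, "price": 1.5}
  //     -> struct<count:tinyint,name:string,price:decimal(2,1)>
  //
  // 12 fits in a byte, so it becomes tinyint; 1.5 has one integer digit and
  // a scale of 1, so it becomes decimal(2,1).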
static HiveType mergeType(HiveType previous, HiveType type) {
if (previous == null) {
return type;
} else if (type == null) {
return previous;
}
if (previous.subsumes(type)) {
previous.merge(type);
} else if (type.subsumes(previous)) {
type.merge(previous);
previous = type;
} else {
previous = new UnionType(previous, type);
}
return previous;
}
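  // A sketch of the intended behaviour: compatible types are widened in
  // place, while incompatible ones fall back to a union, e.g.
  //   mergeType(tinyint, decimal)  -> a decimal wide enough to cover both
  //   mergeType(bigint, string)    -> uniontype<bigint,string>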
static void printType(PrintStream out, HiveType type, int margin) {
if (type == null) {
out.print("void");
} else if (type.kind.isPrimitive) {
out.print(type);
} else {
switch (type.kind) {
case STRUCT:
out.println("struct <");
boolean first = true;
for(Map.Entry<String, HiveType> field:
((StructType) type).fields.entrySet()) {
if (!first) {
out.println(",");
} else {
first = false;
}
for(int i=0; i < margin; i++) {
out.print(' ');
}
out.print(field.getKey());
out.print(": ");
printType(out, field.getValue(), margin + INDENT);
}
out.print(">");
break;
case LIST:
out.print("array <");
printType(out, ((ListType) type).elementType, margin + INDENT);
out.print(">");
break;
case UNION:
out.print("uniontype <");
first = true;
for(HiveType child: ((UnionType) type).children) {
if (!first) {
out.print(',');
} else {
first = false;
}
printType(out, child, margin + INDENT);
}
out.print(">");
break;
default:
throw new IllegalArgumentException("Unknown kind " + type.kind);
}
}
}
static void printAsTable(PrintStream out, StructType type) {
out.println("create table tbl (");
boolean first = true;
for(Map.Entry<String, HiveType> field: type.fields.entrySet()) {
if (!first) {
out.println(",");
} else {
first = false;
}
for(int i=0; i < INDENT; ++i) {
out.print(' ');
}
out.print(field.getKey());
out.print(" ");
printType(out, field.getValue(), 2 * INDENT);
}
out.println();
out.println(")");
}
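  // Sketch of the output for a struct with fields count:tinyint and
  // name:string (the comma for each field after the first is printed at the
  // end of the previous line):
  //
  //   create table tbl (
  //     count tinyint,
  //     name string
  //   )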
public void addFile(String filename) throws IOException {
java.io.Reader reader;
FileInputStream inputStream = new FileInputStream(filename);
if (filename.endsWith(".gz")) {
reader = new InputStreamReader(new GZIPInputStream(inputStream),
StandardCharsets.UTF_8);
} else {
reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
}
addFile(reader, filename);
}
public void addFile(java.io.Reader reader, String filename) {
JsonStreamParser parser = new JsonStreamParser(reader);
try {
while (parser.hasNext()) {
mergedType = mergeType(mergedType, pickType(parser.next()));
records += 1;
}
} catch (JsonParseException e) {
printParseExceptionMsg(e, filename);
}
}
private void printParseExceptionMsg(JsonParseException e, String filename) {
    System.err.printf(
        "A JsonParseException was thrown while processing record %d of file %s.%n",
        records + 1, filename);
String pattern = "at line (\\d+) column (\\d+)";
Pattern r = Pattern.compile(pattern);
Matcher m = r.matcher(e.getMessage());
int line;
int column;
    if (m.find()) {
line = Integer.parseInt(m.group(1));
column = Integer.parseInt(m.group(2));
if (line == 1 && column == 1) {
System.err.printf("File %s is empty.%n", filename);
System.exit(1);
}
}
System.err.printf("Please check the file.%n%n%s%n", ExceptionUtils.getStackTrace(e));
System.exit(1);
}
HiveType makeHiveType(TypeDescription schema) {
switch (schema.getCategory()) {
case BOOLEAN:
return new BooleanType();
case BYTE:
return new NumericType(HiveType.Kind.BYTE, 3, 0);
case SHORT:
return new NumericType(HiveType.Kind.SHORT, 5, 0);
case INT:
return new NumericType(HiveType.Kind.INT, 10, 0);
case LONG:
return new NumericType(HiveType.Kind.LONG, 19, 0);
case FLOAT:
return new NumericType(HiveType.Kind.FLOAT, 0, 0);
case DOUBLE:
return new NumericType(HiveType.Kind.DOUBLE, 0, 0);
case DECIMAL: {
int scale = schema.getScale();
int intDigits = schema.getPrecision() - scale;
return new NumericType(HiveType.Kind.DECIMAL, intDigits, scale);
}
case CHAR:
case VARCHAR:
case STRING:
return new StringType(HiveType.Kind.STRING);
case TIMESTAMP:
return new StringType(HiveType.Kind.TIMESTAMP);
case TIMESTAMP_INSTANT:
return new StringType(HiveType.Kind.TIMESTAMP_INSTANT);
case DATE:
return new StringType(HiveType.Kind.DATE);
case BINARY:
return new StringType(HiveType.Kind.BINARY);
case LIST:
return new ListType(makeHiveType(schema.getChildren().get(0)));
case STRUCT: {
StructType result = new StructType();
List<String> fields = schema.getFieldNames();
List<TypeDescription> children = schema.getChildren();
for(int i = 0; i < fields.size(); ++i) {
result.addField(fields.get(i), makeHiveType(children.get(i)));
}
return result;
}
case UNION: {
UnionType result = new UnionType();
for(TypeDescription child: schema.getChildren()) {
result.addType(makeHiveType(child));
}
return result;
}
case MAP:
return new MapType(
makeHiveType(schema.getChildren().get(0)),
makeHiveType(schema.getChildren().get(1)));
default:
throw new IllegalArgumentException("Unhandled type " + schema);
}
}
public void addSchema(TypeDescription schema) {
mergedType = mergeType(mergedType, makeHiveType(schema));
}
public TypeDescription getSchema() {
return mergedType.getSchema();
}
public static void main(Configuration conf,
String[] args) throws Exception {
JsonSchemaFinder result = new JsonSchemaFinder();
CommandLine cli = parseArguments(args);
for (String filename: cli.getArgs()) {
System.err.println("Reading file " + filename);
result.addFile(filename);
}
System.err.println(result.records + " records read");
System.err.println();
if (cli.hasOption('f')) {
result.mergedType.printFlat(System.out, "root");
} else if (cli.hasOption('t')) {
printAsTable(System.out, (StructType) result.mergedType);
} else if (cli.hasOption('p')) {
TypeDescriptionPrettyPrint.print(System.out, result.getSchema());
} else {
System.out.println(result.getSchema());
}
}
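  // Typical invocations, sketched from the options defined in parseArguments
  // below; input files may be plain JSON or gzip-compressed ".gz" files:
  //
  //   json-schema --pretty data1.json data2.json.gz
  //   json-schema --table data.json    (print a Hive table declaration)
  //   json-schema --flat data.json     (print one line per leaf column)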
static CommandLine parseArguments(String[] args) throws ParseException {
Options options = new Options();
options.addOption(Option.builder("h").longOpt("help")
.desc("Provide help").build());
options.addOption(Option.builder("f").longOpt("flat")
.desc("Print types as flat list of types").build());
options.addOption(Option.builder("t").longOpt("table")
.desc("Print types as Hive table declaration").build());
options.addOption(Option.builder("p").longOpt("pretty")
.desc("Pretty print the schema").build());
CommandLine cli = new DefaultParser().parse(options, args);
if (cli.hasOption('h') || cli.getArgs().length == 0) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("json-schema", options);
System.exit(1);
}
return cli;
}
public static void main(String[] args) throws Exception {
main(new Configuration(), args);
}
}
| 14,153 | 34.208955 | 89 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/JsonShredder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonStreamParser;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.GZIPInputStream;
/**
 * This class takes a set of JSON documents and shreds them into one file
 * per primitive column. This is useful when trying to understand a set of
 * documents by providing sample values for each of the columns.
 *
 * For example, a document that looks like:
 * {'a': 'aaaa', 'b': {'c': 12, 'd': true}, 'e': 'eeee'}
*
* Will produce 4 files with the given contents:
* root.a: aaaa
* root.b.c: 12
* root.b.d: true
* root.e: eeee
*/
public class JsonShredder {
private final Map<String, PrintStream> files =
new HashMap<String, PrintStream>();
private PrintStream getFile(String name) throws IOException {
PrintStream result = files.get(name);
if (result == null) {
result = new PrintStream(new FileOutputStream(name + ".txt"), false,
StandardCharsets.UTF_8.name());
files.put(name, result);
}
return result;
}
private void shredObject(String name, JsonElement json) throws IOException {
if (json.isJsonPrimitive()) {
JsonPrimitive primitive = (JsonPrimitive) json;
getFile(name).println(primitive.getAsString());
} else if (json.isJsonNull()) {
// just skip it
} else if (json.isJsonArray()) {
for(JsonElement child: ((JsonArray) json)) {
shredObject(name + ".list", child);
}
} else {
JsonObject obj = (JsonObject) json;
for(Map.Entry<String,JsonElement> field: obj.entrySet()) {
String fieldName = field.getKey();
shredObject(name + "." + fieldName, field.getValue());
}
}
}
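  // Naming sketch: nested fields are joined with '.', and all elements of an
  // array share a single ".list" suffix, so a document such as
  //   {"a": {"b": [1, 2]}}
  // sends both values to the file root.a.b.list.txt.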
private void close() throws IOException {
for(Map.Entry<String, PrintStream> file: files.entrySet()) {
file.getValue().close();
}
}
public static void main(String[] args) throws Exception {
int count = 0;
JsonShredder shredder = new JsonShredder();
for (String filename: args) {
System.out.println("Reading " + filename);
System.out.flush();
java.io.Reader reader;
FileInputStream inStream = new FileInputStream(filename);
if (filename.endsWith(".gz")) {
reader = new InputStreamReader(new GZIPInputStream(inStream),
StandardCharsets.UTF_8);
} else {
reader = new InputStreamReader(inStream, StandardCharsets.UTF_8);
}
JsonStreamParser parser = new JsonStreamParser(reader);
while (parser.hasNext()) {
count += 1;
JsonElement item = parser.next();
shredder.shredObject("root", item);
}
}
shredder.close();
System.out.println(count + " records read");
System.out.println();
}
}
| 3,889 | 32.534483 | 78 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/ListType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
import java.io.PrintStream;
/**
* A model for types that are lists.
*/
class ListType extends HiveType {
HiveType elementType;
ListType() {
super(Kind.LIST);
}
ListType(HiveType child) {
super(Kind.LIST);
this.elementType = child;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder("list<");
buf.append(elementType.toString());
buf.append(">");
return buf.toString();
}
@Override
public boolean equals(Object other) {
return super.equals(other) &&
elementType.equals(((ListType) other).elementType);
}
@Override
public int hashCode() {
return super.hashCode() * 3 + elementType.hashCode();
}
@Override
public boolean subsumes(HiveType other) {
return other.kind == Kind.NULL || other.kind == Kind.LIST;
}
@Override
public void merge(HiveType other) {
if (other instanceof ListType) {
ListType otherList = (ListType) other;
if (elementType.subsumes(otherList.elementType)) {
elementType.merge(otherList.elementType);
} else if (otherList.elementType.subsumes(elementType)) {
otherList.elementType.merge(elementType);
elementType = otherList.elementType;
} else {
elementType = new UnionType(elementType, otherList.elementType);
}
}
}
@Override
public void printFlat(PrintStream out, String prefix) {
elementType.printFlat(out, prefix + "._list");
}
@Override
public TypeDescription getSchema() {
return TypeDescription.createList(elementType.getSchema());
}
}
| 2,458 | 26.629213 | 75 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/MapType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
import java.io.PrintStream;
import java.util.Objects;
public class MapType extends HiveType {
private final HiveType keyType;
private final HiveType valueType;
MapType(HiveType keyType, HiveType valueType) {
super(Kind.MAP);
this.keyType = keyType;
this.valueType = valueType;
}
@Override
public String toString() {
return "map<" + keyType + "," + valueType + ">";
}
@Override
public boolean equals(Object o) {
return super.equals(o) && keyType.equals(((MapType)o).keyType) &&
valueType.equals(((MapType)o).valueType);
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), keyType, valueType);
}
@Override
public boolean subsumes(HiveType other) {
return (other.kind == Kind.MAP &&
keyType.subsumes(((MapType)other).keyType) &&
valueType.subsumes(((MapType)other).valueType)) ||
other.kind == Kind.NULL;
}
@Override
public void merge(HiveType other) {
if (other.getClass() == MapType.class) {
MapType otherMap = (MapType) other;
keyType.merge(otherMap.keyType);
valueType.merge(otherMap.valueType);
}
}
@Override
public void printFlat(PrintStream out, String prefix) {
prefix = prefix + ".";
keyType.printFlat(out, prefix + "key");
    valueType.printFlat(out, prefix + "value");
}
@Override
public TypeDescription getSchema() {
return TypeDescription.createMap(keyType.getSchema(), valueType.getSchema());
}
}
| 2,372 | 27.25 | 81 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/NullType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
/**
* The type that only includes the null value.
*/
class NullType extends HiveType {
NullType() {
super(Kind.NULL);
}
@Override
public String toString() {
return "void";
}
@Override
public boolean subsumes(HiveType other) {
return other.kind == Kind.NULL;
}
@Override
public void merge(HiveType other) {
// nothing to do to merge null types *smile*
}
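  // Note: TypeDescription has no dedicated null type, so getSchema() below
  // falls back to an empty union as the closest available stand-in.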
@Override
public TypeDescription getSchema() {
return TypeDescription.createUnion();
}
}
| 1,379 | 26.058824 | 75 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/NumericType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
/**
* A type that represents all of the numeric types: byte, short, int, long,
* float, double, and decimal.
*/
class NumericType extends HiveType {
// the maximum number of digits before the decimal
int intDigits;
// the maximum number of digits after the decimal
int scale;
NumericType(Kind kind, int intDigits, int scale) {
super(kind);
this.intDigits = intDigits;
this.scale = scale;
}
@Override
public boolean equals(Object other) {
if (super.equals(other)) {
NumericType otherNumber = (NumericType) other;
      return intDigits == otherNumber.intDigits && scale == otherNumber.scale;
}
return false;
}
@Override
public int hashCode() {
return super.hashCode() * 41 + (intDigits * 17) + scale;
}
@Override
public String toString() {
switch (kind) {
case BYTE:
return "tinyint";
case SHORT:
return "smallint";
case INT:
return "int";
case LONG:
return "bigint";
case DECIMAL:
return "decimal(" + (intDigits + scale) + "," + scale + ")";
case FLOAT:
return "float";
case DOUBLE:
return "double";
default:
throw new IllegalArgumentException("Unknown kind " + kind);
}
}
@Override
public boolean subsumes(HiveType other) {
return other.getClass() == NumericType.class || other.kind == Kind.NULL;
}
@Override
public void merge(HiveType other) {
if (other.getClass() == NumericType.class) {
NumericType otherNumber = (NumericType) other;
this.intDigits = Math.max(this.intDigits, otherNumber.intDigits);
this.scale = Math.max(this.scale, otherNumber.scale);
if (kind.rank < other.kind.rank) {
kind = other.kind;
}
}
}
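  // Merge sketch (assuming Kind.DECIMAL ranks above the integer kinds, which
  // the rank-based promotion relies on): merging an int with intDigits = 10
  // into decimal(4,2) keeps the larger width on each side, giving
  // intDigits = 10, scale = 2, kind = DECIMAL, i.e. decimal(12,2).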
@Override
public TypeDescription getSchema() {
switch (kind) {
case BYTE:
return TypeDescription.createByte();
case SHORT:
return TypeDescription.createShort();
case INT:
return TypeDescription.createInt();
case LONG:
return TypeDescription.createLong();
case DECIMAL:
return TypeDescription.createDecimal()
.withScale(scale).withPrecision(intDigits+scale);
case FLOAT:
return TypeDescription.createFloat();
case DOUBLE:
return TypeDescription.createDouble();
default:
throw new IllegalArgumentException("Unknown kind " + kind);
}
}
}
| 3,332 | 27.982609 | 78 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/StringType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
/**
 * These are the types that correspond to the JSON string values: string,
* binary, timestamp, and date.
*/
class StringType extends HiveType {
StringType(Kind kind) {
super(kind);
}
@Override
public String toString() {
switch (kind) {
case BINARY:
return "binary";
case STRING:
return "string";
case TIMESTAMP:
return "timestamp";
case TIMESTAMP_INSTANT:
return "timestamp with local time zone";
case DATE:
return "date";
default:
throw new IllegalArgumentException("Unknown kind " + kind);
}
}
@Override
public boolean subsumes(HiveType other) {
return other.getClass() == StringType.class || other.kind == Kind.NULL;
}
@Override
public void merge(HiveType other) {
// the general case is that everything is a string.
if (other.getClass() == StringType.class && kind != other.kind) {
kind = Kind.STRING;
}
}
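  // For example: merging a timestamp column with a plain string column (or a
  // binary column with a string column) degrades the kind to STRING, the only
  // representation common to both.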
@Override
public TypeDescription getSchema() {
switch (kind) {
case BINARY:
return TypeDescription.createBinary();
case STRING:
return TypeDescription.createString();
case TIMESTAMP:
return TypeDescription.createTimestamp();
case TIMESTAMP_INSTANT:
return TypeDescription.createTimestampInstant();
case DATE:
return TypeDescription.createDate();
default:
throw new IllegalArgumentException("Unknown kind " + kind);
}
}
}
| 2,363 | 28.185185 | 75 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/StructType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
import java.io.PrintStream;
import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;
/**
* Model structs.
*/
class StructType extends HiveType {
private static final String COL_PREFIX = "_col";
private static final Comparator<String> FIELD_COMPARATOR = (left, right) -> {
if (left == null) {
return right == null ? 0 : -1;
} else if (right == null) {
return 1;
} else if (left.startsWith(COL_PREFIX)) {
if (right.startsWith(COL_PREFIX)) {
try {
int leftInt = Integer.parseInt(left.substring(COL_PREFIX.length()));
int rightInt = Integer.parseInt(right.substring(COL_PREFIX.length()));
return Integer.compare(leftInt, rightInt);
} catch (Exception e) {
// fall back to the normal rules
}
} else {
return 1;
}
    } else if (right.startsWith(COL_PREFIX)) {
      // left is a plain name and right is a generated _col name
      return -1;
    }
return left.compareTo(right);
};
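  // Ordering sketch: plain names sort alphabetically, generated "_col<n>"
  // names sort numerically among themselves, and plain names are intended to
  // come before the generated ones, e.g. "_col2" < "_col10" and
  // "name" < "_col0".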
final Map<String, HiveType> fields = new TreeMap<>(FIELD_COMPARATOR);
StructType() {
super(Kind.STRUCT);
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder("struct<");
boolean first = true;
for (Map.Entry<String, HiveType> field : fields.entrySet()) {
if (!first) {
buf.append(',');
} else {
first = false;
}
buf.append(field.getKey());
buf.append(':');
buf.append(field.getValue().toString());
}
buf.append(">");
return buf.toString();
}
public StructType addField(String name, HiveType fieldType) {
fields.put(name, fieldType);
return this;
}
@Override
public boolean equals(Object other) {
return super.equals(other) && fields.equals(((StructType) other).fields);
}
@Override
public int hashCode() {
int result = super.hashCode() * 3;
for (Map.Entry<String, HiveType> pair : fields.entrySet()) {
result += pair.getKey().hashCode() * 17 + pair.getValue().hashCode();
}
return result;
}
@Override
public boolean subsumes(HiveType other) {
return other.kind == Kind.NULL || other.kind == Kind.STRUCT;
}
@Override
public void merge(HiveType other) {
if (other.getClass() == StructType.class) {
StructType otherStruct = (StructType) other;
for (Map.Entry<String, HiveType> pair : otherStruct.fields.entrySet()) {
HiveType ourField = fields.get(pair.getKey());
if (ourField == null) {
fields.put(pair.getKey(), pair.getValue());
} else if (ourField.subsumes(pair.getValue())) {
ourField.merge(pair.getValue());
} else if (pair.getValue().subsumes(ourField)) {
pair.getValue().merge(ourField);
fields.put(pair.getKey(), pair.getValue());
} else {
fields.put(pair.getKey(), new UnionType(ourField, pair.getValue()));
}
}
}
}
@Override
public void printFlat(PrintStream out, String prefix) {
prefix = prefix + ".";
for (Map.Entry<String, HiveType> field : fields.entrySet()) {
field.getValue().printFlat(out, prefix + field.getKey());
}
}
@Override
public TypeDescription getSchema() {
TypeDescription result = TypeDescription.createStruct();
for (Map.Entry<String, HiveType> child: fields.entrySet()) {
result.addField(child.getKey(), child.getValue().getSchema());
}
return result;
}
}
| 4,292 | 29.446809 | 80 | java |
null | orc-main/java/tools/src/java/org/apache/orc/tools/json/UnionType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.json;
import org.apache.orc.TypeDescription;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
/**
* A union type to represent types that don't fit together.
*/
class UnionType extends HiveType {
final List<HiveType> children = new ArrayList<HiveType>();
UnionType() {
super(Kind.UNION);
}
UnionType(HiveType left, HiveType right) {
super(Kind.UNION);
children.add(left);
children.add(right);
}
UnionType addType(HiveType type) {
children.add(type);
return this;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder("uniontype<");
boolean first = true;
for (HiveType child : children) {
if (!first) {
buf.append(',');
} else {
first = false;
}
buf.append(child.toString());
}
buf.append(">");
return buf.toString();
}
@Override
public boolean equals(Object other) {
return super.equals(other) &&
children.equals(((UnionType) other).children);
}
@Override
public int hashCode() {
int result = super.hashCode();
for (HiveType child : children) {
result += child.hashCode() * 17;
}
return result;
}
@Override
public boolean subsumes(HiveType other) {
return true;
}
@Override
public void merge(HiveType other) {
if (other instanceof UnionType) {
for (HiveType otherChild : ((UnionType) other).children) {
merge(otherChild);
}
} else {
for (int i = 0; i < children.size(); ++i) {
HiveType child = children.get(i);
if (child.subsumes(other)) {
child.merge(other);
return;
} else if (other.subsumes(child)) {
other.merge(child);
children.set(i, other);
return;
}
}
addType(other);
}
}
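  // Merge sketch: an incoming type is folded into the first child that can
  // absorb it (or be absorbed by it); otherwise it is added as a new branch.
  // For example, uniontype<bigint,string> merged with a tinyint stays
  // uniontype<bigint,string>, because the bigint child subsumes the tinyint.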
@Override
public void printFlat(PrintStream out, String prefix) {
prefix = prefix + ".";
int id = 0;
for (HiveType child : children) {
child.printFlat(out, prefix + (id++));
}
}
@Override
public TypeDescription getSchema() {
TypeDescription result = TypeDescription.createUnion();
for (HiveType child: children) {
result.addUnionChild(child.getSchema());
}
return result;
}
}
| 3,117 | 24.145161 | 75 | java |
null | orc-main/java/tools/src/test/org/apache/orc/impl/FakeKeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* A Hadoop KeyProvider that lets us test the interaction
* with the Hadoop code.
* Must only be used in unit tests!
*/
public class FakeKeyProvider extends KeyProvider {
// map from key name to metadata
private final Map<String, TestMetadata> keyMetadata = new HashMap<>();
// map from key version name to material
private final Map<String, KeyVersion> keyVersions = new HashMap<>();
public FakeKeyProvider(Configuration conf) {
super(conf);
}
@Override
public KeyVersion getKeyVersion(String name) {
return keyVersions.get(name);
}
@Override
public List<String> getKeys() {
return new ArrayList<>(keyMetadata.keySet());
}
@Override
public List<KeyVersion> getKeyVersions(String name) {
List<KeyVersion> result = new ArrayList<>();
Metadata meta = getMetadata(name);
for(int v=0; v < meta.getVersions(); ++v) {
String versionName = buildVersionName(name, v);
KeyVersion material = keyVersions.get(versionName);
if (material != null) {
result.add(material);
}
}
return result;
}
@Override
public Metadata getMetadata(String name) {
return keyMetadata.get(name);
}
@Override
public KeyVersion createKey(String name, byte[] bytes, Options options) {
String versionName = buildVersionName(name, 0);
keyMetadata.put(name, new TestMetadata(options.getCipher(),
options.getBitLength(), 1));
KeyVersion result = new KMSClientProvider.KMSKeyVersion(name, versionName, bytes);
keyVersions.put(versionName, result);
return result;
}
@Override
public void deleteKey(String name) {
throw new UnsupportedOperationException("Can't delete keys");
}
@Override
public KeyVersion rollNewVersion(String name, byte[] bytes) {
TestMetadata key = keyMetadata.get(name);
String versionName = buildVersionName(name, key.addVersion());
KeyVersion result = new KMSClientProvider.KMSKeyVersion(name, versionName,
bytes);
keyVersions.put(versionName, result);
return result;
}
@Override
public void flush() {
// Nothing
}
static class TestMetadata extends KeyProvider.Metadata {
TestMetadata(String cipher, int bitLength, int versions) {
super(cipher, bitLength, null, null, null, versions);
}
public int addVersion() {
return super.addVersion();
}
}
public static class Factory extends KeyProviderFactory {
@Override
public KeyProvider createProvider(URI uri,
Configuration conf) throws IOException {
if ("test".equals(uri.getScheme())) {
KeyProvider provider = new FakeKeyProvider(conf);
// populate a couple keys into the provider
byte[] piiKey = new byte[]{0,1,2,3,4,5,6,7,8,9,0xa,0xb,0xc,0xd,0xe,0xf};
org.apache.hadoop.crypto.key.KeyProvider.Options aes128 = new KeyProvider.Options(conf);
provider.createKey("pii", piiKey, aes128);
byte[] piiKey2 = new byte[]{0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f};
provider.rollNewVersion("pii", piiKey2);
byte[] secretKey = new byte[]{0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,
0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f};
provider.createKey("secret", secretKey, aes128);
return KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
}
return null;
}
}
}
| 4,707 | 31.923077 | 96 | java |
null | orc-main/java/tools/src/test/org/apache/orc/impl/TestHadoopKeyProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.security.Key;
import java.util.List;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestHadoopKeyProvider {
/**
* Tests the path through the hadoop key provider code base.
* This should be consistent with TestCryptoUtils.testMemoryKeyProvider.
* @throws IOException
*/
@Test
public void testHadoopKeyProvider() throws IOException {
Configuration conf = new Configuration();
conf.set("hadoop.security.key.provider.path", "test:///");
// Hard code the random so that we know the bytes that will come out.
KeyProvider provider = CryptoUtils.getKeyProvider(conf, new Random(24));
List<String> keyNames = provider.getKeyNames();
assertEquals(2, keyNames.size());
assertTrue(keyNames.contains("pii"));
assertTrue(keyNames.contains("secret"));
HadoopShims.KeyMetadata piiKey = provider.getCurrentKeyVersion("pii");
assertEquals(1, piiKey.getVersion());
LocalKey localKey = provider.createLocalKey(piiKey);
byte[] encrypted = localKey.getEncryptedKey();
// make sure that we get exactly what we expect to test the encryption
assertEquals("c7 ab 4f bb 38 f4 de ad d0 b3 59 e2 21 2a 95 32",
new BytesWritable(encrypted).toString());
// now check to make sure that we get the expected bytes back
assertEquals("c7 a1 d0 41 7b 24 72 44 1a 58 c7 72 4a d4 be b3",
new BytesWritable(localKey.getDecryptedKey().getEncoded()).toString());
Key key = provider.decryptLocalKey(piiKey, encrypted);
assertEquals(new BytesWritable(localKey.getDecryptedKey().getEncoded()).toString(),
new BytesWritable(key.getEncoded()).toString());
}
}
| 2,743 | 41.875 | 87 | java |
null | orc-main/java/tools/src/test/org/apache/orc/impl/TestRLEv2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcFile;
import org.apache.orc.PhysicalWriter;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.apache.orc.impl.writer.StreamOptions;
import org.apache.orc.tools.FileDump;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestRLEv2 {
Path workDir = new Path(System.getProperty("test.tmp.dir",
"target" + File.separator + "test" + File.separator + "tmp"));
Path testFilePath;
Configuration conf;
FileSystem fs;
@BeforeEach
public void openFileSystem (TestInfo testInfo) throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testFilePath = new Path(workDir, "TestRLEv2." +
testInfo.getTestMethod().get().getName() + ".orc");
fs.delete(testFilePath, false);
}
private void appendInt(VectorizedRowBatch batch, long i) {
((LongColumnVector) batch.cols[0]).vector[batch.size++] = i;
}
@Test
public void testFixedDeltaZero() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (int i = 0; i < 5120; ++i) {
appendInt(batch, 123);
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
// 10 runs of 512 elements. Each run has 2 bytes header, 2 bytes base (base = 123,
// zigzag encoded varint) and 1 byte delta (delta = 0). In total, 5 bytes per run.
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 50"));
System.setOut(origOut);
}
@Test
public void testFixedDeltaOne() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (int i = 0; i < 5120; ++i) {
appendInt(batch, i % 512);
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
// 10 runs of 512 elements. Each run has 2 bytes header, 1 byte base (base = 0)
// and 1 byte delta (delta = 1). In total, 4 bytes per run.
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 40"));
System.setOut(origOut);
}
@Test
public void testFixedDeltaOneDescending() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (int i = 0; i < 5120; ++i) {
appendInt(batch, 512 - (i % 512));
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
    // 10 runs of 512 elements. Each run has 2 bytes header, 2 bytes base (base = 512, zigzag + varint)
// and 1 byte delta (delta = 1). In total, 5 bytes per run.
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 50"));
System.setOut(origOut);
}
@Test
public void testFixedDeltaLarge() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (int i = 0; i < 5120; ++i) {
appendInt(batch, i % 512 + ((i % 512) * 100));
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
// 10 runs of 512 elements. Each run has 2 bytes header, 1 byte base (base = 0)
// and 2 bytes delta (delta = 100, zigzag encoded varint). In total, 5 bytes per run.
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 50"));
System.setOut(origOut);
}
@Test
public void testFixedDeltaLargeDescending() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (int i = 0; i < 5120; ++i) {
appendInt(batch, (512 - i % 512) + ((i % 512) * 100));
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
    // 10 runs of 512 elements. Each run has 2 bytes header, 2 bytes base (base = 512, zigzag + varint)
// and 2 bytes delta (delta = 100, zigzag encoded varint). In total, 6 bytes per run.
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 60"));
System.setOut(origOut);
}
@Test
public void testShortRepeat() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
for (int i = 0; i < 5; ++i) {
appendInt(batch, 10);
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
// 1 byte header + 1 byte value
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 2"));
System.setOut(origOut);
}
@Test
public void testDeltaUnknownSign() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch(5120);
appendInt(batch, 0);
for (int i = 0; i < 511; ++i) {
appendInt(batch, i);
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
// monotonicity will be undetermined for this sequence 0,0,1,2,3,...510. Hence DIRECT encoding
    // will be used. 2 bytes for header and 640 bytes for data (512 values at a fixed
    // width of 10 bits each, 5120/8 = 640). Total bytes 642
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 642"));
System.setOut(origOut);
}
@Test
public void testPatchedBase() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
Random rand = new Random(123);
VectorizedRowBatch batch = schema.createRowBatch(5120);
appendInt(batch, 10000000);
for (int i = 0; i < 511; ++i) {
appendInt(batch, rand.nextInt(i+1));
}
w.addRowBatch(batch);
w.close();
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toUri().toString()});
System.out.flush();
String outDump = new String(myOut.toByteArray(), StandardCharsets.UTF_8);
// use PATCHED_BASE encoding
assertTrue(outDump.contains("Stream: column 0 section DATA start: 3 length 583"));
System.setOut(origOut);
}
@Test
public void testBaseValueLimit() throws Exception {
TypeDescription schema = TypeDescription.createInt();
Writer w = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.compress(CompressionKind.NONE)
.setSchema(schema)
.rowIndexStride(0)
.encodingStrategy(OrcFile.EncodingStrategy.COMPRESSION)
.version(OrcFile.Version.V_0_12)
);
VectorizedRowBatch batch = schema.createRowBatch();
//the minimum value is beyond RunLengthIntegerWriterV2.BASE_VALUE_LIMIT
long[] input = {-9007199254740992l,-8725724278030337l,-1125762467889153l, -1l,-9007199254740992l,
-9007199254740992l, -497l,127l,-1l,-72057594037927936l,-4194304l,-9007199254740992l,-4503599593816065l,
-4194304l,-8936830510563329l,-9007199254740992l, -1l, -70334384439312l,-4063233l, -6755399441973249l};
for(long data: input) {
appendInt(batch, data);
}
w.addRowBatch(batch);
w.close();
try(Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs))) {
RecordReader rows = reader.rows();
batch = reader.getSchema().createRowBatch();
long[] output = null;
while (rows.nextBatch(batch)) {
output = new long[batch.size];
System.arraycopy(((LongColumnVector) batch.cols[0]).vector, 0, output, 0, batch.size);
}
assertArrayEquals(input, output);
}
}
static class TestOutputCatcher implements PhysicalWriter.OutputReceiver {
int currentBuffer = 0;
List<ByteBuffer> buffers = new ArrayList<ByteBuffer>();
@Override
public void output(ByteBuffer buffer) throws IOException {
buffers.add(buffer);
}
@Override
public void suppress() {
}
ByteBuffer getCurrentBuffer() {
while (currentBuffer < buffers.size() &&
buffers.get(currentBuffer).remaining() == 0) {
currentBuffer += 1;
}
return currentBuffer < buffers.size() ? buffers.get(currentBuffer) : null;
}
// assert that the list of ints (as bytes) are equal to the output
public void compareBytes(int... expected) {
for(int i=0; i < expected.length; ++i) {
ByteBuffer current = getCurrentBuffer();
assertEquals((byte) expected[i], current.get(), "position " + i);
}
assertNull(getCurrentBuffer());
}
}
static TestOutputCatcher encodeV2(long[] input,
boolean signed) throws IOException {
TestOutputCatcher catcher = new TestOutputCatcher();
RunLengthIntegerWriterV2 writer =
new RunLengthIntegerWriterV2(new OutStream("test",
new StreamOptions(10000), catcher), signed);
for(long x: input) {
writer.write(x);
}
writer.flush();
return catcher;
}
@Test
public void testShortRepeatExample() throws Exception {
long[] input = {10000, 10000, 10000, 10000, 10000};
TestOutputCatcher output = encodeV2(input, false);
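    // Expected layout, sketched from the RLEv2 SHORT_REPEAT format this test
    // relies on: header 0x0a packs encoding 00, a 2-byte value width, and a
    // run-length field of 2 (meaning 5 repeats); 0x27 0x10 is 10000 big-endian.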
output.compareBytes(0x0a, 0x27, 0x10);
}
@Test
public void testDirectExample() throws Exception {
long[] input = {23713, 43806, 57005, 48879};
TestOutputCatcher output = encodeV2(input, false);
output.compareBytes(0x5e, 0x03, 0x5c, 0xa1, 0xab, 0x1e, 0xde, 0xad, 0xbe,
0xef);
}
@Test
public void testPatchedBaseExample() throws Exception {
long[] input = {2030, 2000, 2020, 1000000, 2040, 2050, 2060, 2070, 2080,
2090, 2100, 2110, 2120, 2130, 2140, 2150, 2160, 2170, 2180, 2190};
TestOutputCatcher output = encodeV2(input, false);
output.compareBytes(0x8e, 0x13, 0x2b, 0x21, 0x07, 0xd0, 0x1e, 0x00, 0x14,
0x70, 0x28, 0x32, 0x3c, 0x46, 0x50, 0x5a, 0x64, 0x6e, 0x78, 0x82, 0x8c,
0x96, 0xa0, 0xaa, 0xb4, 0xbe, 0xfc, 0xe8);
}
@Test
public void testDeltaExample() throws Exception {
long[] input = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29};
TestOutputCatcher output = encodeV2(input, false);
output.compareBytes(0xc6, 0x09, 0x02, 0x02, 0x22, 0x42, 0x42, 0x46);
}
@Test
public void testDelta2Example() throws Exception {
long[] input = {0, 10000, 10001, 10001, 10002, 10003, 10003};
TestOutputCatcher output = encodeV2(input, false);
output.compareBytes(0xc2, 0x06, 0x0, 0xa0, 0x9c, 0x01, 0x45, 0x0);
}
}
| 17,037 | 38.167816 | 111 | java |
null | orc-main/java/tools/src/test/org/apache/orc/tools/TestFileDump.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.StripeStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.sql.Timestamp;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.apache.orc.tools.FileDump.RECOVER_READ_SIZE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class TestFileDump {
Path workDir = new Path(System.getProperty("test.tmp.dir"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem () throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path("TestFileDump.testDump.orc");
fs.delete(testFilePath, false);
}
static TypeDescription getMyRecordType() {
return TypeDescription.createStruct()
.addField("i", TypeDescription.createInt())
.addField("l", TypeDescription.createLong())
.addField("s", TypeDescription.createString());
}
static void appendMyRecord(VectorizedRowBatch batch,
int i,
long l,
String str) {
((LongColumnVector) batch.cols[0]).vector[batch.size] = i;
((LongColumnVector) batch.cols[1]).vector[batch.size] = l;
if (str == null) {
batch.cols[2].noNulls = false;
batch.cols[2].isNull[batch.size] = true;
} else {
((BytesColumnVector) batch.cols[2]).setVal(batch.size,
str.getBytes(StandardCharsets.UTF_8));
}
batch.size += 1;
}
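  /**
   * Build a struct schema covering every primitive and compound type exercised by
   * testDataDump: numerics, decimal, timestamp, date, string/char/varchar, map, list,
   * and a nested struct.
   */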
static TypeDescription getAllTypesType() {
return TypeDescription.createStruct()
.addField("b", TypeDescription.createBoolean())
.addField("bt", TypeDescription.createByte())
.addField("s", TypeDescription.createShort())
.addField("i", TypeDescription.createInt())
.addField("l", TypeDescription.createLong())
.addField("f", TypeDescription.createFloat())
.addField("d", TypeDescription.createDouble())
.addField("de", TypeDescription.createDecimal())
.addField("t", TypeDescription.createTimestamp())
.addField("dt", TypeDescription.createDate())
.addField("str", TypeDescription.createString())
.addField("c", TypeDescription.createChar().withMaxLength(5))
.addField("vc", TypeDescription.createVarchar().withMaxLength(10))
.addField("m", TypeDescription.createMap(
TypeDescription.createString(),
TypeDescription.createString()))
.addField("a", TypeDescription.createList(TypeDescription.createInt()))
.addField("st", TypeDescription.createStruct()
.addField("i", TypeDescription.createInt())
.addField("s", TypeDescription.createString()));
}
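  /**
   * Append one row containing every column of the all-types schema. Map and list
   * children are written starting at the current childCount offset of their vectors.
   */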
static void appendAllTypes(VectorizedRowBatch batch,
boolean b,
byte bt,
short s,
int i,
long l,
float f,
double d,
HiveDecimalWritable de,
Timestamp t,
DateWritable dt,
String str,
String c,
String vc,
Map<String, String> m,
List<Integer> a,
int sti,
String sts) {
int row = batch.size++;
((LongColumnVector) batch.cols[0]).vector[row] = b ? 1 : 0;
((LongColumnVector) batch.cols[1]).vector[row] = bt;
((LongColumnVector) batch.cols[2]).vector[row] = s;
((LongColumnVector) batch.cols[3]).vector[row] = i;
((LongColumnVector) batch.cols[4]).vector[row] = l;
((DoubleColumnVector) batch.cols[5]).vector[row] = f;
((DoubleColumnVector) batch.cols[6]).vector[row] = d;
((DecimalColumnVector) batch.cols[7]).vector[row].set(de);
((TimestampColumnVector) batch.cols[8]).set(row, t);
((LongColumnVector) batch.cols[9]).vector[row] = dt.getDays();
((BytesColumnVector) batch.cols[10]).setVal(row, str.getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[11]).setVal(row, c.getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) batch.cols[12]).setVal(row, vc.getBytes(StandardCharsets.UTF_8));
MapColumnVector map = (MapColumnVector) batch.cols[13];
int offset = map.childCount;
map.offsets[row] = offset;
map.lengths[row] = m.size();
map.childCount += map.lengths[row];
for(Map.Entry<String, String> entry: m.entrySet()) {
((BytesColumnVector) map.keys).setVal(offset, entry.getKey().getBytes(StandardCharsets.UTF_8));
((BytesColumnVector) map.values).setVal(offset++,
entry.getValue().getBytes(StandardCharsets.UTF_8));
}
ListColumnVector list = (ListColumnVector) batch.cols[14];
offset = list.childCount;
list.offsets[row] = offset;
list.lengths[row] = a.size();
list.childCount += list.lengths[row];
for(int e=0; e < a.size(); ++e) {
((LongColumnVector) list.child).vector[offset + e] = a.get(e);
}
StructColumnVector struct = (StructColumnVector) batch.cols[15];
((LongColumnVector) struct.fields[0]).vector[row] = sti;
((BytesColumnVector) struct.fields[1]).setVal(row, sts.getBytes(StandardCharsets.UTF_8));
}
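  // Values on these lines depend on the ORC build (version strings) or may vary slightly
  // between runs (file length), so preprocessLine normalizes them before comparison.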
private static final Pattern ignoreTailPattern =
Pattern.compile("^(?<head>File Version|\"softwareVersion\"): .*");
private static final Pattern fileSizePattern =
Pattern.compile("^(\"fileLength\"|File length): (?<size>[0-9]+).*");
// Allow file size to be up to 100 bytes larger.
private static final int SIZE_SLOP = 100;
  /**
   * Preprocess the line for matching.
   * If it matches fileSizePattern, the file size is returned as a Long; if it matches
   * ignoreTailPattern, only the line head is kept so build-dependent values are ignored.
   * @param line the input line
   * @return the processed line, the line head, or a Long with the file size
   */
private static Object preprocessLine(String line) {
if (line == null) {
return line;
}
line = line.trim();
Matcher match = fileSizePattern.matcher(line);
if (match.matches()) {
return Long.parseLong(match.group("size"));
}
match = ignoreTailPattern.matcher(line);
if (match.matches()) {
return match.group("head");
}
return line;
}
/**
* Compare two files for equivalence.
* @param expected Loaded from the class path
* @param actual Loaded from the file system
*/
public static void checkOutput(String expected,
String actual) throws Exception {
BufferedReader eStream = Files.newBufferedReader(Paths.get(
TestJsonFileDump.getFileFromClasspath(expected)), StandardCharsets.UTF_8);
BufferedReader aStream = Files.newBufferedReader(Paths.get(actual), StandardCharsets.UTF_8);
Object expectedLine = preprocessLine(eStream.readLine());
while (expectedLine != null) {
Object actualLine = preprocessLine(aStream.readLine());
if (expectedLine instanceof Long && actualLine instanceof Long) {
long diff = (Long) actualLine - (Long) expectedLine;
assertTrue(diff < SIZE_SLOP,
"expected: " + expectedLine + ", actual: " + actualLine);
} else {
assertEquals(expectedLine, actualLine);
}
expectedLine = preprocessLine(eStream.readLine());
}
assertNull(eStream.readLine());
assertNull(aStream.readLine());
eStream.close();
aStream.close();
}
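  // Write 21000 rows of random (int, long, word) data with ZLIB compression and the rbtree
  // dictionary, dump the file with row indexes for columns 1-3, and compare against the
  // checked-in orc-file-dump.out.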
@Test
public void testDump() throws Exception {
TypeDescription schema = getMyRecordType();
conf.set(OrcConf.ENCODING_STRATEGY.getAttribute(), "COMPRESSION");
conf.set(OrcConf.DICTIONARY_IMPL.getAttribute(), "rbtree");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema)
.compress(CompressionKind.ZLIB)
.stripeSize(100000)
.rowIndexStride(1000));
Random r1 = new Random(1);
String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
"it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
"of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
"was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
"of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
"it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
"spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
"we", "had", "everything", "before", "us,", "we", "had", "nothing",
"before", "us,", "we", "were", "all", "going", "direct", "to",
"Heaven,", "we", "were", "all", "going", "direct", "the", "other",
"way"};
VectorizedRowBatch batch = schema.createRowBatch(1000);
for(int i=0; i < 21000; ++i) {
appendMyRecord(batch, r1.nextInt(), r1.nextLong(),
words[r1.nextInt(words.length)]);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size > 0) {
writer.addRowBatch(batch);
}
writer.addUserMetadata("hive.acid.key.index",
StandardCharsets.UTF_8.encode("1,1,1;2,3,5;"));
writer.addUserMetadata("some.user.property",
StandardCharsets.UTF_8.encode("foo#bar$baz&"));
writer.close();
assertEquals(2079000, writer.getRawDataSize());
assertEquals(21000, writer.getNumberOfRows());
PrintStream origOut = System.out;
String outputFilename = "orc-file-dump.out";
FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
// replace stdout and run command
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "--rowindex=1,2,3"});
System.out.flush();
System.setOut(origOut);
checkOutput(outputFilename, workDir + File.separator + outputFilename);
}
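  // Write two rows covering all column types and check the -d (data dump) output, which
  // prints one JSON object per row, directly against the expected strings.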
@Test
public void testDataDump() throws Exception {
TypeDescription schema = getAllTypesType();
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.NONE)
.bufferSize(10000)
.rowIndexStride(1000));
VectorizedRowBatch batch = schema.createRowBatch(1000);
Map<String, String> m = new HashMap<String, String>(2);
m.put("k1", "v1");
appendAllTypes(batch,
true,
(byte) 10,
(short) 100,
1000,
10000L,
4.0f,
20.0,
new HiveDecimalWritable("4.2222"),
new Timestamp(format.parse("2014-11-25 18:09:24").getTime()),
new DateWritable(DateWritable.millisToDays(
format.parse("2014-11-25 00:00:00").getTime())),
"string",
"hello",
"hello",
m,
Arrays.asList(100, 200),
10, "foo");
m.clear();
m.put("k3", "v3");
appendAllTypes(
batch,
false,
(byte)20,
(short)200,
2000,
20000L,
8.0f,
40.0,
new HiveDecimalWritable("2.2222"),
new Timestamp(format.parse("2014-11-25 18:02:44").getTime()),
new DateWritable(DateWritable.millisToDays(
format.parse("2014-09-28 00:00:00").getTime())),
"abcd",
"world",
"world",
m,
Arrays.asList(200, 300),
20, "bar");
writer.addRowBatch(batch);
writer.close();
assertEquals(1564, writer.getRawDataSize());
assertEquals(2, writer.getNumberOfRows());
PrintStream origOut = System.out;
ByteArrayOutputStream myOut = new ByteArrayOutputStream();
// replace stdout and run command
    System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "-d"});
System.out.flush();
System.setOut(origOut);
String[] lines = myOut.toString(StandardCharsets.UTF_8.toString()).split("\n");
assertEquals("{\"b\":true,\"bt\":10,\"s\":100,\"i\":1000,\"l\":10000,\"f\":4.0,\"d\":20.0,\"de\":\"4.2222\",\"t\":\"2014-11-25 18:09:24.0\",\"dt\":\"2014-11-25\",\"str\":\"string\",\"c\":\"hello\",\"vc\":\"hello\",\"m\":[{\"_key\":\"k1\",\"_value\":\"v1\"}],\"a\":[100,200],\"st\":{\"i\":10,\"s\":\"foo\"}}", lines[0]);
assertEquals("{\"b\":false,\"bt\":20,\"s\":200,\"i\":2000,\"l\":20000,\"f\":8.0,\"d\":40.0,\"de\":\"2.2222\",\"t\":\"2014-11-25 18:02:44.0\",\"dt\":\"2014-09-28\",\"str\":\"abcd\",\"c\":\"world\",\"vc\":\"world\",\"m\":[{\"_key\":\"k3\",\"_value\":\"v3\"}],\"a\":[200,300],\"st\":{\"i\":20,\"s\":\"bar\"}}", lines[1]);
}
  // Test that dictionary encoding is turned off when the fraction of rows with distinct
  // strings exceeds the configured threshold. When dictionary encoding is turned off, the
  // length of the dictionary stream for the column will be 0 in the ORC file dump.
@Test
public void testDictionaryThreshold() throws Exception {
TypeDescription schema = getMyRecordType();
Configuration conf = new Configuration();
conf.set(OrcConf.ENCODING_STRATEGY.getAttribute(), "COMPRESSION");
conf.setFloat(OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute(), 0.49f);
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.ZLIB)
.rowIndexStride(1000)
.bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch(1000);
Random r1 = new Random(1);
String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
"it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
"of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
"was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
"of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
"it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
"spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
"we", "had", "everything", "before", "us,", "we", "had", "nothing",
"before", "us,", "we", "were", "all", "going", "direct", "to",
"Heaven,", "we", "were", "all", "going", "direct", "the", "other",
"way"};
int nextInt = 0;
for(int i=0; i < 21000; ++i) {
      // Write out the same string twice; this guarantees that the fraction of rows with
      // distinct strings is 0.5.
if (i % 2 == 0) {
nextInt = r1.nextInt(words.length);
        // Append the value of i to the word; this guarantees that when an index or word is
        // repeated, the actual string is unique.
words[nextInt] += "-" + i;
}
appendMyRecord(batch, r1.nextInt(), r1.nextLong(), words[nextInt]);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size != 0) {
writer.addRowBatch(batch);
}
writer.close();
PrintStream origOut = System.out;
String outputFilename = "orc-file-dump-dictionary-threshold.out";
FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
// replace stdout and run command
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "--rowindex=1,2,3"});
System.out.flush();
System.setOut(origOut);
checkOutput(outputFilename, workDir + File.separator + outputFilename);
}
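  // Same data as testDump, but with a bloom filter requested on the string column and
  // attributes set on the schema; compares the --rowindex=3 dump against
  // orc-file-dump-bloomfilter.out.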
@Test
public void testBloomFilter() throws Exception {
TypeDescription schema = getMyRecordType();
schema.setAttribute("test1", "value1");
schema.findSubtype("s")
.setAttribute("test2", "value2")
.setAttribute("test3", "value3");
conf.set(OrcConf.ENCODING_STRATEGY.getAttribute(), "COMPRESSION");
conf.set(OrcConf.DICTIONARY_IMPL.getAttribute(), "rbtree");
OrcFile.WriterOptions options = OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.ZLIB)
.bufferSize(10000)
.rowIndexStride(1000)
.bloomFilterColumns("S");
Writer writer = OrcFile.createWriter(testFilePath, options);
Random r1 = new Random(1);
String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
"it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
"of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
"was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
"of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
"it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
"spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
"we", "had", "everything", "before", "us,", "we", "had", "nothing",
"before", "us,", "we", "were", "all", "going", "direct", "to",
"Heaven,", "we", "were", "all", "going", "direct", "the", "other",
"way"};
VectorizedRowBatch batch = schema.createRowBatch(1000);
for(int i=0; i < 21000; ++i) {
appendMyRecord(batch, r1.nextInt(), r1.nextLong(),
words[r1.nextInt(words.length)]);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size > 0) {
writer.addRowBatch(batch);
}
writer.close();
PrintStream origOut = System.out;
String outputFilename = "orc-file-dump-bloomfilter.out";
FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
// replace stdout and run command
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "--rowindex=3"});
System.out.flush();
System.setOut(origOut);
checkOutput(outputFilename, workDir + File.separator + outputFilename);
}
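  // Bloom filters on both the long and string columns with a 0.01 false-positive rate and
  // the ORIGINAL bloom filter version; compares the --rowindex=2 dump against
  // orc-file-dump-bloomfilter2.out.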
@Test
public void testBloomFilter2() throws Exception {
TypeDescription schema = getMyRecordType();
conf.set(OrcConf.ENCODING_STRATEGY.getAttribute(), "COMPRESSION");
conf.set(OrcConf.DICTIONARY_IMPL.getAttribute(), "rbtree");
OrcFile.WriterOptions options = OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.ZLIB)
.bufferSize(10000)
.rowIndexStride(1000)
.bloomFilterColumns("l,s")
.bloomFilterFpp(0.01)
.bloomFilterVersion(OrcFile.BloomFilterVersion.ORIGINAL);
VectorizedRowBatch batch = schema.createRowBatch(1000);
Writer writer = OrcFile.createWriter(testFilePath, options);
Random r1 = new Random(1);
String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
"it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
"of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
"was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
"of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
"it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
"spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
"we", "had", "everything", "before", "us,", "we", "had", "nothing",
"before", "us,", "we", "were", "all", "going", "direct", "to",
"Heaven,", "we", "were", "all", "going", "direct", "the", "other",
"way"};
for(int i=0; i < 21000; ++i) {
appendMyRecord(batch, r1.nextInt(), r1.nextLong(),
words[r1.nextInt(words.length)]);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size > 0) {
writer.addRowBatch(batch);
}
writer.close();
PrintStream origOut = System.out;
String outputFilename = "orc-file-dump-bloomfilter2.out";
FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
// replace stdout and run command
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "--rowindex=2"});
System.out.flush();
System.setOut(origOut);
checkOutput(outputFilename, workDir + File.separator + outputFilename);
}
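  /**
   * Pack the given int values into a BytesWritable, truncating each value to a byte.
   */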
private static BytesWritable bytes(int... items) {
BytesWritable result = new BytesWritable();
result.setSize(items.length);
for (int i = 0; i < items.length; ++i) {
result.getBytes()[i] = (byte) items[i];
}
return result;
}
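  /**
   * Append one (bytes, str) row to the batch; a null argument marks that column as null.
   */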
private void appendRow(VectorizedRowBatch batch, BytesWritable bytes,
String str) {
int row = batch.size++;
if (bytes == null) {
batch.cols[0].noNulls = false;
batch.cols[0].isNull[row] = true;
} else {
((BytesColumnVector) batch.cols[0]).setVal(row, bytes.getBytes(),
0, bytes.getLength());
}
if (str == null) {
batch.cols[1].noNulls = false;
batch.cols[1].isNull[row] = true;
} else {
((BytesColumnVector) batch.cols[1]).setVal(row, str.getBytes(StandardCharsets.UTF_8));
}
}
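  // Write row groups and stripes with and without nulls in the string column, verify the
  // hasNull flags in the file-level and stripe-level statistics, then check the file dump
  // output (the output comparison is skipped on Windows).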
@Test
public void testHasNull() throws Exception {
TypeDescription schema =
TypeDescription.createStruct()
.addField("bytes1", TypeDescription.createBinary())
.addField("string1", TypeDescription.createString());
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.setSchema(schema)
.rowIndexStride(1000)
.stripeSize(10000)
.bufferSize(10000));
VectorizedRowBatch batch = schema.createRowBatch(5000);
// STRIPE 1
// RG1
for(int i=0; i<1000; i++) {
appendRow(batch, bytes(1, 2, 3), "RG1");
}
writer.addRowBatch(batch);
batch.reset();
// RG2
for(int i=0; i<1000; i++) {
appendRow(batch, bytes(1, 2, 3), null);
}
writer.addRowBatch(batch);
batch.reset();
// RG3
for(int i=0; i<1000; i++) {
appendRow(batch, bytes(1, 2, 3), "RG3");
}
writer.addRowBatch(batch);
batch.reset();
// RG4
for (int i = 0; i < 1000; i++) {
appendRow(batch, bytes(1,2,3), null);
}
writer.addRowBatch(batch);
batch.reset();
// RG5
for(int i=0; i<1000; i++) {
appendRow(batch, bytes(1, 2, 3), null);
}
writer.addRowBatch(batch);
batch.reset();
// STRIPE 2
for (int i = 0; i < 5000; i++) {
appendRow(batch, bytes(1,2,3), null);
}
writer.addRowBatch(batch);
batch.reset();
// STRIPE 3
for (int i = 0; i < 5000; i++) {
appendRow(batch, bytes(1,2,3), "STRIPE-3");
}
writer.addRowBatch(batch);
batch.reset();
// STRIPE 4
for (int i = 0; i < 5000; i++) {
appendRow(batch, bytes(1,2,3), null);
}
writer.addRowBatch(batch);
batch.reset();
writer.close();
Reader reader = OrcFile.createReader(testFilePath,
OrcFile.readerOptions(conf).filesystem(fs));
// check the file level stats
ColumnStatistics[] stats = reader.getStatistics();
assertEquals(20000, stats[0].getNumberOfValues());
assertEquals(20000, stats[1].getNumberOfValues());
assertEquals(7000, stats[2].getNumberOfValues());
assertFalse(stats[0].hasNull());
assertFalse(stats[1].hasNull());
assertTrue(stats[2].hasNull());
// check the stripe level stats
List<StripeStatistics> stripeStats = reader.getStripeStatistics();
// stripe 1 stats
StripeStatistics ss1 = stripeStats.get(0);
ColumnStatistics ss1_cs1 = ss1.getColumnStatistics()[0];
ColumnStatistics ss1_cs2 = ss1.getColumnStatistics()[1];
ColumnStatistics ss1_cs3 = ss1.getColumnStatistics()[2];
assertFalse(ss1_cs1.hasNull());
assertFalse(ss1_cs2.hasNull());
assertTrue(ss1_cs3.hasNull());
// stripe 2 stats
StripeStatistics ss2 = stripeStats.get(1);
ColumnStatistics ss2_cs1 = ss2.getColumnStatistics()[0];
ColumnStatistics ss2_cs2 = ss2.getColumnStatistics()[1];
ColumnStatistics ss2_cs3 = ss2.getColumnStatistics()[2];
assertFalse(ss2_cs1.hasNull());
assertFalse(ss2_cs2.hasNull());
assertTrue(ss2_cs3.hasNull());
// stripe 3 stats
StripeStatistics ss3 = stripeStats.get(2);
ColumnStatistics ss3_cs1 = ss3.getColumnStatistics()[0];
ColumnStatistics ss3_cs2 = ss3.getColumnStatistics()[1];
ColumnStatistics ss3_cs3 = ss3.getColumnStatistics()[2];
assertFalse(ss3_cs1.hasNull());
assertFalse(ss3_cs2.hasNull());
assertFalse(ss3_cs3.hasNull());
// stripe 4 stats
StripeStatistics ss4 = stripeStats.get(3);
ColumnStatistics ss4_cs1 = ss4.getColumnStatistics()[0];
ColumnStatistics ss4_cs2 = ss4.getColumnStatistics()[1];
ColumnStatistics ss4_cs3 = ss4.getColumnStatistics()[2];
assertFalse(ss4_cs1.hasNull());
assertFalse(ss4_cs2.hasNull());
assertTrue(ss4_cs3.hasNull());
// Test file dump
PrintStream origOut = System.out;
String outputFilename = "orc-file-has-null.out";
FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
// replace stdout and run command
System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "--rowindex=2"});
System.out.flush();
System.setOut(origOut);
    // If the assumption evaluates to false, the remainder of the test is aborted and
    // reported as skipped, so the output comparison only runs on non-Windows platforms.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
TestFileDump.checkOutput(outputFilename, workDir + File.separator + outputFilename);
}
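  // FileDump.indexOf should find the ORC magic bytes inside a buffer when searching from
  // the given offset.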
@Test
public void testIndexOf() {
byte[] bytes = ("OO" + OrcFile.MAGIC).getBytes(StandardCharsets.UTF_8);
byte[] pattern = OrcFile.MAGIC.getBytes(StandardCharsets.UTF_8);
assertEquals(2, FileDump.indexOf(bytes, pattern, 1));
}
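  // Corrupt a valid file by appending garbage that includes a second ORC magic, delete the
  // local checksum sidecar file, then run --recover --skip-dump and verify the recovered
  // file is byte-for-byte identical to the untouched copy.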
@Test
public void testRecover() throws Exception {
TypeDescription schema = getMyRecordType();
Writer writer = OrcFile.createWriter(testFilePath,
OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema));
Random r1 = new Random(1);
String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
"it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
"of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
"was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
"of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
"it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
"spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
"we", "had", "everything", "before", "us,", "we", "had", "nothing",
"before", "us,", "we", "were", "all", "going", "direct", "to",
"Heaven,", "we", "were", "all", "going", "direct", "the", "other",
"way"};
VectorizedRowBatch batch = schema.createRowBatch(1000);
for(int i=0; i < 21000; ++i) {
appendMyRecord(batch, r1.nextInt(), r1.nextLong(),
words[r1.nextInt(words.length)]);
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size > 0) {
writer.addRowBatch(batch);
}
writer.close();
long fileSize = fs.getFileStatus(testFilePath).getLen();
String testFilePathStr = Path.mergePaths(
workDir, Path.mergePaths(new Path(Path.SEPARATOR), testFilePath))
.toUri().getPath();
String copyTestFilePathStr = Path.mergePaths(
workDir, Path.mergePaths(new Path(Path.SEPARATOR),
new Path("CopyTestFileDump.testDump.orc")))
.toUri().getPath();
String testCrcFilePathStr = Path.mergePaths(
workDir, Path.mergePaths(new Path(Path.SEPARATOR),
new Path(".TestFileDump.testDump.orc.crc")))
.toUri().getPath();
try {
Files.copy(Paths.get(testFilePathStr), Paths.get(copyTestFilePathStr));
// Append write data to make it a corrupt file
try (FileOutputStream output = new FileOutputStream(testFilePathStr, true)) {
output.write(new byte[1024]);
output.write(OrcFile.MAGIC.getBytes(StandardCharsets.UTF_8));
output.write(new byte[1024]);
output.flush();
}
// Clean up the crc file and append data to avoid checksum read exceptions
Files.delete(Paths.get(testCrcFilePathStr));
conf.setInt(RECOVER_READ_SIZE, (int) (fileSize - 2));
FileDump.main(conf, new String[]{"--recover", "--skip-dump",
testFilePath.toUri().getPath()});
assertTrue(contentEquals(testFilePathStr, copyTestFilePathStr));
} finally {
Files.delete(Paths.get(copyTestFilePathStr));
}
}
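  /**
   * Compare two files byte by byte and return true when their contents are identical.
   */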
private static boolean contentEquals(String filePath, String otherFilePath) throws IOException {
try (InputStream is = new BufferedInputStream(new FileInputStream(filePath));
InputStream otherIs = new BufferedInputStream(new FileInputStream(otherFilePath))) {
int ch = is.read();
while (-1 != ch) {
int ch2 = otherIs.read();
if (ch != ch2) {
return false;
}
ch = is.read();
}
int ch2 = otherIs.read();
return ch2 == -1;
}
}
}
| 32,604 | 39.253086 | 323 | java |
null | orc-main/java/tools/src/test/org/apache/orc/tools/TestJsonFileDump.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Random;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
public class TestJsonFileDump {
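  /**
   * Resolve a resource name to a filesystem path using the system class loader.
   */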
public static String getFileFromClasspath(String name) {
URL url = ClassLoader.getSystemResource(name);
if (url == null) {
throw new IllegalArgumentException("Could not find " + name);
}
return url.getPath();
}
Path workDir = new Path(System.getProperty("test.tmp.dir"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem () throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path("TestFileDump.testDump.orc");
fs.delete(testFilePath, false);
}
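  // Write 21000 rows (every 100th string is null) with a bloom filter on the string column,
  // run FileDump with -j -p --rowindex=3, and compare against the checked-in
  // orc-file-dump.json.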
@Test
public void testJsonDump() throws Exception {
TypeDescription schema =
TypeDescription.fromString("struct<i:int,l:bigint,s:string>");
schema.findSubtype("l")
.setAttribute("test1", "value1")
.setAttribute("test2","value2");
conf.set(OrcConf.ENCODING_STRATEGY.getAttribute(), "COMPRESSION");
conf.set(OrcConf.DICTIONARY_IMPL.getAttribute(), "rbtree");
OrcFile.WriterOptions options = OrcFile.writerOptions(conf)
.fileSystem(fs)
.setSchema(schema)
.stripeSize(100000)
.compress(CompressionKind.ZLIB)
.bufferSize(10000)
.rowIndexStride(1000)
.bloomFilterColumns("s");
Writer writer = OrcFile.createWriter(testFilePath, options);
Random r1 = new Random(1);
String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
"it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
"of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
"was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
"of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
"it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
"spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
"we", "had", "everything", "before", "us,", "we", "had", "nothing",
"before", "us,", "we", "were", "all", "going", "direct", "to",
"Heaven,", "we", "were", "all", "going", "direct", "the", "other",
"way"};
VectorizedRowBatch batch = schema.createRowBatch(1000);
for(int i=0; i < 21000; ++i) {
((LongColumnVector) batch.cols[0]).vector[batch.size] = r1.nextInt();
((LongColumnVector) batch.cols[1]).vector[batch.size] = r1.nextLong();
if (i % 100 == 0) {
batch.cols[2].noNulls = false;
batch.cols[2].isNull[batch.size] = true;
} else {
((BytesColumnVector) batch.cols[2]).setVal(batch.size,
words[r1.nextInt(words.length)].getBytes(StandardCharsets.UTF_8));
}
batch.size += 1;
if (batch.size == batch.getMaxSize()) {
writer.addRowBatch(batch);
batch.reset();
}
}
if (batch.size > 0) {
writer.addRowBatch(batch);
}
writer.close();
PrintStream origOut = System.out;
String outputFilename = "orc-file-dump.json";
FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
// replace stdout and run command
System.setOut(new PrintStream(myOut, true, StandardCharsets.UTF_8.toString()));
FileDump.main(new String[]{testFilePath.toString(), "-j", "-p", "--rowindex=3"});
System.out.flush();
System.setOut(origOut);
TestFileDump.checkOutput(outputFilename, workDir + File.separator + outputFilename);
}
}
| 5,291 | 37.347826 | 93 | java |
null | orc-main/java/tools/src/test/org/apache/orc/tools/convert/TestConvert.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.orc.tools.convert;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;
import org.apache.orc.TypeDescription;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestConvert {
public static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getDefault();
Path workDir = new Path(System.getProperty("test.tmp.dir"));
Configuration conf;
FileSystem fs;
Path testFilePath;
@BeforeEach
public void openFileSystem () throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
fs.setWorkingDirectory(workDir);
testFilePath = new Path("TestConvert.testConvert.orc");
fs.delete(testFilePath, false);
}
@BeforeAll
public static void changeDefaultTimeZone() {
TimeZone.setDefault(TimeZone.getTimeZone("America/New_York"));
}
@AfterAll
public static void resetDefaultTimeZone() {
TimeZone.setDefault(DEFAULT_TIME_ZONE);
}
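  // Convert a small CSV with a custom timestamp format to ORC via ConvertTool, then read
  // the file back and verify the timestamp values survive the round trip.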
@Test
public void testConvertCustomTimestampFromCsv() throws IOException, ParseException {
Path csvFile = new Path("test.csv");
FSDataOutputStream stream = fs.create(csvFile, true);
String[] timeValues = new String[] {"0001-01-01 00:00:00.000", "2021-12-01 18:36:00.800"};
stream.writeBytes(String.join("\n", timeValues));
stream.close();
String schema = "struct<d:timestamp>";
String timestampFormat = "yyyy-MM-dd HH:mm:ss.SSS";
TypeDescription readSchema = TypeDescription.fromString(schema);
ConvertTool.main(conf, new String[]{"--schema", schema, "-o", testFilePath.toString(),
"-t", timestampFormat, csvFile.toString()});
assertTrue(fs.exists(testFilePath));
Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
VectorizedRowBatch batch = readSchema.createRowBatch();
RecordReader rowIterator = reader.rows(reader.options().schema(readSchema));
TimestampColumnVector tcv = (TimestampColumnVector) batch.cols[0];
while (rowIterator.nextBatch(batch)) {
for (int row = 0; row < batch.size; ++row) {
Timestamp timestamp = Timestamp.valueOf(timeValues[row]);
assertEquals(timestamp.getTime(), tcv.time[row]);
assertEquals(timestamp.getNanos(), tcv.nanos[row]);
}
}
rowIterator.close();
}
}
| 3,766 | 35.221154 | 94 | java |