repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Random;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner.Entry;
import org.apache.hadoop.util.Time;
public class TestTFileSeqFileComparison extends TestCase {
MyOptions options;
private FileSystem fs;
private Configuration conf;
private long startTimeEpoch;
private long finishTimeEpoch;
private DateFormat formatter;
byte[][] dictionary;
@Override
public void setUp() throws IOException {
if (options == null) {
options = new MyOptions(new String[0]);
}
conf = new Configuration();
conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSize);
conf.setInt("tfile.fs.output.buffer.size", options.fsOutputBufferSize);
Path path = new Path(options.rootDir);
fs = path.getFileSystem(conf);
formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
setUpDictionary();
}
private void setUpDictionary() {
Random rng = new Random();
dictionary = new byte[options.dictSize][];
for (int i = 0; i < options.dictSize; ++i) {
int len =
rng.nextInt(options.maxWordLen - options.minWordLen)
+ options.minWordLen;
dictionary[i] = new byte[len];
rng.nextBytes(dictionary[i]);
}
}
@Override
public void tearDown() throws IOException {
// do nothing
}
public void startTime() throws IOException {
startTimeEpoch = Time.now();
System.out.println(formatTime() + " Started timing.");
}
public void stopTime() throws IOException {
finishTimeEpoch = Time.now();
System.out.println(formatTime() + " Stopped timing.");
}
public long getIntervalMillis() throws IOException {
return finishTimeEpoch - startTimeEpoch;
}
public void printlnWithTimestamp(String message) throws IOException {
System.out.println(formatTime() + " " + message);
}
  /*
   * Format an epoch time in milliseconds as "yyyy-MM-dd HH:mm:ss".
   */
  public String formatTime(long millis) {
    return formatter.format(millis);
  }
public String formatTime() {
return formatTime(Time.now());
}
private interface KVAppendable {
public void append(BytesWritable key, BytesWritable value)
throws IOException;
public void close() throws IOException;
}
private interface KVReadable {
public byte[] getKey();
public byte[] getValue();
public int getKeyLength();
public int getValueLength();
public boolean next() throws IOException;
public void close() throws IOException;
}
static class TFileAppendable implements KVAppendable {
private FSDataOutputStream fsdos;
private TFile.Writer writer;
public TFileAppendable(FileSystem fs, Path path, String compress,
int minBlkSize, int osBufferSize, Configuration conf)
throws IOException {
this.fsdos = fs.create(path, true, osBufferSize);
this.writer = new TFile.Writer(fsdos, minBlkSize, compress, null, conf);
}
@Override
public void append(BytesWritable key, BytesWritable value)
throws IOException {
writer.append(key.get(), 0, key.getSize(), value.get(), 0, value
.getSize());
}
@Override
public void close() throws IOException {
writer.close();
fsdos.close();
}
}
static class TFileReadable implements KVReadable {
private FSDataInputStream fsdis;
private TFile.Reader reader;
private TFile.Reader.Scanner scanner;
private byte[] keyBuffer;
private int keyLength;
private byte[] valueBuffer;
private int valueLength;
public TFileReadable(FileSystem fs, Path path, int osBufferSize,
Configuration conf) throws IOException {
this.fsdis = fs.open(path, osBufferSize);
this.reader =
new TFile.Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
this.scanner = reader.createScanner();
keyBuffer = new byte[32];
valueBuffer = new byte[32];
}
private void checkKeyBuffer(int size) {
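      // Grow geometrically: the new capacity is at least double the old one
      // and always large enough to hold size bytes.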
if (size <= keyBuffer.length) {
return;
}
keyBuffer =
new byte[Math.max(2 * keyBuffer.length, 2 * size - keyBuffer.length)];
}
private void checkValueBuffer(int size) {
if (size <= valueBuffer.length) {
return;
}
valueBuffer =
new byte[Math.max(2 * valueBuffer.length, 2 * size
- valueBuffer.length)];
}
@Override
public byte[] getKey() {
return keyBuffer;
}
@Override
public int getKeyLength() {
return keyLength;
}
@Override
public byte[] getValue() {
return valueBuffer;
}
@Override
public int getValueLength() {
return valueLength;
}
@Override
public boolean next() throws IOException {
if (scanner.atEnd()) return false;
Entry entry = scanner.entry();
keyLength = entry.getKeyLength();
checkKeyBuffer(keyLength);
entry.getKey(keyBuffer);
valueLength = entry.getValueLength();
checkValueBuffer(valueLength);
entry.getValue(valueBuffer);
scanner.advance();
return true;
}
@Override
public void close() throws IOException {
scanner.close();
reader.close();
fsdis.close();
}
}
static class SeqFileAppendable implements KVAppendable {
private FSDataOutputStream fsdos;
private SequenceFile.Writer writer;
public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize,
String compress, int minBlkSize) throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("hadoop.native.lib", true);
CompressionCodec codec = null;
if ("lzo".equals(compress)) {
codec = Compression.Algorithm.LZO.getCodec();
}
else if ("gz".equals(compress)) {
codec = Compression.Algorithm.GZ.getCodec();
}
else if (!"none".equals(compress))
throw new IOException("Codec not supported.");
this.fsdos = fs.create(path, true, osBufferSize);
if (!"none".equals(compress)) {
writer =
SequenceFile.createWriter(conf, fsdos, BytesWritable.class,
BytesWritable.class, SequenceFile.CompressionType.BLOCK, codec);
}
else {
writer =
SequenceFile.createWriter(conf, fsdos, BytesWritable.class,
BytesWritable.class, SequenceFile.CompressionType.NONE, null);
}
}
@Override
public void append(BytesWritable key, BytesWritable value)
throws IOException {
writer.append(key, value);
}
@Override
public void close() throws IOException {
writer.close();
fsdos.close();
}
}
static class SeqFileReadable implements KVReadable {
private SequenceFile.Reader reader;
private BytesWritable key;
private BytesWritable value;
public SeqFileReadable(FileSystem fs, Path path, int osBufferSize)
throws IOException {
Configuration conf = new Configuration();
conf.setInt("io.file.buffer.size", osBufferSize);
reader = new SequenceFile.Reader(fs, path, conf);
key = new BytesWritable();
value = new BytesWritable();
}
@Override
public byte[] getKey() {
return key.get();
}
@Override
public int getKeyLength() {
return key.getSize();
}
@Override
public byte[] getValue() {
return value.get();
}
@Override
public int getValueLength() {
return value.getSize();
}
@Override
public boolean next() throws IOException {
return reader.next(key, value);
}
@Override
public void close() throws IOException {
reader.close();
}
}
private void reportStats(Path path, long totalBytes) throws IOException {
long duration = getIntervalMillis();
long fsize = fs.getFileStatus(path).getLen();
printlnWithTimestamp(String.format(
"Duration: %dms...total size: %.2fMB...raw thrpt: %.2fMB/s", duration,
(double) totalBytes / 1024 / 1024, (double) totalBytes / duration
* 1000 / 1024 / 1024));
printlnWithTimestamp(String.format(
"Compressed size: %.2fMB...compressed thrpt: %.2fMB/s.",
(double) fsize / 1024 / 1024, (double) fsize / duration * 1000 / 1024
/ 1024));
}
private void fillBuffer(Random rng, BytesWritable bw, byte[] tmp, int len) {
int n = 0;
while (n < len) {
byte[] word = dictionary[rng.nextInt(dictionary.length)];
int l = Math.min(word.length, len - n);
System.arraycopy(word, 0, tmp, n, l);
n += l;
}
bw.set(tmp, 0, len);
}
private void timeWrite(Path path, KVAppendable appendable, int baseKlen,
int baseVlen, long fileSize) throws IOException {
int maxKlen = baseKlen * 2;
int maxVlen = baseVlen * 2;
BytesWritable key = new BytesWritable();
BytesWritable value = new BytesWritable();
byte[] keyBuffer = new byte[maxKlen];
byte[] valueBuffer = new byte[maxVlen];
Random rng = new Random(options.seed);
long totalBytes = 0;
printlnWithTimestamp("Start writing: " + path.getName() + "...");
startTime();
for (long i = 0; true; ++i) {
if (i % 1000 == 0) { // test the size for every 1000 rows.
if (fs.getFileStatus(path).getLen() >= fileSize) {
break;
}
}
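      // Key and value lengths vary uniformly in [base, 2*base), matching the
      // --key-length/--value-length option documentation.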
int klen = rng.nextInt(baseKlen) + baseKlen;
int vlen = rng.nextInt(baseVlen) + baseVlen;
fillBuffer(rng, key, keyBuffer, klen);
fillBuffer(rng, value, valueBuffer, vlen);
key.set(keyBuffer, 0, klen);
value.set(valueBuffer, 0, vlen);
appendable.append(key, value);
totalBytes += klen;
totalBytes += vlen;
}
stopTime();
appendable.close();
reportStats(path, totalBytes);
}
private void timeRead(Path path, KVReadable readable) throws IOException {
printlnWithTimestamp("Start reading: " + path.getName() + "...");
long totalBytes = 0;
startTime();
    while (readable.next()) {
totalBytes += readable.getKeyLength();
totalBytes += readable.getValueLength();
}
stopTime();
readable.close();
reportStats(path, totalBytes);
}
private void createTFile(String parameters, String compress)
throws IOException {
System.out.println("=== TFile: Creation (" + parameters + ") === ");
Path path = new Path(options.rootDir, "TFile.Performance");
KVAppendable appendable =
new TFileAppendable(fs, path, compress, options.minBlockSize,
options.osOutputBufferSize, conf);
timeWrite(path, appendable, options.keyLength, options.valueLength,
options.fileSize);
}
private void readTFile(String parameters, boolean delFile) throws IOException {
System.out.println("=== TFile: Reading (" + parameters + ") === ");
{
Path path = new Path(options.rootDir, "TFile.Performance");
KVReadable readable =
new TFileReadable(fs, path, options.osInputBufferSize, conf);
timeRead(path, readable);
if (delFile) {
if (fs.exists(path)) {
fs.delete(path, true);
}
}
}
}
private void createSeqFile(String parameters, String compress)
throws IOException {
System.out.println("=== SeqFile: Creation (" + parameters + ") === ");
Path path = new Path(options.rootDir, "SeqFile.Performance");
KVAppendable appendable =
new SeqFileAppendable(fs, path, options.osOutputBufferSize, compress,
options.minBlockSize);
timeWrite(path, appendable, options.keyLength, options.valueLength,
options.fileSize);
}
private void readSeqFile(String parameters, boolean delFile)
throws IOException {
System.out.println("=== SeqFile: Reading (" + parameters + ") === ");
Path path = new Path(options.rootDir, "SeqFile.Performance");
KVReadable readable =
new SeqFileReadable(fs, path, options.osInputBufferSize);
timeRead(path, readable);
if (delFile) {
if (fs.exists(path)) {
fs.delete(path, true);
}
}
}
private void compareRun(String compress) throws IOException {
String[] supported = TFile.getSupportedCompressionAlgorithms();
boolean proceed = false;
for (String c : supported) {
if (c.equals(compress)) {
proceed = true;
break;
}
}
if (!proceed) {
System.out.println("Skipped for " + compress);
return;
}
options.compress = compress;
String parameters = parameters2String(options);
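    // Each format is exercised twice in S-T-T-S order, presumably so that
    // page-cache warm-up does not systematically favor either format.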
createSeqFile(parameters, compress);
readSeqFile(parameters, true);
createTFile(parameters, compress);
readTFile(parameters, true);
createTFile(parameters, compress);
readTFile(parameters, true);
createSeqFile(parameters, compress);
readSeqFile(parameters, true);
}
public void testRunComparisons() throws IOException {
String[] compresses = new String[] { "none", "lzo", "gz" };
for (String compress : compresses) {
if (compress.equals("none")) {
conf
.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSizeNone);
conf.setInt("tfile.fs.output.buffer.size",
options.fsOutputBufferSizeNone);
}
else if (compress.equals("lzo")) {
conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSizeLzo);
conf.setInt("tfile.fs.output.buffer.size",
options.fsOutputBufferSizeLzo);
}
else {
conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSizeGz);
conf
.setInt("tfile.fs.output.buffer.size", options.fsOutputBufferSizeGz);
}
compareRun(compress);
}
}
private static String parameters2String(MyOptions options) {
return String
.format(
"KLEN: %d-%d... VLEN: %d-%d...MinBlkSize: %.2fKB...Target Size: %.2fMB...Compression: ...%s",
options.keyLength, options.keyLength * 2, options.valueLength,
options.valueLength * 2, (double) options.minBlockSize / 1024,
(double) options.fileSize / 1024 / 1024, options.compress);
}
private static class MyOptions {
String rootDir =
System
.getProperty("test.build.data", "/tmp/tfile-test");
String compress = "gz";
String format = "tfile";
int dictSize = 1000;
int minWordLen = 5;
int maxWordLen = 20;
int keyLength = 50;
int valueLength = 100;
int minBlockSize = 256 * 1024;
int fsOutputBufferSize = 1;
int fsInputBufferSize = 0;
// special variable only for unit testing.
int fsInputBufferSizeNone = 0;
int fsInputBufferSizeGz = 0;
int fsInputBufferSizeLzo = 0;
int fsOutputBufferSizeNone = 1;
int fsOutputBufferSizeGz = 1;
int fsOutputBufferSizeLzo = 1;
// un-exposed parameters.
int osInputBufferSize = 64 * 1024;
int osOutputBufferSize = 64 * 1024;
long fileSize = 3 * 1024 * 1024;
long seed;
static final int OP_CREATE = 1;
static final int OP_READ = 2;
int op = OP_READ;
boolean proceed = false;
public MyOptions(String[] args) {
seed = System.nanoTime();
try {
Options opts = buildOptions();
CommandLineParser parser = new GnuParser();
CommandLine line = parser.parse(opts, args, true);
processOptions(line, opts);
validateOptions();
}
catch (ParseException e) {
System.out.println(e.getMessage());
System.out.println("Try \"--help\" option for details.");
setStopProceed();
}
}
public boolean proceed() {
return proceed;
}
private Options buildOptions() {
Option compress =
OptionBuilder.withLongOpt("compress").withArgName("[none|lzo|gz]")
.hasArg().withDescription("compression scheme").create('c');
      Option dictSize =
OptionBuilder.withLongOpt("dict").withArgName("size").hasArg()
.withDescription("number of dictionary entries").create('d');
Option fileSize =
OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB")
.hasArg().withDescription("target size of the file (in MB).")
.create('s');
Option format =
OptionBuilder.withLongOpt("format").withArgName("[tfile|seqfile]")
.hasArg().withDescription("choose TFile or SeqFile").create('f');
Option fsInputBufferSz =
OptionBuilder.withLongOpt("fs-input-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system input buffer (in bytes).").create(
'i');
Option fsOutputBufferSize =
OptionBuilder.withLongOpt("fs-output-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system output buffer (in bytes).").create(
'o');
Option keyLen =
OptionBuilder
.withLongOpt("key-length")
.withArgName("length")
.hasArg()
.withDescription(
"base length of the key (in bytes), actual length varies in [base, 2*base)")
.create('k');
Option valueLen =
OptionBuilder
.withLongOpt("value-length")
.withArgName("length")
.hasArg()
.withDescription(
"base length of the value (in bytes), actual length varies in [base, 2*base)")
.create('v');
Option wordLen =
OptionBuilder.withLongOpt("word-length").withArgName("min,max")
.hasArg().withDescription(
"range of dictionary word length (in bytes)").create('w');
Option blockSz =
OptionBuilder.withLongOpt("block").withArgName("size-in-KB").hasArg()
.withDescription("minimum block size (in KB)").create('b');
Option seed =
OptionBuilder.withLongOpt("seed").withArgName("long-int").hasArg()
.withDescription("specify the seed").create('S');
Option operation =
OptionBuilder.withLongOpt("operation").withArgName("r|w|rw").hasArg()
.withDescription(
"action: read-only, create-only, read-after-create").create(
'x');
Option rootDir =
OptionBuilder.withLongOpt("root-dir").withArgName("path").hasArg()
.withDescription(
"specify root directory where files will be created.")
.create('r');
Option help =
OptionBuilder.withLongOpt("help").hasArg(false).withDescription(
"show this screen").create("h");
      return new Options().addOption(compress).addOption(dictSize).addOption(
          fileSize).addOption(format).addOption(fsInputBufferSz).addOption(
          fsOutputBufferSize).addOption(keyLen).addOption(wordLen).addOption(
          blockSz).addOption(rootDir).addOption(valueLen).addOption(operation)
          .addOption(seed).addOption(help);
}
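    // Illustrative invocation (option values are examples only):
    //   java ... TestTFileSeqFileComparison -c gz -s 64 -k 50 -v 100 \
    //       -r /tmp/tfile-bench -x rw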
private void processOptions(CommandLine line, Options opts)
throws ParseException {
// --help -h and --version -V must be processed first.
if (line.hasOption('h')) {
HelpFormatter formatter = new HelpFormatter();
System.out.println("TFile and SeqFile benchmark.");
System.out.println();
formatter.printHelp(100,
"java ... TestTFileSeqFileComparison [options]",
"\nSupported options:", opts, "");
return;
}
if (line.hasOption('c')) {
compress = line.getOptionValue('c');
}
if (line.hasOption('d')) {
dictSize = Integer.parseInt(line.getOptionValue('d'));
}
if (line.hasOption('s')) {
fileSize = Long.parseLong(line.getOptionValue('s')) * 1024 * 1024;
}
if (line.hasOption('f')) {
format = line.getOptionValue('f');
}
if (line.hasOption('i')) {
fsInputBufferSize = Integer.parseInt(line.getOptionValue('i'));
}
if (line.hasOption('o')) {
fsOutputBufferSize = Integer.parseInt(line.getOptionValue('o'));
}
if (line.hasOption('k')) {
keyLength = Integer.parseInt(line.getOptionValue('k'));
}
if (line.hasOption('v')) {
valueLength = Integer.parseInt(line.getOptionValue('v'));
}
if (line.hasOption('b')) {
minBlockSize = Integer.parseInt(line.getOptionValue('b')) * 1024;
}
if (line.hasOption('r')) {
rootDir = line.getOptionValue('r');
}
if (line.hasOption('S')) {
seed = Long.parseLong(line.getOptionValue('S'));
}
if (line.hasOption('w')) {
String min_max = line.getOptionValue('w');
StringTokenizer st = new StringTokenizer(min_max, " \t,");
if (st.countTokens() != 2) {
throw new ParseException("Bad word length specification: " + min_max);
}
minWordLen = Integer.parseInt(st.nextToken());
maxWordLen = Integer.parseInt(st.nextToken());
}
if (line.hasOption('x')) {
String strOp = line.getOptionValue('x');
if (strOp.equals("r")) {
op = OP_READ;
}
else if (strOp.equals("w")) {
op = OP_CREATE;
}
else if (strOp.equals("rw")) {
op = OP_CREATE | OP_READ;
}
else {
throw new ParseException("Unknown action specifier: " + strOp);
}
}
proceed = true;
}
private void validateOptions() throws ParseException {
if (!compress.equals("none") && !compress.equals("lzo")
&& !compress.equals("gz")) {
throw new ParseException("Unknown compression scheme: " + compress);
}
if (!format.equals("tfile") && !format.equals("seqfile")) {
throw new ParseException("Unknown file format: " + format);
}
if (minWordLen >= maxWordLen) {
throw new ParseException(
"Max word length must be greater than min word length.");
}
return;
}
private void setStopProceed() {
proceed = false;
}
public boolean doCreate() {
return (op & OP_CREATE) != 0;
}
public boolean doRead() {
return (op & OP_READ) != 0;
}
}
public static void main(String[] args) throws IOException {
TestTFileSeqFileComparison testCase = new TestTFileSeqFileComparison();
MyOptions options = new MyOptions(args);
    if (!options.proceed()) {
return;
}
testCase.options = options;
String parameters = parameters2String(options);
testCase.setUp();
if (testCase.options.format.equals("tfile")) {
if (options.doCreate()) {
testCase.createTFile(parameters, options.compress);
}
if (options.doRead()) {
testCase.readTFile(parameters, options.doCreate());
}
}
else {
if (options.doCreate()) {
testCase.createSeqFile(parameters, options.compress);
}
if (options.doRead()) {
testCase.readSeqFile(parameters, options.doCreate());
}
}
testCase.tearDown();
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/RandomDistribution.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Random;
/**
* A class that generates random numbers that follow some distribution.
*/
public class RandomDistribution {
/**
* Interface for discrete (integer) random distributions.
*/
public static interface DiscreteRNG {
/**
* Get the next random number
*
* @return the next random number.
*/
public int nextInt();
}
/**
* P(i)=1/(max-min)
*/
public static final class Flat implements DiscreteRNG {
private final Random random;
private final int min;
private final int max;
/**
* Generate random integers from min (inclusive) to max (exclusive)
* following even distribution.
*
* @param random
* The basic random number generator.
* @param min
* Minimum integer
* @param max
* maximum integer (exclusive).
*
*/
public Flat(Random random, int min, int max) {
if (min >= max) {
throw new IllegalArgumentException("Invalid range");
}
this.random = random;
this.min = min;
this.max = max;
}
/**
* @see DiscreteRNG#nextInt()
*/
@Override
public int nextInt() {
return random.nextInt(max - min) + min;
}
}
/**
* Zipf distribution. The ratio of the probabilities of integer i and j is
* defined as follows:
*
* P(i)/P(j)=((j-min+1)/(i-min+1))^sigma.
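   *
   * For example, with min=0 and sigma=2, the unnormalized weights of 0, 1,
   * and 2 are 1, 1/4, and 1/9; nextInt() returns them with those relative
   * odds.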
*/
public static final class Zipf implements DiscreteRNG {
private static final double DEFAULT_EPSILON = 0.001;
private final Random random;
private final ArrayList<Integer> k;
private final ArrayList<Double> v;
/**
* Constructor
*
* @param r
* The random number generator.
* @param min
     *          minimum integer (inclusive)
* @param max
* maximum integer (exclusive)
* @param sigma
* parameter sigma. (sigma > 1.0)
*/
public Zipf(Random r, int min, int max, double sigma) {
this(r, min, max, sigma, DEFAULT_EPSILON);
}
/**
* Constructor.
*
* @param r
* The random number generator.
* @param min
     *          minimum integer (inclusive)
* @param max
* maximum integer (exclusive)
* @param sigma
* parameter sigma. (sigma > 1.0)
* @param epsilon
* Allowable error percentage (0 < epsilon < 1.0).
*/
public Zipf(Random r, int min, int max, double sigma, double epsilon) {
if ((max <= min) || (sigma <= 1) || (epsilon <= 0)
|| (epsilon >= 0.5)) {
throw new IllegalArgumentException("Invalid arguments");
}
random = r;
k = new ArrayList<Integer>();
v = new ArrayList<Double>();
double sum = 0;
int last = -1;
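      // Accumulate the unnormalized CDF, but store only sparse checkpoints:
      // a new (k, v) pair is kept once i exceeds last/(1 - epsilon). This
      // bounds the table size (geometric spacing of checkpoints) while
      // keeping the relative error of sampled values within roughly epsilon.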
for (int i = min; i < max; ++i) {
sum += Math.exp(-sigma * Math.log(i - min + 1));
if ((last == -1) || i * (1 - epsilon) > last) {
k.add(i);
v.add(sum);
last = i;
}
}
if (last != max - 1) {
k.add(max - 1);
v.add(sum);
}
v.set(v.size() - 1, 1.0);
for (int i = v.size() - 2; i >= 0; --i) {
v.set(i, v.get(i) / sum);
}
}
/**
* @see DiscreteRNG#nextInt()
*/
@Override
public int nextInt() {
double d = random.nextDouble();
int idx = Collections.binarySearch(v, d);
if (idx > 0) {
++idx;
}
else {
idx = -(idx + 1);
}
if (idx >= v.size()) {
idx = v.size() - 1;
}
if (idx == 0) {
return k.get(0);
}
int ceiling = k.get(idx);
int lower = k.get(idx - 1);
return ceiling - random.nextInt(ceiling - lower);
}
}
/**
* Binomial distribution.
*
* P(k)=select(n, k)*p^k*(1-p)^(n-k) (k = 0, 1, ..., n)
*
 * P(k)=select(max-min-1, k-min)*p^(k-min)*(1-p)^(max-k-1)
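 *
 * For example, min=0, max=3, p=0.5 gives n=2 and P(0), P(1), P(2) =
 * 1/4, 1/2, 1/4.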
*/
public static final class Binomial implements DiscreteRNG {
private final Random random;
private final int min;
private final int n;
private final double[] v;
private static double select(int n, int k) {
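      // Binomial coefficient C(n, k), computed as the product of i/(i-k) for
      // i = k+1..n to avoid the overflow of computing factorials directly.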
double ret = 1.0;
for (int i = k + 1; i <= n; ++i) {
ret *= (double) i / (i - k);
}
return ret;
}
private static double power(double p, int k) {
return Math.exp(k * Math.log(p));
}
/**
* Generate random integers from min (inclusive) to max (exclusive)
* following Binomial distribution.
*
* @param random
* The basic random number generator.
* @param min
* Minimum integer
* @param max
* maximum integer (exclusive).
* @param p
* parameter.
*
*/
public Binomial(Random random, int min, int max, double p) {
if (min >= max) {
throw new IllegalArgumentException("Invalid range");
}
this.random = random;
this.min = min;
this.n = max - min - 1;
if (n > 0) {
v = new double[n + 1];
double sum = 0.0;
for (int i = 0; i <= n; ++i) {
sum += select(n, i) * power(p, i) * power(1 - p, n - i);
v[i] = sum;
}
for (int i = 0; i <= n; ++i) {
v[i] /= sum;
}
}
else {
v = null;
}
}
/**
* @see DiscreteRNG#nextInt()
*/
@Override
public int nextInt() {
if (v == null) {
return min;
}
double d = random.nextDouble();
int idx = Arrays.binarySearch(v, d);
if (idx > 0) {
++idx;
} else {
idx = -(idx + 1);
}
if (idx >= v.length) {
idx = v.length - 1;
}
return idx + min;
}
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
/**
*
 * Byte arrays test case class using the no-compression codec together with a
 * jclass (Java class) comparator.
*
*/
public class TestTFileNoneCodecsJClassComparatorByteArrays extends TestTFileByteArrays {
/**
   * Test the no-compression codec with a jclass comparator, using the same
   * test cases as in the ByteArrays.
*/
@Override
public void setUp() throws IOException {
init(Compression.Algorithm.NONE.getName(),
"jclass: org.apache.hadoop.io.file.tfile.MyComparator", 24, 24);
super.setUp();
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileJClassComparatorByteArrays.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.io.Serializable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
/**
*
 * Byte arrays test case class using the GZ compression codec together with a
 * jclass (Java class) comparator.
*
*/
public class TestTFileJClassComparatorByteArrays extends TestTFileByteArrays {
/**
   * Test the GZ codec with a jclass comparator, using the same test cases as
   * in the ByteArrays.
*/
@Override
public void setUp() throws IOException {
init(Compression.Algorithm.GZ.getName(),
"jclass: org.apache.hadoop.io.file.tfile.MyComparator");
super.setUp();
}
}
class MyComparator implements RawComparator<byte[]>, Serializable {
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2);
}
@Override
public int compare(byte[] o1, byte[] o2) {
return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length);
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.Random;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.io.file.tfile.TFile.Reader;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Location;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
*
* Byte arrays test case class using GZ compression codec, base class of none
* and LZO compression classes.
*
*/
public class TestTFileByteArrays {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private final static int BLOCK_SIZE = 512;
private final static int BUF_SIZE = 64;
private final static int K = 1024;
protected boolean skip = false;
private static final String KEY = "key";
private static final String VALUE = "value";
private FileSystem fs;
private Configuration conf = new Configuration();
private Path path;
private FSDataOutputStream out;
private Writer writer;
private String compression = Compression.Algorithm.GZ.getName();
private String comparator = "memcmp";
private final String outputFile = getClass().getSimpleName();
  /*
   * Pre-sampled numbers of records in one block, based on the generated key
   * and value strings. The counts differ slightly depending on whether the
   * native compression libs are present.
   */
private boolean usingNative = ZlibFactory.isNativeZlibLoaded(conf);
private int records1stBlock = usingNative ? 5674 : 4480;
private int records2ndBlock = usingNative ? 5574 : 4263;
public void init(String compression, String comparator,
int numRecords1stBlock, int numRecords2ndBlock) {
init(compression, comparator);
this.records1stBlock = numRecords1stBlock;
this.records2ndBlock = numRecords2ndBlock;
}
public void init(String compression, String comparator) {
this.compression = compression;
this.comparator = comparator;
}
@Before
public void setUp() throws IOException {
path = new Path(ROOT, outputFile);
fs = path.getFileSystem(conf);
out = fs.create(path);
writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
@After
public void tearDown() throws IOException {
if (!skip)
fs.delete(path, true);
}
@Test
public void testNoDataEntry() throws IOException {
if (skip)
return;
closeOutput();
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.assertTrue(reader.isSorted());
Scanner scanner = reader.createScanner();
Assert.assertTrue(scanner.atEnd());
scanner.close();
reader.close();
}
@Test
public void testOneDataEntry() throws IOException {
if (skip)
return;
writeRecords(1);
readRecords(1);
checkBlockIndex(0, 0);
readValueBeforeKey(0);
readKeyWithoutValue(0);
readValueWithoutKey(0);
readKeyManyTimes(0);
}
@Test
public void testTwoDataEntries() throws IOException {
if (skip)
return;
writeRecords(2);
readRecords(2);
}
/**
* Fill up exactly one block.
*
* @throws IOException
*/
@Test
public void testOneBlock() throws IOException {
if (skip)
return;
// just under one block
writeRecords(records1stBlock);
readRecords(records1stBlock);
// last key should be in the first block (block 0)
checkBlockIndex(records1stBlock - 1, 0);
}
/**
* One block plus one record.
*
* @throws IOException
*/
@Test
public void testOneBlockPlusOneEntry() throws IOException {
if (skip)
return;
writeRecords(records1stBlock + 1);
readRecords(records1stBlock + 1);
checkBlockIndex(records1stBlock - 1, 0);
checkBlockIndex(records1stBlock, 1);
}
@Test
public void testTwoBlocks() throws IOException {
if (skip)
return;
writeRecords(records1stBlock + 5);
readRecords(records1stBlock + 5);
checkBlockIndex(records1stBlock + 4, 1);
}
@Test
public void testThreeBlocks() throws IOException {
if (skip)
return;
writeRecords(2 * records1stBlock + 5);
readRecords(2 * records1stBlock + 5);
checkBlockIndex(2 * records1stBlock + 4, 2);
// 1st key in file
readValueBeforeKey(0);
readKeyWithoutValue(0);
readValueWithoutKey(0);
readKeyManyTimes(0);
// last key in file
readValueBeforeKey(2 * records1stBlock + 4);
readKeyWithoutValue(2 * records1stBlock + 4);
readValueWithoutKey(2 * records1stBlock + 4);
readKeyManyTimes(2 * records1stBlock + 4);
// 1st key in mid block, verify block indexes then read
checkBlockIndex(records1stBlock - 1, 0);
checkBlockIndex(records1stBlock, 1);
readValueBeforeKey(records1stBlock);
readKeyWithoutValue(records1stBlock);
readValueWithoutKey(records1stBlock);
readKeyManyTimes(records1stBlock);
// last key in mid block, verify block indexes then read
checkBlockIndex(records1stBlock + records2ndBlock
- 1, 1);
checkBlockIndex(records1stBlock + records2ndBlock, 2);
readValueBeforeKey(records1stBlock
+ records2ndBlock - 1);
readKeyWithoutValue(records1stBlock
+ records2ndBlock - 1);
readValueWithoutKey(records1stBlock
+ records2ndBlock - 1);
readKeyManyTimes(records1stBlock + records2ndBlock
- 1);
// mid in mid block
readValueBeforeKey(records1stBlock + 10);
readKeyWithoutValue(records1stBlock + 10);
readValueWithoutKey(records1stBlock + 10);
readKeyManyTimes(records1stBlock + 10);
}
Location locate(Scanner scanner, byte[] key) throws IOException {
    if (scanner.seekTo(key)) {
return scanner.currentLocation;
}
return scanner.endLocation;
}
@Test
public void testLocate() throws IOException {
if (skip)
return;
writeRecords(3 * records1stBlock);
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
locate(scanner, composeSortedKey(KEY, 2).getBytes());
locate(scanner, composeSortedKey(KEY, records1stBlock - 1).getBytes());
locate(scanner, composeSortedKey(KEY, records1stBlock).getBytes());
Location locX = locate(scanner, "keyX".getBytes());
Assert.assertEquals(scanner.endLocation, locX);
scanner.close();
reader.close();
}
@Test
public void testFailureWriterNotClosed() throws IOException {
if (skip)
return;
Reader reader = null;
try {
reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.fail("Cannot read before closing the writer.");
} catch (IOException e) {
// noop, expecting exceptions
} finally {
if (reader != null) {
reader.close();
}
}
}
@Test
public void testFailureWriteMetaBlocksWithSameName() throws IOException {
if (skip)
return;
writer.append("keyX".getBytes(), "valueX".getBytes());
// create a new metablock
DataOutputStream outMeta =
writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
outMeta.write(123);
outMeta.write("foo".getBytes());
outMeta.close();
// add the same metablock
try {
writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
Assert.fail("Cannot create metablocks with the same name.");
} catch (Exception e) {
// noop, expecting exceptions
}
closeOutput();
}
@Test
public void testFailureGetNonExistentMetaBlock() throws IOException {
if (skip)
return;
writer.append("keyX".getBytes(), "valueX".getBytes());
// create a new metablock
DataOutputStream outMeta =
writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
outMeta.write(123);
outMeta.write("foo".getBytes());
outMeta.close();
closeOutput();
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
DataInputStream mb = reader.getMetaBlock("testX");
Assert.assertNotNull(mb);
mb.close();
try {
DataInputStream mbBad = reader.getMetaBlock("testY");
Assert.fail("Error on handling non-existent metablocks.");
} catch (Exception e) {
// noop, expecting exceptions
}
reader.close();
}
@Test
public void testFailureWriteRecordAfterMetaBlock() throws IOException {
if (skip)
return;
// write a key/value first
writer.append("keyX".getBytes(), "valueX".getBytes());
// create a new metablock
DataOutputStream outMeta =
writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
outMeta.write(123);
outMeta.write("dummy".getBytes());
outMeta.close();
// add more key/value
try {
writer.append("keyY".getBytes(), "valueY".getBytes());
Assert.fail("Cannot add key/value after start adding meta blocks.");
} catch (Exception e) {
// noop, expecting exceptions
}
closeOutput();
}
@Test
public void testFailureReadValueManyTimes() throws IOException {
if (skip)
return;
writeRecords(5);
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
byte[] vbuf = new byte[BUF_SIZE];
int vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + 0);
try {
scanner.entry().getValue(vbuf);
Assert.fail("Cannot get the value mlutiple times.");
} catch (Exception e) {
// noop, expecting exceptions
}
scanner.close();
reader.close();
}
@Test
public void testFailureBadCompressionCodec() throws IOException {
if (skip)
return;
closeOutput();
out = fs.create(path);
try {
writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
Assert.fail("Error on handling invalid compression codecs.");
} catch (Exception e) {
// noop, expecting exceptions
// e.printStackTrace();
}
}
@Test
public void testFailureOpenEmptyFile() throws IOException {
if (skip)
return;
closeOutput();
// create an absolutely empty file
path = new Path(fs.getWorkingDirectory(), outputFile);
out = fs.create(path);
out.close();
try {
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.fail("Error on handling empty files.");
} catch (EOFException e) {
// noop, expecting exceptions
}
}
@Test
public void testFailureOpenRandomFile() throws IOException {
if (skip)
return;
closeOutput();
    // create a random file
path = new Path(fs.getWorkingDirectory(), outputFile);
out = fs.create(path);
Random rand = new Random();
byte[] buf = new byte[K];
// fill with > 1MB data
for (int nx = 0; nx < K + 2; nx++) {
rand.nextBytes(buf);
out.write(buf);
}
out.close();
try {
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.fail("Error on handling random files.");
} catch (IOException e) {
// noop, expecting exceptions
}
}
@Test
public void testFailureKeyLongerThan64K() throws IOException {
if (skip)
return;
byte[] buf = new byte[64 * K + 1];
Random rand = new Random();
rand.nextBytes(buf);
try {
writer.append(buf, "valueX".getBytes());
} catch (IndexOutOfBoundsException e) {
// noop, expecting exceptions
}
closeOutput();
}
@Test
public void testFailureOutOfOrderKeys() throws IOException {
if (skip)
return;
try {
writer.append("keyM".getBytes(), "valueM".getBytes());
writer.append("keyA".getBytes(), "valueA".getBytes());
Assert.fail("Error on handling out of order keys.");
} catch (Exception e) {
// noop, expecting exceptions
// e.printStackTrace();
}
closeOutput();
}
@Test
public void testFailureNegativeOffset() throws IOException {
if (skip)
return;
try {
writer.append("keyX".getBytes(), -1, 4, "valueX".getBytes(), 0, 6);
Assert.fail("Error on handling negative offset.");
} catch (Exception e) {
// noop, expecting exceptions
}
closeOutput();
}
@Test
public void testFailureNegativeOffset_2() throws IOException {
if (skip)
return;
closeOutput();
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
try {
scanner.lowerBound("keyX".getBytes(), -1, 4);
Assert.fail("Error on handling negative offset.");
} catch (Exception e) {
// noop, expecting exceptions
} finally {
reader.close();
scanner.close();
}
closeOutput();
}
@Test
public void testFailureNegativeLength() throws IOException {
if (skip)
return;
try {
writer.append("keyX".getBytes(), 0, -1, "valueX".getBytes(), 0, 6);
Assert.fail("Error on handling negative length.");
} catch (Exception e) {
// noop, expecting exceptions
}
closeOutput();
}
@Test
public void testFailureNegativeLength_2() throws IOException {
if (skip)
return;
closeOutput();
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
try {
scanner.lowerBound("keyX".getBytes(), 0, -1);
Assert.fail("Error on handling negative length.");
} catch (Exception e) {
// noop, expecting exceptions
} finally {
scanner.close();
reader.close();
}
closeOutput();
}
@Test
public void testFailureNegativeLength_3() throws IOException {
if (skip)
return;
writeRecords(3);
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
try {
// test negative array offset
try {
scanner.seekTo("keyY".getBytes(), -1, 4);
Assert.fail("Failed to handle negative offset.");
} catch (Exception e) {
// noop, expecting exceptions
}
// test negative array length
try {
scanner.seekTo("keyY".getBytes(), 0, -2);
Assert.fail("Failed to handle negative key length.");
} catch (Exception e) {
// noop, expecting exceptions
}
} finally {
reader.close();
scanner.close();
}
}
@Test
public void testFailureCompressionNotWorking() throws IOException {
if (skip)
return;
long rawDataSize = writeRecords(10 * records1stBlock, false);
if (!compression.equalsIgnoreCase(Compression.Algorithm.NONE.getName())) {
Assert.assertTrue(out.getPos() < rawDataSize);
}
closeOutput();
}
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
if (skip)
return;
closeOutput();
out = fs.create(path);
out.write(123);
try {
writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
Assert.fail("Failed to catch file write not at position 0.");
} catch (Exception e) {
// noop, expecting exceptions
}
closeOutput();
}
private long writeRecords(int count) throws IOException {
return writeRecords(count, true);
}
private long writeRecords(int count, boolean close) throws IOException {
long rawDataSize = writeRecords(writer, count);
if (close) {
closeOutput();
}
return rawDataSize;
}
static long writeRecords(Writer writer, int count) throws IOException {
long rawDataSize = 0;
int nx;
for (nx = 0; nx < count; nx++) {
byte[] key = composeSortedKey(KEY, nx).getBytes();
byte[] value = (VALUE + nx).getBytes();
writer.append(key, value);
rawDataSize +=
WritableUtils.getVIntSize(key.length) + key.length
+ WritableUtils.getVIntSize(value.length) + value.length;
}
return rawDataSize;
}
/**
   * Insert leading 0's in front of the numeric suffix so that the composed
   * keys sort lexicographically in increasing numeric order.
*
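   * For example, composeSortedKey("key", 42) returns "key0000000042".
   *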
   * @param prefix key prefix.
   * @param value numeric suffix.
   * @return the composed key.
*/
static String composeSortedKey(String prefix, int value) {
return String.format("%s%010d", prefix, value);
}
private void readRecords(int count) throws IOException {
readRecords(fs, path, count, conf);
}
static void readRecords(FileSystem fs, Path path, int count,
Configuration conf) throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
try {
for (int nx = 0; nx < count; nx++, scanner.advance()) {
Assert.assertFalse(scanner.atEnd());
// Assert.assertTrue(scanner.next());
byte[] kbuf = new byte[BUF_SIZE];
int klen = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf);
Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY,
nx));
byte[] vbuf = new byte[BUF_SIZE];
int vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + nx);
}
Assert.assertTrue(scanner.atEnd());
Assert.assertFalse(scanner.advance());
} finally {
scanner.close();
reader.close();
}
}
private void checkBlockIndex(int recordIndex, int blockIndexExpected) throws IOException {
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
scanner.seekTo(composeSortedKey(KEY, recordIndex).getBytes());
Assert.assertEquals(blockIndexExpected, scanner.currentLocation
.getBlockIndex());
scanner.close();
reader.close();
}
private void readValueBeforeKey(int recordIndex)
throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner =
reader.createScannerByKey(composeSortedKey(KEY, recordIndex)
.getBytes(), null);
try {
byte[] vbuf = new byte[BUF_SIZE];
int vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + recordIndex);
byte[] kbuf = new byte[BUF_SIZE];
int klen = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf);
Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY,
recordIndex));
} finally {
scanner.close();
reader.close();
}
}
private void readKeyWithoutValue(int recordIndex)
throws IOException {
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner =
reader.createScannerByKey(composeSortedKey(KEY, recordIndex)
.getBytes(), null);
try {
// read the indexed key
byte[] kbuf1 = new byte[BUF_SIZE];
int klen1 = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf1);
Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY,
recordIndex));
if (scanner.advance() && !scanner.atEnd()) {
// read the next key following the indexed
byte[] kbuf2 = new byte[BUF_SIZE];
int klen2 = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf2);
Assert.assertEquals(new String(kbuf2, 0, klen2), composeSortedKey(KEY,
recordIndex + 1));
}
} finally {
scanner.close();
reader.close();
}
}
private void readValueWithoutKey(int recordIndex)
throws IOException {
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner =
reader.createScannerByKey(composeSortedKey(KEY, recordIndex)
.getBytes(), null);
byte[] vbuf1 = new byte[BUF_SIZE];
int vlen1 = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf1);
Assert.assertEquals(new String(vbuf1, 0, vlen1), VALUE + recordIndex);
if (scanner.advance() && !scanner.atEnd()) {
byte[] vbuf2 = new byte[BUF_SIZE];
int vlen2 = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf2);
Assert.assertEquals(new String(vbuf2, 0, vlen2), VALUE
+ (recordIndex + 1));
}
scanner.close();
reader.close();
}
private void readKeyManyTimes(int recordIndex) throws IOException {
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner =
reader.createScannerByKey(composeSortedKey(KEY, recordIndex)
.getBytes(), null);
// read the indexed key
byte[] kbuf1 = new byte[BUF_SIZE];
int klen1 = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf1);
Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY,
recordIndex));
klen1 = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf1);
Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY,
recordIndex));
klen1 = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf1);
Assert.assertEquals(new String(kbuf1, 0, klen1), composeSortedKey(KEY,
recordIndex));
scanner.close();
reader.close();
}
private void closeOutput() throws IOException {
if (writer != null) {
writer.close();
writer = null;
}
if (out != null) {
out.close();
out = null;
}
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileComparator2.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestTFileComparator2 {
private static final String ROOT = System.getProperty("test.build.data",
"/tmp/tfile-test");
private static final String name = "test-tfile-comparator2";
private final static int BLOCK_SIZE = 512;
private static final String VALUE = "value";
private static final String jClassLongWritableComparator = "jclass:"
+ LongWritable.Comparator.class.getName();
private static final long NENTRY = 10000;
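  // cube(i) is strictly increasing, so keys generated below as
  // cube(i - NENTRY/2) are appended to the TFile in sorted order.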
private static long cube(long n) {
return n*n*n;
}
private static String buildValue(long i) {
return String.format("%s-%d", VALUE, i);
}
@Test
public void testSortedLongWritable() throws IOException {
Configuration conf = new Configuration();
Path path = new Path(ROOT, name);
FileSystem fs = path.getFileSystem(conf);
FSDataOutputStream out = fs.create(path);
try {
TFile.Writer writer = new Writer(out, BLOCK_SIZE, "gz",
jClassLongWritableComparator, conf);
try {
LongWritable key = new LongWritable(0);
for (long i=0; i<NENTRY; ++i) {
key.set(cube(i-NENTRY/2));
DataOutputStream dos = writer.prepareAppendKey(-1);
try {
key.write(dos);
} finally {
dos.close();
}
dos = writer.prepareAppendValue(-1);
try {
dos.write(buildValue(i).getBytes());
} finally {
dos.close();
}
}
} finally {
writer.close();
}
} finally {
out.close();
}
FSDataInputStream in = fs.open(path);
try {
TFile.Reader reader = new TFile.Reader(in, fs.getFileStatus(path)
.getLen(), conf);
try {
TFile.Reader.Scanner scanner = reader.createScanner();
long i=0;
BytesWritable value = new BytesWritable();
for (; !scanner.atEnd(); scanner.advance()) {
scanner.entry().getValue(value);
assertEquals(buildValue(i), new String(value.getBytes(), 0, value
.getLength()));
++i;
}
} finally {
reader.close();
}
} finally {
in.close();
}
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/Timer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import org.apache.hadoop.util.Time;
/**
 * A timer class that measures the wall-clock time taken by some event and
 * formats timestamps for logging.
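 *
 * A minimal usage sketch:
 * <pre>
 *   Timer t = new Timer();
 *   t.startTime();
 *   // ... timed work ...
 *   t.stopTime();
 *   t.printlnWithTimestamp("took " + t.getIntervalMillis() + "ms");
 * </pre>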
*/
public class Timer {
long startTimeEpoch;
long finishTimeEpoch;
private DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
public void startTime() throws IOException {
startTimeEpoch = Time.now();
}
public void stopTime() throws IOException {
finishTimeEpoch = Time.now();
}
public long getIntervalMillis() throws IOException {
return finishTimeEpoch - startTimeEpoch;
}
public void printlnWithTimestamp(String message) throws IOException {
System.out.println(formatCurrentTime() + " " + message);
}
public String formatTime(long millis) {
return formatter.format(millis);
}
public String getIntervalString() throws IOException {
long time = getIntervalMillis();
return formatTime(time);
}
public String formatCurrentTime() {
return formatTime(Time.now());
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsByteArrays.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
public class TestTFileNoneCodecsByteArrays extends TestTFileByteArrays {
/**
* Test non-compression codec, using the same test cases as in the ByteArrays.
*/
@Override
public void setUp() throws IOException {
init(Compression.Algorithm.NONE.getName(), "memcmp", 24, 24);
super.setUp();
}
}
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/NanoTimer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
/**
* A nano-second timer.
*/
public class NanoTimer {
private long last = -1;
private boolean started = false;
private long cumulate = 0;
/**
* Constructor
*
* @param start
* Start the timer upon construction.
*/
public NanoTimer(boolean start) {
if (start) this.start();
}
/**
* Start the timer.
*
* Note: No effect if timer is already started.
*/
public void start() {
if (!this.started) {
this.last = System.nanoTime();
this.started = true;
}
}
/**
* Stop the timer.
*
* Note: No effect if timer is already stopped.
*/
public void stop() {
if (this.started) {
this.started = false;
this.cumulate += System.nanoTime() - this.last;
}
}
/**
* Read the timer.
*
   * @return the elapsed time in nano-seconds. Note: if the timer has
   *         never been started, -1 is returned.
*/
public long read() {
if (!readable()) return -1;
return this.cumulate;
}
/**
* Reset the timer.
*/
public void reset() {
this.last = -1;
this.started = false;
this.cumulate = 0;
}
/**
   * Check whether the timer is started.
*
* @return true if timer is started.
*/
public boolean isStarted() {
return this.started;
}
/**
* Format the elapsed time to a human understandable string.
*
* Note: If timer is never started, "ERR" will be returned.
*/
@Override
public String toString() {
if (!readable()) {
return "ERR";
}
return NanoTimer.nanoTimeToString(this.cumulate);
}
/**
   * A utility method to format a time duration in nano-seconds into a
   * human-understandable string.
*
* @param t
* Time duration in nano seconds.
* @return String representation.
*/
public static String nanoTimeToString(long t) {
if (t < 0) return "ERR";
if (t == 0) return "0";
if (t < 1000) {
return t + "ns";
}
double us = (double) t / 1000;
if (us < 1000) {
return String.format("%.2fus", us);
}
double ms = us / 1000;
if (ms < 1000) {
return String.format("%.2fms", ms);
}
double ss = ms / 1000;
if (ss < 1000) {
return String.format("%.2fs", ss);
}
long mm = (long) ss / 60;
ss -= mm * 60;
long hh = mm / 60;
mm -= hh * 60;
long dd = hh / 24;
hh -= dd * 24;
if (dd > 0) {
return String.format("%dd%dh", dd, hh);
}
if (hh > 0) {
return String.format("%dh%dm", hh, mm);
}
if (mm > 0) {
return String.format("%dm%.1fs", mm, ss);
}
return String.format("%.2fs", ss);
}
private boolean readable() {
return this.last != -1;
}
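  /**
   * Illustrative usage sketch (not part of the original class): measure a
   * block of work and print the human-readable elapsed time. The loop is a
   * stand-in for arbitrary work.
   */
  static void demoUsage() {
    NanoTimer timer = new NanoTimer(true); // starts upon construction
    long sum = 0;
    for (long i = 0; i < 1000000; ++i) {
      sum += i;
    }
    timer.stop();
    // toString() renders the cumulative elapsed time, e.g. "1.23ms"
    System.out.println("sum=" + sum + " computed in " + timer);
  }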
/**
* Simple tester.
*
* @param args
*/
public static void main(String[] args) {
long i = 7;
for (int x = 0; x < 20; ++x, i *= 7) {
System.out.println(NanoTimer.nanoTimeToString(i));
}
}
}
| 4,478 | 21.969231 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile.Reader;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
/**
* test tfile features.
*
*/
public class TestTFile extends TestCase {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private FileSystem fs;
private Configuration conf;
private static final int minBlockSize = 512;
private static final int largeVal = 3 * 1024 * 1024;
private static final String localFormatter = "%010d";
@Override
public void setUp() throws IOException {
conf = new Configuration();
fs = FileSystem.get(conf);
}
@Override
public void tearDown() throws IOException {
// do nothing
}
// read a key from the scanner
public byte[] readKey(Scanner scanner) throws IOException {
int keylen = scanner.entry().getKeyLength();
byte[] read = new byte[keylen];
scanner.entry().getKey(read);
return read;
}
// read a value from the scanner
public byte[] readValue(Scanner scanner) throws IOException {
int valueLen = scanner.entry().getValueLength();
byte[] read = new byte[valueLen];
scanner.entry().getValue(read);
return read;
}
// read a long value from the scanner
public byte[] readLongValue(Scanner scanner, int len) throws IOException {
DataInputStream din = scanner.entry().getValueStream();
byte[] b = new byte[len];
din.readFully(b);
din.close();
return b;
}
// write some records into the tfile
// write them twice
private int writeSomeRecords(Writer writer, int start, int n)
throws IOException {
String value = "value";
for (int i = start; i < (start + n); i++) {
String key = String.format(localFormatter, i);
writer.append(key.getBytes(), (value + key).getBytes());
writer.append(key.getBytes(), (value + key).getBytes());
}
return (start + n);
}
// read the records and check
private int readAndCheckbytes(Scanner scanner, int start, int n)
throws IOException {
String value = "value";
for (int i = start; i < (start + n); i++) {
byte[] key = readKey(scanner);
byte[] val = readValue(scanner);
String keyStr = String.format(localFormatter, i);
String valStr = value + keyStr;
assertTrue("btyes for keys do not match " + keyStr + " "
+ new String(key), Arrays.equals(keyStr.getBytes(), key));
assertTrue("bytes for vals do not match " + valStr + " "
+ new String(val), Arrays.equals(
valStr.getBytes(), val));
assertTrue(scanner.advance());
key = readKey(scanner);
val = readValue(scanner);
assertTrue("btyes for keys do not match", Arrays.equals(
keyStr.getBytes(), key));
assertTrue("bytes for vals do not match", Arrays.equals(
valStr.getBytes(), val));
assertTrue(scanner.advance());
}
return (start + n);
}
// write some large records
// write them twice
private int writeLargeRecords(Writer writer, int start, int n)
throws IOException {
byte[] value = new byte[largeVal];
for (int i = start; i < (start + n); i++) {
String key = String.format(localFormatter, i);
writer.append(key.getBytes(), value);
writer.append(key.getBytes(), value);
}
return (start + n);
}
  // read large records
  // read them twice since each is duplicated
private int readLargeRecords(Scanner scanner, int start, int n)
throws IOException {
for (int i = start; i < (start + n); i++) {
byte[] key = readKey(scanner);
String keyStr = String.format(localFormatter, i);
assertTrue("btyes for keys do not match", Arrays.equals(
keyStr.getBytes(), key));
scanner.advance();
key = readKey(scanner);
assertTrue("btyes for keys do not match", Arrays.equals(
keyStr.getBytes(), key));
scanner.advance();
}
return (start + n);
}
// write empty keys and values
private void writeEmptyRecords(Writer writer, int n) throws IOException {
byte[] key = new byte[0];
byte[] value = new byte[0];
for (int i = 0; i < n; i++) {
writer.append(key, value);
}
}
// read empty keys and values
private void readEmptyRecords(Scanner scanner, int n) throws IOException {
byte[] key = new byte[0];
byte[] value = new byte[0];
byte[] readKey = null;
byte[] readValue = null;
for (int i = 0; i < n; i++) {
readKey = readKey(scanner);
readValue = readValue(scanner);
assertTrue("failed to match keys", Arrays.equals(readKey, key));
assertTrue("failed to match values", Arrays.equals(readValue, value));
assertTrue("failed to advance cursor", scanner.advance());
}
}
private int writePrepWithKnownLength(Writer writer, int start, int n)
throws IOException {
// get the length of the key
String key = String.format(localFormatter, start);
int keyLen = key.getBytes().length;
String value = "value" + key;
int valueLen = value.getBytes().length;
for (int i = start; i < (start + n); i++) {
DataOutputStream out = writer.prepareAppendKey(keyLen);
String localKey = String.format(localFormatter, i);
out.write(localKey.getBytes());
out.close();
out = writer.prepareAppendValue(valueLen);
String localValue = "value" + localKey;
out.write(localValue.getBytes());
out.close();
}
return (start + n);
}
private int readPrepWithKnownLength(Scanner scanner, int start, int n)
throws IOException {
for (int i = start; i < (start + n); i++) {
String key = String.format(localFormatter, i);
byte[] read = readKey(scanner);
assertTrue("keys not equal", Arrays.equals(key.getBytes(), read));
String value = "value" + key;
read = readValue(scanner);
assertTrue("values not equal", Arrays.equals(value.getBytes(), read));
scanner.advance();
}
return (start + n);
}
  private int writePrepWithUnknownLength(Writer writer, int start, int n)
throws IOException {
for (int i = start; i < (start + n); i++) {
DataOutputStream out = writer.prepareAppendKey(-1);
String localKey = String.format(localFormatter, i);
out.write(localKey.getBytes());
out.close();
String value = "value" + localKey;
out = writer.prepareAppendValue(-1);
out.write(value.getBytes());
out.close();
}
return (start + n);
}
private int readPrepWithUnknownLength(Scanner scanner, int start, int n)
throws IOException {
    for (int i = start; i < (start + n); i++) {
String key = String.format(localFormatter, i);
byte[] read = readKey(scanner);
assertTrue("keys not equal", Arrays.equals(key.getBytes(), read));
try {
read = readValue(scanner);
        fail("read of a value with unknown length should have thrown");
}
catch (IOException ie) {
// should have thrown exception
}
String value = "value" + key;
read = readLongValue(scanner, value.getBytes().length);
assertTrue("values nto equal", Arrays.equals(read, value.getBytes()));
scanner.advance();
}
return (start + n);
}
private byte[] getSomeKey(int rowId) {
return String.format(localFormatter, rowId).getBytes();
}
private void writeRecords(Writer writer) throws IOException {
writeEmptyRecords(writer, 10);
int ret = writeSomeRecords(writer, 0, 100);
ret = writeLargeRecords(writer, ret, 1);
ret = writePrepWithKnownLength(writer, ret, 40);
    ret = writePrepWithUnknownLength(writer, ret, 50);
writer.close();
}
private void readAllRecords(Scanner scanner) throws IOException {
readEmptyRecords(scanner, 10);
int ret = readAndCheckbytes(scanner, 0, 100);
ret = readLargeRecords(scanner, ret, 1);
ret = readPrepWithKnownLength(scanner, ret, 40);
ret = readPrepWithUnknownLength(scanner, ret, 50);
}
private FSDataOutputStream createFSOutput(Path name) throws IOException {
if (fs.exists(name)) fs.delete(name, true);
FSDataOutputStream fout = fs.create(name);
return fout;
}
  /**
   * Test basic TFile features with the given codec.
   */
void basicWithSomeCodec(String codec) throws IOException {
Path ncTFile = new Path(ROOT, "basic.tfile");
FSDataOutputStream fout = createFSOutput(ncTFile);
Writer writer = new Writer(fout, minBlockSize, codec, "memcmp", conf);
writeRecords(writer);
fout.close();
    FSDataInputStream fin = fs.open(ncTFile);
    Reader reader =
        new Reader(fin, fs.getFileStatus(ncTFile).getLen(), conf);
Scanner scanner = reader.createScanner();
readAllRecords(scanner);
    assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)));
// read the key and see if it matches
byte[] readKey = readKey(scanner);
assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
readKey));
scanner.seekTo(new byte[0]);
byte[] val1 = readValue(scanner);
scanner.seekTo(new byte[0]);
byte[] val2 = readValue(scanner);
assertTrue(Arrays.equals(val1, val2));
// check for lowerBound
scanner.lowerBound(getSomeKey(50));
assertTrue("locaton lookup failed", scanner.currentLocation
.compareTo(reader.end()) < 0);
readKey = readKey(scanner);
assertTrue("seeked key does not match", Arrays.equals(readKey,
getSomeKey(50)));
// check for upper bound
scanner.upperBound(getSomeKey(50));
assertTrue("location lookup failed", scanner.currentLocation
.compareTo(reader.end()) < 0);
readKey = readKey(scanner);
assertTrue("seeked key does not match", Arrays.equals(readKey,
getSomeKey(51)));
scanner.close();
// test for a range of scanner
scanner = reader.createScannerByKey(getSomeKey(10), getSomeKey(60));
readAndCheckbytes(scanner, 10, 50);
assertFalse(scanner.advance());
scanner.close();
reader.close();
fin.close();
fs.delete(ncTFile, true);
}
// unsorted with some codec
void unsortedWithSomeCodec(String codec) throws IOException {
Path uTfile = new Path(ROOT, "unsorted.tfile");
FSDataOutputStream fout = createFSOutput(uTfile);
Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
writeRecords(writer);
writer.close();
fout.close();
    FSDataInputStream fin = fs.open(uTfile);
    Reader reader =
        new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);
Scanner scanner = reader.createScanner();
readAllRecords(scanner);
scanner.close();
reader.close();
fin.close();
fs.delete(uTfile, true);
}
public void testTFileFeatures() throws IOException {
basicWithSomeCodec("none");
basicWithSomeCodec("gz");
}
  // test unsorted TFiles.
public void testUnsortedTFileFeatures() throws IOException {
unsortedWithSomeCodec("none");
unsortedWithSomeCodec("gz");
}
private void writeNumMetablocks(Writer writer, String compression, int n)
throws IOException {
for (int i = 0; i < n; i++) {
DataOutputStream dout =
writer.prepareMetaBlock("TfileMeta" + i, compression);
byte[] b = ("something to test" + i).getBytes();
dout.write(b);
dout.close();
}
}
private void someTestingWithMetaBlock(Writer writer, String compression)
throws IOException {
DataOutputStream dout = null;
writeNumMetablocks(writer, compression, 10);
try {
dout = writer.prepareMetaBlock("TfileMeta1", compression);
      fail("prepareMetaBlock should have thrown MetaBlockAlreadyExists");
}
catch (MetaBlockAlreadyExists me) {
      // expected: the meta block already exists
}
dout = writer.prepareMetaBlock("TFileMeta100", compression);
dout.close();
}
private void readNumMetablocks(Reader reader, int n) throws IOException {
int len = ("something to test" + 0).getBytes().length;
for (int i = 0; i < n; i++) {
DataInputStream din = reader.getMetaBlock("TfileMeta" + i);
      byte[] b = new byte[len];
      din.readFully(b);
      assertTrue("failed to match metadata", Arrays.equals(
          ("something to test" + i).getBytes(), b));
din.close();
}
}
private void someReadingWithMetaBlock(Reader reader) throws IOException {
DataInputStream din = null;
readNumMetablocks(reader, 10);
try {
din = reader.getMetaBlock("NO ONE");
      fail("getMetaBlock of a non-existent block should have thrown");
}
catch (MetaBlockDoesNotExist me) {
      // expected: no such meta block
}
din = reader.getMetaBlock("TFileMeta100");
int read = din.read();
assertTrue("check for status", (read == -1));
din.close();
}
// test meta blocks for tfiles
public void testMetaBlocks() throws IOException {
Path mFile = new Path(ROOT, "meta.tfile");
FSDataOutputStream fout = createFSOutput(mFile);
Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
someTestingWithMetaBlock(writer, "none");
writer.close();
fout.close();
FSDataInputStream fin = fs.open(mFile);
Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
someReadingWithMetaBlock(reader);
fs.delete(mFile, true);
reader.close();
fin.close();
}
}
| 14,451 | 32.453704 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
public interface UnreliableInterface {
public static class UnreliableException extends Exception {
private static final long serialVersionUID = 1L;
private String identifier;
public UnreliableException() {
// no body
}
public UnreliableException(String identifier) {
this.identifier = identifier;
}
@Override
public String getMessage() {
return identifier;
}
}
public static class FatalException extends UnreliableException {
private static final long serialVersionUID = 1L;
// no body
}
void alwaysSucceeds() throws UnreliableException;
void alwaysFailsWithFatalException() throws FatalException;
void alwaysFailsWithRemoteFatalException() throws RemoteException;
void failsOnceThenSucceeds() throws UnreliableException;
boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException;
void failsTenTimesThenSucceeds() throws UnreliableException;
public String succeedsOnceThenFailsReturningString()
throws UnreliableException, StandbyException, IOException;
@Idempotent
public String succeedsOnceThenFailsReturningStringIdempotent()
throws UnreliableException, StandbyException, IOException;
public String succeedsTenTimesThenFailsReturningString()
throws UnreliableException, StandbyException, IOException;
@Idempotent
public String failsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException;
void nonIdempotentVoidFailsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException;
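  // Illustrative note (not part of the original interface): these methods
  // are typically exercised through a retrying dynamic proxy, e.g.
  //
  //   UnreliableInterface u = (UnreliableInterface) RetryProxy.create(
  //       UnreliableInterface.class, new UnreliableImplementation(),
  //       RetryPolicies.TRY_ONCE_THEN_FAIL);
  //   u.failsOnceThenSucceeds(); // retried or not, per the policy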
}
| 2,615 | 32.974026 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import static org.apache.hadoop.io.retry.RetryPolicies.RETRY_FOREVER;
import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL;
import static org.apache.hadoop.io.retry.RetryPolicies.retryByException;
import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;
import static org.junit.Assert.*;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.io.retry.UnreliableInterface.FatalException;
import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Before;
import org.junit.Test;
import java.lang.reflect.UndeclaredThrowableException;
public class TestRetryProxy {
private UnreliableImplementation unreliableImpl;
@Before
public void setUp() throws Exception {
unreliableImpl = new UnreliableImplementation();
}
@Test
public void testTryOnceThenFail() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl, TRY_ONCE_THEN_FAIL);
unreliable.alwaysSucceeds();
try {
unreliable.failsOnceThenSucceeds();
fail("Should fail");
} catch (UnreliableException e) {
// expected
}
}
/**
* Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}
*/
@Test
public void testRpcInvocation() throws Exception {
// For a proxy method should return true
final UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
// Embed the proxy in ProtocolTranslator
ProtocolTranslator xlator = new ProtocolTranslator() {
int count = 0;
@Override
public Object getUnderlyingProxyObject() {
count++;
return unreliable;
}
@Override
public String toString() {
return "" + count;
}
};
// For a proxy wrapped in ProtocolTranslator method should return true
assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
// Ensure underlying proxy was looked at
assertEquals(xlator.toString(), "1");
// For non-proxy the method must return false
assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
}
@Test
public void testRetryForever() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
unreliable.alwaysSucceeds();
unreliable.failsOnceThenSucceeds();
unreliable.failsTenTimesThenSucceeds();
}
@Test
public void testRetryUpToMaximumCountWithFixedSleep() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
unreliable.alwaysSucceeds();
unreliable.failsOnceThenSucceeds();
try {
unreliable.failsTenTimesThenSucceeds();
fail("Should fail");
} catch (UnreliableException e) {
// expected
}
}
@Test
public void testRetryUpToMaximumTimeWithFixedSleep() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
unreliable.alwaysSucceeds();
unreliable.failsOnceThenSucceeds();
try {
unreliable.failsTenTimesThenSucceeds();
fail("Should fail");
} catch (UnreliableException e) {
// expected
}
}
@Test
public void testRetryUpToMaximumCountWithProportionalSleep() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
unreliable.alwaysSucceeds();
unreliable.failsOnceThenSucceeds();
try {
unreliable.failsTenTimesThenSucceeds();
fail("Should fail");
} catch (UnreliableException e) {
// expected
}
}
@Test
public void testExponentialRetry() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
exponentialBackoffRetry(5, 1L, TimeUnit.NANOSECONDS));
unreliable.alwaysSucceeds();
unreliable.failsOnceThenSucceeds();
try {
unreliable.failsTenTimesThenSucceeds();
fail("Should fail");
} catch (UnreliableException e) {
// expected
}
}
@Test
public void testRetryByException() throws UnreliableException {
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryByException(RETRY_FOREVER, exceptionToPolicyMap));
unreliable.failsOnceThenSucceeds();
try {
unreliable.alwaysFailsWithFatalException();
fail("Should fail");
} catch (FatalException e) {
// expected
}
}
@Test
public void testRetryByRemoteException() {
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryByRemoteException(RETRY_FOREVER, exceptionToPolicyMap));
try {
unreliable.alwaysFailsWithRemoteFatalException();
fail("Should fail");
} catch (RemoteException e) {
// expected
}
}
@Test
public void testRetryInterruptible() throws Throwable {
final UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryUpToMaximumTimeWithFixedSleep(10, 10, TimeUnit.SECONDS));
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Thread> futureThread = new AtomicReference<Thread>();
ExecutorService exec = Executors.newSingleThreadExecutor();
Future<Throwable> future = exec.submit(new Callable<Throwable>(){
@Override
public Throwable call() throws Exception {
futureThread.set(Thread.currentThread());
latch.countDown();
try {
unreliable.alwaysFailsWithFatalException();
} catch (UndeclaredThrowableException ute) {
return ute.getCause();
}
return null;
}
});
latch.await();
Thread.sleep(1000); // time to fail and sleep
assertTrue(futureThread.get().isAlive());
futureThread.get().interrupt();
Throwable e = future.get(1, TimeUnit.SECONDS); // should return immediately
assertNotNull(e);
assertEquals(InterruptedException.class, e.getClass());
assertEquals("sleep interrupted", e.getMessage());
}
}
| 8,935 | 36.389121 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestDefaultRetryPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.io.IOException;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* Test the behavior of the default retry policy.
*/
public class TestDefaultRetryPolicy {
@Rule
public Timeout timeout = new Timeout(300000);
/**
* Verify that the default retry policy correctly retries
* RetriableException when defaultRetryPolicyEnabled is enabled.
*
* @throws IOException
*/
@Test
public void testWithRetriable() throws Exception {
Configuration conf = new Configuration();
RetryPolicy policy = RetryUtils.getDefaultRetryPolicy(
conf, "Test.No.Such.Key",
true, // defaultRetryPolicyEnabled = true
"Test.No.Such.Key", "10000,6",
null);
RetryPolicy.RetryAction action = policy.shouldRetry(
new RetriableException("Dummy exception"), 0, 0, true);
assertThat(action.action,
is(RetryPolicy.RetryAction.RetryDecision.RETRY));
}
/**
* Verify that the default retry policy correctly retries
* a RetriableException wrapped in a RemoteException when
* defaultRetryPolicyEnabled is enabled.
*
* @throws IOException
*/
@Test
public void testWithWrappedRetriable() throws Exception {
Configuration conf = new Configuration();
RetryPolicy policy = RetryUtils.getDefaultRetryPolicy(
conf, "Test.No.Such.Key",
true, // defaultRetryPolicyEnabled = true
"Test.No.Such.Key", "10000,6",
null);
RetryPolicy.RetryAction action = policy.shouldRetry(
new RemoteException(RetriableException.class.getName(),
"Dummy exception"), 0, 0, true);
assertThat(action.action,
is(RetryPolicy.RetryAction.RetryDecision.RETRY));
}
/**
* Verify that the default retry policy does *not* retry
* RetriableException when defaultRetryPolicyEnabled is disabled.
*
* @throws IOException
*/
@Test
public void testWithRetriableAndRetryDisabled() throws Exception {
Configuration conf = new Configuration();
RetryPolicy policy = RetryUtils.getDefaultRetryPolicy(
conf, "Test.No.Such.Key",
false, // defaultRetryPolicyEnabled = false
"Test.No.Such.Key", "10000,6",
null);
RetryPolicy.RetryAction action = policy.shouldRetry(
new RetriableException("Dummy exception"), 0, 0, true);
assertThat(action.action,
is(RetryPolicy.RetryAction.RetryDecision.FAIL));
}
}
| 3,562 | 33.931373 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
class UnreliableImplementation implements UnreliableInterface {
private int failsOnceInvocationCount,
failsOnceWithValueInvocationCount,
failsTenTimesInvocationCount,
succeedsOnceThenFailsCount,
succeedsOnceThenFailsIdempotentCount,
succeedsTenTimesThenFailsCount;
private String identifier;
private TypeOfExceptionToFailWith exceptionToFailWith;
public static enum TypeOfExceptionToFailWith {
UNRELIABLE_EXCEPTION,
STANDBY_EXCEPTION,
IO_EXCEPTION,
REMOTE_EXCEPTION
}
public UnreliableImplementation() {
this(null);
}
public UnreliableImplementation(String identifier) {
this(identifier, TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION);
}
public void setIdentifier(String identifier) {
this.identifier = identifier;
}
public UnreliableImplementation(String identifier,
TypeOfExceptionToFailWith exceptionToFailWith) {
this.identifier = identifier;
this.exceptionToFailWith = exceptionToFailWith;
}
@Override
public void alwaysSucceeds() {
// do nothing
}
@Override
public void alwaysFailsWithFatalException() throws FatalException {
throw new FatalException();
}
@Override
public void alwaysFailsWithRemoteFatalException() throws RemoteException {
throw new RemoteException(FatalException.class.getName(), "Oops");
}
@Override
public void failsOnceThenSucceeds() throws UnreliableException {
if (failsOnceInvocationCount++ == 0) {
throw new UnreliableException();
}
}
@Override
public boolean failsOnceThenSucceedsWithReturnValue() throws UnreliableException {
if (failsOnceWithValueInvocationCount++ == 0) {
throw new UnreliableException();
}
return true;
}
@Override
public void failsTenTimesThenSucceeds() throws UnreliableException {
if (failsTenTimesInvocationCount++ < 10) {
throw new UnreliableException();
}
}
@Override
public String succeedsOnceThenFailsReturningString()
throws UnreliableException, IOException, StandbyException {
if (succeedsOnceThenFailsCount++ < 1) {
return identifier;
} else {
throwAppropriateException(exceptionToFailWith, identifier);
return null;
}
}
@Override
public String succeedsTenTimesThenFailsReturningString()
throws UnreliableException, IOException, StandbyException {
if (succeedsTenTimesThenFailsCount++ < 10) {
return identifier;
} else {
throwAppropriateException(exceptionToFailWith, identifier);
return null;
}
}
@Override
public String succeedsOnceThenFailsReturningStringIdempotent()
throws UnreliableException, StandbyException, IOException {
if (succeedsOnceThenFailsIdempotentCount++ < 1) {
return identifier;
} else {
throwAppropriateException(exceptionToFailWith, identifier);
return null;
}
}
@Override
public String failsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException {
if (this.identifier.equals(identifier)) {
return identifier;
} else {
String message = "expected '" + this.identifier + "' but received '" +
identifier + "'";
throwAppropriateException(exceptionToFailWith, message);
return null;
}
}
@Override
public void nonIdempotentVoidFailsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException {
if (this.identifier.equals(identifier)) {
return;
} else {
String message = "expected '" + this.identifier + "' but received '" +
identifier + "'";
throwAppropriateException(exceptionToFailWith, message);
}
}
@Override
public String toString() {
return getClass().getSimpleName() + "[" + identifier + "]";
}
private static void throwAppropriateException(TypeOfExceptionToFailWith eType,
String message) throws UnreliableException, StandbyException, IOException {
switch (eType) {
case STANDBY_EXCEPTION:
throw new StandbyException(message);
case UNRELIABLE_EXCEPTION:
throw new UnreliableException(message);
case IO_EXCEPTION:
throw new IOException(message);
case REMOTE_EXCEPTION:
throw new RemoteException(IOException.class.getName(), message);
default:
throw new RuntimeException(message);
}
}
}
| 5,365 | 29.146067 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.io.retry.UnreliableImplementation.TypeOfExceptionToFailWith;
import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.util.ThreadUtil;
import org.junit.Test;
public class TestFailoverProxy {
public static class FlipFlopProxyProvider<T> implements FailoverProxyProvider<T> {
private Class<T> iface;
private T currentlyActive;
private T impl1;
private T impl2;
private int failoversOccurred = 0;
public FlipFlopProxyProvider(Class<T> iface, T activeImpl,
T standbyImpl) {
this.iface = iface;
this.impl1 = activeImpl;
this.impl2 = standbyImpl;
currentlyActive = impl1;
}
@Override
public ProxyInfo<T> getProxy() {
return new ProxyInfo<T>(currentlyActive, currentlyActive.toString());
}
@Override
public synchronized void performFailover(Object currentProxy) {
currentlyActive = impl1 == currentProxy ? impl2 : impl1;
failoversOccurred++;
}
@Override
public Class<T> getInterface() {
return iface;
}
@Override
public void close() throws IOException {
// Nothing to do.
}
public int getFailoversOccurred() {
return failoversOccurred;
}
}
public static class FailOverOnceOnAnyExceptionPolicy implements RetryPolicy {
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) {
return failovers < 1 ? RetryAction.FAILOVER_AND_RETRY : RetryAction.FAIL;
}
}
private static FlipFlopProxyProvider<UnreliableInterface>
newFlipFlopProxyProvider() {
return new FlipFlopProxyProvider<UnreliableInterface>(
UnreliableInterface.class,
new UnreliableImplementation("impl1"),
new UnreliableImplementation("impl2"));
}
private static FlipFlopProxyProvider<UnreliableInterface>
newFlipFlopProxyProvider(TypeOfExceptionToFailWith t1,
TypeOfExceptionToFailWith t2) {
return new FlipFlopProxyProvider<UnreliableInterface>(
UnreliableInterface.class,
new UnreliableImplementation("impl1", t1),
new UnreliableImplementation("impl2", t2));
}
@Test
  public void testSucceedsOnceThenFailOver() throws UnreliableException,
IOException, StandbyException {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy.create(
UnreliableInterface.class, newFlipFlopProxyProvider(),
new FailOverOnceOnAnyExceptionPolicy());
assertEquals("impl1", unreliable.succeedsOnceThenFailsReturningString());
assertEquals("impl2", unreliable.succeedsOnceThenFailsReturningString());
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded more than twice");
} catch (UnreliableException e) {
// expected
}
}
@Test
public void testSucceedsTenTimesThenFailOver() throws UnreliableException,
IOException, StandbyException {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy.create(
UnreliableInterface.class,
newFlipFlopProxyProvider(),
new FailOverOnceOnAnyExceptionPolicy());
for (int i = 0; i < 10; i++) {
assertEquals("impl1", unreliable.succeedsTenTimesThenFailsReturningString());
}
assertEquals("impl2", unreliable.succeedsTenTimesThenFailsReturningString());
}
@Test
public void testNeverFailOver() throws UnreliableException,
IOException, StandbyException {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy.create(
UnreliableInterface.class,
newFlipFlopProxyProvider(),
RetryPolicies.TRY_ONCE_THEN_FAIL);
unreliable.succeedsOnceThenFailsReturningString();
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded twice");
} catch (UnreliableException e) {
assertEquals("impl1", e.getMessage());
}
}
@Test
public void testFailoverOnStandbyException()
throws UnreliableException, IOException, StandbyException {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy.create(
UnreliableInterface.class,
newFlipFlopProxyProvider(),
RetryPolicies.failoverOnNetworkException(1));
assertEquals("impl1", unreliable.succeedsOnceThenFailsReturningString());
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded twice");
} catch (UnreliableException e) {
// Make sure there was no failover on normal exception.
assertEquals("impl1", e.getMessage());
}
unreliable = (UnreliableInterface)RetryProxy
.create(UnreliableInterface.class,
newFlipFlopProxyProvider(
TypeOfExceptionToFailWith.STANDBY_EXCEPTION,
TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
RetryPolicies.failoverOnNetworkException(1));
assertEquals("impl1", unreliable.succeedsOnceThenFailsReturningString());
// Make sure we fail over since the first implementation threw a StandbyException
assertEquals("impl2", unreliable.succeedsOnceThenFailsReturningString());
}
@Test
public void testFailoverOnNetworkExceptionIdempotentOperation()
throws UnreliableException, IOException, StandbyException {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy.create(
UnreliableInterface.class,
newFlipFlopProxyProvider(
TypeOfExceptionToFailWith.IO_EXCEPTION,
TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
RetryPolicies.failoverOnNetworkException(1));
assertEquals("impl1", unreliable.succeedsOnceThenFailsReturningString());
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded twice");
} catch (IOException e) {
// Make sure we *don't* fail over since the first implementation threw an
// IOException and this method is not idempotent
assertEquals("impl1", e.getMessage());
}
assertEquals("impl1", unreliable.succeedsOnceThenFailsReturningStringIdempotent());
// Make sure we fail over since the first implementation threw an
// IOException and this method is idempotent.
assertEquals("impl2", unreliable.succeedsOnceThenFailsReturningStringIdempotent());
}
/**
* Test that if a non-idempotent void function is called, and there is an exception,
* the exception is properly propagated
*/
@Test
public void testExceptionPropagatedForNonIdempotentVoid() throws Exception {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy
.create(UnreliableInterface.class,
newFlipFlopProxyProvider(
TypeOfExceptionToFailWith.IO_EXCEPTION,
TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
RetryPolicies.failoverOnNetworkException(1));
try {
unreliable.nonIdempotentVoidFailsIfIdentifierDoesntMatch("impl2");
fail("did not throw an exception");
    } catch (Exception e) {
      // expected
    }
}
private static class SynchronizedUnreliableImplementation extends UnreliableImplementation {
private CountDownLatch methodLatch;
public SynchronizedUnreliableImplementation(String identifier,
TypeOfExceptionToFailWith exceptionToFailWith, int threadCount) {
super(identifier, exceptionToFailWith);
methodLatch = new CountDownLatch(threadCount);
}
@Override
public String failsIfIdentifierDoesntMatch(String identifier)
throws UnreliableException, StandbyException, IOException {
// Wait until all threads are trying to invoke this method
methodLatch.countDown();
try {
methodLatch.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return super.failsIfIdentifierDoesntMatch(identifier);
}
}
private static class ConcurrentMethodThread extends Thread {
private UnreliableInterface unreliable;
public String result;
public ConcurrentMethodThread(UnreliableInterface unreliable) {
this.unreliable = unreliable;
}
@Override
public void run() {
try {
result = unreliable.failsIfIdentifierDoesntMatch("impl2");
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
/**
* Test that concurrent failed method invocations only result in a single
* failover.
*/
@Test
public void testConcurrentMethodFailures() throws InterruptedException {
FlipFlopProxyProvider<UnreliableInterface> proxyProvider
= new FlipFlopProxyProvider<UnreliableInterface>(
UnreliableInterface.class,
new SynchronizedUnreliableImplementation("impl1",
TypeOfExceptionToFailWith.STANDBY_EXCEPTION,
2),
new UnreliableImplementation("impl2",
TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
final UnreliableInterface unreliable = (UnreliableInterface)RetryProxy
.create(UnreliableInterface.class, proxyProvider,
RetryPolicies.failoverOnNetworkException(10));
ConcurrentMethodThread t1 = new ConcurrentMethodThread(unreliable);
ConcurrentMethodThread t2 = new ConcurrentMethodThread(unreliable);
t1.start();
t2.start();
t1.join();
t2.join();
assertEquals("impl2", t1.result);
assertEquals("impl2", t2.result);
assertEquals(1, proxyProvider.getFailoversOccurred());
}
/**
* Ensure that when all configured services are throwing StandbyException
* that we fail over back and forth between them until one is no longer
* throwing StandbyException.
*/
@Test
public void testFailoverBetweenMultipleStandbys()
throws UnreliableException, StandbyException, IOException {
final long millisToSleep = 10000;
final UnreliableImplementation impl1 = new UnreliableImplementation("impl1",
TypeOfExceptionToFailWith.STANDBY_EXCEPTION);
FlipFlopProxyProvider<UnreliableInterface> proxyProvider
= new FlipFlopProxyProvider<UnreliableInterface>(
UnreliableInterface.class,
impl1,
new UnreliableImplementation("impl2",
TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
final UnreliableInterface unreliable = (UnreliableInterface)RetryProxy
.create(UnreliableInterface.class, proxyProvider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000));
new Thread() {
@Override
public void run() {
ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep);
impl1.setIdentifier("renamed-impl1");
}
}.start();
String result = unreliable.failsIfIdentifierDoesntMatch("renamed-impl1");
assertEquals("renamed-impl1", result);
}
/**
* Ensure that normal IO exceptions don't result in a failover.
*/
@Test
public void testExpectedIOException() {
UnreliableInterface unreliable = (UnreliableInterface)RetryProxy.create(
UnreliableInterface.class,
newFlipFlopProxyProvider(
TypeOfExceptionToFailWith.REMOTE_EXCEPTION,
TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000));
try {
unreliable.failsIfIdentifierDoesntMatch("no-such-identifier");
fail("Should have thrown *some* exception");
} catch (Exception e) {
assertTrue("Expected IOE but got " + e.getClass(),
e instanceof IOException);
}
}
}
| 12,696 | 34.171745 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assume.*;
import static org.junit.Assert.*;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Time;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*;
public class TestNativeIO {
static final Log LOG = LogFactory.getLog(TestNativeIO.class);
static final File TEST_DIR = new File(
System.getProperty("test.build.data"), "testnativeio");
@Before
public void checkLoaded() {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
}
@Before
public void setupTestDir() {
FileUtil.fullyDelete(TEST_DIR);
TEST_DIR.mkdirs();
}
@Test (timeout = 30000)
public void testFstat() throws Exception {
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testfstat"));
NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
fos.close();
LOG.info("Stat: " + String.valueOf(stat));
String owner = stat.getOwner();
String expectedOwner = System.getProperty("user.name");
if (Path.WINDOWS) {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(expectedOwner);
final String adminsGroupString = "Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner = adminsGroupString;
}
}
assertEquals(expectedOwner, owner);
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file", S_IFREG,
stat.getMode() & S_IFMT);
}
/**
* Test for races in fstat usage
*
* NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
* implementation of getpwuid_r.
*/
@Test (timeout = 30000)
public void testMultiThreadedFstat() throws Exception {
if (Path.WINDOWS) {
return;
}
final FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testfstat"));
final AtomicReference<Throwable> thrown =
new AtomicReference<Throwable>();
List<Thread> statters = new ArrayList<Thread>();
for (int i = 0; i < 10; i++) {
Thread statter = new Thread() {
@Override
public void run() {
long et = Time.now() + 5000;
while (Time.now() < et) {
try {
NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
assertEquals(System.getProperty("user.name"), stat.getOwner());
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file",
S_IFREG, stat.getMode() & S_IFMT);
} catch (Throwable t) {
thrown.set(t);
}
}
}
};
statters.add(statter);
statter.start();
}
for (Thread t : statters) {
t.join();
}
fos.close();
if (thrown.get() != null) {
throw new RuntimeException(thrown.get());
}
}
@Test (timeout = 30000)
public void testFstatClosedFd() throws Exception {
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testfstat2"));
fos.close();
try {
      NativeIO.POSIX.getFstat(fos.getFD());
      fail("Was able to fstat a closed file descriptor");
} catch (NativeIOException nioe) {
LOG.info("Got expected exception", nioe);
assertEquals(Errno.EBADF, nioe.getErrno());
}
}
@Test (timeout = 30000)
public void testSetFilePointer() throws Exception {
if (!Path.WINDOWS) {
return;
}
LOG.info("Set a file pointer on Windows");
try {
File testfile = new File(TEST_DIR, "testSetFilePointer");
assertTrue("Create test subject",
testfile.exists() || testfile.createNewFile());
FileWriter writer = new FileWriter(testfile);
try {
for (int i = 0; i < 200; i++)
if (i < 100)
writer.write('a');
else
writer.write('b');
writer.flush();
} catch (Exception writerException) {
fail("Got unexpected exception: " + writerException.getMessage());
} finally {
writer.close();
}
FileDescriptor fd = NativeIO.Windows.createFile(
testfile.getCanonicalPath(),
NativeIO.Windows.GENERIC_READ,
NativeIO.Windows.FILE_SHARE_READ |
NativeIO.Windows.FILE_SHARE_WRITE |
NativeIO.Windows.FILE_SHARE_DELETE,
NativeIO.Windows.OPEN_EXISTING);
NativeIO.Windows.setFilePointer(fd, 120, NativeIO.Windows.FILE_BEGIN);
FileReader reader = new FileReader(fd);
try {
int c = reader.read();
assertTrue("Unexpected character: " + c, c == 'b');
} catch (Exception readerException) {
fail("Got unexpected exception: " + readerException.getMessage());
} finally {
reader.close();
}
} catch (Exception e) {
fail("Got unexpected exception: " + e.getMessage());
}
}
@Test (timeout = 30000)
public void testCreateFile() throws Exception {
if (!Path.WINDOWS) {
return;
}
LOG.info("Open a file on Windows with SHARE_DELETE shared mode");
try {
File testfile = new File(TEST_DIR, "testCreateFile");
assertTrue("Create test subject",
testfile.exists() || testfile.createNewFile());
FileDescriptor fd = NativeIO.Windows.createFile(
testfile.getCanonicalPath(),
NativeIO.Windows.GENERIC_READ,
NativeIO.Windows.FILE_SHARE_READ |
NativeIO.Windows.FILE_SHARE_WRITE |
NativeIO.Windows.FILE_SHARE_DELETE,
NativeIO.Windows.OPEN_EXISTING);
FileInputStream fin = new FileInputStream(fd);
try {
fin.read();
File newfile = new File(TEST_DIR, "testRenamedFile");
boolean renamed = testfile.renameTo(newfile);
assertTrue("Rename failed.", renamed);
fin.read();
} catch (Exception e) {
fail("Got unexpected exception: " + e.getMessage());
}
finally {
fin.close();
}
} catch (Exception e) {
fail("Got unexpected exception: " + e.getMessage());
}
}
/** Validate access checks on Windows */
@Test (timeout = 30000)
public void testAccess() throws Exception {
if (!Path.WINDOWS) {
return;
}
File testFile = new File(TEST_DIR, "testfileaccess");
assertTrue(testFile.createNewFile());
// Validate ACCESS_READ
FileUtil.setReadable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_READ));
FileUtil.setReadable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_READ));
// Validate ACCESS_WRITE
FileUtil.setWritable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE));
FileUtil.setWritable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE));
// Validate ACCESS_EXECUTE
FileUtil.setExecutable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
FileUtil.setExecutable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
// Validate that access checks work as expected for long paths
    // Assemble a path longer than 260 chars (MAX_PATH)
String testFileRelativePath = "";
for (int i = 0; i < 15; ++i) {
testFileRelativePath += "testfileaccessfolder\\";
}
testFileRelativePath += "testfileaccess";
testFile = new File(TEST_DIR, testFileRelativePath);
assertTrue(testFile.getParentFile().mkdirs());
assertTrue(testFile.createNewFile());
// Validate ACCESS_READ
FileUtil.setReadable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_READ));
FileUtil.setReadable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_READ));
// Validate ACCESS_WRITE
FileUtil.setWritable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE));
FileUtil.setWritable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE));
// Validate ACCESS_EXECUTE
FileUtil.setExecutable(testFile, false);
assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
FileUtil.setExecutable(testFile, true);
assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
}
@Test (timeout = 30000)
public void testOpenMissingWithoutCreate() throws Exception {
if (Path.WINDOWS) {
return;
}
LOG.info("Open a missing file without O_CREAT and it should fail");
try {
FileDescriptor fd = NativeIO.POSIX.open(
new File(TEST_DIR, "doesntexist").getAbsolutePath(), O_WRONLY, 0700);
fail("Able to open a new file without O_CREAT");
} catch (NativeIOException nioe) {
LOG.info("Got expected exception", nioe);
assertEquals(Errno.ENOENT, nioe.getErrno());
}
}
@Test (timeout = 30000)
public void testOpenWithCreate() throws Exception {
if (Path.WINDOWS) {
return;
}
LOG.info("Test creating a file with O_CREAT");
FileDescriptor fd = NativeIO.POSIX.open(
new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
O_WRONLY | O_CREAT, 0700);
    assertNotNull(fd);
assertTrue(fd.valid());
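    // Wrapping the descriptor in a stream hands ownership to it: closing
    // the stream closes the descriptor, so fd.valid() turns false below.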
FileOutputStream fos = new FileOutputStream(fd);
fos.write("foo".getBytes());
fos.close();
assertFalse(fd.valid());
LOG.info("Test exclusive create");
try {
fd = NativeIO.POSIX.open(
new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
O_WRONLY | O_CREAT | O_EXCL, 0700);
fail("Was able to create existing file with O_EXCL");
} catch (NativeIOException nioe) {
LOG.info("Got expected exception for failed exclusive create", nioe);
assertEquals(Errno.EEXIST, nioe.getErrno());
}
}
/**
* Test that opens and closes a file 10000 times - this would crash with
* "Too many open files" if we leaked fds using this access pattern.
*/
@Test (timeout = 30000)
public void testFDDoesntLeak() throws IOException {
if (Path.WINDOWS) {
return;
}
for (int i = 0; i < 10000; i++) {
FileDescriptor fd = NativeIO.POSIX.open(
new File(TEST_DIR, "testNoFdLeak").getAbsolutePath(),
O_WRONLY | O_CREAT, 0700);
      assertNotNull(fd);
assertTrue(fd.valid());
FileOutputStream fos = new FileOutputStream(fd);
fos.write("foo".getBytes());
fos.close();
}
}
/**
* Test basic chmod operation
*/
@Test (timeout = 30000)
public void testChmod() throws Exception {
if (Path.WINDOWS) {
return;
}
try {
NativeIO.POSIX.chmod("/this/file/doesnt/exist", 777);
fail("Chmod of non-existent file didn't fail");
} catch (NativeIOException nioe) {
assertEquals(Errno.ENOENT, nioe.getErrno());
}
File toChmod = new File(TEST_DIR, "testChmod");
assertTrue("Create test subject",
toChmod.exists() || toChmod.mkdir());
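    // The modes below are octal literals (leading zero), matching the
    // conventional chmod(2) notation.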
NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0777);
assertPermissions(toChmod, 0777);
NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0000);
assertPermissions(toChmod, 0000);
NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0644);
assertPermissions(toChmod, 0644);
}
@Test (timeout = 30000)
public void testPosixFadvise() throws Exception {
if (Path.WINDOWS) {
return;
}
FileInputStream fis = new FileInputStream("/dev/zero");
try {
NativeIO.POSIX.posix_fadvise(
fis.getFD(), 0, 0, POSIX_FADV_SEQUENTIAL);
} catch (UnsupportedOperationException uoe) {
// we should just skip the unit test on machines where we don't
// have fadvise support
assumeTrue(false);
} catch (NativeIOException nioe) {
// ignore this error as FreeBSD returns EBADF even if length is zero
}
finally {
fis.close();
}
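    // fis is closed now, so its descriptor is stale; fadvise on it should
    // fail with EBADF rather than succeed silently.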
try {
NativeIO.POSIX.posix_fadvise(fis.getFD(), 0, 1024, POSIX_FADV_SEQUENTIAL);
fail("Did not throw on bad file");
} catch (NativeIOException nioe) {
assertEquals(Errno.EBADF, nioe.getErrno());
}
try {
NativeIO.POSIX.posix_fadvise(null, 0, 1024, POSIX_FADV_SEQUENTIAL);
fail("Did not throw on null file");
} catch (NullPointerException npe) {
// expected
}
}
@Test (timeout = 30000)
public void testSyncFileRange() throws Exception {
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testSyncFileRange"));
try {
fos.write("foo".getBytes());
NativeIO.POSIX.sync_file_range(fos.getFD(), 0, 1024,
SYNC_FILE_RANGE_WRITE);
// no way to verify that this actually has synced,
// but if it doesn't throw, we can assume it worked
} catch (UnsupportedOperationException uoe) {
      // we should just skip the unit test on machines where we don't
      // have sync_file_range support
assumeTrue(false);
} finally {
fos.close();
}
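    // fos was closed in the finally block above, so this call runs on a
    // stale descriptor and is expected to fail with EBADF.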
try {
NativeIO.POSIX.sync_file_range(fos.getFD(), 0, 1024,
SYNC_FILE_RANGE_WRITE);
fail("Did not throw on bad file");
} catch (NativeIOException nioe) {
assertEquals(Errno.EBADF, nioe.getErrno());
}
}
private void assertPermissions(File f, int expected) throws IOException {
FileSystem localfs = FileSystem.getLocal(new Configuration());
FsPermission perms = localfs.getFileStatus(
new Path(f.getAbsolutePath())).getPermission();
assertEquals(expected, perms.toShort());
}
@Test (timeout = 30000)
public void testGetUserName() throws IOException {
if (Path.WINDOWS) {
return;
}
assertFalse(NativeIO.POSIX.getUserName(0).isEmpty());
}
@Test (timeout = 30000)
public void testGetGroupName() throws IOException {
if (Path.WINDOWS) {
return;
}
assertFalse(NativeIO.POSIX.getGroupName(0).isEmpty());
}
@Test (timeout = 30000)
public void testRenameTo() throws Exception {
final File TEST_DIR = new File(new File(
System.getProperty("test.build.data","build/test/data")), "renameTest");
assumeTrue(TEST_DIR.mkdirs());
File nonExistentFile = new File(TEST_DIR, "nonexistent");
File targetFile = new File(TEST_DIR, "target");
// Test attempting to rename a nonexistent file.
try {
NativeIO.renameTo(nonExistentFile, targetFile);
Assert.fail();
} catch (NativeIOException e) {
if (Path.WINDOWS) {
Assert.assertEquals(
String.format("The system cannot find the file specified.%n"),
e.getMessage());
} else {
Assert.assertEquals(Errno.ENOENT, e.getErrno());
}
}
// Test renaming a file to itself. It should succeed and do nothing.
File sourceFile = new File(TEST_DIR, "source");
Assert.assertTrue(sourceFile.createNewFile());
NativeIO.renameTo(sourceFile, sourceFile);
// Test renaming a source to a destination.
NativeIO.renameTo(sourceFile, targetFile);
// Test renaming a source to a path which uses a file as a directory.
sourceFile = new File(TEST_DIR, "source");
Assert.assertTrue(sourceFile.createNewFile());
File badTarget = new File(targetFile, "subdir");
try {
NativeIO.renameTo(sourceFile, badTarget);
Assert.fail();
} catch (NativeIOException e) {
if (Path.WINDOWS) {
Assert.assertEquals(
String.format("The parameter is incorrect.%n"),
e.getMessage());
} else {
Assert.assertEquals(Errno.ENOTDIR, e.getErrno());
}
}
FileUtils.deleteQuietly(TEST_DIR);
}
@Test(timeout=10000)
public void testMlock() throws Exception {
assumeTrue(NativeIO.isAvailable());
final File TEST_FILE = new File(new File(
System.getProperty("test.build.data","build/test/data")),
"testMlockFile");
final int BUF_LEN = 12289;
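    // 12289 = 3 * 4096 + 1: presumably chosen so the mapping spans several
    // 4 KB pages plus one byte, exercising a non page-aligned mlock length.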
byte buf[] = new byte[BUF_LEN];
int bufSum = 0;
for (int i = 0; i < buf.length; i++) {
buf[i] = (byte)(i % 60);
bufSum += buf[i];
}
FileOutputStream fos = new FileOutputStream(TEST_FILE);
try {
fos.write(buf);
fos.getChannel().force(true);
} finally {
fos.close();
}
FileInputStream fis = null;
FileChannel channel = null;
try {
// Map file into memory
fis = new FileInputStream(TEST_FILE);
channel = fis.getChannel();
long fileSize = channel.size();
MappedByteBuffer mapbuf = channel.map(MapMode.READ_ONLY, 0, fileSize);
// mlock the buffer
NativeIO.POSIX.mlock(mapbuf, fileSize);
// Read the buffer
int sum = 0;
for (int i=0; i<fileSize; i++) {
sum += mapbuf.get(i);
}
assertEquals("Expected sums to be equal", bufSum, sum);
// munmap the buffer, which also implicitly unlocks it
NativeIO.POSIX.munmap(mapbuf);
} finally {
if (channel != null) {
channel.close();
}
if (fis != null) {
fis.close();
}
}
}
@Test(timeout=10000)
public void testGetMemlockLimit() throws Exception {
assumeTrue(NativeIO.isAvailable());
NativeIO.getMemlockLimit();
}
@Test (timeout = 30000)
public void testCopyFileUnbuffered() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
File srcFile = new File(TEST_DIR, METHOD_NAME + ".src.dat");
File dstFile = new File(TEST_DIR, METHOD_NAME + ".dst.dat");
final int fileSize = 0x8000000; // 128 MB
final int SEED = 0xBEEF;
final int batchSize = 4096;
final int numBatches = fileSize / batchSize;
Random rb = new Random(SEED);
FileChannel channel = null;
RandomAccessFile raSrcFile = null;
try {
raSrcFile = new RandomAccessFile(srcFile, "rw");
channel = raSrcFile.getChannel();
byte bytesToWrite[] = new byte[batchSize];
MappedByteBuffer mapBuf;
mapBuf = channel.map(MapMode.READ_WRITE, 0, fileSize);
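      // Fill the 128 MB source through the memory mapping, one 4 KB batch
      // of pseudo-random bytes at a time.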
for (int i = 0; i < numBatches; i++) {
rb.nextBytes(bytesToWrite);
mapBuf.put(bytesToWrite);
}
NativeIO.copyFileUnbuffered(srcFile, dstFile);
Assert.assertEquals(srcFile.length(), dstFile.length());
} finally {
IOUtils.cleanup(LOG, channel);
IOUtils.cleanup(LOG, raSrcFile);
FileUtils.deleteQuietly(TEST_DIR);
}
}
@Test (timeout=10000)
public void testNativePosixConsts() {
assumeTrue("Native POSIX constants not required for Windows",
!Path.WINDOWS);
assertTrue("Native 0_RDONLY const not set", O_RDONLY >= 0);
assertTrue("Native 0_WRONLY const not set", O_WRONLY >= 0);
assertTrue("Native 0_RDWR const not set", O_RDWR >= 0);
assertTrue("Native 0_CREAT const not set", O_CREAT >= 0);
assertTrue("Native 0_EXCL const not set", O_EXCL >= 0);
assertTrue("Native 0_NOCTTY const not set", O_NOCTTY >= 0);
assertTrue("Native 0_TRUNC const not set", O_TRUNC >= 0);
assertTrue("Native 0_APPEND const not set", O_APPEND >= 0);
assertTrue("Native 0_NONBLOCK const not set", O_NONBLOCK >= 0);
assertTrue("Native 0_SYNC const not set", O_SYNC >= 0);
assertTrue("Native S_IFMT const not set", S_IFMT >= 0);
assertTrue("Native S_IFIFO const not set", S_IFIFO >= 0);
assertTrue("Native S_IFCHR const not set", S_IFCHR >= 0);
assertTrue("Native S_IFDIR const not set", S_IFDIR >= 0);
assertTrue("Native S_IFBLK const not set", S_IFBLK >= 0);
assertTrue("Native S_IFREG const not set", S_IFREG >= 0);
assertTrue("Native S_IFLNK const not set", S_IFLNK >= 0);
assertTrue("Native S_IFSOCK const not set", S_IFSOCK >= 0);
assertTrue("Native S_ISUID const not set", S_ISUID >= 0);
assertTrue("Native S_ISGID const not set", S_ISGID >= 0);
assertTrue("Native S_ISVTX const not set", S_ISVTX >= 0);
assertTrue("Native S_IRUSR const not set", S_IRUSR >= 0);
assertTrue("Native S_IWUSR const not set", S_IWUSR >= 0);
assertTrue("Native S_IXUSR const not set", S_IXUSR >= 0);
}
@Test (timeout=10000)
public void testNativeFadviseConsts() {
assumeTrue("Fadvise constants not supported", fadvisePossible);
assertTrue("Native POSIX_FADV_NORMAL const not set",
POSIX_FADV_NORMAL >= 0);
assertTrue("Native POSIX_FADV_RANDOM const not set",
POSIX_FADV_RANDOM >= 0);
assertTrue("Native POSIX_FADV_SEQUENTIAL const not set",
POSIX_FADV_SEQUENTIAL >= 0);
assertTrue("Native POSIX_FADV_WILLNEED const not set",
POSIX_FADV_WILLNEED >= 0);
assertTrue("Native POSIX_FADV_DONTNEED const not set",
POSIX_FADV_DONTNEED >= 0);
assertTrue("Native POSIX_FADV_NOREUSE const not set",
POSIX_FADV_NOREUSE >= 0);
}
}
| 23,322 | 32.366237 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
public class TestSharedFileDescriptorFactory {
static final Log LOG = LogFactory.getLog(TestSharedFileDescriptorFactory.class);
private static final File TEST_BASE =
new File(System.getProperty("test.build.data", "/tmp"));
@Before
public void setup() throws Exception {
Assume.assumeTrue(null ==
SharedFileDescriptorFactory.getLoadingFailureReason());
}
@Test(timeout=10000)
public void testReadAndWrite() throws Exception {
File path = new File(TEST_BASE, "testReadAndWrite");
path.mkdirs();
SharedFileDescriptorFactory factory =
SharedFileDescriptorFactory.create("woot_",
new String[] { path.getAbsolutePath() });
FileInputStream inStream =
factory.createDescriptor("testReadAndWrite", 4096);
FileOutputStream outStream = new FileOutputStream(inStream.getFD());
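    // Both streams wrap the same descriptor, so the byte written through
    // outStream is visible to inStream once its channel is rewound.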
outStream.write(101);
inStream.getChannel().position(0);
Assert.assertEquals(101, inStream.read());
inStream.close();
outStream.close();
FileUtil.fullyDelete(path);
}
static private void createTempFile(String path) throws Exception {
FileOutputStream fos = new FileOutputStream(path);
fos.write(101);
fos.close();
}
@Test(timeout=10000)
public void testCleanupRemainders() throws Exception {
Assume.assumeTrue(NativeIO.isAvailable());
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
File path = new File(TEST_BASE, "testCleanupRemainders");
path.mkdirs();
String remainder1 = path.getAbsolutePath() +
Path.SEPARATOR + "woot2_remainder1";
String remainder2 = path.getAbsolutePath() +
Path.SEPARATOR + "woot2_remainder2";
createTempFile(remainder1);
createTempFile(remainder2);
SharedFileDescriptorFactory.create("woot2_",
new String[] { path.getAbsolutePath() });
// creating the SharedFileDescriptorFactory should have removed
// the remainders
Assert.assertFalse(new File(remainder1).exists());
Assert.assertFalse(new File(remainder2).exists());
FileUtil.fullyDelete(path);
}
@Test(timeout=60000)
public void testDirectoryFallbacks() throws Exception {
File nonExistentPath = new File(TEST_BASE, "nonexistent");
File permissionDeniedPath = new File("/");
File goodPath = new File(TEST_BASE, "testDirectoryFallbacks");
goodPath.mkdirs();
try {
SharedFileDescriptorFactory.create("shm_",
new String[] { nonExistentPath.getAbsolutePath(),
permissionDeniedPath.getAbsolutePath() });
Assert.fail();
} catch (IOException e) {
}
SharedFileDescriptorFactory factory =
SharedFileDescriptorFactory.create("shm_",
new String[] { nonExistentPath.getAbsolutePath(),
permissionDeniedPath.getAbsolutePath(),
goodPath.getAbsolutePath() } );
Assert.assertEquals(goodPath.getAbsolutePath(), factory.getPath());
FileUtil.fullyDelete(goodPath);
}
}
| 4,245 | 36.245614 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/SerializationTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.GenericsUtil;
public class SerializationTestUtil {
/**
* A utility that tests serialization/deserialization.
* @param conf configuration to use, "io.serializations" is read to
* determine the serialization
* @param <K> the class of the item
* @param before item to (de)serialize
* @return deserialized item
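   * <p>Illustrative usage (types and values here are just an example):
   * <pre>
   *   Text after = SerializationTestUtil.testSerialization(
   *       new Configuration(), new Text("round trip"));
   * </pre>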
*/
public static <K> K testSerialization(Configuration conf, K before)
throws Exception {
SerializationFactory factory = new SerializationFactory(conf);
Serializer<K> serializer
= factory.getSerializer(GenericsUtil.getClass(before));
Deserializer<K> deserializer
= factory.getDeserializer(GenericsUtil.getClass(before));
DataOutputBuffer out = new DataOutputBuffer();
serializer.open(out);
serializer.serialize(before);
serializer.close();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
deserializer.open(in);
K after = deserializer.deserialize(null);
deserializer.close();
return after;
}
}
| 2,059 | 34.517241 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Test;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertNotNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
public class TestSerializationFactory {
@Test
public void testSerializerAvailability() {
Configuration conf = new Configuration();
SerializationFactory factory = new SerializationFactory(conf);
    // Test that a valid serializer class is returned when it's present
    assertNotNull("A valid class must be returned for default Writable SerDe",
        factory.getSerializer(Writable.class));
    assertNotNull("A valid class must be returned for default Writable SerDe",
        factory.getDeserializer(Writable.class));
// Test that a null is returned when none can be found.
assertNull("A null should be returned if there are no serializers found.",
factory.getSerializer(TestSerializationFactory.class));
assertNull("A null should be returned if there are no deserializers found",
factory.getDeserializer(TestSerializationFactory.class));
}
@Test
public void testSerializationKeyIsTrimmed() {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, " org.apache.hadoop.io.serializer.WritableSerialization ");
SerializationFactory factory = new SerializationFactory(conf);
assertNotNull("Valid class must be returned",
factory.getSerializer(LongWritable.class));
}
}
| 2,425 | 42.321429 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestWritableSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.Serializable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_KEY;
import static org.apache.hadoop.io.TestGenericWritable.CONF_TEST_VALUE;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.TestGenericWritable.Baz;
import org.apache.hadoop.io.TestGenericWritable.FooGenericWritable;
import org.apache.hadoop.io.WritableComparator;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestWritableSerialization {
private static final Configuration conf = new Configuration();
@Test
public void testWritableSerialization() throws Exception {
Text before = new Text("test writable");
Text after = SerializationTestUtil.testSerialization(conf, before);
assertEquals(before, after);
}
@Test
public void testWritableConfigurable() throws Exception {
//set the configuration parameter
conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
//reuse TestGenericWritable inner classes to test
//writables that also implement Configurable.
FooGenericWritable generic = new FooGenericWritable();
generic.setConf(conf);
Baz baz = new Baz();
generic.set(baz);
Baz result = SerializationTestUtil.testSerialization(conf, baz);
assertEquals(baz, result);
assertNotNull(result.getConf());
}
@Test
@SuppressWarnings({"rawtypes", "unchecked"})
public void testWritableComparatorJavaSerialization() throws Exception {
Serialization ser = new JavaSerialization();
Serializer<TestWC> serializer = ser.getSerializer(TestWC.class);
DataOutputBuffer dob = new DataOutputBuffer();
serializer.open(dob);
TestWC orig = new TestWC(0);
serializer.serialize(orig);
serializer.close();
Deserializer<TestWC> deserializer = ser.getDeserializer(TestWC.class);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
deserializer.open(dib);
TestWC deser = deserializer.deserialize(null);
deserializer.close();
assertEquals(orig, deser);
}
static class TestWC extends WritableComparator implements Serializable {
static final long serialVersionUID = 0x4344;
final int val;
TestWC() { this(7); }
TestWC(int val) { this.val = val; }
@Override
public boolean equals(Object o) {
if (o instanceof TestWC) {
return ((TestWC)o).val == val;
}
return false;
}
@Override
public int hashCode() { return val; }
}
}
| 3,455 | 32.553398 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/Record.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer.avro;
public class Record {
public int x = 7;
@Override
public int hashCode() {
return x;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final Record other = (Record) obj;
if (x != other.x)
return false;
return true;
}
}
| 1,247 | 28.714286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/avro/TestAvroSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer.avro;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.SerializationTestUtil;
public class TestAvroSerialization extends TestCase {
private static final Configuration conf = new Configuration();
public void testSpecific() throws Exception {
AvroRecord before = new AvroRecord();
before.intField = 5;
AvroRecord after = SerializationTestUtil.testSerialization(conf, before);
assertEquals(before, after);
}
public void testReflectPkg() throws Exception {
Record before = new Record();
before.x = 10;
conf.set(AvroReflectSerialization.AVRO_REFLECT_PACKAGES,
before.getClass().getPackage().getName());
Record after = SerializationTestUtil.testSerialization(conf, before);
assertEquals(before, after);
}
public void testAcceptHandlingPrimitivesAndArrays() throws Exception {
SerializationFactory factory = new SerializationFactory(conf);
assertNull(factory.getSerializer(byte[].class));
assertNull(factory.getSerializer(byte.class));
}
public void testReflectInnerClass() throws Exception {
InnerRecord before = new InnerRecord();
before.x = 10;
conf.set(AvroReflectSerialization.AVRO_REFLECT_PACKAGES,
before.getClass().getPackage().getName());
InnerRecord after = SerializationTestUtil.testSerialization(conf, before);
assertEquals(before, after);
}
public void testReflect() throws Exception {
RefSerializable before = new RefSerializable();
before.x = 10;
RefSerializable after =
SerializationTestUtil.testSerialization(conf, before);
assertEquals(before, after);
}
public static class InnerRecord {
public int x = 7;
@Override
public int hashCode() {
return x;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final InnerRecord other = (InnerRecord) obj;
if (x != other.x)
return false;
return true;
}
}
public static class RefSerializable implements AvroReflectSerializable {
public int x = 7;
@Override
public int hashCode() {
return x;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final RefSerializable other = (RefSerializable) obj;
if (x != other.x)
return false;
return true;
}
}
}
| 3,545 | 29.568966 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Test;
public class TestCompressorStream extends CompressorStream{
private static FileOutputStream fop = null;
private static File file = null;
static {
try {
file = new File("tmp.txt");
      // FileOutputStream creates the file if it does not already exist,
      // so no separate createNewFile() call is needed.
      fop = new FileOutputStream(file);
} catch (IOException e) {
System.out.println("Error while creating a new file " + e.getMessage());
}
}
public TestCompressorStream() {
super(fop);
}
/**
* Overriding {@link CompressorStream#finish()} method in order
* to reproduce test case
*/
  @Override
  public void finish() throws IOException {
throw new IOException();
}
/**
* In {@link CompressorStream#close()}, if
* {@link CompressorStream#finish()} throws an IOEXception, outputStream
* object was not getting closed.
*/
@Test
public void testClose() {
TestCompressorStream testCompressorStream = new TestCompressorStream();
try {
testCompressorStream.close();
}
catch(IOException e) {
System.out.println("Expected IOException");
}
Assert.assertTrue("closed shoud be true",
((CompressorStream)testCompressorStream).closed);
//cleanup after test case
file.delete();
}
}
| 2,230 | 28.355263 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import static org.junit.Assert.assertEquals;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;
import java.util.HashSet;
import java.util.Set;
public class TestCodecPool {
private final String LEASE_COUNT_ERR =
"Incorrect number of leased (de)compressors";
DefaultCodec codec;
@Before
public void setup() {
this.codec = new DefaultCodec();
this.codec.setConf(new Configuration());
}
@Test(timeout = 1000)
public void testCompressorPoolCounts() {
// Get two compressors and return them
Compressor comp1 = CodecPool.getCompressor(codec);
Compressor comp2 = CodecPool.getCompressor(codec);
assertEquals(LEASE_COUNT_ERR, 2,
CodecPool.getLeasedCompressorsCount(codec));
CodecPool.returnCompressor(comp2);
assertEquals(LEASE_COUNT_ERR, 1,
CodecPool.getLeasedCompressorsCount(codec));
CodecPool.returnCompressor(comp1);
assertEquals(LEASE_COUNT_ERR, 0,
CodecPool.getLeasedCompressorsCount(codec));
CodecPool.returnCompressor(comp1);
assertEquals(LEASE_COUNT_ERR, 0,
CodecPool.getLeasedCompressorsCount(codec));
}
@Test(timeout = 1000)
public void testCompressorNotReturnSameInstance() {
Compressor comp = CodecPool.getCompressor(codec);
CodecPool.returnCompressor(comp);
CodecPool.returnCompressor(comp);
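    // Returning the same instance twice must not duplicate it in the pool;
    // the ten leases below should therefore yield ten distinct compressors.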
Set<Compressor> compressors = new HashSet<Compressor>();
for (int i = 0; i < 10; ++i) {
compressors.add(CodecPool.getCompressor(codec));
}
assertEquals(10, compressors.size());
for (Compressor compressor : compressors) {
CodecPool.returnCompressor(compressor);
}
}
@Test(timeout = 1000)
public void testDecompressorPoolCounts() {
// Get two decompressors and return them
Decompressor decomp1 = CodecPool.getDecompressor(codec);
Decompressor decomp2 = CodecPool.getDecompressor(codec);
assertEquals(LEASE_COUNT_ERR, 2,
CodecPool.getLeasedDecompressorsCount(codec));
CodecPool.returnDecompressor(decomp2);
assertEquals(LEASE_COUNT_ERR, 1,
CodecPool.getLeasedDecompressorsCount(codec));
CodecPool.returnDecompressor(decomp1);
assertEquals(LEASE_COUNT_ERR, 0,
CodecPool.getLeasedDecompressorsCount(codec));
CodecPool.returnDecompressor(decomp1);
assertEquals(LEASE_COUNT_ERR, 0,
        CodecPool.getLeasedDecompressorsCount(codec));
}
@Test(timeout = 1000)
public void testMultiThreadedCompressorPool() throws InterruptedException {
final int iterations = 4;
ExecutorService threadpool = Executors.newFixedThreadPool(3);
final LinkedBlockingDeque<Compressor> queue = new LinkedBlockingDeque<Compressor>(
2 * iterations);
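    // Producers lease compressors and push them onto the queue; consumers
    // take them back off and return them to the pool, so once all tasks
    // finish the lease count should drain to zero.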
Callable<Boolean> consumer = new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
Compressor c = queue.take();
CodecPool.returnCompressor(c);
return c != null;
}
};
Callable<Boolean> producer = new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
Compressor c = CodecPool.getCompressor(codec);
queue.put(c);
return c != null;
}
};
for (int i = 0; i < iterations; i++) {
threadpool.submit(consumer);
threadpool.submit(producer);
}
// wait for completion
threadpool.shutdown();
threadpool.awaitTermination(1000, TimeUnit.SECONDS);
assertEquals(LEASE_COUNT_ERR, 0, CodecPool.getLeasedCompressorsCount(codec));
}
@Test(timeout = 1000)
public void testMultiThreadedDecompressorPool() throws InterruptedException {
final int iterations = 4;
ExecutorService threadpool = Executors.newFixedThreadPool(3);
final LinkedBlockingDeque<Decompressor> queue = new LinkedBlockingDeque<Decompressor>(
2 * iterations);
Callable<Boolean> consumer = new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
Decompressor dc = queue.take();
CodecPool.returnDecompressor(dc);
return dc != null;
}
};
Callable<Boolean> producer = new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
Decompressor c = CodecPool.getDecompressor(codec);
queue.put(c);
return c != null;
}
};
for (int i = 0; i < iterations; i++) {
threadpool.submit(consumer);
threadpool.submit(producer);
}
// wait for completion
threadpool.shutdown();
threadpool.awaitTermination(1000, TimeUnit.SECONDS);
assertEquals(LEASE_COUNT_ERR, 0,
CodecPool.getLeasedDecompressorsCount(codec));
}
@Test(timeout = 1000)
public void testDecompressorNotReturnSameInstance() {
Decompressor decomp = CodecPool.getDecompressor(codec);
CodecPool.returnDecompressor(decomp);
CodecPool.returnDecompressor(decomp);
Set<Decompressor> decompressors = new HashSet<Decompressor>();
for (int i = 0; i < 10; ++i) {
decompressors.add(CodecPool.getDecompressor(codec));
}
assertEquals(10, decompressors.size());
for (Decompressor decompressor : decompressors) {
CodecPool.returnDecompressor(decompressor);
}
}
}
| 6,268 | 31.481865 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorDecompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import static org.junit.Assert.fail;
import java.util.Random;
import org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStrategy;
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
/**
* Test for pairs:
* <pre>
* SnappyCompressor/SnappyDecompressor
* Lz4Compressor/Lz4Decompressor
 * BuiltInZlibDeflater/BuiltInZlibInflater
 *
 *
 * Note: we can't use ZlibCompressor/ZlibDecompressor here
 * because their constructors can throw an exception (if the native
 * libraries are not found). The ZlibCompressor/ZlibDecompressor pair is
 * covered by {@code TestZlibCompressorDecompressor}.
*
* </pre>
*
*/
public class TestCompressorDecompressor {
private static final Random rnd = new Random(12345L);
@Test
public void testCompressorDecompressor() {
// no more for this data
int SIZE = 44 * 1024;
byte[] rawData = generate(SIZE);
try {
CompressDecompressTester.of(rawData)
.withCompressDecompressPair(new SnappyCompressor(), new SnappyDecompressor())
.withCompressDecompressPair(new Lz4Compressor(), new Lz4Decompressor())
.withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
.withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
.test();
} catch (Exception ex) {
fail("testCompressorDecompressor error !!!" + ex);
}
}
@Test
  public void testCompressorDecompressorWithExceedBufferLimit() {
int BYTE_SIZE = 100 * 1024;
byte[] rawData = generate(BYTE_SIZE);
try {
CompressDecompressTester.of(rawData)
.withCompressDecompressPair(
new SnappyCompressor(BYTE_SIZE + BYTE_SIZE / 2),
new SnappyDecompressor(BYTE_SIZE + BYTE_SIZE / 2))
.withCompressDecompressPair(new Lz4Compressor(BYTE_SIZE),
new Lz4Decompressor(BYTE_SIZE))
.withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
.test();
} catch (Exception ex) {
fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
}
}
public static byte[] generate(int size) {
byte[] array = new byte[size];
for (int i = 0; i < size; i++)
array[i] = (byte) rnd.nextInt(16);
return array;
}
}
| 4,004 | 38.264706 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
public class TestCodecFactory extends TestCase {
private static class BaseCodec implements CompressionCodec {
private Configuration conf;
public void setConf(Configuration conf) {
this.conf = conf;
}
public Configuration getConf() {
return conf;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return null;
}
@Override
public Class<? extends Compressor> getCompressorType() {
return null;
}
@Override
public Compressor createCompressor() {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return null;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
return null;
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return null;
}
@Override
public Decompressor createDecompressor() {
return null;
}
@Override
public String getDefaultExtension() {
return ".base";
}
}
private static class BarCodec extends BaseCodec {
@Override
public String getDefaultExtension() {
return "bar";
}
}
private static class FooBarCodec extends BaseCodec {
@Override
public String getDefaultExtension() {
return ".foo.bar";
}
}
private static class FooCodec extends BaseCodec {
@Override
public String getDefaultExtension() {
return ".foo";
}
}
private static class NewGzipCodec extends BaseCodec {
@Override
public String getDefaultExtension() {
return ".gz";
}
}
/**
* Returns a factory for a given set of codecs
* @param classes the codec classes to include
* @return a new factory
*/
private static CompressionCodecFactory setClasses(Class[] classes) {
Configuration conf = new Configuration();
CompressionCodecFactory.setCodecClasses(conf, Arrays.asList(classes));
return new CompressionCodecFactory(conf);
}
private static void checkCodec(String msg,
Class expected, CompressionCodec actual) {
assertEquals(msg + " unexpected codec found",
expected.getName(),
actual.getClass().getName());
}
  public void testFinding() {
CompressionCodecFactory factory =
new CompressionCodecFactory(new Configuration());
CompressionCodec codec = factory.getCodec(new Path("/tmp/foo.bar"));
assertEquals("default factory foo codec", null, codec);
codec = factory.getCodecByClassName(BarCodec.class.getCanonicalName());
assertEquals("default factory foo codec", null, codec);
codec = factory.getCodec(new Path("/tmp/foo.gz"));
checkCodec("default factory for .gz", GzipCodec.class, codec);
codec = factory.getCodecByClassName(GzipCodec.class.getCanonicalName());
checkCodec("default factory for gzip codec", GzipCodec.class, codec);
codec = factory.getCodecByName("gzip");
checkCodec("default factory for gzip codec", GzipCodec.class, codec);
codec = factory.getCodecByName("GZIP");
checkCodec("default factory for gzip codec", GzipCodec.class, codec);
codec = factory.getCodecByName("GZIPCodec");
checkCodec("default factory for gzip codec", GzipCodec.class, codec);
codec = factory.getCodecByName("gzipcodec");
checkCodec("default factory for gzip codec", GzipCodec.class, codec);
Class klass = factory.getCodecClassByName("gzipcodec");
assertEquals(GzipCodec.class, klass);
codec = factory.getCodec(new Path("/tmp/foo.bz2"));
checkCodec("default factory for .bz2", BZip2Codec.class, codec);
codec = factory.getCodecByClassName(BZip2Codec.class.getCanonicalName());
checkCodec("default factory for bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodecByName("bzip2");
checkCodec("default factory for bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodecByName("bzip2codec");
checkCodec("default factory for bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodecByName("BZIP2");
checkCodec("default factory for bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodecByName("BZIP2CODEC");
checkCodec("default factory for bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodecByClassName(DeflateCodec.class.getCanonicalName());
checkCodec("default factory for deflate codec", DeflateCodec.class, codec);
codec = factory.getCodecByName("deflate");
checkCodec("default factory for deflate codec", DeflateCodec.class, codec);
codec = factory.getCodecByName("deflatecodec");
checkCodec("default factory for deflate codec", DeflateCodec.class, codec);
codec = factory.getCodecByName("DEFLATE");
checkCodec("default factory for deflate codec", DeflateCodec.class, codec);
codec = factory.getCodecByName("DEFLATECODEC");
checkCodec("default factory for deflate codec", DeflateCodec.class, codec);
factory = setClasses(new Class[0]);
// gz, bz2, snappy, lz4 are picked up by service loader, but bar isn't
codec = factory.getCodec(new Path("/tmp/foo.bar"));
assertEquals("empty factory bar codec", null, codec);
codec = factory.getCodecByClassName(BarCodec.class.getCanonicalName());
assertEquals("empty factory bar codec", null, codec);
codec = factory.getCodec(new Path("/tmp/foo.gz"));
checkCodec("empty factory gz codec", GzipCodec.class, codec);
codec = factory.getCodecByClassName(GzipCodec.class.getCanonicalName());
checkCodec("empty factory gz codec", GzipCodec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo.bz2"));
checkCodec("empty factory for .bz2", BZip2Codec.class, codec);
codec = factory.getCodecByClassName(BZip2Codec.class.getCanonicalName());
checkCodec("empty factory for bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo.snappy"));
checkCodec("empty factory snappy codec", SnappyCodec.class, codec);
codec = factory.getCodecByClassName(SnappyCodec.class.getCanonicalName());
checkCodec("empty factory snappy codec", SnappyCodec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo.lz4"));
checkCodec("empty factory lz4 codec", Lz4Codec.class, codec);
codec = factory.getCodecByClassName(Lz4Codec.class.getCanonicalName());
checkCodec("empty factory lz4 codec", Lz4Codec.class, codec);
factory = setClasses(new Class[]{BarCodec.class, FooCodec.class,
FooBarCodec.class});
codec = factory.getCodec(new Path("/tmp/.foo.bar.gz"));
checkCodec("full factory gz codec", GzipCodec.class, codec);
codec = factory.getCodecByClassName(GzipCodec.class.getCanonicalName());
checkCodec("full codec gz codec", GzipCodec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo.bz2"));
checkCodec("full factory for .bz2", BZip2Codec.class, codec);
codec = factory.getCodecByClassName(BZip2Codec.class.getCanonicalName());
checkCodec("full codec bzip2 codec", BZip2Codec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo.bar"));
checkCodec("full factory bar codec", BarCodec.class, codec);
codec = factory.getCodecByClassName(BarCodec.class.getCanonicalName());
checkCodec("full factory bar codec", BarCodec.class, codec);
codec = factory.getCodecByName("bar");
checkCodec("full factory bar codec", BarCodec.class, codec);
codec = factory.getCodecByName("BAR");
checkCodec("full factory bar codec", BarCodec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo/baz.foo.bar"));
checkCodec("full factory foo bar codec", FooBarCodec.class, codec);
codec = factory.getCodecByClassName(FooBarCodec.class.getCanonicalName());
checkCodec("full factory foo bar codec", FooBarCodec.class, codec);
codec = factory.getCodecByName("foobar");
checkCodec("full factory foo bar codec", FooBarCodec.class, codec);
codec = factory.getCodecByName("FOOBAR");
checkCodec("full factory foo bar codec", FooBarCodec.class, codec);
codec = factory.getCodec(new Path("/tmp/foo.foo"));
checkCodec("full factory foo codec", FooCodec.class, codec);
codec = factory.getCodecByClassName(FooCodec.class.getCanonicalName());
checkCodec("full factory foo codec", FooCodec.class, codec);
codec = factory.getCodecByName("foo");
checkCodec("full factory foo codec", FooCodec.class, codec);
codec = factory.getCodecByName("FOO");
checkCodec("full factory foo codec", FooCodec.class, codec);
factory = setClasses(new Class[]{NewGzipCodec.class});
codec = factory.getCodec(new Path("/tmp/foo.gz"));
checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY,
" org.apache.hadoop.io.compress.GzipCodec , " +
" org.apache.hadoop.io.compress.DefaultCodec , " +
" org.apache.hadoop.io.compress.BZip2Codec ");
try {
CompressionCodecFactory.getCodecClasses(conf);
} catch (IllegalArgumentException e) {
fail("IllegalArgumentException is unexpected");
}
}
}
| 10,970 | 39.040146 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestCodec {
private static final Log LOG= LogFactory.getLog(TestCodec.class);
private Configuration conf = new Configuration();
private int count = 10000;
private int seed = new Random().nextInt();
@Test
public void testDefaultCodec() throws IOException {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.DefaultCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.DefaultCodec");
}
@Test
public void testGzipCodec() throws IOException {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
@Test(timeout=20000)
public void testBZip2Codec() throws IOException {
Configuration conf = new Configuration();
conf.set("io.compression.codec.bzip2.library", "java-builtin");
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.BZip2Codec");
}
@Test(timeout=20000)
public void testBZip2NativeCodec() throws IOException {
Configuration conf = new Configuration();
conf.set("io.compression.codec.bzip2.library", "system-native");
if (NativeCodeLoader.isNativeCodeLoaded()) {
if (Bzip2Factory.isNativeBzip2Loaded(conf)) {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
codecTest(conf, seed, count,
"org.apache.hadoop.io.compress.BZip2Codec");
conf.set("io.compression.codec.bzip2.library", "java-builtin");
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
codecTest(conf, seed, count,
"org.apache.hadoop.io.compress.BZip2Codec");
} else {
LOG.warn("Native hadoop library available but native bzip2 is not");
}
}
}
@Test
public void testSnappyCodec() throws IOException {
if (SnappyCodec.isNativeCodeLoaded()) {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec");
}
}
@Test
public void testLz4Codec() throws IOException {
if (NativeCodeLoader.isNativeCodeLoaded()) {
if (Lz4Codec.isNativeCodeLoaded()) {
conf.setBoolean(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
false);
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
conf.setBoolean(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
true);
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.Lz4Codec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.Lz4Codec");
} else {
Assert.fail("Native hadoop library available but lz4 not");
}
}
}
@Test
public void testDeflateCodec() throws IOException {
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.DeflateCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.DeflateCodec");
}
@Test
public void testGzipCodecWithParam() throws IOException {
Configuration conf = new Configuration(this.conf);
ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
ZlibFactory.setCompressionStrategy(conf, CompressionStrategy.HUFFMAN_ONLY);
codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
codecTest(conf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
private static void codecTest(Configuration conf, int seed, int count,
String codecClass)
throws IOException {
// Create the codec
CompressionCodec codec = null;
try {
codec = (CompressionCodec)
ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
} catch (ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
LOG.info("Created a Codec object of type: " + codecClass);
// Generate data
DataOutputBuffer data = new DataOutputBuffer();
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
for(int i=0; i < count; ++i) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
key.write(data);
value.write(data);
}
LOG.info("Generated " + count + " records");
// Compress data
DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
CompressionOutputStream deflateFilter =
codec.createOutputStream(compressedDataBuffer);
DataOutputStream deflateOut =
new DataOutputStream(new BufferedOutputStream(deflateFilter));
deflateOut.write(data.getData(), 0, data.getLength());
deflateOut.flush();
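    // finish() completes the compressed stream without closing the
    // underlying buffer, so compressedDataBuffer now holds a full stream.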
deflateFilter.finish();
LOG.info("Finished compressing data");
// De-compress data
DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
compressedDataBuffer.getLength());
CompressionInputStream inflateFilter =
codec.createInputStream(deCompressedDataBuffer);
DataInputStream inflateIn =
new DataInputStream(new BufferedInputStream(inflateFilter));
// Check
DataInputBuffer originalData = new DataInputBuffer();
originalData.reset(data.getData(), 0, data.getLength());
DataInputStream originalIn = new DataInputStream(new BufferedInputStream(originalData));
for(int i=0; i < count; ++i) {
RandomDatum k1 = new RandomDatum();
RandomDatum v1 = new RandomDatum();
k1.readFields(originalIn);
v1.readFields(originalIn);
RandomDatum k2 = new RandomDatum();
RandomDatum v2 = new RandomDatum();
k2.readFields(inflateIn);
v2.readFields(inflateIn);
assertTrue("original and compressed-then-decompressed-output not equal",
k1.equals(k2) && v1.equals(v2));
// original and compressed-then-decompressed-output have the same hashCode
Map<RandomDatum, String> m = new HashMap<RandomDatum, String>();
m.put(k1, k1.toString());
m.put(v1, v1.toString());
String result = m.get(k2);
assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
result = m.get(v2);
assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
}
// De-compress data byte-at-a-time
originalData.reset(data.getData(), 0, data.getLength());
deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
compressedDataBuffer.getLength());
inflateFilter =
codec.createInputStream(deCompressedDataBuffer);
// Check
originalIn = new DataInputStream(new BufferedInputStream(originalData));
int expected;
do {
expected = originalIn.read();
assertEquals("Inflated stream read by byte does not match",
expected, inflateFilter.read());
} while (expected != -1);
LOG.info("SUCCESS! Completed checking " + count + " records");
}
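  // A minimal sketch (added for illustration, not part of the original
  // suite) of the round-trip pattern codecTest() exercises above: compress
  // a byte array through codec.createOutputStream(), then inflate it back
  // through codec.createInputStream(). It uses only classes this file
  // already imports.
  private static byte[] roundTrip(CompressionCodec codec, byte[] input)
      throws IOException {
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    CompressionOutputStream out = codec.createOutputStream(compressed);
    out.write(input);
    out.finish(); // flush buffered data and write any stream trailer
    out.close();
    byte[] cbytes = compressed.toByteArray();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(cbytes, 0, cbytes.length);
    ByteArrayOutputStream inflated = new ByteArrayOutputStream();
    IOUtils.copyBytes(codec.createInputStream(in), inflated, 4096);
    return inflated.toByteArray();
  }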
@Test
public void testSplitableCodecs() throws Exception {
testSplitableCodec(BZip2Codec.class);
}
private void testSplitableCodec(
Class<? extends SplittableCompressionCodec> codecClass)
throws IOException {
final long DEFLBYTES = 2 * 1024 * 1024;
final Configuration conf = new Configuration();
final Random rand = new Random();
final long seed = rand.nextLong();
LOG.info("seed: " + seed);
rand.setSeed(seed);
SplittableCompressionCodec codec =
ReflectionUtils.newInstance(codecClass, conf);
final FileSystem fs = FileSystem.getLocal(conf);
final FileStatus infile =
fs.getFileStatus(writeSplitTestFile(fs, rand, codec, DEFLBYTES));
if (infile.getLen() > Integer.MAX_VALUE) {
fail("Unexpected compression: " + DEFLBYTES + " -> " + infile.getLen());
}
final int flen = (int) infile.getLen();
final Text line = new Text();
final Decompressor dcmp = CodecPool.getDecompressor(codec);
try {
for (int pos = 0; pos < infile.getLen(); pos += rand.nextInt(flen / 8)) {
// read from random positions, verifying that there exist two sequential
// lines as written in writeSplitTestFile
final SplitCompressionInputStream in =
codec.createInputStream(fs.open(infile.getPath()), dcmp,
pos, flen, SplittableCompressionCodec.READ_MODE.BYBLOCK);
if (in.getAdjustedStart() >= flen) {
break;
}
LOG.info("SAMPLE " + in.getAdjustedStart() + "," + in.getAdjustedEnd());
final LineReader lreader = new LineReader(in);
lreader.readLine(line); // ignore; likely partial
if (in.getPos() >= flen) {
break;
}
lreader.readLine(line);
final int seq1 = readLeadingInt(line);
lreader.readLine(line);
if (in.getPos() >= flen) {
break;
}
final int seq2 = readLeadingInt(line);
assertEquals("Mismatched lines", seq1 + 1, seq2);
}
} finally {
CodecPool.returnDecompressor(dcmp);
}
// remove on success
fs.delete(infile.getPath().getParent(), true);
}
private static int readLeadingInt(Text txt) throws IOException {
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(txt.getBytes()));
return in.readInt();
}
/** Write infLen bytes (deflated) to file in test dir using codec.
* Records are of the form
* <i><b64 rand><i+i><b64 rand>
*/
private static Path writeSplitTestFile(FileSystem fs, Random rand,
CompressionCodec codec, long infLen) throws IOException {
final int REC_SIZE = 1024;
final Path wd = new Path(new Path(
System.getProperty("test.build.data", "/tmp")).makeQualified(fs),
codec.getClass().getSimpleName());
final Path file = new Path(wd, "test" + codec.getDefaultExtension());
final byte[] b = new byte[REC_SIZE];
final Base64 b64 = new Base64(0, null);
DataOutputStream fout = null;
Compressor cmp = CodecPool.getCompressor(codec);
try {
fout = new DataOutputStream(codec.createOutputStream(
fs.create(file, true), cmp));
final DataOutputBuffer dob = new DataOutputBuffer(REC_SIZE * 4 / 3 + 4);
int seq = 0;
while (infLen > 0) {
rand.nextBytes(b);
final byte[] b64enc = b64.encode(b); // ensures rand printable, no LF
dob.reset();
dob.writeInt(seq);
System.arraycopy(dob.getData(), 0, b64enc, 0, dob.getLength());
fout.write(b64enc);
fout.write('\n');
++seq;
infLen -= b64enc.length;
}
LOG.info("Wrote " + seq + " records to " + file);
} finally {
IOUtils.cleanup(LOG, fout);
CodecPool.returnCompressor(cmp);
}
return file;
}
@Test
public void testCodecPoolGzipReuse() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
if (!ZlibFactory.isNativeZlibLoaded(conf)) {
LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
return;
}
GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
Compressor c1 = CodecPool.getCompressor(gzc);
Compressor c2 = CodecPool.getCompressor(dfc);
CodecPool.returnCompressor(c1);
CodecPool.returnCompressor(c2);
assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
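  // Hedged sketch (illustrative helper, not an original test) of the
  // borrow/return discipline the pool tests here rely on: a Compressor
  // taken from CodecPool must always be returned, typically in a finally
  // block, so a later caller can reuse the same (reset) native resources.
  // The caller owns and closes the sink stream.
  private static void pooledCompress(CompressionCodec codec, byte[] data,
      OutputStream sink) throws IOException {
    Compressor cmp = CodecPool.getCompressor(codec);
    try {
      CompressionOutputStream cos = codec.createOutputStream(sink, cmp);
      cos.write(data);
      cos.finish(); // complete the stream before handing cmp back
    } finally {
      CodecPool.returnCompressor(cmp);
    }
  }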
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
throws IOException {
// Add codec to cache
ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
ZlibFactory.setCompressionStrategy(conf,
CompressionStrategy.DEFAULT_STRATEGY);
Compressor c1 = CodecPool.getCompressor(codec);
CodecPool.returnCompressor(c1);
// reset compressor's compression level to perform no compression
ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
Compressor c2 = CodecPool.getCompressor(codec, conf);
    // ensure the pool hands back the same compressor instance returned earlier
assertTrue("Got mismatched ZlibCompressor", c1 == c2);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
CompressionOutputStream cos = null;
    // write trivially compressible data
byte[] b = new byte[1 << 15];
Arrays.fill(b, (byte) 43);
try {
cos = codec.createOutputStream(bos, c2);
cos.write(b);
} finally {
if (cos != null) {
cos.close();
}
CodecPool.returnCompressor(c2);
}
byte[] outbytes = bos.toByteArray();
// verify data were not compressed
assertTrue("Compressed bytes contrary to configuration",
outbytes.length >= b.length);
}
  private static void codecTestWithNOCompression(Configuration conf,
String codecClass) throws IOException {
// Create a compressor with NO_COMPRESSION and make sure that
// output is not compressed by comparing the size with the
// original input
CompressionCodec codec = null;
ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
try {
codec = (CompressionCodec)
ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
} catch (ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Compressor c = codec.createCompressor();
ByteArrayOutputStream bos = new ByteArrayOutputStream();
CompressionOutputStream cos = null;
    // write trivially compressible data
byte[] b = new byte[1 << 15];
Arrays.fill(b, (byte) 43);
try {
cos = codec.createOutputStream(bos, c);
cos.write(b);
} finally {
if (cos != null) {
cos.close();
}
}
byte[] outbytes = bos.toByteArray();
// verify data were not compressed
assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
outbytes.length >= b.length);
}
@Test
public void testCodecInitWithCompressionLevel() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
if (ZlibFactory.isNativeZlibLoaded(conf)) {
LOG.info("testCodecInitWithCompressionLevel with native");
codecTestWithNOCompression(conf,
"org.apache.hadoop.io.compress.GzipCodec");
codecTestWithNOCompression(conf,
"org.apache.hadoop.io.compress.DefaultCodec");
} else {
LOG.warn("testCodecInitWithCompressionLevel for native skipped"
+ ": native libs not loaded");
}
conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
    codecTestWithNOCompression(conf,
"org.apache.hadoop.io.compress.DefaultCodec");
}
@Test
public void testCodecPoolCompressorReinit() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
if (ZlibFactory.isNativeZlibLoaded(conf)) {
GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
gzipReinitTest(conf, gzc);
} else {
LOG.warn("testCodecPoolCompressorReinit skipped: native libs not loaded");
}
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
gzipReinitTest(conf, dfc);
}
@Test
public void testSequenceFileDefaultCodec() throws IOException, ClassNotFoundException,
InstantiationException, IllegalAccessException {
sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.DefaultCodec", 100);
sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.DefaultCodec", 1000000);
}
@Test(timeout=20000)
public void testSequenceFileBZip2Codec() throws IOException, ClassNotFoundException,
InstantiationException, IllegalAccessException {
Configuration conf = new Configuration();
conf.set("io.compression.codec.bzip2.library", "java-builtin");
sequenceFileCodecTest(conf, 0, "org.apache.hadoop.io.compress.BZip2Codec", 100);
sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.BZip2Codec", 100);
sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.BZip2Codec", 1000000);
}
@Test(timeout=20000)
public void testSequenceFileBZip2NativeCodec() throws IOException,
ClassNotFoundException, InstantiationException,
IllegalAccessException {
Configuration conf = new Configuration();
conf.set("io.compression.codec.bzip2.library", "system-native");
if (NativeCodeLoader.isNativeCodeLoaded()) {
if (Bzip2Factory.isNativeBzip2Loaded(conf)) {
sequenceFileCodecTest(conf, 0,
"org.apache.hadoop.io.compress.BZip2Codec", 100);
sequenceFileCodecTest(conf, 100,
"org.apache.hadoop.io.compress.BZip2Codec", 100);
sequenceFileCodecTest(conf, 200000,
"org.apache.hadoop.io.compress.BZip2Codec",
1000000);
} else {
LOG.warn("Native hadoop library available but native bzip2 is not");
}
}
}
@Test
public void testSequenceFileDeflateCodec() throws IOException, ClassNotFoundException,
InstantiationException, IllegalAccessException {
sequenceFileCodecTest(conf, 100, "org.apache.hadoop.io.compress.DeflateCodec", 100);
sequenceFileCodecTest(conf, 200000, "org.apache.hadoop.io.compress.DeflateCodec", 1000000);
}
private static void sequenceFileCodecTest(Configuration conf, int lines,
String codecClass, int blockSize)
throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException {
Path filePath = new Path("SequenceFileCodecTest." + codecClass);
// Configuration
conf.setInt("io.seqfile.compress.blocksize", blockSize);
// Create the SequenceFile
FileSystem fs = FileSystem.get(conf);
LOG.info("Creating SequenceFile with codec \"" + codecClass + "\"");
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, filePath,
Text.class, Text.class, CompressionType.BLOCK,
(CompressionCodec)Class.forName(codecClass).newInstance());
// Write some data
LOG.info("Writing to SequenceFile...");
for (int i=0; i<lines; i++) {
Text key = new Text("key" + i);
Text value = new Text("value" + i);
writer.append(key, value);
}
writer.close();
// Read the data back and check
LOG.info("Reading from the SequenceFile...");
SequenceFile.Reader reader = new SequenceFile.Reader(fs, filePath, conf);
Writable key = (Writable)reader.getKeyClass().newInstance();
Writable value = (Writable)reader.getValueClass().newInstance();
int lc = 0;
try {
while (reader.next(key, value)) {
assertEquals("key" + lc, key.toString());
assertEquals("value" + lc, value.toString());
lc ++;
}
} finally {
reader.close();
}
assertEquals(lines, lc);
// Delete temporary files
fs.delete(filePath, false);
LOG.info("SUCCESS! Completed SequenceFileCodecTest with codec \"" + codecClass + "\"");
}
/**
* Regression test for HADOOP-8423: seeking in a block-compressed
* stream would not properly reset the block decompressor state.
*/
@Test
public void testSnappyMapFile() throws Exception {
Assume.assumeTrue(SnappyCodec.isNativeCodeLoaded());
codecTestMapFile(SnappyCodec.class, CompressionType.BLOCK, 100);
}
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
CompressionType type, int records) throws Exception {
FileSystem fs = FileSystem.get(conf);
LOG.info("Creating MapFiles with " + records +
" records using codec " + clazz.getSimpleName());
Path path = new Path(new Path(
System.getProperty("test.build.data", "/tmp")),
clazz.getSimpleName() + "-" + type + "-" + records);
LOG.info("Writing " + path);
createMapFile(conf, fs, path, clazz.newInstance(), type, records);
MapFile.Reader reader = new MapFile.Reader(path, conf);
Text key1 = new Text("002");
assertNotNull(reader.get(key1, new Text()));
Text key2 = new Text("004");
assertNotNull(reader.get(key2, new Text()));
}
private static void createMapFile(Configuration conf, FileSystem fs, Path path,
CompressionCodec codec, CompressionType type, int records) throws IOException {
MapFile.Writer writer =
new MapFile.Writer(conf, path,
MapFile.Writer.keyClass(Text.class),
MapFile.Writer.valueClass(Text.class),
MapFile.Writer.compression(type, codec));
Text key = new Text();
for (int j = 0; j < records; j++) {
key.set(String.format("%03d", j));
writer.append(key, key);
}
writer.close();
}
public static void main(String[] args) throws IOException {
int count = 10000;
String codecClass = "org.apache.hadoop.io.compress.DefaultCodec";
String usage = "TestCodec [-count N] [-codec <codec class>]";
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
for (int i=0; i < args.length; ++i) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-codec")) {
codecClass = args[++i];
}
}
Configuration conf = new Configuration();
int seed = 0;
// Note that exceptions will propagate out.
codecTest(conf, seed, count, codecClass);
}
@Test
public void testGzipCompatibility() throws IOException {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
LOG.info("seed: " + seed);
DataOutputBuffer dflbuf = new DataOutputBuffer();
GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
r.nextBytes(b);
gzout.write(b);
gzout.close();
DataInputBuffer gzbuf = new DataInputBuffer();
gzbuf.reset(dflbuf.getData(), dflbuf.getLength());
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
Decompressor decom = codec.createDecompressor();
assertNotNull(decom);
assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
InputStream gzin = codec.createInputStream(gzbuf, decom);
dflbuf.reset();
IOUtils.copyBytes(gzin, dflbuf, 4096);
final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
assertArrayEquals(b, dflchk);
}
  void gzipConcatTest(Configuration conf,
Class<? extends Decompressor> decomClass) throws IOException {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
LOG.info(decomClass + " seed: " + seed);
final int CONCAT = r.nextInt(4) + 3;
final int BUFLEN = 128 * 1024;
DataOutputBuffer dflbuf = new DataOutputBuffer();
DataOutputBuffer chkbuf = new DataOutputBuffer();
byte[] b = new byte[BUFLEN];
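    // Each loop iteration appends one complete, independent gzip member to
    // dflbuf; RFC 1952 allows such concatenation, and a conforming
    // decompressor must inflate all members as one continuous stream.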
for (int i = 0; i < CONCAT; ++i) {
GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
r.nextBytes(b);
int len = r.nextInt(BUFLEN);
int off = r.nextInt(BUFLEN - len);
chkbuf.write(b, off, len);
gzout.write(b, off, len);
gzout.close();
}
final byte[] chk = Arrays.copyOf(chkbuf.getData(), chkbuf.getLength());
CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
Decompressor decom = codec.createDecompressor();
assertNotNull(decom);
assertEquals(decomClass, decom.getClass());
DataInputBuffer gzbuf = new DataInputBuffer();
gzbuf.reset(dflbuf.getData(), dflbuf.getLength());
InputStream gzin = codec.createInputStream(gzbuf, decom);
dflbuf.reset();
IOUtils.copyBytes(gzin, dflbuf, 4096);
final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
assertArrayEquals(chk, dflchk);
}
@Test
public void testBuiltInGzipConcat() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
    gzipConcatTest(conf, BuiltInGzipDecompressor.class);
}
@Test
public void testNativeGzipConcat() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
if (!ZlibFactory.isNativeZlibLoaded(conf)) {
LOG.warn("skipped: native libs not loaded");
return;
}
    gzipConcatTest(conf, GzipCodec.GzipZlibDecompressor.class);
}
@Test
public void testGzipCodecRead() throws IOException {
// Create a gzipped file and try to read it back, using a decompressor
// from the CodecPool.
// Don't use native libs for this test.
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
assertFalse("ZlibFactory is using native libs against request",
ZlibFactory.isNativeZlibLoaded(conf));
// Ensure that the CodecPool has a BuiltInZlibInflater in it.
Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
assertNotNull("zlibDecompressor is null!", zlibDecompressor);
assertTrue("ZlibFactory returned unexpected inflator",
zlibDecompressor instanceof BuiltInZlibInflater);
CodecPool.returnDecompressor(zlibDecompressor);
// Now create a GZip text file.
String tmpDir = System.getProperty("test.build.data", "/tmp/");
Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
new GZIPOutputStream(new FileOutputStream(f.toString()))));
final String msg = "This is the message in the file!";
bw.write(msg);
bw.close();
// Now read it back, using the CodecPool to establish the
// decompressor to use.
CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
CompressionCodec codec = ccf.getCodec(f);
Decompressor decompressor = CodecPool.getDecompressor(codec);
FileSystem fs = FileSystem.getLocal(conf);
InputStream is = fs.open(f);
is = codec.createInputStream(is, decompressor);
BufferedReader br = new BufferedReader(new InputStreamReader(is));
String line = br.readLine();
assertEquals("Didn't get the same message back!", msg, line);
br.close();
}
private void verifyGzipFile(String filename, String msg) throws IOException {
BufferedReader r = new BufferedReader(new InputStreamReader(
new GZIPInputStream(new FileInputStream(filename))));
try {
String line = r.readLine();
assertEquals("Got invalid line back from " + filename, msg, line);
} finally {
r.close();
new File(filename).delete();
}
}
@Test
public void testGzipLongOverflow() throws IOException {
LOG.info("testGzipLongOverflow");
// Don't use native libs for this test.
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
assertFalse("ZlibFactory is using native libs against request",
ZlibFactory.isNativeZlibLoaded(conf));
// Ensure that the CodecPool has a BuiltInZlibInflater in it.
Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
assertNotNull("zlibDecompressor is null!", zlibDecompressor);
assertTrue("ZlibFactory returned unexpected inflator",
zlibDecompressor instanceof BuiltInZlibInflater);
CodecPool.returnDecompressor(zlibDecompressor);
// Now create a GZip text file.
String tmpDir = System.getProperty("test.build.data", "/tmp/");
Path f = new Path(new Path(tmpDir), "testGzipLongOverflow.bin.gz");
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
new GZIPOutputStream(new FileOutputStream(f.toString()))));
final int NBUF = 1024 * 4 + 1;
final char[] buf = new char[1024 * 1024];
for (int i = 0; i < buf.length; i++) buf[i] = '\0';
for (int i = 0; i < NBUF; i++) {
bw.write(buf);
}
bw.close();
// Now read it back, using the CodecPool to establish the
// decompressor to use.
CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
CompressionCodec codec = ccf.getCodec(f);
Decompressor decompressor = CodecPool.getDecompressor(codec);
FileSystem fs = FileSystem.getLocal(conf);
InputStream is = fs.open(f);
is = codec.createInputStream(is, decompressor);
BufferedReader br = new BufferedReader(new InputStreamReader(is));
for (int j = 0; j < NBUF; j++) {
int n = br.read(buf);
assertEquals("got wrong read length!", n, buf.length);
for (int i = 0; i < buf.length; i++)
assertEquals("got wrong byte!", buf[i], '\0');
}
br.close();
}
private void testGzipCodecWrite(boolean useNative) throws IOException {
// Create a gzipped file using a compressor from the CodecPool,
// and try to read it back via the regular GZIPInputStream.
// Use native libs per the parameter
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, useNative);
if (useNative) {
if (!ZlibFactory.isNativeZlibLoaded(conf)) {
LOG.warn("testGzipCodecWrite skipped: native libs not loaded");
return;
}
} else {
assertFalse("ZlibFactory is using native libs against request",
ZlibFactory.isNativeZlibLoaded(conf));
}
// Ensure that the CodecPool has a BuiltInZlibDeflater in it.
Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
assertNotNull("zlibCompressor is null!", zlibCompressor);
assertTrue("ZlibFactory returned unexpected deflator",
useNative ? zlibCompressor instanceof ZlibCompressor
: zlibCompressor instanceof BuiltInZlibDeflater);
CodecPool.returnCompressor(zlibCompressor);
// Create a GZIP text file via the Compressor interface.
CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
assertTrue("Codec for .gz file is not GzipCodec",
codec instanceof GzipCodec);
final String msg = "This is the message we are going to compress.";
final String tmpDir = System.getProperty("test.build.data", "/tmp/");
final String fileName = new Path(new Path(tmpDir),
"testGzipCodecWrite.txt.gz").toString();
BufferedWriter w = null;
Compressor gzipCompressor = CodecPool.getCompressor(codec);
if (null != gzipCompressor) {
// If it gives us back a Compressor, we should be able to use this
// to write files we can then read back with Java's gzip tools.
OutputStream os = new CompressorStream(new FileOutputStream(fileName),
gzipCompressor);
w = new BufferedWriter(new OutputStreamWriter(os));
w.write(msg);
w.close();
CodecPool.returnCompressor(gzipCompressor);
verifyGzipFile(fileName, msg);
}
// Create a gzip text file via codec.getOutputStream().
w = new BufferedWriter(new OutputStreamWriter(
codec.createOutputStream(new FileOutputStream(fileName))));
w.write(msg);
w.close();
verifyGzipFile(fileName, msg);
}
@Test
public void testGzipCodecWriteJava() throws IOException {
testGzipCodecWrite(false);
}
@Test
public void testGzipNativeCodecWrite() throws IOException {
testGzipCodecWrite(true);
}
@Test
public void testCodecPoolAndGzipDecompressor() {
// BuiltInZlibInflater should not be used as the GzipCodec decompressor.
// Assert that this is the case.
// Don't use native libs for this test.
Configuration conf = new Configuration();
conf.setBoolean("hadoop.native.lib", false);
assertFalse("ZlibFactory is using native libs against request",
ZlibFactory.isNativeZlibLoaded(conf));
// This should give us a BuiltInZlibInflater.
Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
assertNotNull("zlibDecompressor is null!", zlibDecompressor);
assertTrue("ZlibFactory returned unexpected inflator",
zlibDecompressor instanceof BuiltInZlibInflater);
    // Without native zlib, GzipCodec's createOutputStream() just wraps the
    // existing stream in a java.util.zip.GZIPOutputStream.
CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
assertTrue("Codec for .gz file is not GzipCodec",
codec instanceof GzipCodec);
// make sure we don't get a null decompressor
Decompressor codecDecompressor = codec.createDecompressor();
if (null == codecDecompressor) {
fail("Got null codecDecompressor");
}
// Asking the CodecPool for a decompressor for GzipCodec
// should not return null
Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
if (null == poolDecompressor) {
fail("Got null poolDecompressor");
}
// return a couple decompressors
CodecPool.returnDecompressor(zlibDecompressor);
CodecPool.returnDecompressor(poolDecompressor);
Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
if (poolDecompressor == poolDecompressor2) {
fail("Reused java gzip decompressor in pool");
}
} else {
if (poolDecompressor != poolDecompressor2) {
fail("Did not reuse native gzip decompressor in pool");
}
}
}
}
| 37,384 | 38.146597 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.log4j.Logger;
import org.junit.Assert;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import static org.junit.Assert.*;
public class CompressDecompressTester<T extends Compressor, E extends Decompressor> {
private static final Logger logger = Logger
.getLogger(CompressDecompressTester.class);
private final byte[] originalRawData;
private ImmutableList<TesterPair<T, E>> pairs = ImmutableList.of();
private ImmutableList.Builder<TesterPair<T, E>> builder = ImmutableList.builder();
  private ImmutableSet<CompressionTestStrategy> strategies = ImmutableSet.of();
private PreAssertionTester<T, E> assertionDelegate;
public CompressDecompressTester(byte[] originalRawData) {
this.originalRawData = Arrays.copyOf(originalRawData,
originalRawData.length);
this.assertionDelegate = new PreAssertionTester<T, E>() {
@Override
public ImmutableList<TesterPair<T, E>> filterOnAssumeWhat(
ImmutableList<TesterPair<T, E>> pairs) {
ImmutableList.Builder<TesterPair<T, E>> builder = ImmutableList
.builder();
for (TesterPair<T, E> pair : pairs) {
if (isAvailable(pair))
builder.add(pair);
}
return builder.build();
}
};
}
private static boolean isNativeSnappyLoadable() {
boolean snappyAvailable = false;
boolean loaded = false;
try {
System.loadLibrary("snappy");
logger.warn("Snappy native library is available");
snappyAvailable = true;
boolean hadoopNativeAvailable = NativeCodeLoader.isNativeCodeLoaded();
loaded = snappyAvailable && hadoopNativeAvailable;
if (loaded) {
logger.info("Snappy native library loaded");
} else {
logger.warn("Snappy native library not loaded");
}
} catch (Throwable t) {
logger.warn("Failed to load snappy: ", t);
return false;
}
return loaded;
}
public static <T extends Compressor, E extends Decompressor> CompressDecompressTester<T, E> of(
byte[] rawData) {
return new CompressDecompressTester<T, E>(rawData);
}
public CompressDecompressTester<T, E> withCompressDecompressPair(
T compressor, E decompressor) {
addPair(
compressor,
decompressor,
Joiner.on("_").join(compressor.getClass().getCanonicalName(),
decompressor.getClass().getCanonicalName()));
return this;
}
public CompressDecompressTester<T, E> withTestCases(
      ImmutableSet<CompressionTestStrategy> strategies) {
    this.strategies = ImmutableSet.copyOf(strategies);
return this;
}
private void addPair(T compressor, E decompressor, String name) {
builder.add(new TesterPair<T, E>(name, compressor, decompressor));
}
public void test() throws InstantiationException, IllegalAccessException {
pairs = builder.build();
pairs = assertionDelegate.filterOnAssumeWhat(pairs);
for (TesterPair<T, E> pair : pairs) {
      for (CompressionTestStrategy strategy : strategies) {
strategy.getTesterStrategy().assertCompression(pair.getName(),
pair.getCompressor(), pair.getDecompressor(),
Arrays.copyOf(originalRawData, originalRawData.length));
}
}
endAll(pairs);
}
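  // Hedged usage sketch (added for illustration; the zlib pair and the
  // strategy set are arbitrary choices, not mandated by this class): a
  // caller builds the tester fluently, registers compressor/decompressor
  // pairs, picks strategies, then runs test().
  public static void exampleUsage(byte[] rawData)
      throws InstantiationException, IllegalAccessException {
    CompressDecompressTester
        .of(rawData)
        .withCompressDecompressPair(new BuiltInZlibDeflater(),
            new org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater())
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS))
        .test();
  }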
private void endAll(ImmutableList<TesterPair<T, E>> pairs) {
for (TesterPair<T, E> pair : pairs)
pair.end();
}
interface PreAssertionTester<T extends Compressor, E extends Decompressor> {
ImmutableList<TesterPair<T, E>> filterOnAssumeWhat(
ImmutableList<TesterPair<T, E>> pairs);
}
public enum CompressionTestStrategy {
COMPRESS_DECOMPRESS_ERRORS(new TesterCompressionStrategy() {
private final Joiner joiner = Joiner.on("- ");
@Override
public void assertCompression(String name, Compressor compressor,
Decompressor decompressor, byte[] rawData) {
assertTrue(checkSetInputNullPointerException(compressor));
assertTrue(checkSetInputNullPointerException(decompressor));
assertTrue(checkCompressArrayIndexOutOfBoundsException(compressor,
rawData));
assertTrue(checkCompressArrayIndexOutOfBoundsException(decompressor,
rawData));
assertTrue(checkCompressNullPointerException(compressor, rawData));
assertTrue(checkCompressNullPointerException(decompressor, rawData));
assertTrue(checkSetInputArrayIndexOutOfBoundsException(compressor));
assertTrue(checkSetInputArrayIndexOutOfBoundsException(decompressor));
}
private boolean checkSetInputNullPointerException(Compressor compressor) {
try {
compressor.setInput(null, 0, 1);
} catch (NullPointerException npe) {
return true;
} catch (Exception ex) {
logger.error(joiner.join(compressor.getClass().getCanonicalName(),
"checkSetInputNullPointerException error !!!"));
}
return false;
}
private boolean checkCompressNullPointerException(Compressor compressor,
byte[] rawData) {
try {
compressor.setInput(rawData, 0, rawData.length);
compressor.compress(null, 0, 1);
} catch (NullPointerException npe) {
return true;
} catch (Exception ex) {
logger.error(joiner.join(compressor.getClass().getCanonicalName(),
"checkCompressNullPointerException error !!!"));
}
return false;
}
private boolean checkCompressNullPointerException(
Decompressor decompressor, byte[] rawData) {
try {
decompressor.setInput(rawData, 0, rawData.length);
decompressor.decompress(null, 0, 1);
} catch (NullPointerException npe) {
return true;
} catch (Exception ex) {
logger.error(joiner.join(decompressor.getClass().getCanonicalName(),
"checkCompressNullPointerException error !!!"));
}
return false;
}
private boolean checkSetInputNullPointerException(
Decompressor decompressor) {
try {
decompressor.setInput(null, 0, 1);
} catch (NullPointerException npe) {
return true;
} catch (Exception ex) {
logger.error(joiner.join(decompressor.getClass().getCanonicalName(),
"checkSetInputNullPointerException error !!!"));
}
return false;
}
private boolean checkSetInputArrayIndexOutOfBoundsException(
Compressor compressor) {
try {
compressor.setInput(new byte[] { (byte) 0 }, 0, -1);
} catch (ArrayIndexOutOfBoundsException e) {
return true;
} catch (Exception e) {
logger.error(joiner.join(compressor.getClass().getCanonicalName(),
"checkSetInputArrayIndexOutOfBoundsException error !!!"));
}
return false;
}
private boolean checkCompressArrayIndexOutOfBoundsException(
Compressor compressor, byte[] rawData) {
try {
compressor.setInput(rawData, 0, rawData.length);
compressor.compress(new byte[rawData.length], 0, -1);
} catch (ArrayIndexOutOfBoundsException e) {
return true;
} catch (Exception e) {
logger.error(joiner.join(compressor.getClass().getCanonicalName(),
"checkCompressArrayIndexOutOfBoundsException error !!!"));
}
return false;
}
private boolean checkCompressArrayIndexOutOfBoundsException(
Decompressor decompressor, byte[] rawData) {
try {
decompressor.setInput(rawData, 0, rawData.length);
decompressor.decompress(new byte[rawData.length], 0, -1);
} catch (ArrayIndexOutOfBoundsException e) {
return true;
} catch (Exception e) {
logger.error(joiner.join(decompressor.getClass().getCanonicalName(),
"checkCompressArrayIndexOutOfBoundsException error !!!"));
}
return false;
}
private boolean checkSetInputArrayIndexOutOfBoundsException(
Decompressor decompressor) {
try {
decompressor.setInput(new byte[] { (byte) 0 }, 0, -1);
} catch (ArrayIndexOutOfBoundsException e) {
return true;
} catch (Exception e) {
logger.error(joiner.join(decompressor.getClass().getCanonicalName(),
"checkNullPointerException error !!!"));
}
return false;
}
}),
COMPRESS_DECOMPRESS_SINGLE_BLOCK(new TesterCompressionStrategy() {
final Joiner joiner = Joiner.on("- ");
@Override
public void assertCompression(String name, Compressor compressor,
Decompressor decompressor, byte[] rawData) {
int cSize = 0;
int decompressedSize = 0;
byte[] compressedResult = new byte[rawData.length];
byte[] decompressedBytes = new byte[rawData.length];
try {
assertTrue(
joiner.join(name, "compressor.needsInput before error !!!"),
compressor.needsInput());
assertTrue(
joiner.join(name, "compressor.getBytesWritten before error !!!"),
compressor.getBytesWritten() == 0);
compressor.setInput(rawData, 0, rawData.length);
compressor.finish();
while (!compressor.finished()) {
cSize += compressor.compress(compressedResult, 0,
compressedResult.length);
}
compressor.reset();
assertTrue(
joiner.join(name, "decompressor.needsInput() before error !!!"),
decompressor.needsInput());
decompressor.setInput(compressedResult, 0, cSize);
assertFalse(
joiner.join(name, "decompressor.needsInput() after error !!!"),
decompressor.needsInput());
while (!decompressor.finished()) {
decompressedSize = decompressor.decompress(decompressedBytes, 0,
decompressedBytes.length);
}
decompressor.reset();
assertTrue(joiner.join(name, " byte size not equals error !!!"),
decompressedSize == rawData.length);
assertArrayEquals(
joiner.join(name, " byte arrays not equals error !!!"), rawData,
decompressedBytes);
} catch (Exception ex) {
fail(joiner.join(name, ex.getMessage()));
}
}
}),
COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM(new TesterCompressionStrategy() {
final Joiner joiner = Joiner.on("- ");
final ImmutableMap<Class<? extends Compressor>, Integer> emptySize = ImmutableMap
.of(Lz4Compressor.class, 4, ZlibCompressor.class, 16,
SnappyCompressor.class, 4, BuiltInZlibDeflater.class, 16);
@Override
void assertCompression(String name, Compressor compressor,
Decompressor decompressor, byte[] originalRawData) {
byte[] buf = null;
ByteArrayInputStream bytesIn = null;
BlockDecompressorStream blockDecompressorStream = null;
ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
// close without write
try {
compressor.reset();
BlockCompressorStream blockCompressorStream = new BlockCompressorStream(
bytesOut, compressor, 1024, 0);
blockCompressorStream.close();
// check compressed output
buf = bytesOut.toByteArray();
int emSize = emptySize.get(compressor.getClass());
Assert.assertEquals(
joiner.join(name, "empty stream compressed output size != "
+ emSize), emSize, buf.length);
// use compressed output as input for decompression
bytesIn = new ByteArrayInputStream(buf);
// create decompression stream
blockDecompressorStream = new BlockDecompressorStream(bytesIn,
decompressor, 1024);
// no byte is available because stream was closed
assertEquals(joiner.join(name, " return value is not -1"), -1,
blockDecompressorStream.read());
} catch (IOException e) {
fail(joiner.join(name, e.getMessage()));
} finally {
if (blockDecompressorStream != null)
try {
            bytesOut.close();
            blockDecompressorStream.close();
            bytesIn.close();
} catch (IOException e) {
}
}
}
}),
COMPRESS_DECOMPRESS_BLOCK(new TesterCompressionStrategy() {
private final Joiner joiner = Joiner.on("- ");
private static final int BLOCK_SIZE = 512;
private final byte[] operationBlock = new byte[BLOCK_SIZE];
      // Use default of 512 as bufferSize and compressionOverhead of
      // (1% of bufferSize + 12 bytes) = 17 bytes (zlib algorithm).
private static final int overheadSpace = BLOCK_SIZE / 100 + 12;
@Override
public void assertCompression(String name, Compressor compressor,
Decompressor decompressor, byte[] originalRawData) {
int off = 0;
int len = originalRawData.length;
int maxSize = BLOCK_SIZE - overheadSpace;
int compresSize = 0;
List<Integer> blockLabels = new ArrayList<Integer>();
ByteArrayOutputStream compressedOut = new ByteArrayOutputStream();
ByteArrayOutputStream decompressOut = new ByteArrayOutputStream();
try {
if (originalRawData.length > maxSize) {
do {
int bufLen = Math.min(len, maxSize);
compressor.setInput(originalRawData, off, bufLen);
compressor.finish();
while (!compressor.finished()) {
compresSize = compressor.compress(operationBlock, 0,
operationBlock.length);
compressedOut.write(operationBlock, 0, compresSize);
blockLabels.add(compresSize);
}
compressor.reset();
off += bufLen;
len -= bufLen;
} while (len > 0);
}
off = 0;
// compressed bytes
byte[] compressedBytes = compressedOut.toByteArray();
for (Integer step : blockLabels) {
decompressor.setInput(compressedBytes, off, step);
while (!decompressor.finished()) {
int dSize = decompressor.decompress(operationBlock, 0,
operationBlock.length);
decompressOut.write(operationBlock, 0, dSize);
}
decompressor.reset();
off = off + step;
}
assertArrayEquals(
joiner.join(name, "byte arrays not equals error !!!"),
originalRawData, decompressOut.toByteArray());
} catch (Exception ex) {
fail(joiner.join(name, ex.getMessage()));
} finally {
try {
compressedOut.close();
} catch (IOException e) {
}
try {
decompressOut.close();
} catch (IOException e) {
}
}
}
});
private final TesterCompressionStrategy testerStrategy;
CompressionTestStrategy(TesterCompressionStrategy testStrategy) {
this.testerStrategy = testStrategy;
}
public TesterCompressionStrategy getTesterStrategy() {
return testerStrategy;
}
}
static final class TesterPair<T extends Compressor, E extends Decompressor> {
private final T compressor;
private final E decompressor;
private final String name;
TesterPair(String name, T compressor, E decompressor) {
this.compressor = compressor;
this.decompressor = decompressor;
this.name = name;
}
public void end() {
Configuration cfg = new Configuration();
compressor.reinit(cfg);
compressor.end();
decompressor.end();
}
public T getCompressor() {
return compressor;
}
public E getDecompressor() {
return decompressor;
}
public String getName() {
return name;
}
}
/**
   * Checks whether the native library backing a pair's compressor is
   * loaded, so unavailable codec pairs are filtered out before testing.
*/
private static <T extends Compressor, E extends Decompressor> boolean isAvailable(TesterPair<T, E> pair) {
Compressor compressor = pair.compressor;
if (compressor.getClass().isAssignableFrom(Lz4Compressor.class)
&& (NativeCodeLoader.isNativeCodeLoaded()))
return true;
else if (compressor.getClass().isAssignableFrom(BuiltInZlibDeflater.class)
&& NativeCodeLoader.isNativeCodeLoaded())
return true;
else if (compressor.getClass().isAssignableFrom(ZlibCompressor.class)) {
return ZlibFactory.isNativeZlibLoaded(new Configuration());
}
else if (compressor.getClass().isAssignableFrom(SnappyCompressor.class)
&& isNativeSnappyLoadable())
return true;
return false;
}
abstract static class TesterCompressionStrategy {
protected final Logger logger = Logger.getLogger(getClass());
abstract void assertCompression(String name, Compressor compressor,
Decompressor decompressor, byte[] originalRawData);
}
}
| 18,995 | 35.182857 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestBlockDecompressorStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestBlockDecompressorStream {
private byte[] buf;
private ByteArrayInputStream bytesIn;
private ByteArrayOutputStream bytesOut;
@Test
public void testRead1() throws IOException {
testRead(0);
}
@Test
public void testRead2() throws IOException {
// Test eof after getting non-zero block size info
testRead(4);
}
private void testRead(int bufLen) throws IOException {
// compress empty stream
bytesOut = new ByteArrayOutputStream();
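    // When bufLen > 0, the stream begins with a big-endian block-size
    // header (1024) and carries no payload behind it; the decompressor
    // stream must still surface a clean EOF (-1) after reading the
    // non-zero size (see testRead2 above).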
if (bufLen > 0) {
bytesOut.write(ByteBuffer.allocate(bufLen).putInt(1024).array(), 0,
bufLen);
}
BlockCompressorStream blockCompressorStream =
new BlockCompressorStream(bytesOut,
new FakeCompressor(), 1024, 0);
// close without any write
blockCompressorStream.close();
// check compressed output
buf = bytesOut.toByteArray();
assertEquals("empty file compressed output size is not " + (bufLen + 4),
bufLen + 4, buf.length);
// use compressed output as input for decompression
bytesIn = new ByteArrayInputStream(buf);
// get decompression stream
BlockDecompressorStream blockDecompressorStream =
new BlockDecompressorStream(bytesIn, new FakeDecompressor(), 1024);
try {
assertEquals("return value is not -1",
-1 , blockDecompressorStream.read());
} catch (IOException e) {
fail("unexpected IOException : " + e);
} finally {
blockDecompressorStream.close();
}
}
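  // Hedged round-trip sketch (extra illustration, not an original case):
  // because FakeCompressor and FakeDecompressor below pass bytes through
  // unchanged, a BlockCompressorStream followed by a BlockDecompressorStream
  // must hand back exactly the bytes written, framed by block-length headers.
  @Test
  public void testIdentityRoundTrip() throws IOException {
    byte[] msg = "hello block stream".getBytes();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    BlockCompressorStream cout =
        new BlockCompressorStream(out, new FakeCompressor(), 1024, 0);
    cout.write(msg);
    cout.close();
    BlockDecompressorStream din = new BlockDecompressorStream(
        new ByteArrayInputStream(out.toByteArray()),
        new FakeDecompressor(), 1024);
    byte[] result = new byte[msg.length];
    assertEquals("short read", msg.length, din.read(result, 0, msg.length));
    assertEquals(new String(msg), new String(result));
    din.close();
  }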
}
/**
* A fake compressor
* Its input and output is the same.
*/
class FakeCompressor implements Compressor{
private boolean finish;
private boolean finished;
int nread;
int nwrite;
byte [] userBuf;
int userBufOff;
int userBufLen;
@Override
public int compress(byte[] b, int off, int len) throws IOException {
int n = Math.min(len, userBufLen);
if (userBuf != null && b != null)
System.arraycopy(userBuf, userBufOff, b, off, n);
userBufOff += n;
userBufLen -= n;
nwrite += n;
if (finish && userBufLen <= 0)
finished = true;
return n;
}
@Override
public void end() {
// nop
}
@Override
public void finish() {
finish = true;
}
@Override
public boolean finished() {
return finished;
}
@Override
public long getBytesRead() {
return nread;
}
@Override
public long getBytesWritten() {
return nwrite;
}
@Override
public boolean needsInput() {
return userBufLen <= 0;
}
@Override
public void reset() {
finish = false;
finished = false;
nread = 0;
nwrite = 0;
userBuf = null;
userBufOff = 0;
userBufLen = 0;
}
@Override
public void setDictionary(byte[] b, int off, int len) {
// nop
}
@Override
public void setInput(byte[] b, int off, int len) {
nread += len;
userBuf = b;
userBufOff = off;
userBufLen = len;
}
@Override
public void reinit(Configuration conf) {
// nop
}
}
/**
* A fake decompressor, just like FakeCompressor
* Its input and output is the same.
*/
class FakeDecompressor implements Decompressor {
private boolean finish;
private boolean finished;
int nread;
int nwrite;
byte [] userBuf;
int userBufOff;
int userBufLen;
@Override
public int decompress(byte[] b, int off, int len) throws IOException {
int n = Math.min(len, userBufLen);
if (userBuf != null && b != null)
System.arraycopy(userBuf, userBufOff, b, off, n);
userBufOff += n;
userBufLen -= n;
nwrite += n;
if (finish && userBufLen <= 0)
finished = true;
return n;
}
@Override
public void end() {
// nop
}
@Override
public boolean finished() {
return finished;
}
@Override
public boolean needsDictionary() {
return false;
}
@Override
public boolean needsInput() {
return userBufLen <= 0;
}
@Override
public void reset() {
finish = false;
finished = false;
nread = 0;
nwrite = 0;
userBuf = null;
userBufOff = 0;
userBufLen = 0;
}
@Override
public void setDictionary(byte[] b, int off, int len) {
// nop
}
@Override
public void setInput(byte[] b, int off, int len) {
nread += len;
userBuf = b;
userBufOff = off;
userBufLen = len;
}
@Override
public int getRemaining() {
return 0;
}
}
| 5,533 | 20.873518 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.snappy;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.lang.reflect.Array;
import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.BlockCompressorStream;
import org.apache.hadoop.io.compress.BlockDecompressorStream;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assume.*;
public class TestSnappyCompressorDecompressor {
@Before
public void before() {
assumeTrue(SnappyCodec.isNativeCodeLoaded());
}
@Test
public void testSnappyCompressorSetInputNullPointerException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
compressor.setInput(null, 0, 10);
fail("testSnappyCompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
      // expected
} catch (Exception ex) {
fail("testSnappyCompressorSetInputNullPointerException ex error !!!");
}
}
@Test
public void testSnappyDecompressorSetInputNullPointerException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
decompressor.setInput(null, 0, 10);
fail("testSnappyDecompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorSetInputNullPointerException ex error !!!");
}
}
@Test
public void testSnappyCompressorSetInputAIOBException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
compressor.setInput(new byte[] {}, -5, 10);
fail("testSnappyCompressorSetInputAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception ex) {
fail("testSnappyCompressorSetInputAIOBException ex error !!!");
}
}
@Test
public void testSnappyDecompressorSetInputAIOUBException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
decompressor.setInput(new byte[] {}, -5, 10);
fail("testSnappyDecompressorSetInputAIOUBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorSetInputAIOUBException ex error !!!");
}
}
@Test
public void testSnappyCompressorCompressNullPointerException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(null, 0, 0);
fail("testSnappyCompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testSnappyCompressorCompressNullPointerException ex error !!!");
}
}
@Test
public void testSnappyDecompressorCompressNullPointerException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(null, 0, 0);
fail("testSnappyDecompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorCompressNullPointerException ex error !!!");
}
}
@Test
public void testSnappyCompressorCompressAIOBException() {
try {
SnappyCompressor compressor = new SnappyCompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(new byte[] {}, 0, -1);
fail("testSnappyCompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testSnappyCompressorCompressAIOBException ex error !!!");
}
}
@Test
public void testSnappyDecompressorCompressAIOBException() {
try {
SnappyDecompressor decompressor = new SnappyDecompressor();
byte[] bytes = BytesGenerator.get(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(new byte[] {}, 0, -1);
fail("testSnappyDecompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testSnappyDecompressorCompressAIOBException ex error !!!");
}
}
@Test
public void testSnappyCompressDecompress() {
int BYTE_SIZE = 1024 * 54;
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
SnappyCompressor compressor = new SnappyCompressor();
try {
compressor.setInput(bytes, 0, bytes.length);
assertTrue("SnappyCompressDecompress getBytesRead error !!!",
compressor.getBytesRead() > 0);
assertTrue(
"SnappyCompressDecompress getBytesWritten before compress error !!!",
compressor.getBytesWritten() == 0);
byte[] compressed = new byte[BYTE_SIZE];
int cSize = compressor.compress(compressed, 0, compressed.length);
assertTrue(
"SnappyCompressDecompress getBytesWritten after compress error !!!",
compressor.getBytesWritten() > 0);
SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
      // feed the decompressor only the cSize compressed bytes actually produced
decompressor.setInput(compressed, 0, cSize);
byte[] decompressed = new byte[BYTE_SIZE];
decompressor.decompress(decompressed, 0, decompressed.length);
assertTrue("testSnappyCompressDecompress finished error !!!",
decompressor.finished());
Assert.assertArrayEquals(bytes, decompressed);
compressor.reset();
decompressor.reset();
assertTrue("decompressor getRemaining error !!!",
decompressor.getRemaining() == 0);
} catch (Exception e) {
fail("testSnappyCompressDecompress ex error!!!");
}
}
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
ByteArrayInputStream bytesIn = null;
ByteArrayOutputStream bytesOut = null;
byte[] buf = null;
BlockDecompressorStream blockDecompressorStream = null;
try {
// compress empty stream
bytesOut = new ByteArrayOutputStream();
BlockCompressorStream blockCompressorStream = new BlockCompressorStream(
bytesOut, new SnappyCompressor(), 1024, 0);
// close without write
blockCompressorStream.close();
// check compressed output
buf = bytesOut.toByteArray();
assertEquals("empty stream compressed output size != 4", 4, buf.length);
// use compressed output as input for decompression
bytesIn = new ByteArrayInputStream(buf);
// create decompression stream
blockDecompressorStream = new BlockDecompressorStream(bytesIn,
new SnappyDecompressor(), 1024);
// no byte is available because stream was closed
assertEquals("return value is not -1", -1, blockDecompressorStream.read());
} catch (Exception e) {
fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
+ e.getMessage());
} finally {
if (blockDecompressorStream != null)
try {
bytesIn.close();
bytesOut.close();
blockDecompressorStream.close();
} catch (IOException e) {
}
}
}
@Test
public void testSnappyBlockCompression() {
int BYTE_SIZE = 1024 * 50;
int BLOCK_SIZE = 512;
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] block = new byte[BLOCK_SIZE];
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
try {
// Use default of 512 as bufferSize and compressionOverhead of
// (1% of bufferSize + 12 bytes) = 18 bytes (zlib algorithm).
SnappyCompressor compressor = new SnappyCompressor();
int off = 0;
int len = BYTE_SIZE;
int maxSize = BLOCK_SIZE - 18;
if (BYTE_SIZE > maxSize) {
do {
int bufLen = Math.min(len, maxSize);
compressor.setInput(bytes, off, bufLen);
compressor.finish();
while (!compressor.finished()) {
compressor.compress(block, 0, block.length);
out.write(block);
}
compressor.reset();
off += bufLen;
len -= bufLen;
} while (len > 0);
}
assertTrue("testSnappyBlockCompression error !!!",
out.toByteArray().length > 0);
} catch (Exception ex) {
fail("testSnappyBlockCompression ex error !!!");
}
}
private void compressDecompressLoop(int rawDataSize) throws IOException {
byte[] rawData = BytesGenerator.get(rawDataSize);
byte[] compressedResult = new byte[rawDataSize+20];
int directBufferSize = Math.max(rawDataSize*2, 64*1024);
SnappyCompressor compressor = new SnappyCompressor(directBufferSize);
compressor.setInput(rawData, 0, rawDataSize);
int compressedSize = compressor.compress(compressedResult, 0, compressedResult.length);
SnappyDirectDecompressor decompressor = new SnappyDirectDecompressor();
ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedSize);
ByteBuffer outBuf = ByteBuffer.allocateDirect(rawDataSize);
inBuf.put(compressedResult, 0, compressedSize);
inBuf.flip();
ByteBuffer expected = ByteBuffer.wrap(rawData);
outBuf.clear();
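    // Drain protocol for the direct decompressor: keep calling decompress()
    // until finished(); whenever outBuf fills, flip it, compare it against
    // the expected raw bytes, and clear it for the next round.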
while(!decompressor.finished()) {
decompressor.decompress(inBuf, outBuf);
if (outBuf.remaining() == 0) {
outBuf.flip();
while (outBuf.remaining() > 0) {
assertEquals(expected.get(), outBuf.get());
}
outBuf.clear();
}
}
outBuf.flip();
while (outBuf.remaining() > 0) {
assertEquals(expected.get(), outBuf.get());
}
outBuf.clear();
assertEquals(0, expected.remaining());
}
@Test
public void testSnappyDirectBlockCompression() {
int[] size = { 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };
assumeTrue(SnappyCodec.isNativeCodeLoaded());
try {
for (int i = 0; i < size.length; i++) {
compressDecompressLoop(size[i]);
}
} catch (IOException ex) {
fail("testSnappyDirectBlockCompression ex !!!" + ex);
}
}
@Test
  public void testSnappyCompressorDecompressorLogicWithCompressionStreams() {
int BYTE_SIZE = 1024 * 100;
byte[] bytes = BytesGenerator.get(BYTE_SIZE);
int bufferSize = 262144;
int compressionOverhead = (bufferSize / 6) + 32;
DataOutputStream deflateOut = null;
DataInputStream inflateIn = null;
try {
DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
CompressionOutputStream deflateFilter = new BlockCompressorStream(
compressedDataBuffer, new SnappyCompressor(bufferSize), bufferSize,
compressionOverhead);
deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
deflateOut.write(bytes, 0, bytes.length);
deflateOut.flush();
deflateFilter.finish();
DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
compressedDataBuffer.getLength());
CompressionInputStream inflateFilter = new BlockDecompressorStream(
deCompressedDataBuffer, new SnappyDecompressor(bufferSize),
bufferSize);
inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
byte[] result = new byte[BYTE_SIZE];
      // readFully: a single read() may legally return fewer than BYTE_SIZE bytes
      inflateIn.readFully(result);
      Assert.assertArrayEquals(
          "original array does not match the compressed/decompressed result",
          result, bytes);
} catch (IOException e) {
fail("testSnappyCompressorDecopressorLogicWithCompressionStreams ex error !!!");
} finally {
try {
if (deflateOut != null)
deflateOut.close();
if (inflateIn != null)
inflateIn.close();
      } catch (Exception e) {
        // ignore close failures during cleanup
      }
}
}
static final class BytesGenerator {
private BytesGenerator() {
}
private static final byte[] CACHE = new byte[] { 0x0, 0x1, 0x2, 0x3, 0x4,
0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF };
    private static final Random rnd = new Random(12345L);
public static byte[] get(int size) {
      byte[] array = new byte[size];
      for (int i = 0; i < size; i++)
        // nextInt's bound is exclusive, so CACHE.length covers the whole table
        array[i] = CACHE[rnd.nextInt(CACHE.length)];
return array;
}
}
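  // The test below runs ten concurrent compress/decompress round trips over
  // the same code path; TestContext.waitFor is expected to rethrow any
  // failure raised on a worker thread, so a race in the native layer fails
  // the test.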
@Test
public void testSnappyCompressDecompressInMultiThreads() throws Exception {
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
    for (int i = 0; i < 10; i++) {
      ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
@Override
public void doWork() throws Exception {
testSnappyCompressDecompress();
}
});
}
ctx.startThreads();
ctx.waitFor(60000);
}
}
| 14,530 | 34.269417 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zlib/TestZlibCompressorDecompressor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Random;
import java.util.zip.DeflaterOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.compress.CompressDecompressTester;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DecompressorStream;
import org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStrategy;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
public class TestZlibCompressorDecompressor {
private static final Random random = new Random(12345L);
@Before
public void before() {
assumeTrue(ZlibFactory.isNativeZlibLoaded(new Configuration()));
}
@Test
public void testZlibCompressorDecompressor() {
try {
int SIZE = 44 * 1024;
byte[] rawData = generate(SIZE);
CompressDecompressTester.of(rawData)
.withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
.withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
.test();
} catch (Exception ex) {
fail("testCompressorDecompressor error !!!" + ex);
}
}
@Test
  public void testCompressorDecompressorWithExceedBufferLimit() {
int BYTE_SIZE = 100 * 1024;
byte[] rawData = generate(BYTE_SIZE);
try {
CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new ZlibCompressor(
                CompressionLevel.BEST_COMPRESSION,
                CompressionStrategy.DEFAULT_STRATEGY,
                ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE),
            new ZlibDecompressor(
                ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE))
.withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
.test();
} catch (Exception ex) {
fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
}
}
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
if (ZlibFactory.isNativeZlibLoaded(conf)) {
byte[] rawData;
int tryNumber = 5;
int BYTE_SIZE = 10 * 1024;
Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
rawData = generate(BYTE_SIZE);
try {
for (int i = 0; i < tryNumber; i++)
compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
(ZlibDecompressor) zlibDecompressor);
zlibCompressor.reinit(conf);
} catch (Exception ex) {
fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
}
    } else {
      fail("Native zlib was requested but has not been loaded");
    }
}
@Test
public void testZlibCompressDecompress() {
    int rawDataSize = 1024 * 64;
    byte[] rawData = generate(rawDataSize);
try {
ZlibCompressor compressor = new ZlibCompressor();
ZlibDecompressor decompressor = new ZlibDecompressor();
assertFalse("testZlibCompressDecompress finished error",
compressor.finished());
compressor.setInput(rawData, 0, rawData.length);
assertTrue("testZlibCompressDecompress getBytesRead before error",
compressor.getBytesRead() == 0);
compressor.finish();
byte[] compressedResult = new byte[rawDataSize];
int cSize = compressor.compress(compressedResult, 0, rawDataSize);
assertTrue("testZlibCompressDecompress getBytesRead ather error",
compressor.getBytesRead() == rawDataSize);
assertTrue(
"testZlibCompressDecompress compressed size no less then original size",
cSize < rawDataSize);
decompressor.setInput(compressedResult, 0, cSize);
byte[] decompressedBytes = new byte[rawDataSize];
decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
assertArrayEquals("testZlibCompressDecompress arrays not equals ",
rawData, decompressedBytes);
compressor.reset();
decompressor.reset();
} catch (IOException ex) {
fail("testZlibCompressDecompress ex !!!" + ex);
}
}
private void compressDecompressLoop(int rawDataSize) throws IOException {
    byte[] rawData = generate(rawDataSize);
    ByteArrayOutputStream baos = new ByteArrayOutputStream(rawDataSize + 12);
DeflaterOutputStream dos = new DeflaterOutputStream(baos);
dos.write(rawData);
dos.flush();
dos.close();
byte[] compressedResult = baos.toByteArray();
int compressedSize = compressedResult.length;
ZlibDirectDecompressor decompressor = new ZlibDirectDecompressor();
ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedSize);
ByteBuffer outBuf = ByteBuffer.allocateDirect(rawDataSize);
inBuf.put(compressedResult, 0, compressedSize);
inBuf.flip();
ByteBuffer expected = ByteBuffer.wrap(rawData);
outBuf.clear();
while(!decompressor.finished()) {
decompressor.decompress(inBuf, outBuf);
if (outBuf.remaining() == 0) {
outBuf.flip();
while (outBuf.remaining() > 0) {
assertEquals(expected.get(), outBuf.get());
}
outBuf.clear();
}
}
outBuf.flip();
while (outBuf.remaining() > 0) {
assertEquals(expected.get(), outBuf.get());
}
outBuf.clear();
assertEquals(0, expected.remaining());
}
@Test
public void testZlibDirectCompressDecompress() {
int[] size = { 1, 4, 16, 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };
assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
try {
for (int i = 0; i < size.length; i++) {
compressDecompressLoop(size[i]);
}
} catch (IOException ex) {
fail("testZlibDirectCompressDecompress ex !!!" + ex);
}
}
@Test
public void testZlibCompressorDecompressorSetDictionary() {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
if (ZlibFactory.isNativeZlibLoaded(conf)) {
Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
      // the helpers return booleans; assert them, otherwise the checks
      // can never fail the test
      assertTrue("setDictionary(null) should raise NPE on the compressor",
          checkSetDictionaryNullPointerException(zlibCompressor));
      assertTrue("setDictionary(null) should raise NPE on the decompressor",
          checkSetDictionaryNullPointerException(zlibDecompressor));
      assertTrue("negative length should raise AIOOBE on the decompressor",
          checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor));
      assertTrue("negative length should raise AIOOBE on the compressor",
          checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor));
    } else {
      fail("Native zlib was requested but has not been loaded");
    }
}
@Test
public void testZlibFactory() {
Configuration cfg = new Configuration();
assertTrue("testZlibFactory compression level error !!!",
CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
.getCompressionLevel(cfg));
assertTrue("testZlibFactory compression strategy error !!!",
CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
.getCompressionStrategy(cfg));
ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
assertTrue("testZlibFactory compression strategy error !!!",
CompressionLevel.BEST_COMPRESSION == ZlibFactory
.getCompressionLevel(cfg));
ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
assertTrue("testZlibFactory compression strategy error !!!",
CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
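  // Sketch (editorial addition, not part of the original suite): the factory
  // setters above store the level and strategy in the Configuration itself,
  // so a compressor obtained from the factory afterwards reflects them.
  private static Compressor configuredCompressor(Configuration conf) {
    ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_SPEED);
    ZlibFactory.setCompressionStrategy(conf, CompressionStrategy.HUFFMAN_ONLY);
    return ZlibFactory.getZlibCompressor(conf);
  }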
private boolean checkSetDictionaryNullPointerException(
Decompressor decompressor) {
try {
decompressor.setDictionary(null, 0, 1);
} catch (NullPointerException ex) {
return true;
} catch (Exception ex) {
}
return false;
}
private boolean checkSetDictionaryNullPointerException(Compressor compressor) {
try {
compressor.setDictionary(null, 0, 1);
} catch (NullPointerException ex) {
return true;
} catch (Exception ex) {
}
return false;
}
private boolean checkSetDictionaryArrayIndexOutOfBoundsException(
Compressor compressor) {
try {
compressor.setDictionary(new byte[] { (byte) 0 }, 0, -1);
} catch (ArrayIndexOutOfBoundsException e) {
return true;
} catch (Exception e) {
}
return false;
}
private boolean checkSetDictionaryArrayIndexOutOfBoundsException(
Decompressor decompressor) {
try {
decompressor.setDictionary(new byte[] { (byte) 0 }, 0, -1);
} catch (ArrayIndexOutOfBoundsException e) {
return true;
} catch (Exception e) {
}
return false;
}
private byte[] compressDecompressZlib(byte[] rawData,
ZlibCompressor zlibCompressor, ZlibDecompressor zlibDecompressor)
throws IOException {
int cSize = 0;
byte[] compressedByte = new byte[rawData.length];
byte[] decompressedRawData = new byte[rawData.length];
zlibCompressor.setInput(rawData, 0, rawData.length);
zlibCompressor.finish();
while (!zlibCompressor.finished()) {
cSize = zlibCompressor.compress(compressedByte, 0, compressedByte.length);
}
zlibCompressor.reset();
assertTrue(zlibDecompressor.getBytesWritten() == 0);
assertTrue(zlibDecompressor.getBytesRead() == 0);
assertTrue(zlibDecompressor.needsInput());
zlibDecompressor.setInput(compressedByte, 0, cSize);
assertFalse(zlibDecompressor.needsInput());
while (!zlibDecompressor.finished()) {
zlibDecompressor.decompress(decompressedRawData, 0,
decompressedRawData.length);
}
assertTrue(zlibDecompressor.getBytesWritten() == rawData.length);
assertTrue(zlibDecompressor.getBytesRead() == cSize);
zlibDecompressor.reset();
assertTrue(zlibDecompressor.getRemaining() == 0);
assertArrayEquals(
"testZlibCompressorDecompressorWithConfiguration array equals error",
rawData, decompressedRawData);
return decompressedRawData;
}
@Test
public void testBuiltInGzipDecompressorExceptions() {
    // renamed from the misspelled "decompresser"; a distinct name is kept so
    // the inner try blocks can still declare their own "decompressor"
    BuiltInGzipDecompressor gzipDecompressor = new BuiltInGzipDecompressor();
    try {
      gzipDecompressor.setInput(null, 0, 1);
    } catch (NullPointerException ex) {
      // expected
    } catch (Exception ex) {
      fail("testBuiltInGzipDecompressorExceptions npe error " + ex);
    }
    try {
      gzipDecompressor.setInput(new byte[] { 0 }, 0, -1);
    } catch (ArrayIndexOutOfBoundsException ex) {
      // expected
    } catch (Exception ex) {
      fail("testBuiltInGzipDecompressorExceptions aioob error " + ex);
    }
    assertTrue("gzipDecompressor.getBytesRead error",
        gzipDecompressor.getBytesRead() == 0);
    assertTrue("gzipDecompressor.getRemaining error",
        gzipDecompressor.getRemaining() == 0);
    gzipDecompressor.reset();
    gzipDecompressor.end();
InputStream decompStream = null;
try {
      // invalid bytes 0 and 1: the gzip magic must be 31, -117 (0x1f, 0x8b)
int buffSize = 1 * 1024;
byte buffer[] = new byte[buffSize];
Decompressor decompressor = new BuiltInGzipDecompressor();
DataInputBuffer gzbuf = new DataInputBuffer();
decompStream = new DecompressorStream(gzbuf, decompressor);
gzbuf.reset(new byte[] { 0, 0, 1, 1, 1, 1, 11, 1, 1, 1, 1 }, 11);
decompStream.read(buffer);
} catch (IOException ioex) {
// expected
} catch (Exception ex) {
fail("invalid 0 and 1 byte in gzip stream" + ex);
}
    // invalid byte 2: the compression method must be 8 (deflate)
try {
int buffSize = 1 * 1024;
byte buffer[] = new byte[buffSize];
Decompressor decompressor = new BuiltInGzipDecompressor();
DataInputBuffer gzbuf = new DataInputBuffer();
decompStream = new DecompressorStream(gzbuf, decompressor);
gzbuf.reset(new byte[] { 31, -117, 7, 1, 1, 1, 1, 11, 1, 1, 1, 1 }, 11);
decompStream.read(buffer);
} catch (IOException ioex) {
// expected
} catch (Exception ex) {
fail("invalid 2 byte in gzip stream" + ex);
}
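    // invalid byte 3 (FLG): -32 (0xE0) sets the reserved flag bits, which a
    // conforming gzip reader must reject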
try {
int buffSize = 1 * 1024;
byte buffer[] = new byte[buffSize];
Decompressor decompressor = new BuiltInGzipDecompressor();
DataInputBuffer gzbuf = new DataInputBuffer();
decompStream = new DecompressorStream(gzbuf, decompressor);
gzbuf.reset(new byte[] { 31, -117, 8, -32, 1, 1, 1, 11, 1, 1, 1, 1 }, 11);
decompStream.read(buffer);
} catch (IOException ioex) {
// expected
} catch (Exception ex) {
fail("invalid 3 byte in gzip stream" + ex);
}
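    // byte 3 (FLG) = 4 sets FEXTRA, so the decompressor expects an extra
    // field that this truncated buffer never supplies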
try {
int buffSize = 1 * 1024;
byte buffer[] = new byte[buffSize];
Decompressor decompressor = new BuiltInGzipDecompressor();
DataInputBuffer gzbuf = new DataInputBuffer();
decompStream = new DecompressorStream(gzbuf, decompressor);
gzbuf.reset(new byte[] { 31, -117, 8, 4, 1, 1, 1, 11, 1, 1, 1, 1 }, 11);
decompStream.read(buffer);
} catch (IOException ioex) {
// expected
} catch (Exception ex) {
fail("invalid 3 byte make hasExtraField" + ex);
}
}
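  // For contrast with the corrupt streams above: a minimal well-formed gzip
  // header per RFC 1952 (editorial sketch, not used by the tests) - magic
  // 0x1f 0x8b, CM=8 (deflate), FLG=0, MTIME=0, XFL=0, OS=0xff (unknown).
  private static byte[] validGzipHeader() {
    return new byte[] { 31, -117, 8, 0, 0, 0, 0, 0, 0, -1 };
  }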
public static byte[] generate(int size) {
byte[] data = new byte[size];
for (int i = 0; i < size; i++)
data[i] = (byte)random.nextInt(16);
return data;
}
@Test
public void testZlibCompressDecompressInMultiThreads() throws Exception {
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
    for (int i = 0; i < 10; i++) {
      ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
@Override
public void doWork() throws Exception {
testZlibCompressDecompress();
}
});
}
ctx.startThreads();
ctx.waitFor(60000);
}
}
| 15,949 | 35.25 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/lz4/TestLz4CompressorDecompressor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.lz4;
import static org.junit.Assert.*;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.BlockCompressorStream;
import org.apache.hadoop.io.compress.BlockDecompressorStream;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Lz4Codec;
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assume.*;
public class TestLz4CompressorDecompressor {
  private static final Random rnd = new Random(12345L);
@Before
public void before() {
assumeTrue(Lz4Codec.isNativeCodeLoaded());
}
//test on NullPointerException in {@code compressor.setInput()}
@Test
public void testCompressorSetInputNullPointerException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
compressor.setInput(null, 0, 10);
fail("testCompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testCompressorSetInputNullPointerException ex error !!!");
}
}
//test on NullPointerException in {@code decompressor.setInput()}
@Test
public void testDecompressorSetInputNullPointerException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
decompressor.setInput(null, 0, 10);
fail("testDecompressorSetInputNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorSetInputNullPointerException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in {@code compressor.setInput()}
@Test
public void testCompressorSetInputAIOBException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
compressor.setInput(new byte[] {}, -5, 10);
fail("testCompressorSetInputAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception ex) {
fail("testCompressorSetInputAIOBException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in {@code decompressor.setInput()}
@Test
  public void testDecompressorSetInputAIOBException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
decompressor.setInput(new byte[] {}, -5, 10);
fail("testDecompressorSetInputAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorSetInputAIOBException ex error !!!");
}
}
//test on NullPointerException in {@code compressor.compress()}
@Test
public void testCompressorCompressNullPointerException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(null, 0, 0);
fail("testCompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testCompressorCompressNullPointerException ex error !!!");
}
}
//test on NullPointerException in {@code decompressor.decompress()}
@Test
public void testDecompressorCompressNullPointerException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
byte[] bytes = generate(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(null, 0, 0);
fail("testDecompressorCompressNullPointerException error !!!");
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorCompressNullPointerException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in {@code compressor.compress()}
@Test
public void testCompressorCompressAIOBException() {
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(1024 * 6);
compressor.setInput(bytes, 0, bytes.length);
compressor.compress(new byte[] {}, 0, -1);
fail("testCompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testCompressorCompressAIOBException ex error !!!");
}
}
//test on ArrayIndexOutOfBoundsException in decompressor.decompress()
@Test
public void testDecompressorCompressAIOBException() {
try {
Lz4Decompressor decompressor = new Lz4Decompressor();
byte[] bytes = generate(1024 * 6);
decompressor.setInput(bytes, 0, bytes.length);
decompressor.decompress(new byte[] {}, 0, -1);
fail("testDecompressorCompressAIOBException error !!!");
} catch (ArrayIndexOutOfBoundsException ex) {
// expected
} catch (Exception e) {
fail("testDecompressorCompressAIOBException ex error !!!");
}
}
// test Lz4Compressor compressor.compress()
@Test
  public void testSetInputWithBytesSizeMoreThanDefaultLz4CompressorBufferSize() {
int BYTES_SIZE = 1024 * 64 + 1;
try {
Lz4Compressor compressor = new Lz4Compressor();
byte[] bytes = generate(BYTES_SIZE);
assertTrue("needsInput error !!!", compressor.needsInput());
compressor.setInput(bytes, 0, bytes.length);
byte[] emptyBytes = new byte[BYTES_SIZE];
int csize = compressor.compress(emptyBytes, 0, bytes.length);
      assertTrue(
          "testSetInputWithBytesSizeMoreThanDefaultLz4CompressorBufferSize error !!!",
          csize != 0);
    } catch (Exception ex) {
      fail("testSetInputWithBytesSizeMoreThanDefaultLz4CompressorBufferSize ex error !!!");
}
}
// test compress/decompress process
@Test
public void testCompressDecompress() {
int BYTE_SIZE = 1024 * 54;
byte[] bytes = generate(BYTE_SIZE);
Lz4Compressor compressor = new Lz4Compressor();
try {
compressor.setInput(bytes, 0, bytes.length);
assertTrue("Lz4CompressDecompress getBytesRead error !!!",
compressor.getBytesRead() > 0);
assertTrue(
"Lz4CompressDecompress getBytesWritten before compress error !!!",
compressor.getBytesWritten() == 0);
byte[] compressed = new byte[BYTE_SIZE];
int cSize = compressor.compress(compressed, 0, compressed.length);
assertTrue(
"Lz4CompressDecompress getBytesWritten after compress error !!!",
compressor.getBytesWritten() > 0);
Lz4Decompressor decompressor = new Lz4Decompressor();
      // feed the decompressor only the cSize bytes of real compressed data
decompressor.setInput(compressed, 0, cSize);
byte[] decompressed = new byte[BYTE_SIZE];
decompressor.decompress(decompressed, 0, decompressed.length);
assertTrue("testLz4CompressDecompress finished error !!!", decompressor.finished());
assertArrayEquals(bytes, decompressed);
compressor.reset();
decompressor.reset();
assertTrue("decompressor getRemaining error !!!",decompressor.getRemaining() == 0);
} catch (Exception e) {
fail("testLz4CompressDecompress ex error!!!");
}
}
// test compress/decompress with empty stream
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
ByteArrayInputStream bytesIn = null;
ByteArrayOutputStream bytesOut = null;
byte[] buf = null;
BlockDecompressorStream blockDecompressorStream = null;
try {
// compress empty stream
bytesOut = new ByteArrayOutputStream();
BlockCompressorStream blockCompressorStream = new BlockCompressorStream(
bytesOut, new Lz4Compressor(), 1024, 0);
// close without write
blockCompressorStream.close();
// check compressed output
buf = bytesOut.toByteArray();
assertEquals("empty stream compressed output size != 4", 4, buf.length);
// use compressed output as input for decompression
bytesIn = new ByteArrayInputStream(buf);
// create decompression stream
blockDecompressorStream = new BlockDecompressorStream(bytesIn,
new Lz4Decompressor(), 1024);
// no byte is available because stream was closed
assertEquals("return value is not -1", -1, blockDecompressorStream.read());
} catch (Exception e) {
fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
+ e.getMessage());
} finally {
if (blockDecompressorStream != null)
try {
bytesIn.close();
bytesOut.close();
blockDecompressorStream.close();
        } catch (IOException e) {
          // ignore close failures during cleanup
        }
}
}
  // test compress/decompress through the CompressionOutputStream/CompressionInputStream API
  @Test
  public void testCompressorDecompressorLogicWithCompressionStreams() {
DataOutputStream deflateOut = null;
DataInputStream inflateIn = null;
int BYTE_SIZE = 1024 * 100;
byte[] bytes = generate(BYTE_SIZE);
int bufferSize = 262144;
int compressionOverhead = (bufferSize / 6) + 32;
try {
DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
CompressionOutputStream deflateFilter = new BlockCompressorStream(
compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize,
compressionOverhead);
deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
deflateOut.write(bytes, 0, bytes.length);
deflateOut.flush();
deflateFilter.finish();
DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
compressedDataBuffer.getLength());
CompressionInputStream inflateFilter = new BlockDecompressorStream(
deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
byte[] result = new byte[BYTE_SIZE];
      // readFully: a single read() may legally return fewer than BYTE_SIZE bytes
      inflateIn.readFully(result);
      assertArrayEquals(
          "original array does not match the compressed/decompressed result",
          result, bytes);
} catch (IOException e) {
fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
} finally {
try {
if (deflateOut != null)
deflateOut.close();
if (inflateIn != null)
inflateIn.close();
      } catch (Exception e) {
        // ignore close failures during cleanup
      }
}
}
public static byte[] generate(int size) {
byte[] array = new byte[size];
for (int i = 0; i < size; i++)
array[i] = (byte)rnd.nextInt(16);
return array;
}
@Test
public void testLz4CompressDecompressInMultiThreads() throws Exception {
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
    for (int i = 0; i < 10; i++) {
      ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
@Override
public void doWork() throws Exception {
testCompressDecompress();
}
});
}
ctx.startThreads();
ctx.waitFor(60000);
}
}
| 12,360 | 36.008982 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/bzip2/TestBzip2CompressorDecompressor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.bzip2;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.junit.Before;
import org.junit.Test;
import java.io.*;
import java.util.Random;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import static org.junit.Assume.assumeTrue;
public class TestBzip2CompressorDecompressor {
  private static final Random rnd = new Random(12345L);
@Before
public void before() {
assumeTrue(Bzip2Factory.isNativeBzip2Loaded(new Configuration()));
}
// test compress/decompress process
@Test
public void testCompressDecompress() {
    int rawDataSize = 1024 * 64;
    byte[] rawData = generate(rawDataSize);
try {
Bzip2Compressor compressor = new Bzip2Compressor();
Bzip2Decompressor decompressor = new Bzip2Decompressor();
assertFalse("testBzip2CompressDecompress finished error",
compressor.finished());
compressor.setInput(rawData, 0, rawData.length);
assertTrue("testBzip2CompressDecompress getBytesRead before error",
compressor.getBytesRead() == 0);
compressor.finish();
byte[] compressedResult = new byte[rawDataSize];
int cSize = compressor.compress(compressedResult, 0, rawDataSize);
assertTrue("testBzip2CompressDecompress getBytesRead after error",
compressor.getBytesRead() == rawDataSize);
assertTrue(
"testBzip2CompressDecompress compressed size no less than original size",
cSize < rawDataSize);
decompressor.setInput(compressedResult, 0, cSize);
byte[] decompressedBytes = new byte[rawDataSize];
decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
assertArrayEquals("testBzip2CompressDecompress arrays not equals ",
rawData, decompressedBytes);
compressor.reset();
decompressor.reset();
} catch (IOException ex) {
fail("testBzip2CompressDecompress ex !!!" + ex);
}
}
public static byte[] generate(int size) {
byte[] array = new byte[size];
for (int i = 0; i < size; i++)
array[i] = (byte)rnd.nextInt(16);
return array;
}
@Test
public void testBzip2CompressDecompressInMultiThreads() throws Exception {
MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext();
    for (int i = 0; i < 10; i++) {
      ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
@Override
public void doWork() throws Exception {
testCompressDecompress();
}
});
}
ctx.startThreads();
ctx.waitFor(60000);
}
}
| 3,733 | 34.561905 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.LoggingStateChangeListener;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;
import org.apache.hadoop.service.ServiceStateException;
import org.junit.Test;
public class TestServiceLifecycle extends ServiceAssert {
private static Log LOG = LogFactory.getLog(TestServiceLifecycle.class);
/**
   * Walk the {@link BreakableService} through its lifecycle,
   * more to verify that the service's counters work than anything else
* @throws Throwable if necessary
*/
@Test
public void testWalkthrough() throws Throwable {
BreakableService svc = new BreakableService();
assertServiceStateCreated(svc);
assertStateCount(svc, Service.STATE.NOTINITED, 1);
assertStateCount(svc, Service.STATE.INITED, 0);
assertStateCount(svc, Service.STATE.STARTED, 0);
assertStateCount(svc, Service.STATE.STOPPED, 0);
svc.init(new Configuration());
assertServiceStateInited(svc);
assertStateCount(svc, Service.STATE.INITED, 1);
svc.start();
assertServiceStateStarted(svc);
assertStateCount(svc, Service.STATE.STARTED, 1);
svc.stop();
assertServiceStateStopped(svc);
assertStateCount(svc, Service.STATE.STOPPED, 1);
}
/**
* call init twice
* @throws Throwable if necessary
*/
@Test
public void testInitTwice() throws Throwable {
BreakableService svc = new BreakableService();
Configuration conf = new Configuration();
conf.set("test.init","t");
svc.init(conf);
svc.init(new Configuration());
assertStateCount(svc, Service.STATE.INITED, 1);
assertServiceConfigurationContains(svc, "test.init");
}
/**
* Call start twice
* @throws Throwable if necessary
*/
@Test
public void testStartTwice() throws Throwable {
BreakableService svc = new BreakableService();
svc.init(new Configuration());
svc.start();
svc.start();
assertStateCount(svc, Service.STATE.STARTED, 1);
}
/**
* Verify that when a service is stopped more than once, no exception
* is thrown.
* @throws Throwable if necessary
*/
@Test
public void testStopTwice() throws Throwable {
BreakableService svc = new BreakableService();
svc.init(new Configuration());
svc.start();
svc.stop();
assertStateCount(svc, Service.STATE.STOPPED, 1);
svc.stop();
assertStateCount(svc, Service.STATE.STOPPED, 1);
}
/**
   * Show that if the service fails during its init
   * operation, it ends up in the stopped state and a subsequent
   * stop() is a no-op
* @throws Throwable if necessary
*/
@Test
public void testStopFailedInit() throws Throwable {
BreakableService svc = new BreakableService(true, false, false);
assertServiceStateCreated(svc);
try {
svc.init(new Configuration());
fail("Expected a failure, got " + svc);
} catch (BreakableService.BrokenLifecycleEvent e) {
//expected
}
    //the service never got past init; it has moved to the stopped state
assertServiceStateStopped(svc);
assertStateCount(svc, Service.STATE.INITED, 1);
assertStateCount(svc, Service.STATE.STOPPED, 1);
//now try to stop
svc.stop();
assertStateCount(svc, Service.STATE.STOPPED, 1);
}
/**
   * Show that if the service fails during its start
   * operation, it ends up in the stopped state
* @throws Throwable if necessary
*/
@Test
public void testStopFailedStart() throws Throwable {
BreakableService svc = new BreakableService(false, true, false);
svc.init(new Configuration());
assertServiceStateInited(svc);
try {
svc.start();
fail("Expected a failure, got " + svc);
} catch (BreakableService.BrokenLifecycleEvent e) {
//expected
}
    //the service never got past start; it has moved to the stopped state
assertServiceStateStopped(svc);
}
/**
* verify that when a service fails during its stop operation,
* its state does not change.
* @throws Throwable if necessary
*/
@Test
public void testFailingStop() throws Throwable {
BreakableService svc = new BreakableService(false, false, true);
svc.init(new Configuration());
svc.start();
try {
svc.stop();
fail("Expected a failure, got " + svc);
} catch (BreakableService.BrokenLifecycleEvent e) {
//expected
}
assertStateCount(svc, Service.STATE.STOPPED, 1);
}
/**
* verify that when a service that is not started is stopped, the
* service enters the stopped state
* @throws Throwable on a failure
*/
@Test
public void testStopUnstarted() throws Throwable {
BreakableService svc = new BreakableService();
svc.stop();
assertServiceStateStopped(svc);
assertStateCount(svc, Service.STATE.INITED, 0);
assertStateCount(svc, Service.STATE.STOPPED, 1);
}
/**
* Show that if the service failed during an init
* operation, stop was called.
*/
@Test
public void testStopFailingInitAndStop() throws Throwable {
BreakableService svc = new BreakableService(true, false, true);
svc.registerServiceListener(new LoggingStateChangeListener());
try {
svc.init(new Configuration());
fail("Expected a failure, got " + svc);
} catch (BreakableService.BrokenLifecycleEvent e) {
assertEquals(Service.STATE.INITED, e.state);
}
//the service state is stopped
assertServiceStateStopped(svc);
assertEquals(Service.STATE.INITED, svc.getFailureState());
Throwable failureCause = svc.getFailureCause();
assertNotNull("Null failure cause in " + svc, failureCause);
BreakableService.BrokenLifecycleEvent cause =
(BreakableService.BrokenLifecycleEvent) failureCause;
assertNotNull("null state in " + cause + " raised by " + svc, cause.state);
assertEquals(Service.STATE.INITED, cause.state);
}
@Test
public void testInitNullConf() throws Throwable {
BreakableService svc = new BreakableService(false, false, false);
try {
svc.init(null);
LOG.warn("Null Configurations are permitted ");
} catch (ServiceStateException e) {
//expected
}
}
@Test
public void testServiceNotifications() throws Throwable {
BreakableService svc = new BreakableService(false, false, false);
BreakableStateChangeListener listener = new BreakableStateChangeListener();
svc.registerServiceListener(listener);
svc.init(new Configuration());
assertEventCount(listener, 1);
svc.start();
assertEventCount(listener, 2);
svc.stop();
assertEventCount(listener, 3);
svc.stop();
assertEventCount(listener, 3);
}
/**
* Test that when a service listener is unregistered, it stops being invoked
* @throws Throwable on a failure
*/
@Test
public void testServiceNotificationsStopOnceUnregistered() throws Throwable {
BreakableService svc = new BreakableService(false, false, false);
BreakableStateChangeListener listener = new BreakableStateChangeListener();
svc.registerServiceListener(listener);
svc.init(new Configuration());
assertEventCount(listener, 1);
svc.unregisterServiceListener(listener);
svc.start();
assertEventCount(listener, 1);
svc.stop();
assertEventCount(listener, 1);
svc.stop();
}
/**
* This test uses a service listener that unregisters itself during the callbacks.
   * It verifies the concurrency logic of the listener management
   * code: no concurrent-modification failures may surface
   * if you change list membership during the notifications.
* The standard <code>AbstractService</code> implementation copies the list
* to an array in a <code>synchronized</code> block then iterates through
* the copy precisely to prevent this problem.
* @throws Throwable on a failure
*/
@Test
public void testServiceNotificationsUnregisterDuringCallback() throws Throwable {
BreakableService svc = new BreakableService(false, false, false);
BreakableStateChangeListener listener =
new SelfUnregisteringBreakableStateChangeListener();
BreakableStateChangeListener l2 =
new BreakableStateChangeListener();
svc.registerServiceListener(listener);
svc.registerServiceListener(l2);
svc.init(new Configuration());
assertEventCount(listener, 1);
assertEventCount(l2, 1);
svc.unregisterServiceListener(listener);
svc.start();
assertEventCount(listener, 1);
assertEventCount(l2, 2);
svc.stop();
assertEventCount(listener, 1);
svc.stop();
}
private static class SelfUnregisteringBreakableStateChangeListener
extends BreakableStateChangeListener {
@Override
public synchronized void stateChanged(Service service) {
super.stateChanged(service);
service.unregisterServiceListener(this);
}
}
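  /**
   * Editorial sketch of the copy-then-iterate idiom described above (not the
   * actual AbstractService source): the listener list is snapshotted under
   * the lock and callbacks run against the snapshot, so a callback may
   * register or unregister listeners without disturbing the iteration.
   */
  private static void notifyListenersSafely(
      java.util.List<ServiceStateChangeListener> listeners, Service service) {
    ServiceStateChangeListener[] snapshot;
    synchronized (listeners) {
      snapshot = listeners.toArray(new ServiceStateChangeListener[0]);
    }
    for (ServiceStateChangeListener l : snapshot) {
      l.stateChanged(service);
    }
  }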
private void assertEventCount(BreakableStateChangeListener listener,
int expected) {
assertEquals(listener.toString(), expected, listener.getEventCount());
}
@Test
public void testServiceFailingNotifications() throws Throwable {
BreakableService svc = new BreakableService(false, false, false);
BreakableStateChangeListener listener = new BreakableStateChangeListener();
listener.setFailingState(Service.STATE.STARTED);
svc.registerServiceListener(listener);
svc.init(new Configuration());
assertEventCount(listener, 1);
    //start the service; the listener fails, but the failure does not surface here
svc.start();
//counter went up
assertEventCount(listener, 2);
assertEquals(1, listener.getFailureCount());
    //stop the service - this doesn't fail
svc.stop();
assertEventCount(listener, 3);
assertEquals(1, listener.getFailureCount());
svc.stop();
}
/**
* This test verifies that you can block waiting for something to happen
* and use notifications to manage it
* @throws Throwable on a failure
*/
@Test
public void testListenerWithNotifications() throws Throwable {
//this tests that a listener can get notified when a service is stopped
AsyncSelfTerminatingService service = new AsyncSelfTerminatingService(2000);
NotifyingListener listener = new NotifyingListener();
service.registerServiceListener(listener);
service.init(new Configuration());
service.start();
assertServiceInState(service, Service.STATE.STARTED);
long start = System.currentTimeMillis();
synchronized (listener) {
listener.wait(20000);
}
long duration = System.currentTimeMillis() - start;
assertEquals(Service.STATE.STOPPED, listener.notifyingState);
assertServiceInState(service, Service.STATE.STOPPED);
assertTrue("Duration of " + duration + " too long", duration < 10000);
}
@Test
public void testSelfTerminatingService() throws Throwable {
SelfTerminatingService service = new SelfTerminatingService();
BreakableStateChangeListener listener = new BreakableStateChangeListener();
service.registerServiceListener(listener);
service.init(new Configuration());
assertEventCount(listener, 1);
//start the service
service.start();
//and expect an event count of exactly two
assertEventCount(listener, 2);
}
@Test
public void testStartInInitService() throws Throwable {
Service service = new StartInInitService();
BreakableStateChangeListener listener = new BreakableStateChangeListener();
service.registerServiceListener(listener);
service.init(new Configuration());
assertServiceInState(service, Service.STATE.STARTED);
assertEventCount(listener, 1);
}
@Test
public void testStopInInitService() throws Throwable {
Service service = new StopInInitService();
BreakableStateChangeListener listener = new BreakableStateChangeListener();
service.registerServiceListener(listener);
service.init(new Configuration());
assertServiceInState(service, Service.STATE.STOPPED);
assertEventCount(listener, 1);
}
/**
* Listener that wakes up all threads waiting on it
*/
private static class NotifyingListener implements ServiceStateChangeListener {
public Service.STATE notifyingState = Service.STATE.NOTINITED;
public synchronized void stateChanged(Service service) {
notifyingState = service.getServiceState();
this.notifyAll();
}
}
/**
* Service that terminates itself after starting and sleeping for a while
*/
private static class AsyncSelfTerminatingService extends AbstractService
implements Runnable {
final int timeout;
private AsyncSelfTerminatingService(int timeout) {
super("AsyncSelfTerminatingService");
this.timeout = timeout;
}
@Override
protected void serviceStart() throws Exception {
new Thread(this).start();
super.serviceStart();
}
@Override
public void run() {
try {
Thread.sleep(timeout);
} catch (InterruptedException ignored) {
}
this.stop();
}
}
/**
* Service that terminates itself in startup
*/
private static class SelfTerminatingService extends AbstractService {
private SelfTerminatingService() {
super("SelfTerminatingService");
}
@Override
protected void serviceStart() throws Exception {
//start
super.serviceStart();
//then stop
stop();
}
}
/**
* Service that starts itself in init
*/
private static class StartInInitService extends AbstractService {
private StartInInitService() {
super("StartInInitService");
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
start();
}
}
/**
   * Service that stops itself in init
*/
private static class StopInInitService extends AbstractService {
private StopInInitService() {
super("StopInInitService");
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
stop();
}
}
}
| 15,026 | 31.040512 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/BreakableService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
/**
* This is a service that can be configured to break on any of the lifecycle
 * events, to test the failure handling of other parts of the service
* infrastructure.
*
 * It retains a count of the number of times each entry point is called -
* these counters are incremented before the exceptions are raised and
* before the superclass state methods are invoked.
*
*/
public class BreakableService extends AbstractService {
private boolean failOnInit;
private boolean failOnStart;
private boolean failOnStop;
private int[] counts = new int[4];
public BreakableService() {
this(false, false, false);
}
public BreakableService(boolean failOnInit,
boolean failOnStart,
boolean failOnStop) {
super("BreakableService");
this.failOnInit = failOnInit;
this.failOnStart = failOnStart;
this.failOnStop = failOnStop;
inc(STATE.NOTINITED);
}
private int convert(STATE state) {
return state.getValue();
}
private void inc(STATE state) {
int index = convert(state);
    counts[index]++;
}
public int getCount(STATE state) {
return counts[convert(state)];
}
private void maybeFail(boolean fail, String action) {
if (fail) {
throw new BrokenLifecycleEvent(this, action);
}
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
inc(STATE.INITED);
maybeFail(failOnInit, "init");
super.serviceInit(conf);
}
@Override
protected void serviceStart() {
inc(STATE.STARTED);
maybeFail(failOnStart, "start");
}
@Override
protected void serviceStop() {
inc(STATE.STOPPED);
maybeFail(failOnStop, "stop");
}
public void setFailOnInit(boolean failOnInit) {
this.failOnInit = failOnInit;
}
public void setFailOnStart(boolean failOnStart) {
this.failOnStart = failOnStart;
}
public void setFailOnStop(boolean failOnStop) {
this.failOnStop = failOnStop;
}
/**
* The exception explicitly raised on a failure
*/
public static class BrokenLifecycleEvent extends RuntimeException {
final STATE state;
public BrokenLifecycleEvent(Service service, String action) {
super("Lifecycle Failure during " + action + " state is "
+ service.getServiceState());
state = service.getServiceState();
}
}
}
| 3,345 | 26.203252 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/BreakableStateChangeListener.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;
/**
* A state change listener that logs the number of state change events received,
* and the last state invoked.
*
* It can be configured to fail during a state change event
*/
public class BreakableStateChangeListener
implements ServiceStateChangeListener {
private final String name;
private int eventCount;
private int failureCount;
private Service lastService;
private Service.STATE lastState = Service.STATE.NOTINITED;
//no callbacks are ever received for this event, so it
//can be used as an 'undefined'.
private Service.STATE failingState = Service.STATE.NOTINITED;
private List<Service.STATE> stateEventList = new ArrayList<Service.STATE>(4);
public BreakableStateChangeListener() {
this( "BreakableStateChangeListener");
}
public BreakableStateChangeListener(String name) {
this.name = name;
}
@Override
public synchronized void stateChanged(Service service) {
eventCount++;
lastService = service;
lastState = service.getServiceState();
stateEventList.add(lastState);
if (lastState == failingState) {
failureCount++;
throw new BreakableService.BrokenLifecycleEvent(service,
"Failure entering "
+ lastState
+ " for "
+ service.getName());
}
}
public synchronized int getEventCount() {
return eventCount;
}
public synchronized Service getLastService() {
return lastService;
}
public synchronized Service.STATE getLastState() {
return lastState;
}
public synchronized void setFailingState(Service.STATE failingState) {
this.failingState = failingState;
}
public synchronized int getFailureCount() {
return failureCount;
}
public List<Service.STATE> getStateEventList() {
return stateEventList;
}
@Override
public synchronized String toString() {
String s =
name + " - event count = " + eventCount + " last state " + lastState;
StringBuilder history = new StringBuilder(stateEventList.size()*10);
for (Service.STATE state: stateEventList) {
history.append(state).append(" ");
}
return s + " [ " + history + "]";
}
}
| 3,334 | 30.462264 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/ServiceAssert.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.service.Service;
import org.junit.Assert;
/**
* A set of assertions about the state of any service
*/
public class ServiceAssert extends Assert {
public static void assertServiceStateCreated(Service service) {
assertServiceInState(service, Service.STATE.NOTINITED);
}
public static void assertServiceStateInited(Service service) {
assertServiceInState(service, Service.STATE.INITED);
}
public static void assertServiceStateStarted(Service service) {
assertServiceInState(service, Service.STATE.STARTED);
}
public static void assertServiceStateStopped(Service service) {
assertServiceInState(service, Service.STATE.STOPPED);
}
public static void assertServiceInState(Service service, Service.STATE state) {
assertNotNull("Null service", service);
assertEquals("Service in wrong state: " + service, state,
service.getServiceState());
}
/**
   * Assert that the breakable service has entered a state exactly the
   * expected number of times.
* @param service service -if null an assertion is raised.
* @param state state to check.
* @param expected expected count.
*/
public static void assertStateCount(BreakableService service,
Service.STATE state,
int expected) {
assertNotNull("Null service", service);
int actual = service.getCount(state);
if (expected != actual) {
fail("Expected entry count for state [" + state +"] of " + service
+ " to be " + expected + " but was " + actual);
}
}
/**
* Assert that a service configuration contains a specific key; the value
* is ignored.
* @param service service to check
* @param key key to look for
*/
public static void assertServiceConfigurationContains(Service service,
String key) {
assertNotNull("No option "+ key + " in service configuration",
service.getConfig().get(key));
}
}
| 2,869 | 34.432099 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestGlobalStateChangeListener.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.LoggingStateChangeListener;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;
import org.junit.After;
import org.junit.Test;
/**
* Test global state changes. It is critical for all tests to clean up the
* global listener afterwards to avoid interfering with follow-on tests.
*
 * One listener, {@link #listener}, is defined; it is automatically
 * unregistered on cleanup. All other listeners must be unregistered in the
* finally clauses of the tests.
*/
public class TestGlobalStateChangeListener extends ServiceAssert {
BreakableStateChangeListener listener = new BreakableStateChangeListener("listener");
private void register() {
register(listener);
}
private boolean unregister() {
return unregister(listener);
}
private void register(ServiceStateChangeListener l) {
AbstractService.registerGlobalListener(l);
}
private boolean unregister(ServiceStateChangeListener l) {
return AbstractService.unregisterGlobalListener(l);
}
/**
* After every test case reset the list of global listeners.
*/
@After
public void cleanup() {
AbstractService.resetGlobalListeners();
}
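  /**
   * Editorial sketch (not part of the original suite): a test that needs an
   * extra global listener can wrap the register/unregister pair in
   * try/finally so that a failing assertion cannot leak the listener into
   * follow-on tests.
   */
  private void withTemporaryListener(ServiceStateChangeListener l,
      Runnable body) {
    register(l);
    try {
      body.run();
    } finally {
      unregister(l);
    }
  }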
/**
   * Assert that the last state of the listener is the one the test expected.
* @param breakable a breakable listener
* @param state the expected state
*/
public void assertListenerState(BreakableStateChangeListener breakable,
Service.STATE state) {
assertEquals("Wrong state in " + breakable, state, breakable.getLastState());
}
/**
* Assert that the number of state change notifications matches expectations.
* @param breakable the listener
* @param count the expected count.
*/
public void assertListenerEventCount(BreakableStateChangeListener breakable,
int count) {
assertEquals("Wrong event count in " + breakable, count,
breakable.getEventCount());
}
/**
* Test that register/unregister works
*/
@Test
public void testRegisterListener() {
register();
assertTrue("listener not registered", unregister());
}
/**
* Test that double registration results in one registration only.
*/
@Test
public void testRegisterListenerTwice() {
register();
register();
assertTrue("listener not registered", unregister());
//there should be no listener to unregister the second time
assertFalse("listener double registered", unregister());
}
/**
* Test that the {@link BreakableStateChangeListener} is picking up
* the state changes and that its last event field is as expected.
*/
@Test
public void testEventHistory() {
register();
BreakableService service = new BreakableService();
assertListenerState(listener, Service.STATE.NOTINITED);
assertEquals(0, listener.getEventCount());
service.init(new Configuration());
assertListenerState(listener, Service.STATE.INITED);
assertSame(service, listener.getLastService());
assertListenerEventCount(listener, 1);
service.start();
assertListenerState(listener, Service.STATE.STARTED);
assertListenerEventCount(listener, 2);
service.stop();
assertListenerState(listener, Service.STATE.STOPPED);
assertListenerEventCount(listener, 3);
}
/**
 * This test triggers a failure in the listener; the expectation is that the
 * service has already reached its desired state, purely because the
 * notifications take place afterwards.
*
*/
@Test
public void testListenerFailure() {
listener.setFailingState(Service.STATE.INITED);
register();
BreakableStateChangeListener l2 = new BreakableStateChangeListener();
register(l2);
BreakableService service = new BreakableService();
service.init(new Configuration());
//expected notifications to fail
//still should record its invocation
assertListenerState(listener, Service.STATE.INITED);
assertListenerEventCount(listener, 1);
//and second listener didn't get notified of anything
assertListenerEventCount(l2, 0);
    //service should still consider itself inited
assertServiceStateInited(service);
service.start();
service.stop();
}
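  /**
   * Companion sketch (added, not upstream): a listener that fails on the
   * STARTED notification still cannot stop the service from starting,
   * because notifications fire only after the transition has committed.
   */
  @Test
  public void testListenerFailureDoesNotBlockStart() {
    listener.setFailingState(Service.STATE.STARTED);
    register();
    BreakableService service = new BreakableService();
    service.init(new Configuration());
    service.start();
    //the service reaches STARTED despite the notification failure
    assertServiceStateStarted(service);
    service.stop();
  }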
/**
* Create a chain of listeners and set one in the middle to fail; verify that
* those in front got called, and those after did not.
*/
@Test
public void testListenerChain() {
//create and register the listeners
LoggingStateChangeListener logListener = new LoggingStateChangeListener();
register(logListener);
BreakableStateChangeListener l0 = new BreakableStateChangeListener("l0");
register(l0);
listener.setFailingState(Service.STATE.STARTED);
register();
BreakableStateChangeListener l3 = new BreakableStateChangeListener("l3");
register(l3);
//create and init a service.
BreakableService service = new BreakableService();
service.init(new Configuration());
assertServiceStateInited(service);
assertListenerState(l0, Service.STATE.INITED);
assertListenerState(listener, Service.STATE.INITED);
assertListenerState(l3, Service.STATE.INITED);
service.start();
    //expect that listener l0 and the failing listener saw the start event,
    //but not the final one (l3)
assertServiceStateStarted(service);
assertListenerState(l0, Service.STATE.STARTED);
assertListenerEventCount(l0, 2);
assertListenerState(listener, Service.STATE.STARTED);
assertListenerEventCount(listener, 2);
//this is the listener that is not expected to have been invoked
assertListenerState(l3, Service.STATE.INITED);
assertListenerEventCount(l3, 1);
//stop the service
service.stop();
//listeners are all updated
assertListenerEventCount(l0, 3);
assertListenerEventCount(listener, 3);
assertListenerEventCount(l3, 2);
//can all be unregistered in any order
unregister(logListener);
unregister(l0);
unregister(l3);
    //check that the listeners really are unregistered, even though they
    //were registered in a different order. Rather than rely on further
    //unregister calls, a new service is created
service = new BreakableService();
//this service is initialized
service.init(new Configuration());
//it is asserted that the event count has not changed for the unregistered
//listeners
assertListenerEventCount(l0, 3);
assertListenerEventCount(l3, 2);
//except for the one listener that was not unregistered, which
//has incremented by one
assertListenerEventCount(listener, 4);
}
}
| 7,591 | 32.742222 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service.STATE;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestCompositeService {
private static final int NUM_OF_SERVICES = 5;
private static final int FAILED_SERVICE_SEQ_NUMBER = 2;
private static final Log LOG = LogFactory.getLog(TestCompositeService.class);
  /**
   * Flag recording the stop policy of CompositeService, and hence what to
   * expect after trying to stop a service from another state (e.g. inited).
   */
private static final boolean STOP_ONLY_STARTED_SERVICES =
CompositeServiceImpl.isPolicyToStopOnlyStartedServices();
@Before
public void setup() {
CompositeServiceImpl.resetCounter();
}
@Test
public void testCallSequence() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
// Add services
for (int i = 0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service = new CompositeServiceImpl(i);
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services = serviceManager.getServices().toArray(
new CompositeServiceImpl[0]);
assertEquals("Number of registered services ", NUM_OF_SERVICES,
services.length);
Configuration conf = new Configuration();
// Initialise the composite service
serviceManager.init(conf);
//verify they were all inited
assertInState(STATE.INITED, services);
// Verify the init() call sequence numbers for every service
for (int i = 0; i < NUM_OF_SERVICES; i++) {
assertEquals("For " + services[i]
+ " service, init() call sequence number should have been ", i,
services[i].getCallSequenceNumber());
}
// Reset the call sequence numbers
resetServices(services);
serviceManager.start();
//verify they were all started
assertInState(STATE.STARTED, services);
// Verify the start() call sequence numbers for every service
for (int i = 0; i < NUM_OF_SERVICES; i++) {
assertEquals("For " + services[i]
+ " service, start() call sequence number should have been ", i,
services[i].getCallSequenceNumber());
}
resetServices(services);
serviceManager.stop();
//verify they were all stopped
assertInState(STATE.STOPPED, services);
// Verify the stop() call sequence numbers for every service
for (int i = 0; i < NUM_OF_SERVICES; i++) {
assertEquals("For " + services[i]
+ " service, stop() call sequence number should have been ",
((NUM_OF_SERVICES - 1) - i), services[i].getCallSequenceNumber());
}
// Try to stop again. This should be a no-op.
serviceManager.stop();
// Verify that stop() call sequence numbers for every service don't change.
for (int i = 0; i < NUM_OF_SERVICES; i++) {
assertEquals("For " + services[i]
+ " service, stop() call sequence number should have been ",
((NUM_OF_SERVICES - 1) - i), services[i].getCallSequenceNumber());
}
}
private void resetServices(CompositeServiceImpl[] services) {
// Reset the call sequence numbers
for (int i = 0; i < NUM_OF_SERVICES; i++) {
services[i].reset();
}
}
@Test
public void testServiceStartup() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
// Add services
for (int i = 0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service = new CompositeServiceImpl(i);
if (i == FAILED_SERVICE_SEQ_NUMBER) {
service.setThrowExceptionOnStart(true);
}
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services = serviceManager.getServices().toArray(
new CompositeServiceImpl[0]);
Configuration conf = new Configuration();
// Initialise the composite service
serviceManager.init(conf);
// Start the composite service
try {
serviceManager.start();
fail("Exception should have been thrown due to startup failure of last service");
} catch (ServiceTestRuntimeException e) {
for (int i = 0; i < NUM_OF_SERVICES - 1; i++) {
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
          // services beyond the failure point were never started and, under
          // this policy, never stopped either, so they remain INITED
assertEquals("Service state should have been ", STATE.INITED,
services[NUM_OF_SERVICES - 1].getServiceState());
} else {
assertEquals("Service state should have been ", STATE.STOPPED,
services[i].getServiceState());
}
}
}
}
@Test
public void testServiceStop() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
// Add services
for (int i = 0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service = new CompositeServiceImpl(i);
if (i == FAILED_SERVICE_SEQ_NUMBER) {
service.setThrowExceptionOnStop(true);
}
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services = serviceManager.getServices().toArray(
new CompositeServiceImpl[0]);
Configuration conf = new Configuration();
// Initialise the composite service
serviceManager.init(conf);
serviceManager.start();
// Stop the composite service
try {
serviceManager.stop();
} catch (ServiceTestRuntimeException e) {
}
assertInState(STATE.STOPPED, services);
}
/**
* Assert that all services are in the same expected state
* @param expected expected state value
* @param services services to examine
*/
private void assertInState(STATE expected, CompositeServiceImpl[] services) {
    assertInState(expected, services, 0, services.length);
}
/**
* Assert that all services are in the same expected state
* @param expected expected state value
* @param services services to examine
* @param start start offset
* @param finish finish offset: the count stops before this number
*/
private void assertInState(STATE expected,
CompositeServiceImpl[] services,
int start, int finish) {
for (int i = start; i < finish; i++) {
Service service = services[i];
assertInState(expected, service);
}
}
private void assertInState(STATE expected, Service service) {
assertEquals("Service state should have been " + expected + " in "
+ service,
expected,
service.getServiceState());
}
/**
* Shut down from not-inited: expect nothing to have happened
*/
@Test
public void testServiceStopFromNotInited() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
// Add services
for (int i = 0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service = new CompositeServiceImpl(i);
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services = serviceManager.getServices().toArray(
new CompositeServiceImpl[0]);
serviceManager.stop();
assertInState(STATE.NOTINITED, services);
}
/**
* Shut down from inited
*/
@Test
public void testServiceStopFromInited() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
// Add services
for (int i = 0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service = new CompositeServiceImpl(i);
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services = serviceManager.getServices().toArray(
new CompositeServiceImpl[0]);
serviceManager.init(new Configuration());
serviceManager.stop();
if (STOP_ONLY_STARTED_SERVICES) {
//this policy => no services were stopped
assertInState(STATE.INITED, services);
} else {
assertInState(STATE.STOPPED, services);
}
}
/**
* Use a null configuration & expect a failure
* @throws Throwable
*/
@Test
public void testInitNullConf() throws Throwable {
ServiceManager serviceManager = new ServiceManager("testInitNullConf");
CompositeServiceImpl service = new CompositeServiceImpl(0);
serviceManager.addTestService(service);
try {
serviceManager.init(null);
LOG.warn("Null Configurations are permitted " + serviceManager);
} catch (ServiceStateException e) {
//expected
}
}
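  /**
   * Minimal counterpart sketch (added, not upstream) to
   * {@link #testInitNullConf()}: an empty but non-null Configuration
   * is expected to be accepted.
   */
  @Test
  public void testInitEmptyConf() throws Throwable {
    ServiceManager serviceManager = new ServiceManager("testInitEmptyConf");
    serviceManager.addTestService(new CompositeServiceImpl(0));
    serviceManager.init(new Configuration());
    assertInState(STATE.INITED, serviceManager.getServices().toArray(
        new CompositeServiceImpl[0]));
  }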
/**
   * Walk the service through its lifecycle without any children;
* verify that it all works.
*/
@Test
public void testServiceLifecycleNoChildren() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
serviceManager.init(new Configuration());
serviceManager.start();
serviceManager.stop();
}
@Test
public void testAddServiceInInit() throws Throwable {
BreakableService child = new BreakableService();
assertInState(STATE.NOTINITED, child);
CompositeServiceAddingAChild composite =
new CompositeServiceAddingAChild(child);
composite.init(new Configuration());
assertInState(STATE.INITED, child);
}
@Test (timeout = 1000)
public void testAddIfService() {
CompositeService testService = new CompositeService("TestService") {
Service service;
@Override
public void serviceInit(Configuration conf) {
Integer notAService = new Integer(0);
assertFalse("Added an integer as a service",
addIfService(notAService));
service = new AbstractService("Service") {};
assertTrue("Unable to add a service", addIfService(service));
}
};
testService.init(new Configuration());
assertEquals("Incorrect number of services",
1, testService.getServices().size());
}
@Test(timeout = 1000)
public void testAddInitedSiblingInInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
parent.addService(new AddSiblingService(parent,
sibling,
STATE.INITED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test(timeout = 1000)
public void testAddUninitedSiblingInInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.INITED));
parent.init(new Configuration());
try {
parent.start();
fail("Expected an exception, got " + parent);
} catch (ServiceStateException e) {
//expected
}
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test
public void testRemoveService() {
CompositeService testService = new CompositeService("TestService") {
@Override
public void serviceInit(Configuration conf) {
Integer notAService = new Integer(0);
assertFalse("Added an integer as a service",
addIfService(notAService));
Service service1 = new AbstractService("Service1") {};
addIfService(service1);
Service service2 = new AbstractService("Service2") {};
addIfService(service2);
Service service3 = new AbstractService("Service3") {};
addIfService(service3);
removeService(service1);
}
};
testService.init(new Configuration());
assertEquals("Incorrect number of services",
2, testService.getServices().size());
}
@Test(timeout = 1000)
public void testAddStartedChildBeforeInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService child = new BreakableService();
child.init(new Configuration());
child.start();
AddSiblingService.addChildToService(parent, child);
try {
parent.init(new Configuration());
fail("Expected an exception, got " + parent);
} catch (ServiceStateException e) {
//expected
}
parent.stop();
}
@Test(timeout = 1000)
public void testAddStoppedChildBeforeInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService child = new BreakableService();
child.init(new Configuration());
child.start();
child.stop();
AddSiblingService.addChildToService(parent, child);
try {
parent.init(new Configuration());
fail("Expected an exception, got " + parent);
} catch (ServiceStateException e) {
//expected
}
parent.stop();
}
@Test(timeout = 1000)
public void testAddStartedSiblingInStart() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
sibling.start();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.STARTED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test(timeout = 1000)
public void testAddUninitedSiblingInStart() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.STARTED));
parent.init(new Configuration());
assertInState(STATE.NOTINITED, sibling);
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
@Test(timeout = 1000)
public void testAddStartedSiblingInInit() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
sibling.start();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.INITED));
parent.init(new Configuration());
assertInState(STATE.STARTED, sibling);
parent.start();
assertInState(STATE.STARTED, sibling);
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
assertInState(STATE.STOPPED, sibling);
}
@Test(timeout = 1000)
public void testAddStartedSiblingInStop() throws Throwable {
CompositeService parent = new CompositeService("parent");
BreakableService sibling = new BreakableService();
sibling.init(new Configuration());
sibling.start();
parent.addService(new AddSiblingService(parent,
sibling,
STATE.STOPPED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",
2, parent.getServices().size());
}
  public static class CompositeServiceAddingAChild extends CompositeService {
Service child;
public CompositeServiceAddingAChild(Service child) {
super("CompositeServiceAddingAChild");
this.child = child;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
addService(child);
super.serviceInit(conf);
}
}
public static class ServiceTestRuntimeException extends RuntimeException {
public ServiceTestRuntimeException(String message) {
super(message);
}
}
/**
   * This is a composite service that counts how many lifecycle methods have
   * been invoked, and can be set to throw a {@link ServiceTestRuntimeException}
   * during service start or stop.
*/
public static class CompositeServiceImpl extends CompositeService {
public static boolean isPolicyToStopOnlyStartedServices() {
return STOP_ONLY_STARTED_SERVICES;
}
private static int counter = -1;
private int callSequenceNumber = -1;
private boolean throwExceptionOnStart;
private boolean throwExceptionOnStop;
public CompositeServiceImpl(int sequenceNumber) {
super(Integer.toString(sequenceNumber));
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
counter++;
callSequenceNumber = counter;
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
if (throwExceptionOnStart) {
throw new ServiceTestRuntimeException("Fake service start exception");
}
counter++;
callSequenceNumber = counter;
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
counter++;
callSequenceNumber = counter;
if (throwExceptionOnStop) {
throw new ServiceTestRuntimeException("Fake service stop exception");
}
super.serviceStop();
}
public static int getCounter() {
return counter;
}
public int getCallSequenceNumber() {
return callSequenceNumber;
}
public void reset() {
callSequenceNumber = -1;
counter = -1;
}
public static void resetCounter() {
counter = -1;
}
public void setThrowExceptionOnStart(boolean throwExceptionOnStart) {
this.throwExceptionOnStart = throwExceptionOnStart;
}
public void setThrowExceptionOnStop(boolean throwExceptionOnStop) {
this.throwExceptionOnStop = throwExceptionOnStop;
}
@Override
public String toString() {
return "Service " + getName();
}
}
/**
   * Composite service that exposes the protected addService() method to tests
*/
public static class ServiceManager extends CompositeService {
public void addTestService(CompositeService service) {
addService(service);
}
public ServiceManager(String name) {
super(name);
}
}
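  /*
   * Illustrative usage sketch for ServiceManager (added commentary, not
   * upstream code):
   *
   *   ServiceManager mgr = new ServiceManager("demo");
   *   mgr.addTestService(new CompositeServiceImpl(0));
   *   mgr.init(new Configuration());
   *   mgr.start();
   *   mgr.stop();   // children are stopped in reverse order of addition
   */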
public static class AddSiblingService extends CompositeService {
private final CompositeService parent;
private final Service serviceToAdd;
private STATE triggerState;
public AddSiblingService(CompositeService parent,
Service serviceToAdd,
STATE triggerState) {
super("ParentStateManipulatorService");
this.parent = parent;
this.serviceToAdd = serviceToAdd;
this.triggerState = triggerState;
}
/**
* Add the serviceToAdd to the parent if this service
* is in the state requested
*/
private void maybeAddSibling() {
if (getServiceState() == triggerState) {
parent.addService(serviceToAdd);
}
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
maybeAddSibling();
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
maybeAddSibling();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
maybeAddSibling();
super.serviceStop();
}
/**
* Expose addService method
* @param parent parent service
* @param child child to add
*/
public static void addChildToService(CompositeService parent, Service child) {
parent.addService(child);
}
}
}
| 20,723 | 30.210843 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.Locale;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.string2long;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.hadoop.test.UnitTestcaseTimeLimit;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.junit.Assume;
import org.junit.Test;
public class TestStringUtils extends UnitTestcaseTimeLimit {
final private static String NULL_STR = null;
final private static String EMPTY_STR = "";
final private static String STR_WO_SPECIAL_CHARS = "AB";
final private static String STR_WITH_COMMA = "A,B";
final private static String ESCAPED_STR_WITH_COMMA = "A\\,B";
final private static String STR_WITH_ESCAPE = "AB\\";
final private static String ESCAPED_STR_WITH_ESCAPE = "AB\\\\";
final private static String STR_WITH_BOTH2 = ",A\\,,B\\\\,";
final private static String ESCAPED_STR_WITH_BOTH2 =
"\\,A\\\\\\,\\,B\\\\\\\\\\,";
@Test (timeout = 30000)
public void testEscapeString() throws Exception {
assertEquals(NULL_STR, StringUtils.escapeString(NULL_STR));
assertEquals(EMPTY_STR, StringUtils.escapeString(EMPTY_STR));
assertEquals(STR_WO_SPECIAL_CHARS,
StringUtils.escapeString(STR_WO_SPECIAL_CHARS));
assertEquals(ESCAPED_STR_WITH_COMMA,
StringUtils.escapeString(STR_WITH_COMMA));
assertEquals(ESCAPED_STR_WITH_ESCAPE,
StringUtils.escapeString(STR_WITH_ESCAPE));
assertEquals(ESCAPED_STR_WITH_BOTH2,
StringUtils.escapeString(STR_WITH_BOTH2));
}
@Test (timeout = 30000)
public void testSplit() throws Exception {
assertEquals(NULL_STR, StringUtils.split(NULL_STR));
String[] splits = StringUtils.split(EMPTY_STR);
assertEquals(0, splits.length);
splits = StringUtils.split(",,");
assertEquals(0, splits.length);
splits = StringUtils.split(STR_WO_SPECIAL_CHARS);
assertEquals(1, splits.length);
assertEquals(STR_WO_SPECIAL_CHARS, splits[0]);
splits = StringUtils.split(STR_WITH_COMMA);
assertEquals(2, splits.length);
assertEquals("A", splits[0]);
assertEquals("B", splits[1]);
splits = StringUtils.split(ESCAPED_STR_WITH_COMMA);
assertEquals(1, splits.length);
assertEquals(ESCAPED_STR_WITH_COMMA, splits[0]);
splits = StringUtils.split(STR_WITH_ESCAPE);
assertEquals(1, splits.length);
assertEquals(STR_WITH_ESCAPE, splits[0]);
splits = StringUtils.split(STR_WITH_BOTH2);
assertEquals(3, splits.length);
assertEquals(EMPTY_STR, splits[0]);
assertEquals("A\\,", splits[1]);
assertEquals("B\\\\", splits[2]);
splits = StringUtils.split(ESCAPED_STR_WITH_BOTH2);
assertEquals(1, splits.length);
assertEquals(ESCAPED_STR_WITH_BOTH2, splits[0]);
}
@Test (timeout = 30000)
public void testSimpleSplit() throws Exception {
final String[] TO_TEST = {
"a/b/c",
"a/b/c////",
"///a/b/c",
"",
"/",
"////"};
for (String testSubject : TO_TEST) {
assertArrayEquals("Testing '" + testSubject + "'",
testSubject.split("/"),
StringUtils.split(testSubject, '/'));
}
}
@Test (timeout = 30000)
public void testUnescapeString() throws Exception {
assertEquals(NULL_STR, StringUtils.unEscapeString(NULL_STR));
assertEquals(EMPTY_STR, StringUtils.unEscapeString(EMPTY_STR));
assertEquals(STR_WO_SPECIAL_CHARS,
StringUtils.unEscapeString(STR_WO_SPECIAL_CHARS));
try {
StringUtils.unEscapeString(STR_WITH_COMMA);
fail("Should throw IllegalArgumentException");
} catch (IllegalArgumentException e) {
// expected
}
assertEquals(STR_WITH_COMMA,
StringUtils.unEscapeString(ESCAPED_STR_WITH_COMMA));
try {
StringUtils.unEscapeString(STR_WITH_ESCAPE);
fail("Should throw IllegalArgumentException");
} catch (IllegalArgumentException e) {
// expected
}
assertEquals(STR_WITH_ESCAPE,
StringUtils.unEscapeString(ESCAPED_STR_WITH_ESCAPE));
try {
StringUtils.unEscapeString(STR_WITH_BOTH2);
fail("Should throw IllegalArgumentException");
} catch (IllegalArgumentException e) {
// expected
}
assertEquals(STR_WITH_BOTH2,
StringUtils.unEscapeString(ESCAPED_STR_WITH_BOTH2));
}
@Test (timeout = 30000)
public void testTraditionalBinaryPrefix() throws Exception {
//test string2long(..)
String[] symbol = {"k", "m", "g", "t", "p", "e"};
long m = 1024;
for(String s : symbol) {
assertEquals(0, string2long(0 + s));
assertEquals(m, string2long(1 + s));
m *= 1024;
}
assertEquals(0L, string2long("0"));
assertEquals(1024L, string2long("1k"));
assertEquals(-1024L, string2long("-1k"));
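    // e.g. "1230K" = 1230 * 1024 = 1259520; prefixes are case-insensitive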
assertEquals(1259520L, string2long("1230K"));
assertEquals(-1259520L, string2long("-1230K"));
assertEquals(104857600L, string2long("100m"));
assertEquals(-104857600L, string2long("-100M"));
assertEquals(956703965184L, string2long("891g"));
assertEquals(-956703965184L, string2long("-891G"));
assertEquals(501377302265856L, string2long("456t"));
assertEquals(-501377302265856L, string2long("-456T"));
assertEquals(11258999068426240L, string2long("10p"));
assertEquals(-11258999068426240L, string2long("-10P"));
assertEquals(1152921504606846976L, string2long("1e"));
assertEquals(-1152921504606846976L, string2long("-1E"));
String tooLargeNumStr = "10e";
try {
string2long(tooLargeNumStr);
fail("Test passed for a number " + tooLargeNumStr + " too large");
} catch (IllegalArgumentException e) {
assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
}
String tooSmallNumStr = "-10e";
try {
string2long(tooSmallNumStr);
fail("Test passed for a number " + tooSmallNumStr + " too small");
} catch (IllegalArgumentException e) {
assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
}
String invalidFormatNumStr = "10kb";
char invalidPrefix = 'b';
try {
string2long(invalidFormatNumStr);
fail("Test passed for a number " + invalidFormatNumStr
+ " has invalid format");
} catch (IllegalArgumentException e) {
assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"
+ invalidFormatNumStr
+ "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)",
e.getMessage());
}
//test long2string(..)
assertEquals("0", long2String(0, null, 2));
for(int decimalPlace = 0; decimalPlace < 2; decimalPlace++) {
for(int n = 1; n < TraditionalBinaryPrefix.KILO.value; n++) {
assertEquals(n + "", long2String(n, null, decimalPlace));
assertEquals(-n + "", long2String(-n, null, decimalPlace));
}
assertEquals("1 K", long2String(1L << 10, null, decimalPlace));
assertEquals("-1 K", long2String(-1L << 10, null, decimalPlace));
}
assertEquals("8.00 E", long2String(Long.MAX_VALUE, null, 2));
assertEquals("8.00 E", long2String(Long.MAX_VALUE - 1, null, 2));
assertEquals("-8 E", long2String(Long.MIN_VALUE, null, 2));
assertEquals("-8.00 E", long2String(Long.MIN_VALUE + 1, null, 2));
final String[] zeros = {" ", ".0 ", ".00 "};
for(int decimalPlace = 0; decimalPlace < zeros.length; decimalPlace++) {
final String trailingZeros = zeros[decimalPlace];
for(int e = 11; e < Long.SIZE - 1; e++) {
final TraditionalBinaryPrefix p
= TraditionalBinaryPrefix.values()[e/10 - 1];
{ // n = 2^e
final long n = 1L << e;
final String expected = (n/p.value) + " " + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, 2));
}
{ // n = 2^e + 1
final long n = (1L << e) + 1;
final String expected = (n/p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
}
{ // n = 2^e - 1
final long n = (1L << e) - 1;
final String expected = ((n+1)/p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
}
}
}
assertEquals("1.50 K", long2String(3L << 9, null, 2));
assertEquals("1.5 K", long2String(3L << 9, null, 1));
assertEquals("1.50 M", long2String(3L << 19, null, 2));
assertEquals("2 M", long2String(3L << 19, null, 0));
assertEquals("3 G", long2String(3L << 30, null, 2));
// test byteDesc(..)
assertEquals("0 B", StringUtils.byteDesc(0));
assertEquals("-100 B", StringUtils.byteDesc(-100));
assertEquals("1 KB", StringUtils.byteDesc(1024));
assertEquals("1.50 KB", StringUtils.byteDesc(3L << 9));
assertEquals("1.50 MB", StringUtils.byteDesc(3L << 19));
assertEquals("3 GB", StringUtils.byteDesc(3L << 30));
// test formatPercent(..)
assertEquals("10%", StringUtils.formatPercent(0.1, 0));
assertEquals("10.0%", StringUtils.formatPercent(0.1, 1));
assertEquals("10.00%", StringUtils.formatPercent(0.1, 2));
assertEquals("1%", StringUtils.formatPercent(0.00543, 0));
assertEquals("0.5%", StringUtils.formatPercent(0.00543, 1));
assertEquals("0.54%", StringUtils.formatPercent(0.00543, 2));
assertEquals("0.543%", StringUtils.formatPercent(0.00543, 3));
assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
}
@Test (timeout = 30000)
public void testJoin() {
List<String> s = new ArrayList<String>();
s.add("a");
s.add("b");
s.add("c");
assertEquals("", StringUtils.join(":", s.subList(0, 0)));
assertEquals("a", StringUtils.join(":", s.subList(0, 1)));
assertEquals("", StringUtils.join(':', s.subList(0, 0)));
assertEquals("a", StringUtils.join(':', s.subList(0, 1)));
assertEquals("a:b", StringUtils.join(":", s.subList(0, 2)));
assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
assertEquals("a:b", StringUtils.join(':', s.subList(0, 2)));
assertEquals("a:b:c", StringUtils.join(':', s.subList(0, 3)));
}
@Test (timeout = 30000)
public void testGetTrimmedStrings() throws Exception {
String compactDirList = "/spindle1/hdfs,/spindle2/hdfs,/spindle3/hdfs";
String spacedDirList = "/spindle1/hdfs, /spindle2/hdfs, /spindle3/hdfs";
String pathologicalDirList1 = " /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs ";
String pathologicalDirList2 = " /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs , ";
String emptyList1 = "";
String emptyList2 = " ";
String[] expectedArray = {"/spindle1/hdfs", "/spindle2/hdfs", "/spindle3/hdfs"};
String[] emptyArray = {};
assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(compactDirList));
assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(spacedDirList));
assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(pathologicalDirList1));
assertArrayEquals(expectedArray, StringUtils.getTrimmedStrings(pathologicalDirList2));
assertArrayEquals(emptyArray, StringUtils.getTrimmedStrings(emptyList1));
String[] estring = StringUtils.getTrimmedStrings(emptyList2);
assertArrayEquals(emptyArray, estring);
}
@Test (timeout = 30000)
public void testCamelize() {
// common use cases
assertEquals("Map", StringUtils.camelize("MAP"));
assertEquals("JobSetup", StringUtils.camelize("JOB_SETUP"));
assertEquals("SomeStuff", StringUtils.camelize("some_stuff"));
    // sanity checks for the ASCII alphabet against unexpected locale issues.
assertEquals("Aa", StringUtils.camelize("aA"));
assertEquals("Bb", StringUtils.camelize("bB"));
assertEquals("Cc", StringUtils.camelize("cC"));
assertEquals("Dd", StringUtils.camelize("dD"));
assertEquals("Ee", StringUtils.camelize("eE"));
assertEquals("Ff", StringUtils.camelize("fF"));
assertEquals("Gg", StringUtils.camelize("gG"));
assertEquals("Hh", StringUtils.camelize("hH"));
assertEquals("Ii", StringUtils.camelize("iI"));
assertEquals("Jj", StringUtils.camelize("jJ"));
assertEquals("Kk", StringUtils.camelize("kK"));
assertEquals("Ll", StringUtils.camelize("lL"));
assertEquals("Mm", StringUtils.camelize("mM"));
assertEquals("Nn", StringUtils.camelize("nN"));
assertEquals("Oo", StringUtils.camelize("oO"));
assertEquals("Pp", StringUtils.camelize("pP"));
assertEquals("Qq", StringUtils.camelize("qQ"));
assertEquals("Rr", StringUtils.camelize("rR"));
assertEquals("Ss", StringUtils.camelize("sS"));
assertEquals("Tt", StringUtils.camelize("tT"));
assertEquals("Uu", StringUtils.camelize("uU"));
assertEquals("Vv", StringUtils.camelize("vV"));
assertEquals("Ww", StringUtils.camelize("wW"));
assertEquals("Xx", StringUtils.camelize("xX"));
assertEquals("Yy", StringUtils.camelize("yY"));
assertEquals("Zz", StringUtils.camelize("zZ"));
}
@Test (timeout = 30000)
public void testStringToURI() {
String[] str = new String[] { "file://" };
try {
StringUtils.stringToURI(str);
fail("Ignoring URISyntaxException while creating URI from string file://");
} catch (IllegalArgumentException iae) {
assertEquals("Failed to create uri for file://", iae.getMessage());
}
}
@Test (timeout = 30000)
public void testSimpleHostName() {
assertEquals("Should return hostname when FQDN is specified",
"hadoop01",
StringUtils.simpleHostname("hadoop01.domain.com"));
assertEquals("Should return hostname when only hostname is specified",
"hadoop01",
StringUtils.simpleHostname("hadoop01"));
assertEquals("Should not truncate when IP address is passed",
"10.10.5.68",
StringUtils.simpleHostname("10.10.5.68"));
}
@Test (timeout = 5000)
public void testReplaceTokensShellEnvVars() {
Pattern pattern = StringUtils.SHELL_ENV_VAR_PATTERN;
Map<String, String> replacements = new HashMap<String, String>();
replacements.put("FOO", "one");
replacements.put("BAZ", "two");
replacements.put("NUMBERS123", "one-two-three");
replacements.put("UNDER_SCORES", "___");
assertEquals("one", StringUtils.replaceTokens("$FOO", pattern,
replacements));
assertEquals("two", StringUtils.replaceTokens("$BAZ", pattern,
replacements));
assertEquals("", StringUtils.replaceTokens("$BAR", pattern, replacements));
assertEquals("", StringUtils.replaceTokens("", pattern, replacements));
assertEquals("one-two-three", StringUtils.replaceTokens("$NUMBERS123",
pattern, replacements));
assertEquals("___", StringUtils.replaceTokens("$UNDER_SCORES", pattern,
replacements));
assertEquals("//one//two//", StringUtils.replaceTokens("//$FOO/$BAR/$BAZ//",
pattern, replacements));
}
@Test (timeout = 5000)
public void testReplaceTokensWinEnvVars() {
Pattern pattern = StringUtils.WIN_ENV_VAR_PATTERN;
Map<String, String> replacements = new HashMap<String, String>();
replacements.put("foo", "zoo");
replacements.put("baz", "zaz");
assertEquals("zoo", StringUtils.replaceTokens("%foo%", pattern,
replacements));
assertEquals("zaz", StringUtils.replaceTokens("%baz%", pattern,
replacements));
assertEquals("", StringUtils.replaceTokens("%bar%", pattern,
replacements));
assertEquals("", StringUtils.replaceTokens("", pattern, replacements));
assertEquals("zoo__zaz", StringUtils.replaceTokens("%foo%_%bar%_%baz%",
pattern, replacements));
assertEquals("begin zoo__zaz end", StringUtils.replaceTokens(
"begin %foo%_%bar%_%baz% end", pattern, replacements));
}
@Test
  public void testGetUniqueNonEmptyTrimmedStrings() {
final String TO_SPLIT = ",foo, bar,baz,,blah,blah,bar,";
Collection<String> col = StringUtils.getTrimmedStringCollection(TO_SPLIT);
assertEquals(4, col.size());
assertTrue(col.containsAll(Arrays.asList(new String[]{"foo","bar","baz","blah"})));
}
@Test
public void testLowerAndUpperStrings() {
Locale defaultLocale = Locale.getDefault();
try {
Locale.setDefault(new Locale("tr", "TR"));
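      // In the Turkish locale "TITLE".toLowerCase() produces a dotless
      // U+0131 and "title".toUpperCase() a dotted U+0130, which is what
      // makes this locale a good probe for locale-sensitive case bugs.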
String upperStr = "TITLE";
String lowerStr = "title";
// Confirming TR locale.
assertNotEquals(lowerStr, upperStr.toLowerCase());
assertNotEquals(upperStr, lowerStr.toUpperCase());
// This should be true regardless of locale.
assertEquals(lowerStr, StringUtils.toLowerCase(upperStr));
assertEquals(upperStr, StringUtils.toUpperCase(lowerStr));
assertTrue(StringUtils.equalsIgnoreCase(upperStr, lowerStr));
} finally {
Locale.setDefault(defaultLocale);
}
}
// Benchmark for StringUtils split
public static void main(String []args) {
final String TO_SPLIT = "foo,bar,baz,blah,blah";
for (boolean useOurs : new boolean[] { false, true }) {
for (int outer=0; outer < 10; outer++) {
long st = System.nanoTime();
int components = 0;
for (int inner=0; inner < 1000000; inner++) {
String[] res;
if (useOurs) {
res = StringUtils.split(TO_SPLIT, ',');
} else {
res = TO_SPLIT.split(",");
}
// be sure to use res, otherwise might be optimized out
components += res.length;
}
long et = System.nanoTime();
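        // treat the first few rounds as JIT warm-up; only report the rest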
if (outer > 3) {
System.out.println( (useOurs ? "StringUtils impl" : "Java impl")
+ " #" + outer + ":" + (et - st)/1000000 + "ms, components="
+ components );
}
}
}
}
}
| 18,951 | 39.495726 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Assert;
import org.junit.Test;
public class TestStopWatch {
@Test
public void testStartAndStop() throws Exception {
try (StopWatch sw = new StopWatch()) {
Assert.assertFalse(sw.isRunning());
sw.start();
Assert.assertTrue(sw.isRunning());
sw.stop();
Assert.assertFalse(sw.isRunning());
}
}
@Test
public void testStopInTryWithResource() throws Exception {
try (StopWatch sw = new StopWatch()) {
// make sure that no exception is thrown.
}
}
@Test
public void testExceptions() throws Exception {
StopWatch sw = new StopWatch();
    try {
      sw.stop();
      Assert.fail("stop() before start() should throw IllegalStateException");
    } catch (Exception e) {
      Assert.assertTrue("IllegalStateException is expected",
          e instanceof IllegalStateException);
    }
sw.reset();
sw.start();
    try {
      sw.start();
      Assert.fail("start() on a running StopWatch should throw IllegalStateException");
    } catch (Exception e) {
      Assert.assertTrue("IllegalStateException is expected",
          e instanceof IllegalStateException);
    }
}
}
| 1,836 | 28.15873 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestOptions {
@Test
public void testAppend() throws Exception {
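    // Despite the test name, prependOptions() places the new options in
    // front of the existing ones, as the expected arrays show.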
assertArrayEquals("first append",
new String[]{"Dr.", "Who", "hi", "there"},
Options.prependOptions(new String[]{"hi", "there"},
"Dr.", "Who"));
assertArrayEquals("second append",
new String[]{"aa","bb","cc","dd","ee","ff"},
Options.prependOptions(new String[]{"dd", "ee", "ff"},
"aa", "bb", "cc"));
}
@Test
public void testFind() throws Exception {
Object[] opts = new Object[]{1, "hi", true, "bye", 'x'};
assertEquals(1, Options.getOption(Integer.class, opts).intValue());
assertEquals("hi", Options.getOption(String.class, opts));
assertEquals(true, Options.getOption(Boolean.class, opts).booleanValue());
}
}
| 1,811 | 38.391304 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.Properties;
import java.util.jar.JarInputStream;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
public class TestJarFinder {
@Test
public void testJar() throws Exception {
//picking a class that is for sure in a JAR in the classpath
String jar = JarFinder.getJar(LogFactory.class);
Assert.assertTrue(new File(jar).exists());
}
private static void delete(File file) throws IOException {
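    // Guard against catastrophic deletes: an absolute path shorter than
    // five characters is almost certainly "/" or a top-level directory.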
if (file.getAbsolutePath().length() < 5) {
throw new IllegalArgumentException(
MessageFormat.format("Path [{0}] is too short, not deleting",
file.getAbsolutePath()));
}
if (file.exists()) {
if (file.isDirectory()) {
File[] children = file.listFiles();
if (children != null) {
for (File child : children) {
delete(child);
}
}
}
if (!file.delete()) {
throw new RuntimeException(
MessageFormat.format("Could not delete path [{0}]",
file.getAbsolutePath()));
}
}
}
@Test
public void testExpandedClasspath() throws Exception {
//picking a class that is for sure in a directory in the classpath
//in this case the JAR is created on the fly
String jar = JarFinder.getJar(TestJarFinder.class);
Assert.assertTrue(new File(jar).exists());
}
@Test
public void testExistingManifest() throws Exception {
File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
TestJarFinder.class.getName() + "-testExistingManifest");
delete(dir);
dir.mkdirs();
File metaInfDir = new File(dir, "META-INF");
metaInfDir.mkdirs();
File manifestFile = new File(metaInfDir, "MANIFEST.MF");
Manifest manifest = new Manifest();
OutputStream os = new FileOutputStream(manifestFile);
manifest.write(os);
os.close();
File propsFile = new File(dir, "props.properties");
Writer writer = new FileWriter(propsFile);
new Properties().store(writer, "");
writer.close();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JarOutputStream zos = new JarOutputStream(baos);
JarFinder.jarDir(dir, "", zos);
JarInputStream jis =
new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
Assert.assertNotNull(jis.getManifest());
jis.close();
}
@Test
public void testNoManifest() throws Exception {
File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
TestJarFinder.class.getName() + "-testNoManifest");
delete(dir);
dir.mkdirs();
File propsFile = new File(dir, "props.properties");
Writer writer = new FileWriter(propsFile);
new Properties().store(writer, "");
writer.close();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
JarOutputStream zos = new JarOutputStream(baos);
JarFinder.jarDir(dir, "", zos);
JarInputStream jis =
new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
Assert.assertNotNull(jis.getManifest());
jis.close();
}
}
| 4,342 | 33.19685 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.Charset;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Tests covering the classpath command-line utility.
*/
public class TestClasspath {
private static final Log LOG = LogFactory.getLog(TestClasspath.class);
private static final File TEST_DIR = new File(
System.getProperty("test.build.data", "/tmp"), "TestClasspath");
private static final Charset UTF8 = Charset.forName("UTF-8");
static {
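    // Replace calls to System.exit() with a catchable
    // ExitUtil.ExitException so a test can observe "exit" without
    // killing the JVM.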
ExitUtil.disableSystemExit();
}
private PrintStream oldStdout, oldStderr;
private ByteArrayOutputStream stdout, stderr;
private PrintStream printStdout, printStderr;
@Before
public void setUp() {
assertTrue(FileUtil.fullyDelete(TEST_DIR));
assertTrue(TEST_DIR.mkdirs());
oldStdout = System.out;
oldStderr = System.err;
stdout = new ByteArrayOutputStream();
printStdout = new PrintStream(stdout);
System.setOut(printStdout);
stderr = new ByteArrayOutputStream();
printStderr = new PrintStream(stderr);
System.setErr(printStderr);
}
@After
public void tearDown() {
System.setOut(oldStdout);
System.setErr(oldStderr);
IOUtils.cleanup(LOG, printStdout, printStderr);
assertTrue(FileUtil.fullyDelete(TEST_DIR));
}
@Test
public void testGlob() {
Classpath.main(new String[] { "--glob" });
String strOut = new String(stdout.toByteArray(), UTF8);
assertEquals(System.getProperty("java.class.path"), strOut.trim());
assertTrue(stderr.toByteArray().length == 0);
}
@Test
public void testJar() throws IOException {
File file = new File(TEST_DIR, "classpath.jar");
Classpath.main(new String[] { "--jar", file.getAbsolutePath() });
assertTrue(stdout.toByteArray().length == 0);
assertTrue(stderr.toByteArray().length == 0);
assertTrue(file.exists());
assertJar(file);
}
@Test
public void testJarReplace() throws IOException {
// Run the command twice with the same output jar file, and expect success.
testJar();
testJar();
}
@Test
public void testJarFileMissing() throws IOException {
try {
Classpath.main(new String[] { "--jar" });
fail("expected exit");
} catch (ExitUtil.ExitException e) {
assertTrue(stdout.toByteArray().length == 0);
String strErr = new String(stderr.toByteArray(), UTF8);
assertTrue(strErr.contains("requires path of jar"));
}
}
@Test
public void testHelp() {
Classpath.main(new String[] { "--help" });
String strOut = new String(stdout.toByteArray(), UTF8);
assertTrue(strOut.contains("Prints the classpath"));
assertTrue(stderr.toByteArray().length == 0);
}
@Test
public void testHelpShort() {
Classpath.main(new String[] { "-h" });
String strOut = new String(stdout.toByteArray(), UTF8);
assertTrue(strOut.contains("Prints the classpath"));
assertTrue(stderr.toByteArray().length == 0);
}
@Test
public void testUnrecognized() {
try {
Classpath.main(new String[] { "--notarealoption" });
fail("expected exit");
} catch (ExitUtil.ExitException e) {
assertTrue(stdout.toByteArray().length == 0);
String strErr = new String(stderr.toByteArray(), UTF8);
assertTrue(strErr.contains("unrecognized option"));
}
}
/**
* Asserts that the specified file is a jar file with a manifest containing a
* non-empty classpath attribute.
*
* @param file File to check
* @throws IOException if there is an I/O error
*/
private static void assertJar(File file) throws IOException {
JarFile jarFile = null;
try {
jarFile = new JarFile(file);
Manifest manifest = jarFile.getManifest();
assertNotNull(manifest);
Attributes mainAttributes = manifest.getMainAttributes();
assertNotNull(mainAttributes);
assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
assertNotNull(classPathAttr);
assertFalse(classPathAttr.isEmpty());
} finally {
      // JarFile.close() can itself throw; log rather than propagate so a
      // close failure cannot mask an assertion failure above.
if (jarFile != null) {
try {
jarFile.close();
} catch (IOException e) {
LOG.warn("exception closing jarFile: " + jarFile, e);
}
}
}
}
}
| 5,573 | 30.491525 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Assert;
import org.junit.Test;
public class TestShutdownHookManager {
@Test
public void shutdownHookManager() {
ShutdownHookManager mgr = ShutdownHookManager.get();
Assert.assertNotNull(mgr);
Assert.assertEquals(0, mgr.getShutdownHooksInOrder().size());
Runnable hook1 = new Runnable() {
@Override
public void run() {
}
};
Runnable hook2 = new Runnable() {
@Override
public void run() {
}
};
mgr.addShutdownHook(hook1, 0);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(0));
mgr.removeShutdownHook(hook1);
Assert.assertFalse(mgr.hasShutdownHook(hook1));
mgr.addShutdownHook(hook1, 0);
    Assert.assertTrue(mgr.hasShutdownHook(hook1));
    Assert.assertEquals(1, mgr.getShutdownHooksInOrder().size());
mgr.addShutdownHook(hook2, 1);
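    // getShutdownHooksInOrder() sorts by descending priority, so hook2
    // (priority 1) is expected ahead of hook1 (priority 0).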
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertTrue(mgr.hasShutdownHook(hook2));
Assert.assertEquals(2, mgr.getShutdownHooksInOrder().size());
Assert.assertEquals(hook2, mgr.getShutdownHooksInOrder().get(0));
Assert.assertEquals(hook1, mgr.getShutdownHooksInOrder().get(1));
}
}
| 2,250 | 34.730159 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCrc32.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import static org.junit.Assume.*;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(Parameterized.class)
public class TestNativeCrc32 {
private static final long BASE_POSITION = 0;
private static final int IO_BYTES_PER_CHECKSUM_DEFAULT = 512;
private static final String IO_BYTES_PER_CHECKSUM_KEY =
"io.bytes.per.checksum";
private static final int NUM_CHUNKS = 3;
private final DataChecksum.Type checksumType;
private int bytesPerChecksum;
private String fileName;
private ByteBuffer data, checksums;
private DataChecksum checksum;
@Rule
public ExpectedException exception = ExpectedException.none();
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>(2);
params.add(new Object[] { DataChecksum.Type.CRC32 });
params.add(new Object[] { DataChecksum.Type.CRC32C });
return params;
}
public TestNativeCrc32(DataChecksum.Type checksumType) {
this.checksumType = checksumType;
}
@Before
public void setup() {
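    // Skip (rather than fail) the whole fixture when the native CRC
    // implementation is not loadable on this platform.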
assumeTrue(NativeCrc32.isAvailable());
assertEquals(
"These tests assume they can write a checksum value as a 4-byte int.", 4,
checksumType.size);
Configuration conf = new Configuration();
bytesPerChecksum = conf.getInt(IO_BYTES_PER_CHECKSUM_KEY,
IO_BYTES_PER_CHECKSUM_DEFAULT);
fileName = this.getClass().getSimpleName();
checksum = DataChecksum.newDataChecksum(checksumType, bytesPerChecksum);
}
@Test
public void testVerifyChunkedSumsSuccess() throws ChecksumException {
allocateDirectByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, data, fileName, BASE_POSITION);
}
@Test
public void testVerifyChunkedSumsFail() throws ChecksumException {
allocateDirectByteBuffers();
fillDataAndInvalidChecksums();
exception.expect(ChecksumException.class);
NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, data, fileName, BASE_POSITION);
}
@Test
public void testVerifyChunkedSumsSuccessOddSize() throws ChecksumException {
    // Test checksum with an odd number of bytes. This is a corner case that
    // is often broken in checksum calculation, because there is a loop that
    // handles an even multiple of 4 or 8 bytes, followed by some additional
    // code to finish the few odd bytes at the end. That trailing code is
    // easily broken yet rarely exercised, because callers almost always pass
    // an even value such as 512.
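    // With the default bytes-per-checksum of 512, the decrement below yields
    // an odd chunk size of 511.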
bytesPerChecksum--;
allocateDirectByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.verifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, data, fileName, BASE_POSITION);
bytesPerChecksum++;
}
@Test
public void testVerifyChunkedSumsByteArraySuccess() throws ChecksumException {
allocateArrayByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, checksumType.id,
checksums.array(), checksums.position(), data.array(), data.position(),
data.remaining(), fileName, BASE_POSITION);
}
@Test
public void testVerifyChunkedSumsByteArrayFail() throws ChecksumException {
allocateArrayByteBuffers();
fillDataAndInvalidChecksums();
exception.expect(ChecksumException.class);
NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, checksumType.id,
checksums.array(), checksums.position(), data.array(), data.position(),
data.remaining(), fileName, BASE_POSITION);
}
@Test
public void testCalculateChunkedSumsSuccess() throws ChecksumException {
allocateDirectByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.calculateChunkedSums(bytesPerChecksum, checksumType.id,
checksums, data);
}
@Test
public void testCalculateChunkedSumsFail() throws ChecksumException {
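    // Unlike the verify tests, no exception is expected here:
    // calculateChunkedSums computes fresh checksums into the buffer, so the
    // pre-filled invalid values are simply overwritten.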
allocateDirectByteBuffers();
fillDataAndInvalidChecksums();
NativeCrc32.calculateChunkedSums(bytesPerChecksum, checksumType.id,
checksums, data);
}
@Test
public void testCalculateChunkedSumsByteArraySuccess() throws ChecksumException {
allocateArrayByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, checksumType.id,
checksums.array(), checksums.position(), data.array(), data.position(),
data.remaining());
}
@Test
public void testCalculateChunkedSumsByteArrayFail() throws ChecksumException {
allocateArrayByteBuffers();
fillDataAndInvalidChecksums();
NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, checksumType.id,
checksums.array(), checksums.position(), data.array(), data.position(),
data.remaining());
}
@Test
@SuppressWarnings("deprecation")
public void testNativeVerifyChunkedSumsSuccess() throws ChecksumException {
allocateDirectByteBuffers();
fillDataAndValidChecksums();
NativeCrc32.nativeVerifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, checksums.position(), data, data.position(), data.remaining(),
fileName, BASE_POSITION);
}
@Test
@SuppressWarnings("deprecation")
public void testNativeVerifyChunkedSumsFail() throws ChecksumException {
allocateDirectByteBuffers();
fillDataAndInvalidChecksums();
exception.expect(ChecksumException.class);
NativeCrc32.nativeVerifyChunkedSums(bytesPerChecksum, checksumType.id,
checksums, checksums.position(), data, data.position(), data.remaining(),
fileName, BASE_POSITION);
}
/**
* Allocates data buffer and checksums buffer as arrays on the heap.
*/
private void allocateArrayByteBuffers() {
data = ByteBuffer.wrap(new byte[bytesPerChecksum * NUM_CHUNKS]);
checksums = ByteBuffer.wrap(new byte[NUM_CHUNKS * checksumType.size]);
}
/**
* Allocates data buffer and checksums buffer as direct byte buffers.
*/
private void allocateDirectByteBuffers() {
data = ByteBuffer.allocateDirect(bytesPerChecksum * NUM_CHUNKS);
checksums = ByteBuffer.allocateDirect(NUM_CHUNKS * checksumType.size);
}
  /**
   * Fill the data buffer with monotonically increasing byte values. Overflow
   * is fine, because it's just test data. Update the checksum with the same
   * byte values. After every chunk, write the checksum to the checksums
   * buffer. Once writing completes, flip the buffers to prepare them for
   * reading.
   */
private void fillDataAndValidChecksums() {
for (int i = 0; i < NUM_CHUNKS; ++i) {
for (int j = 0; j < bytesPerChecksum; ++j) {
byte b = (byte)((i * bytesPerChecksum + j) & 0xFF);
data.put(b);
checksum.update(b);
}
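      // getValue() returns the 32-bit CRC widened to a long, so the int cast
      // below is lossless; putInt then stores the 4-byte value big-endian
      // (the default ByteBuffer order).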
checksums.putInt((int)checksum.getValue());
checksum.reset();
}
data.flip();
checksums.flip();
}
  /**
   * Fill the data buffer with monotonically increasing byte values. Overflow
   * is fine, because it's just test data. Update the checksum with different
   * byte values, so that the checksums are intentionally incorrect. After
   * every chunk, write the checksum to the checksums buffer. Once writing
   * completes, flip the buffers to prepare them for reading.
   */
private void fillDataAndInvalidChecksums() {
for (int i = 0; i < NUM_CHUNKS; ++i) {
for (int j = 0; j < bytesPerChecksum; ++j) {
byte b = (byte)((i * bytesPerChecksum + j) & 0xFF);
data.put(b);
checksum.update((byte)(b + 1));
}
checksums.putInt((int)checksum.getValue());
checksum.reset();
}
data.flip();
checksums.flip();
}
}
| 8,850 | 34.979675 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.ipc.ClientId;
import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.RpcConstants;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
import org.junit.Test;
import com.google.protobuf.CodedOutputStream;
public class TestProtoUtil {
/**
* Values to test encoding as variable length integers
*/
private static final int[] TEST_VINT_VALUES = new int[] {
0, 1, -1, 127, 128, 129, 255, 256, 257,
0x1234, -0x1234,
0x123456, -0x123456,
0x12345678, -0x12345678
};
/**
* Test that readRawVarint32 is compatible with the varints encoded
* by ProtoBuf's CodedOutputStream.
*/
@Test
public void testVarInt() throws IOException {
// Test a few manufactured values
for (int value : TEST_VINT_VALUES) {
doVarIntTest(value);
}
// Check 1-bits at every bit position
for (int i = 1; i != 0; i <<= 1) {
doVarIntTest(i);
doVarIntTest(-i);
doVarIntTest(i - 1);
doVarIntTest(~i);
}
}
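  /**
   * Illustrative sketch (not part of the original suite): a raw varint32
   * encodes an int in little-endian base-128 groups, setting the high bit
   * of every byte except the last. For example, 300 (binary 10 0101100)
   * encodes as the two bytes 0xAC 0x02.
   */
  @Test
  public void testVarIntKnownEncoding() throws IOException {
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(
        new byte[] { (byte) 0xAC, 0x02 }));
    assertEquals(300, ProtoUtil.readRawVarint32(dis));
  }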
private void doVarIntTest(int value) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
CodedOutputStream cout = CodedOutputStream.newInstance(baos);
cout.writeRawVarint32(value);
cout.flush();
DataInputStream dis = new DataInputStream(
new ByteArrayInputStream(baos.toByteArray()));
assertEquals(value, ProtoUtil.readRawVarint32(dis));
}
@Test
public void testRpcClientId() {
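    // The client id is a 16-byte UUID; it should round-trip through the
    // protobuf request header unchanged.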
byte[] uuid = ClientId.getClientId();
RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
RpcKind.RPC_PROTOCOL_BUFFER, OperationProto.RPC_FINAL_PACKET, 0,
RpcConstants.INVALID_RETRY_COUNT, uuid);
assertTrue(Arrays.equals(uuid, header.getClientId().toByteArray()));
}
}
| 2,978 | 32.47191 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.ByteArrayInputStream;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import org.junit.Assert;
public class TestLineReader {
private LineReader lineReader;
private String TestData;
private String Delimiter;
private Text line;
@Test
public void testCustomDelimiter() throws Exception {
    /* TEST_1
     * Scenario: the tail of the read buffer matches the leading
     * character(s) of the delimiter.
     *
     * The test data is arranged so that:
     *
     * 1) The delimiter is "</entity>".
     *
     * 2) The tail of the current buffer is "</",
     *    which matches the leading characters of the delimiter.
     *
     * 3) The head of the next buffer is "id>",
     *    which does NOT match the remaining characters of the delimiter.
     *
     * 4) The input data is prefixed with the character 'a' repeated
     *    numberOfCharToFillTheBuffer times, so that one buffered read of
     *    the input ends exactly at "</", i.e. the first two characters
     *    of the delimiter.
     *
     * 5) The buffer size is 64 * 1024.
     *
     * Check condition:
     * In the second key-value pair, the value should contain
     * "</" from the current token and "id>" from the next token.
     */
Delimiter="</entity>";
String CurrentBufferTailToken=
"</entity><entity><id>Gelesh</";
// Ending part of Input Data Buffer
// It contains '</' ie delimiter character
String NextBufferHeadToken=
"id><name>Omathil</name></entity>";
// Supposing the start of next buffer is this
String Expected =
(CurrentBufferTailToken+NextBufferHeadToken)
.replace(Delimiter, "");
// Expected ,must capture from both the buffer, excluding Delimiter
String TestPartOfInput = CurrentBufferTailToken+NextBufferHeadToken;
int BufferSize=64 * 1024;
int numberOfCharToFillTheBuffer=BufferSize-CurrentBufferTailToken.length();
StringBuilder fillerString=new StringBuilder();
for (int i=0;i<numberOfCharToFillTheBuffer;i++) {
fillerString.append('a'); // char 'a' as a filler for the test string
}
TestData = fillerString + TestPartOfInput;
lineReader = new LineReader(
new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
line = new Text();
lineReader.readLine(line);
Assert.assertEquals(fillerString.toString(),line.toString());
lineReader.readLine(line);
Assert.assertEquals(Expected, line.toString());
    /* TEST_2
     * Scenario: the character(s) immediately preceding the delimiter
     * match the leading character(s) of the delimiter.
     */
Delimiter = "record";
StringBuilder TestStringBuilder = new StringBuilder();
TestStringBuilder.append(Delimiter+"Kerala ");
TestStringBuilder.append(Delimiter+"Bangalore");
TestStringBuilder.append(Delimiter+" North Korea");
TestStringBuilder.append(Delimiter+Delimiter+
"Guantanamo");
TestStringBuilder.append(Delimiter+"ecord"+"recor"+"core"); //~EOF with 're'
TestData=TestStringBuilder.toString();
lineReader = new LineReader(
new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
lineReader.readLine(line);
Assert.assertEquals("",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Kerala ",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Bangalore",line.toString());
lineReader.readLine(line);
Assert.assertEquals(" North Korea",line.toString());
lineReader.readLine(line);
Assert.assertEquals("",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Guantanamo",line.toString());
lineReader.readLine(line);
Assert.assertEquals(("ecord"+"recor"+"core"),line.toString());
}
}
| 4,873 | 33.323944 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
public class TestChunkedArrayList {
@Test
public void testBasics() {
final int N_ELEMS = 100000;
ChunkedArrayList<Integer> l = new ChunkedArrayList<Integer>();
assertTrue(l.isEmpty());
// Insert a bunch of elements.
for (int i = 0; i < N_ELEMS; i++) {
l.add(i);
}
assertFalse(l.isEmpty());
assertEquals(N_ELEMS, l.size());
// Check that it got chunked.
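    // With 100,000 elements and a max chunk size of 8192, at least
    // ceil(100000 / 8192) = 13 chunks are needed.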
assertTrue(l.getNumChunks() > 10);
assertEquals(8192, l.getMaxChunkSize());
}
@Test
public void testIterator() {
ChunkedArrayList<Integer> l = new ChunkedArrayList<Integer>();
for (int i = 0; i < 30000; i++) {
l.add(i);
}
int i = 0;
for (int fromList : l) {
assertEquals(i, fromList);
i++;
}
}
@Test
public void testPerformance() {
String obj = "hello world";
final int numElems = 1000000;
final int numTrials = 5;
for (int trial = 0; trial < numTrials; trial++) {
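      // Request a GC between measurements to reduce cross-trial allocation
      // noise.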
System.gc();
{
ArrayList<String> arrayList = new ArrayList<String>();
StopWatch sw = new StopWatch();
sw.start();
for (int i = 0; i < numElems; i++) {
arrayList.add(obj);
}
System.out.println(" ArrayList " + sw.now(TimeUnit.MILLISECONDS));
}
// test ChunkedArrayList
System.gc();
{
ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
StopWatch sw = new StopWatch();
sw.start();
for (int i = 0; i < numElems; i++) {
chunkedList.add(obj);
}
System.out.println("ChunkedArrayList " + sw.now(TimeUnit.MILLISECONDS));
}
}
}
@Test
public void testRemovals() throws Exception {
final int NUM_ELEMS = 100000;
ChunkedArrayList<Integer> list = new ChunkedArrayList<Integer>();
for (int i = 0; i < NUM_ELEMS; i++) {
list.add(i);
}
// Iterate through all list elements.
Iterator<Integer> iter = list.iterator();
for (int i = 0; i < NUM_ELEMS; i++) {
Assert.assertTrue(iter.hasNext());
Integer val = iter.next();
Assert.assertEquals(Integer.valueOf(i), val);
}
Assert.assertFalse(iter.hasNext());
Assert.assertEquals(NUM_ELEMS, list.size());
// Remove even elements.
iter = list.iterator();
for (int i = 0; i < NUM_ELEMS; i++) {
Assert.assertTrue(iter.hasNext());
Integer val = iter.next();
Assert.assertEquals(Integer.valueOf(i), val);
if (i % 2 == 0) {
iter.remove();
}
}
Assert.assertFalse(iter.hasNext());
Assert.assertEquals(NUM_ELEMS / 2, list.size());
// Iterate through all odd list elements.
iter = list.iterator();
for (int i = 0; i < NUM_ELEMS / 2; i++) {
Assert.assertTrue(iter.hasNext());
Integer val = iter.next();
Assert.assertEquals(Integer.valueOf(1 + (2 * i)), val);
iter.remove();
}
Assert.assertFalse(iter.hasNext());
// Check that list is now empty.
Assert.assertEquals(0, list.size());
Assert.assertTrue(list.isEmpty());
iter = list.iterator();
Assert.assertFalse(iter.hasNext());
}
@Test
public void testGet() throws Exception {
final int NUM_ELEMS = 100001;
ChunkedArrayList<Integer> list = new ChunkedArrayList<Integer>();
for (int i = 0; i < NUM_ELEMS; i++) {
list.add(i);
}
Assert.assertEquals(Integer.valueOf(100), list.get(100));
Assert.assertEquals(Integer.valueOf(1000), list.get(1000));
Assert.assertEquals(Integer.valueOf(10000), list.get(10000));
Assert.assertEquals(Integer.valueOf(100000), list.get(100000));
Iterator<Integer> iter = list.iterator();
iter.next();
iter.remove();
Assert.assertEquals(Integer.valueOf(1), list.get(0));
iter = list.iterator();
for (int i = 0; i < 500; i++) {
iter.next();
}
iter.remove();
Assert.assertEquals(Integer.valueOf(502), list.get(500));
Assert.assertEquals(Integer.valueOf(602), list.get(600));
}
}
| 5,035 | 28.450292 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.*;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import static org.apache.hadoop.test.MockitoMaker.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Shell;
public class TestDiskChecker {
final FsPermission defaultPerm = new FsPermission("755");
final FsPermission invalidPerm = new FsPermission("000");
@Test (timeout = 30000)
public void testMkdirs_dirExists() throws Throwable {
_mkdirs(true, defaultPerm, defaultPerm);
}
@Test (timeout = 30000)
public void testMkdirs_noDir() throws Throwable {
_mkdirs(false, defaultPerm, defaultPerm);
}
@Test (timeout = 30000)
public void testMkdirs_dirExists_badUmask() throws Throwable {
_mkdirs(true, defaultPerm, invalidPerm);
}
@Test (timeout = 30000)
public void testMkdirs_noDir_badUmask() throws Throwable {
_mkdirs(false, defaultPerm, invalidPerm);
}
private void _mkdirs(boolean exists, FsPermission before, FsPermission after)
throws Throwable {
File localDir = make(stub(File.class).returning(exists).from.exists());
when(localDir.mkdir()).thenReturn(true);
Path dir = mock(Path.class); // use default stubs
LocalFileSystem fs = make(stub(LocalFileSystem.class)
.returning(localDir).from.pathToFile(dir));
FileStatus stat = make(stub(FileStatus.class)
.returning(after).from.getPermission());
when(fs.getFileStatus(dir)).thenReturn(stat);
try {
DiskChecker.mkdirsWithExistsAndPermissionCheck(fs, dir, before);
if (!exists)
verify(fs).setPermission(dir, before);
else {
verify(fs).getFileStatus(dir);
verify(stat).getPermission();
}
}
catch (DiskErrorException e) {
if (before != after)
assertTrue(e.getMessage().startsWith("Incorrect permission"));
}
}
@Test (timeout = 30000)
public void testCheckDir_normal() throws Throwable {
_checkDirs(true, new FsPermission("755"), true);
}
@Test (timeout = 30000)
public void testCheckDir_notDir() throws Throwable {
_checkDirs(false, new FsPermission("000"), false);
}
@Test (timeout = 30000)
public void testCheckDir_notReadable() throws Throwable {
_checkDirs(true, new FsPermission("000"), false);
}
@Test (timeout = 30000)
public void testCheckDir_notWritable() throws Throwable {
_checkDirs(true, new FsPermission("444"), false);
}
@Test (timeout = 30000)
public void testCheckDir_notListable() throws Throwable {
_checkDirs(true, new FsPermission("666"), false); // not listable
}
private void _checkDirs(boolean isDir, FsPermission perm, boolean success)
throws Throwable {
File localDir = File.createTempFile("test", "tmp");
if (isDir) {
localDir.delete();
localDir.mkdir();
}
Shell.execCommand(Shell.getSetPermissionCommand(String.format("%04o",
perm.toShort()), false, localDir.getAbsolutePath()));
try {
DiskChecker.checkDir(FileSystem.getLocal(new Configuration()),
new Path(localDir.getAbsolutePath()), perm);
assertTrue("checkDir success", success);
} catch (DiskErrorException e) {
assertFalse("checkDir success", success);
}
localDir.delete();
}
  /**
   * These test cases verify that a local folder is created with the correct
   * permissions to hold mapper output.
   */
@Test (timeout = 30000)
public void testCheckDir_normal_local() throws Throwable {
_checkDirs(true, "755", true);
}
@Test (timeout = 30000)
public void testCheckDir_notDir_local() throws Throwable {
_checkDirs(false, "000", false);
}
@Test (timeout = 30000)
public void testCheckDir_notReadable_local() throws Throwable {
_checkDirs(true, "000", false);
}
@Test (timeout = 30000)
public void testCheckDir_notWritable_local() throws Throwable {
_checkDirs(true, "444", false);
}
@Test (timeout = 30000)
public void testCheckDir_notListable_local() throws Throwable {
_checkDirs(true, "666", false);
}
private void _checkDirs(boolean isDir, String perm, boolean success)
throws Throwable {
File localDir = File.createTempFile("test", "tmp");
if (isDir) {
localDir.delete();
localDir.mkdir();
}
Shell.execCommand(Shell.getSetPermissionCommand(perm, false,
localDir.getAbsolutePath()));
try {
DiskChecker.checkDir(localDir);
assertTrue("checkDir success", success);
} catch (DiskErrorException e) {
e.printStackTrace();
assertFalse("checkDir success", success);
}
localDir.delete();
System.out.println("checkDir success: " + success);
}
@Test (timeout = 30000)
public void testCheckDirsIOException() throws Throwable {
Path path = new Path("target", TestDiskChecker.class.getSimpleName());
File localDir = new File(path.toUri().getRawPath());
localDir.mkdir();
File localFile = new File(localDir, "test");
localFile.createNewFile();
File spyLocalDir = spy(localDir);
doReturn(localFile.toPath()).when(spyLocalDir).toPath();
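    // The spy makes toPath() point at a regular file, so checkDirs fails to
    // iterate it as a directory and surfaces the underlying IOException as a
    // DiskErrorException.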
try {
DiskChecker.checkDirs(spyLocalDir);
fail("Expected exception for I/O error");
} catch (DiskErrorException e) {
GenericTestUtils.assertExceptionContains("I/O error", e);
assertTrue(e.getCause() instanceof IOException);
} finally {
localFile.delete();
localDir.delete();
}
}
}
| 6,702 | 31.538835 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import org.junit.Assert;
import org.junit.Test;
/** Testing {@link LightWeightGSet} */
public class TestLightWeightGSet {
public static final Log LOG = LogFactory.getLog(TestLightWeightGSet.class);
private static ArrayList<Integer> getRandomList(int length, int randomSeed) {
Random random = new Random(randomSeed);
ArrayList<Integer> list = new ArrayList<Integer>(length);
for (int i = 0; i < length; i++) {
list.add(random.nextInt());
}
return list;
}
private static class TestElement implements LightWeightGSet.LinkedElement {
private final int val;
private LinkedElement next;
TestElement(int val) {
this.val = val;
this.next = null;
}
public int getVal() {
return val;
}
@Override
public void setNext(LinkedElement next) {
this.next = next;
}
@Override
public LinkedElement getNext() {
return next;
}
}
@Test(timeout=60000)
public void testRemoveAllViaIterator() {
ArrayList<Integer> list = getRandomList(100, 123);
LightWeightGSet<TestElement, TestElement> set =
new LightWeightGSet<TestElement, TestElement>(16);
for (Integer i : list) {
set.put(new TestElement(i));
}
for (Iterator<TestElement> iter = set.iterator();
iter.hasNext(); ) {
iter.next();
iter.remove();
}
Assert.assertEquals(0, set.size());
}
@Test(timeout=60000)
public void testRemoveSomeViaIterator() {
ArrayList<Integer> list = getRandomList(100, 123);
LightWeightGSet<TestElement, TestElement> set =
new LightWeightGSet<TestElement, TestElement>(16);
for (Integer i : list) {
set.put(new TestElement(i));
}
long sum = 0;
for (Iterator<TestElement> iter = set.iterator();
iter.hasNext(); ) {
sum += iter.next().getVal();
}
    long average = sum / set.size();
    LOG.info("Removing all elements above " + average);
    for (Iterator<TestElement> iter = set.iterator();
        iter.hasNext(); ) {
      int item = iter.next().getVal();
      if (item > average) {
        iter.remove();
      }
    }
    for (Iterator<TestElement> iter = set.iterator();
        iter.hasNext(); ) {
      Assert.assertTrue(iter.next().getVal() <= average);
    }
}
}
| 3,341 | 29.108108 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeTrue;
import static org.junit.matchers.JUnitMatchers.containsString;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.*;
/**
 * Test cases for the Windows winutils.exe helper utility.
 */
public class TestWinUtils {
private static final Log LOG = LogFactory.getLog(TestWinUtils.class);
private static File TEST_DIR = new File(System.getProperty("test.build.data",
"/tmp"), TestWinUtils.class.getSimpleName());
@Before
public void setUp() {
// Not supported on non-Windows platforms
assumeTrue(Shell.WINDOWS);
TEST_DIR.mkdirs();
}
@After
public void tearDown() throws IOException {
FileUtil.fullyDelete(TEST_DIR);
}
// Helper routine that writes the given content to the file.
private void writeFile(File file, String content) throws IOException {
byte[] data = content.getBytes();
FileOutputStream os = new FileOutputStream(file);
os.write(data);
os.close();
}
  // Helper routine that reads up to the first 100 bytes from the file and
  // returns them as a String.
  private String readFile(File file) throws IOException {
    FileInputStream fis = new FileInputStream(file);
    try {
      byte[] b = new byte[100];
      int len = fis.read(b);
      return len > 0 ? new String(b, 0, len) : "";
    } finally {
      fis.close();
    }
  }
@Test (timeout = 30000)
public void testLs() throws IOException {
final String content = "6bytes";
final int contentSize = content.length();
File testFile = new File(TEST_DIR, "file1");
writeFile(testFile, content);
    // Verify the permission and file name tokens in the output
String output = Shell.execCommand(
Shell.WINUTILS, "ls", testFile.getCanonicalPath());
String[] outputArgs = output.split("[ \r\n]");
assertTrue(outputArgs[0].equals("-rwx------"));
assertTrue(outputArgs[outputArgs.length - 1]
.equals(testFile.getCanonicalPath()));
// Verify most tokens when using a formatted output (other tokens
// will be verified with chmod/chown)
output = Shell.execCommand(
Shell.WINUTILS, "ls", "-F", testFile.getCanonicalPath());
outputArgs = output.split("[|\r\n]");
assertEquals(9, outputArgs.length);
assertTrue(outputArgs[0].equals("-rwx------"));
assertEquals(contentSize, Long.parseLong(outputArgs[4]));
assertTrue(outputArgs[8].equals(testFile.getCanonicalPath()));
testFile.delete();
assertFalse(testFile.exists());
}
@Test (timeout = 30000)
public void testGroups() throws IOException {
String currentUser = System.getProperty("user.name");
// Verify that groups command returns information about the current user
// groups when invoked with no args
String outputNoArgs = Shell.execCommand(
Shell.WINUTILS, "groups").trim();
String output = Shell.execCommand(
Shell.WINUTILS, "groups", currentUser).trim();
assertEquals(output, outputNoArgs);
// Verify that groups command with the -F flag returns the same information
String outputFormat = Shell.execCommand(
Shell.WINUTILS, "groups", "-F", currentUser).trim();
outputFormat = outputFormat.replace("|", " ");
assertEquals(output, outputFormat);
}
private void chmod(String mask, File file) throws IOException {
Shell.execCommand(
Shell.WINUTILS, "chmod", mask, file.getCanonicalPath());
}
private void chmodR(String mask, File file) throws IOException {
Shell.execCommand(
Shell.WINUTILS, "chmod", "-R", mask, file.getCanonicalPath());
}
private String ls(File file) throws IOException {
return Shell.execCommand(
Shell.WINUTILS, "ls", file.getCanonicalPath());
}
private String lsF(File file) throws IOException {
return Shell.execCommand(
Shell.WINUTILS, "ls", "-F", file.getCanonicalPath());
}
private void assertPermissions(File file, String expected)
throws IOException {
String output = ls(file).split("[ \r\n]")[0];
assertEquals(expected, output);
}
private void testChmodInternal(String mode, String expectedPerm)
throws IOException {
File a = new File(TEST_DIR, "file1");
assertTrue(a.createNewFile());
// Reset permissions on the file to default
chmod("700", a);
// Apply the mode mask
chmod(mode, a);
// Compare the output
assertPermissions(a, expectedPerm);
a.delete();
assertFalse(a.exists());
}
private void testNewFileChmodInternal(String expectedPerm) throws IOException {
// Create a new directory
File dir = new File(TEST_DIR, "dir1");
assertTrue(dir.mkdir());
    // Set permissions using chmod
chmod("755", dir);
// Create a child file in the directory
File child = new File(dir, "file1");
assertTrue(child.createNewFile());
// Verify the child file has correct permissions
assertPermissions(child, expectedPerm);
child.delete();
dir.delete();
assertFalse(dir.exists());
}
private void testChmodInternalR(String mode, String expectedPerm,
String expectedPermx) throws IOException {
// Setup test folder hierarchy
File a = new File(TEST_DIR, "a");
assertTrue(a.mkdir());
chmod("700", a);
File aa = new File(a, "a");
assertTrue(aa.createNewFile());
chmod("600", aa);
File ab = new File(a, "b");
assertTrue(ab.mkdir());
chmod("700", ab);
File aba = new File(ab, "a");
assertTrue(aba.mkdir());
chmod("700", aba);
File abb = new File(ab, "b");
assertTrue(abb.createNewFile());
chmod("600", abb);
File abx = new File(ab, "x");
assertTrue(abx.createNewFile());
chmod("u+x", abx);
// Run chmod recursive
chmodR(mode, a);
// Verify outcome
assertPermissions(a, "d" + expectedPermx);
assertPermissions(aa, "-" + expectedPerm);
assertPermissions(ab, "d" + expectedPermx);
assertPermissions(aba, "d" + expectedPermx);
assertPermissions(abb, "-" + expectedPerm);
assertPermissions(abx, "-" + expectedPermx);
assertTrue(FileUtil.fullyDelete(a));
}
@Test (timeout = 30000)
public void testBasicChmod() throws IOException {
// - Create a file.
// - Change mode to 377 so owner does not have read permission.
// - Verify the owner truly does not have the permissions to read.
File a = new File(TEST_DIR, "a");
a.createNewFile();
chmod("377", a);
try {
readFile(a);
assertFalse("readFile should have failed!", true);
} catch (IOException ex) {
LOG.info("Expected: Failed read from a file with permissions 377");
}
// restore permissions
chmod("700", a);
// - Create a file.
// - Change mode to 577 so owner does not have write permission.
// - Verify the owner truly does not have the permissions to write.
chmod("577", a);
try {
writeFile(a, "test");
assertFalse("writeFile should have failed!", true);
} catch (IOException ex) {
LOG.info("Expected: Failed write to a file with permissions 577");
}
// restore permissions
chmod("700", a);
assertTrue(a.delete());
// - Copy WINUTILS to a new executable file, a.exe.
// - Change mode to 677 so owner does not have execute permission.
// - Verify the owner truly does not have the permissions to execute the file.
File winutilsFile = new File(Shell.WINUTILS);
File aExe = new File(TEST_DIR, "a.exe");
FileUtils.copyFile(winutilsFile, aExe);
chmod("677", aExe);
try {
Shell.execCommand(aExe.getCanonicalPath(), "ls");
assertFalse("executing " + aExe + " should have failed!", true);
} catch (IOException ex) {
LOG.info("Expected: Failed to execute a file with permissions 677");
}
assertTrue(aExe.delete());
}
/** Validate behavior of chmod commands on directories on Windows. */
@Test (timeout = 30000)
public void testBasicChmodOnDir() throws IOException {
// Validate that listing a directory with no read permission fails
File a = new File(TEST_DIR, "a");
File b = new File(a, "b");
a.mkdirs();
assertTrue(b.createNewFile());
// Remove read permissions on directory a
chmod("300", a);
String[] files = a.list();
assertTrue("Listing a directory without read permission should fail",
null == files);
// restore permissions
chmod("700", a);
// validate that the directory can be listed now
files = a.list();
assertEquals("b", files[0]);
// Remove write permissions on the directory and validate the
// behavior for adding, deleting and renaming files
chmod("500", a);
File c = new File(a, "c");
try {
// Adding a new file will fail as expected because the
// FILE_WRITE_DATA/FILE_ADD_FILE privilege is denied on
// the dir.
c.createNewFile();
assertFalse("writeFile should have failed!", true);
} catch (IOException ex) {
LOG.info("Expected: Failed to create a file when directory "
+ "permissions are 577");
}
// Deleting a file will succeed even if write permissions are not present
// on the parent dir. Check the following link for additional details:
// http://support.microsoft.com/kb/238018
assertTrue("Special behavior: deleting a file will succeed on Windows "
+ "even if a user does not have write permissions on the parent dir",
b.delete());
assertFalse("Renaming a file should fail on the dir where a user does "
+ "not have write permissions", b.renameTo(new File(a, "d")));
// restore permissions
chmod("700", a);
// Make sure adding new files and rename succeeds now
assertTrue(c.createNewFile());
File d = new File(a, "d");
assertTrue(c.renameTo(d));
// at this point in the test, d is the only remaining file in directory a
// Removing execute permissions does not have the same behavior on
// Windows as on Linux. Adding, renaming, deleting and listing files
// will still succeed. Windows default behavior is to bypass directory
// traverse checking (BYPASS_TRAVERSE_CHECKING privilege) for all users.
// See the following link for additional details:
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa364399(v=vs.85).aspx
chmod("600", a);
// validate directory listing
files = a.list();
assertEquals("d", files[0]);
// validate delete
assertTrue(d.delete());
// validate add
File e = new File(a, "e");
assertTrue(e.createNewFile());
// validate rename
assertTrue(e.renameTo(new File(a, "f")));
// restore permissions
chmod("700", a);
}
@Test (timeout = 30000)
public void testChmod() throws IOException {
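    // Octal masks set permission bits absolutely; symbolic clauses apply
    // left to right. For example, starting from 700, "u-x,g+r,o=g" removes
    // execute from the owner, adds read for the group, then copies the
    // group bits to others, yielding "-rw-r--r--".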
testChmodInternal("7", "-------rwx");
testChmodInternal("70", "----rwx---");
testChmodInternal("u-x,g+r,o=g", "-rw-r--r--");
testChmodInternal("u-x,g+rw", "-rw-rw----");
testChmodInternal("u-x,g+rwx-x,o=u", "-rw-rw-rw-");
testChmodInternal("+", "-rwx------");
// Recursive chmod tests
testChmodInternalR("755", "rwxr-xr-x", "rwxr-xr-x");
testChmodInternalR("u-x,g+r,o=g", "rw-r--r--", "rw-r--r--");
testChmodInternalR("u-x,g+rw", "rw-rw----", "rw-rw----");
testChmodInternalR("u-x,g+rwx-x,o=u", "rw-rw-rw-", "rw-rw-rw-");
testChmodInternalR("a+rX", "rw-r--r--", "rwxr-xr-x");
// Test a new file created in a chmod'ed directory has expected permission
testNewFileChmodInternal("-rwxr-xr-x");
}
private void chown(String userGroup, File file) throws IOException {
Shell.execCommand(
Shell.WINUTILS, "chown", userGroup, file.getCanonicalPath());
}
private void assertOwners(File file, String expectedUser,
String expectedGroup) throws IOException {
String [] args = lsF(file).trim().split("[\\|]");
assertEquals(StringUtils.toLowerCase(expectedUser),
StringUtils.toLowerCase(args[2]));
assertEquals(StringUtils.toLowerCase(expectedGroup),
StringUtils.toLowerCase(args[3]));
}
@Test (timeout = 30000)
public void testChown() throws IOException {
File a = new File(TEST_DIR, "a");
assertTrue(a.createNewFile());
String username = System.getProperty("user.name");
// username including the domain aka DOMAIN\\user
String qualifiedUsername = Shell.execCommand("whoami").trim();
String admins = "Administrators";
String qualifiedAdmins = "BUILTIN\\Administrators";
chown(username + ":" + admins, a);
assertOwners(a, qualifiedUsername, qualifiedAdmins);
chown(username, a);
chown(":" + admins, a);
assertOwners(a, qualifiedUsername, qualifiedAdmins);
chown(":" + admins, a);
chown(username + ":", a);
assertOwners(a, qualifiedUsername, qualifiedAdmins);
assertTrue(a.delete());
assertFalse(a.exists());
}
@Test (timeout = 30000)
public void testSymlinkRejectsForwardSlashesInLink() throws IOException {
File newFile = new File(TEST_DIR, "file");
assertTrue(newFile.createNewFile());
String target = newFile.getPath();
String link = new File(TEST_DIR, "link").getPath().replaceAll("\\\\", "/");
try {
Shell.execCommand(Shell.WINUTILS, "symlink", link, target);
fail(String.format("did not receive expected failure creating symlink "
+ "with forward slashes in link: link = %s, target = %s", link, target));
} catch (IOException e) {
      LOG.info(
          "Expected: Failed to create symlink with forward slashes in link");
}
}
@Test (timeout = 30000)
public void testSymlinkRejectsForwardSlashesInTarget() throws IOException {
File newFile = new File(TEST_DIR, "file");
assertTrue(newFile.createNewFile());
String target = newFile.getPath().replaceAll("\\\\", "/");
String link = new File(TEST_DIR, "link").getPath();
try {
Shell.execCommand(Shell.WINUTILS, "symlink", link, target);
fail(String.format("did not receive expected failure creating symlink "
+ "with forward slashes in target: link = %s, target = %s", link, target));
} catch (IOException e) {
LOG.info(
"Expected: Failed to create symlink with forward slashes in target");
}
}
@Test (timeout = 30000)
public void testReadLink() throws IOException {
// Create TEST_DIR\dir1\file1.txt
//
File dir1 = new File(TEST_DIR, "dir1");
assertTrue(dir1.mkdirs());
File file1 = new File(dir1, "file1.txt");
assertTrue(file1.createNewFile());
File dirLink = new File(TEST_DIR, "dlink");
File fileLink = new File(TEST_DIR, "flink");
// Next create a directory symlink to dir1 and a file
// symlink to file1.txt.
//
Shell.execCommand(
Shell.WINUTILS, "symlink", dirLink.toString(), dir1.toString());
Shell.execCommand(
Shell.WINUTILS, "symlink", fileLink.toString(), file1.toString());
// Read back the two links and ensure we get what we expected.
//
String readLinkOutput = Shell.execCommand(Shell.WINUTILS,
"readlink",
dirLink.toString());
assertThat(readLinkOutput, equalTo(dir1.toString()));
readLinkOutput = Shell.execCommand(Shell.WINUTILS,
"readlink",
fileLink.toString());
assertThat(readLinkOutput, equalTo(file1.toString()));
// Try a few invalid inputs and verify we get an ExitCodeException for each.
//
try {
// No link name specified.
//
Shell.execCommand(Shell.WINUTILS, "readlink", "");
fail("Failed to get Shell.ExitCodeException when reading bad symlink");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1));
}
try {
// Bad link name.
//
Shell.execCommand(Shell.WINUTILS, "readlink", "ThereIsNoSuchLink");
fail("Failed to get Shell.ExitCodeException when reading bad symlink");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1));
}
try {
// Non-symlink directory target.
//
Shell.execCommand(Shell.WINUTILS, "readlink", dir1.toString());
fail("Failed to get Shell.ExitCodeException when reading bad symlink");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1));
}
try {
// Non-symlink file target.
//
Shell.execCommand(Shell.WINUTILS, "readlink", file1.toString());
fail("Failed to get Shell.ExitCodeException when reading bad symlink");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1));
}
try {
// Too many parameters.
//
Shell.execCommand(Shell.WINUTILS, "readlink", "a", "b");
fail("Failed to get Shell.ExitCodeException with bad parameters");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1));
}
}
@SuppressWarnings("deprecation")
@Test(timeout=10000)
public void testTaskCreate() throws IOException {
File batch = new File(TEST_DIR, "testTaskCreate.cmd");
File proof = new File(TEST_DIR, "testTaskCreate.out");
FileWriter fw = new FileWriter(batch);
String testNumber = String.format("%f", Math.random());
fw.write(String.format("echo %s > \"%s\"", testNumber, proof.getAbsolutePath()));
fw.close();
assertFalse(proof.exists());
Shell.execCommand(Shell.WINUTILS, "task", "create", "testTaskCreate" + testNumber,
batch.getAbsolutePath());
assertTrue(proof.exists());
String outNumber = FileUtils.readFileToString(proof);
assertThat(outNumber, containsString(testNumber));
}
@Test (timeout = 30000)
public void testTaskCreateWithLimits() throws IOException {
// Generate a unique job id
String jobId = String.format("%f", Math.random());
// Run a task without any options
String out = Shell.execCommand(Shell.WINUTILS, "task", "create",
"job" + jobId, "cmd /c echo job" + jobId);
assertTrue(out.trim().equals("job" + jobId));
// Run a task without any limits
jobId = String.format("%f", Math.random());
out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
"-1", "job" + jobId, "cmd /c echo job" + jobId);
assertTrue(out.trim().equals("job" + jobId));
// Run a task with limits (128MB should be enough for a cmd)
jobId = String.format("%f", Math.random());
out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "10000", "-m",
"128", "job" + jobId, "cmd /c echo job" + jobId);
assertTrue(out.trim().equals("job" + jobId));
// Run a task without enough memory
try {
jobId = String.format("%f", Math.random());
out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-m", "128", "job"
+ jobId, "java -Xmx256m -version");
fail("Failed to get Shell.ExitCodeException with insufficient memory");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1));
}
// Run tasks with wrong parameters
//
try {
jobId = String.format("%f", Math.random());
Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
"-1", "foo", "job" + jobId, "cmd /c echo job" + jobId);
fail("Failed to get Shell.ExitCodeException with bad parameters");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1639));
}
try {
jobId = String.format("%f", Math.random());
Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-m", "-1",
"job" + jobId, "cmd /c echo job" + jobId);
fail("Failed to get Shell.ExitCodeException with bad parameters");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1639));
}
try {
jobId = String.format("%f", Math.random());
Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "foo",
"job" + jobId, "cmd /c echo job" + jobId);
fail("Failed to get Shell.ExitCodeException with bad parameters");
} catch (Shell.ExitCodeException ece) {
assertThat(ece.getExitCode(), is(1639));
}
}
}
| 21,161 | 33.522023 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * A JUnit test for {@link SysInfoLinux}.
 * It creates fake /proc/ files and verifies the parsing and calculations.
 */
public class TestSysInfoLinux {
  /**
   * A {@link SysInfoLinux} with a fake timer and a fixed disk sector size.
   */
static class FakeLinuxResourceCalculatorPlugin extends SysInfoLinux {
static final int SECTORSIZE = 4096;
long currentTime = 0;
public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
String procfsCpuFile,
String procfsStatFile,
String procfsNetFile,
String procfsDisksFile,
long jiffyLengthInMillis) {
super(procfsMemFile, procfsCpuFile, procfsStatFile, procfsNetFile,
procfsDisksFile, jiffyLengthInMillis);
}
@Override
long getCurrentTime() {
return currentTime;
}
public void advanceTime(long adv) {
currentTime += adv * this.getJiffyLengthInMillis();
}
@Override
int readDiskBlockInformation(String diskName, int defSector) {
return SECTORSIZE;
}
}
private static final FakeLinuxResourceCalculatorPlugin plugin;
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString().replace(' ', '+');
private static final String FAKE_MEMFILE;
private static final String FAKE_CPUFILE;
private static final String FAKE_STATFILE;
private static final String FAKE_NETFILE;
private static final String FAKE_DISKSFILE;
private static final long FAKE_JIFFY_LENGTH = 10L;
static {
int randomNum = (new Random()).nextInt(1000000000);
FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
FAKE_NETFILE = TEST_ROOT_DIR + File.separator + "NETINFO_" + randomNum;
FAKE_DISKSFILE = TEST_ROOT_DIR + File.separator + "DISKSINFO_" + randomNum;
plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
FAKE_STATFILE,
FAKE_NETFILE,
FAKE_DISKSFILE,
FAKE_JIFFY_LENGTH);
}
static final String MEMINFO_FORMAT =
"MemTotal: %d kB\n" +
"MemFree: %d kB\n" +
"Buffers: 138244 kB\n" +
"Cached: 947780 kB\n" +
"SwapCached: 142880 kB\n" +
"Active: 3229888 kB\n" +
"Inactive: %d kB\n" +
"SwapTotal: %d kB\n" +
"SwapFree: %d kB\n" +
"Dirty: 122012 kB\n" +
"Writeback: 0 kB\n" +
"AnonPages: 2710792 kB\n" +
"Mapped: 24740 kB\n" +
"Slab: 132528 kB\n" +
"SReclaimable: 105096 kB\n" +
"SUnreclaim: 27432 kB\n" +
"PageTables: 11448 kB\n" +
"NFS_Unstable: 0 kB\n" +
"Bounce: 0 kB\n" +
"CommitLimit: 4125904 kB\n" +
"Committed_AS: 4143556 kB\n" +
"VmallocTotal: 34359738367 kB\n" +
"VmallocUsed: 1632 kB\n" +
"VmallocChunk: 34359736375 kB\n" +
"HugePages_Total: 0\n" +
"HugePages_Free: 0\n" +
"HugePages_Rsvd: 0\n" +
"Hugepagesize: 2048 kB";
static final String CPUINFO_FORMAT =
"processor : %s\n" +
"vendor_id : AuthenticAMD\n" +
"cpu family : 15\n" +
"model : 33\n" +
"model name : Dual Core AMD Opteron(tm) Processor 280\n" +
"stepping : 2\n" +
"cpu MHz : %f\n" +
"cache size : 1024 KB\n" +
"physical id : %s\n" +
"siblings : 2\n" +
"core id : %s\n" +
"cpu cores : 2\n" +
"fpu : yes\n" +
"fpu_exception : yes\n" +
"cpuid level : 1\n" +
"wp : yes\n" +
"flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
"pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
"3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
"bogomips : 4792.41\n" +
"TLB size : 1024 4K pages\n" +
"clflush size : 64\n" +
"cache_alignment : 64\n" +
"address sizes : 40 bits physical, 48 bits virtual\n" +
"power management: ts fid vid ttp";
static final String STAT_FILE_FORMAT =
"cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
"cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
"cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
"cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
"cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
"intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
"ctxt 242017731764\n" +
"btime 1257808753\n" +
"processes 26414943\n" +
"procs_running 1\n" +
"procs_blocked 0\n";
static final String NETINFO_FORMAT =
"Inter-| Receive | Transmit\n"+
"face |bytes packets errs drop fifo frame compressed multicast|bytes packets"+
"errs drop fifo colls carrier compressed\n"+
" lo: 42236310 563003 0 0 0 0 0 0 42236310 563003 " +
"0 0 0 0 0 0\n"+
" eth0: %d 3452527 0 0 0 0 0 299787 %d 1866280 0 0 " +
"0 0 0 0\n"+
" eth1: %d 3152521 0 0 0 0 0 219781 %d 1866290 0 0 " +
"0 0 0 0\n";
static final String DISKSINFO_FORMAT =
"1 0 ram0 0 0 0 0 0 0 0 0 0 0 0\n"+
"1 1 ram1 0 0 0 0 0 0 0 0 0 0 0\n"+
"1 2 ram2 0 0 0 0 0 0 0 0 0 0 0\n"+
"1 3 ram3 0 0 0 0 0 0 0 0 0 0 0\n"+
"1 4 ram4 0 0 0 0 0 0 0 0 0 0 0\n"+
"1 5 ram5 0 0 0 0 0 0 0 0 0 0 0\n"+
"1 6 ram6 0 0 0 0 0 0 0 0 0 0 0\n"+
"7 0 loop0 0 0 0 0 0 0 0 0 0 0 0\n"+
"7 1 loop1 0 0 0 0 0 0 0 0 0 0 0\n"+
"8 0 sda 82575678 2486518 %d 59876600 3225402 19761924 %d " +
"6407705 4 48803346 66227952\n"+
"8 1 sda1 732 289 21354 787 7 3 32 4 0 769 791"+
"8 2 sda2 744272 2206315 23605200 6742762 336830 2979630 " +
"26539520 1424776 4 1820130 8165444\n"+
"8 3 sda3 81830497 279914 17881852954 53132969 2888558 16782291 " +
"157367552 4982925 0 47077660 58061635\n"+
"8 32 sdc 10148118 693255 %d 122125461 6090515 401630172 %d 2696685590 " +
"0 26848216 2818793840\n"+
"8 33 sdc1 10147917 693230 2054138426 122125426 6090506 401630172 " +
"3261765880 2696685589 0 26848181 2818793804\n"+
"8 64 sde 9989771 553047 %d 93407551 5978572 391997273 %d 2388274325 " +
"0 24396646 2481664818\n"+
"8 65 sde1 9989570 553022 1943973346 93407489 5978563 391997273 3183807264 " +
"2388274325 0 24396584 2481666274\n"+
"8 80 sdf 10197163 693995 %d 144374395 6216644 408395438 %d 2669389056 0 " +
"26164759 2813746348\n"+
"8 81 sdf1 10196962 693970 2033452794 144374355 6216635 408395438 3316897064 " +
"2669389056 0 26164719 2813746308\n"+
"8 129 sdi1 10078602 657936 2056552626 108362198 6134036 403851153 3279882064 " +
"2639256086 0 26260432 2747601085\n";
/**
* Test parsing /proc/stat and /proc/cpuinfo
* @throws IOException
*/
@Test
public void parsingProcStatAndCpuFile() throws IOException {
// Write fake /proc/cpuinfo file.
long numProcessors = 8;
long cpuFrequencyKHz = 2392781;
String fileContent = "";
for (int i = 0; i < numProcessors; i++) {
fileContent +=
String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D, 0, 0)
+ "\n";
}
File tempFile = new File(FAKE_CPUFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
fWriter.write(fileContent);
fWriter.close();
assertEquals(plugin.getNumProcessors(), numProcessors);
assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
// Write fake /proc/stat file.
long uTime = 54972994;
long nTime = 188860;
long sTime = 19803373;
tempFile = new File(FAKE_STATFILE);
tempFile.deleteOnExit();
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCumulativeCpuTime(),
FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
assertEquals(plugin.getCpuUsage(), (float)(CpuTimeTracker.UNAVAILABLE),0.0);
// Advance the time and sample again to test the CPU usage calculation
uTime += 100L;
plugin.advanceTime(200L);
updateStatFile(uTime, nTime, sTime);
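    // 100 additional CPU jiffies over 200 elapsed jiffies is 50% of one
    // core, i.e. 50 / 8 processors = 6.25% overall usage.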
assertEquals(plugin.getCumulativeCpuTime(),
FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
assertEquals(plugin.getCpuUsage(), 6.25F, 0.0);
// Advance the time and sample again. This time, we call getCpuUsage() only.
uTime += 600L;
plugin.advanceTime(300L);
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCpuUsage(), 25F, 0.0);
    // Advance a very short period of time (one jiffy length).
    // In this case, the CPU usage should not be updated.
uTime += 1L;
plugin.advanceTime(1L);
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCumulativeCpuTime(),
FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
assertEquals(plugin.getCpuUsage(), 25F, 0.0); // CPU usage is not updated.
}
/**
* Write information to fake /proc/stat file
*/
private void updateStatFile(long uTime, long nTime, long sTime)
throws IOException {
FileWriter fWriter = new FileWriter(FAKE_STATFILE);
fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
fWriter.close();
}
/**
* Test parsing /proc/meminfo
* @throws IOException
*/
@Test
public void parsingProcMemFile() throws IOException {
long memTotal = 4058864L;
long memFree = 99632L;
long inactive = 567732L;
long swapTotal = 2096472L;
long swapFree = 1818480L;
File tempFile = new File(FAKE_MEMFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
fWriter.write(String.format(MEMINFO_FORMAT,
memTotal, memFree, inactive, swapTotal, swapFree));
fWriter.close();
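    // SysInfoLinux counts MemFree + Inactive as available physical memory,
    // adding SwapFree on top for available virtual memory.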
assertEquals(plugin.getAvailablePhysicalMemorySize(),
1024L * (memFree + inactive));
assertEquals(plugin.getAvailableVirtualMemorySize(),
1024L * (memFree + inactive + swapFree));
assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
}
@Test
public void testCoreCounts() throws IOException {
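    // A core is identified by its (physical id, core id) pair, so
    // hyper-threaded siblings that share a pair count as one core while
    // distinct pairs count separately.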
String fileContent = "";
// single core, hyper threading
long numProcessors = 2;
long cpuFrequencyKHz = 2392781;
for (int i = 0; i < numProcessors; i++) {
fileContent =
fileContent.concat(String.format(CPUINFO_FORMAT, i,
cpuFrequencyKHz / 1000D, 0, 0));
fileContent = fileContent.concat("\n");
}
writeFakeCPUInfoFile(fileContent);
plugin.setReadCpuInfoFile(false);
assertEquals(numProcessors, plugin.getNumProcessors());
assertEquals(1, plugin.getNumCores());
// single socket quad core, no hyper threading
fileContent = "";
numProcessors = 4;
for (int i = 0; i < numProcessors; i++) {
fileContent =
fileContent.concat(String.format(CPUINFO_FORMAT, i,
cpuFrequencyKHz / 1000D, 0, i));
fileContent = fileContent.concat("\n");
}
writeFakeCPUInfoFile(fileContent);
plugin.setReadCpuInfoFile(false);
assertEquals(numProcessors, plugin.getNumProcessors());
assertEquals(4, plugin.getNumCores());
// dual socket single core, hyper threading
fileContent = "";
numProcessors = 4;
for (int i = 0; i < numProcessors; i++) {
fileContent =
fileContent.concat(String.format(CPUINFO_FORMAT, i,
cpuFrequencyKHz / 1000D, i / 2, 0));
fileContent = fileContent.concat("\n");
}
writeFakeCPUInfoFile(fileContent);
plugin.setReadCpuInfoFile(false);
assertEquals(numProcessors, plugin.getNumProcessors());
assertEquals(2, plugin.getNumCores());
// dual socket, dual core, no hyper threading
fileContent = "";
numProcessors = 4;
for (int i = 0; i < numProcessors; i++) {
fileContent =
fileContent.concat(String.format(CPUINFO_FORMAT, i,
cpuFrequencyKHz / 1000D, i / 2, i % 2));
fileContent = fileContent.concat("\n");
}
writeFakeCPUInfoFile(fileContent);
plugin.setReadCpuInfoFile(false);
assertEquals(numProcessors, plugin.getNumProcessors());
assertEquals(4, plugin.getNumCores());
// dual socket, dual core, hyper threading
fileContent = "";
numProcessors = 8;
for (int i = 0; i < numProcessors; i++) {
fileContent =
fileContent.concat(String.format(CPUINFO_FORMAT, i,
cpuFrequencyKHz / 1000D, i / 4, (i % 4) / 2));
fileContent = fileContent.concat("\n");
}
writeFakeCPUInfoFile(fileContent);
plugin.setReadCpuInfoFile(false);
assertEquals(numProcessors, plugin.getNumProcessors());
assertEquals(4, plugin.getNumCores());
}
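  // Sketch of the core-counting rule the assertions above rely on (an
  // illustration, not the plugin's parser): every "processor" entry is one
  // logical processor, while physical cores are the distinct
  // (physical id, core id) pairs.
  private static int sketchCountCores(int[][] physicalAndCoreIds) {
    java.util.Set<String> cores = new java.util.HashSet<String>();
    for (int[] ids : physicalAndCoreIds) {
      cores.add(ids[0] + ":" + ids[1]);
    }
    return cores.size();
  }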
private void writeFakeCPUInfoFile(String content) throws IOException {
File tempFile = new File(FAKE_CPUFILE);
FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
tempFile.deleteOnExit();
try {
fWriter.write(content);
} finally {
IOUtils.closeQuietly(fWriter);
}
}
/**
* Test parsing /proc/net/dev
* @throws IOException
*/
@Test
public void parsingProcNetFile() throws IOException {
long numBytesReadIntf1 = 2097172468L;
long numBytesWrittenIntf1 = 1355620114L;
long numBytesReadIntf2 = 1097172460L;
long numBytesWrittenIntf2 = 1055620110L;
File tempFile = new File(FAKE_NETFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_NETFILE);
fWriter.write(String.format(NETINFO_FORMAT,
numBytesReadIntf1, numBytesWrittenIntf1,
numBytesReadIntf2, numBytesWrittenIntf2));
fWriter.close();
assertEquals(plugin.getNetworkBytesRead(), numBytesReadIntf1 + numBytesReadIntf2);
assertEquals(plugin.getNetworkBytesWritten(), numBytesWrittenIntf1 + numBytesWrittenIntf2);
}
/**
* Test parsing /proc/diskstats
* @throws IOException
*/
@Test
public void parsingProcDisksFile() throws IOException {
long numSectorsReadsda = 1790549L; long numSectorsWrittensda = 1839071L;
long numSectorsReadsdc = 20541402L; long numSectorsWrittensdc = 32617658L;
long numSectorsReadsde = 19439751L; long numSectorsWrittensde = 31838072L;
long numSectorsReadsdf = 20334546L; long numSectorsWrittensdf = 33168970L;
File tempFile = new File(FAKE_DISKSFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_DISKSFILE);
fWriter.write(String.format(DISKSINFO_FORMAT,
numSectorsReadsda, numSectorsWrittensda,
numSectorsReadsdc, numSectorsWrittensdc,
numSectorsReadsde, numSectorsWrittensde,
numSectorsReadsdf, numSectorsWrittensdf));
fWriter.close();
long expectedNumSectorsRead = numSectorsReadsda + numSectorsReadsdc +
numSectorsReadsde + numSectorsReadsdf;
long expectedNumSectorsWritten = numSectorsWrittensda + numSectorsWrittensdc +
numSectorsWrittensde + numSectorsWrittensdf;
// use non-default sector size
int diskSectorSize = FakeLinuxResourceCalculatorPlugin.SECTORSIZE;
assertEquals(expectedNumSectorsRead * diskSectorSize,
plugin.getStorageBytesRead());
assertEquals(expectedNumSectorsWritten * diskSectorSize,
plugin.getStorageBytesWritten());
}
}
| 17,144 | 38.595843 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.junit.Assert;
import com.google.common.collect.Maps;
import static org.junit.Assert.fail;
public class TestGenericOptionsParser extends TestCase {
File testDir;
Configuration conf;
FileSystem localFs;
public void testFilesOption() throws Exception {
File tmpFile = new File(testDir, "tmpfile");
Path tmpPath = new Path(tmpFile.toString());
localFs.create(tmpPath);
String[] args = new String[2];
// pass a files option
args[0] = "-files";
// Convert a file to a URI as File.toString() is not a valid URI on
// all platforms and GenericOptionsParser accepts only valid URIs
args[1] = tmpFile.toURI().toString();
new GenericOptionsParser(conf, args);
String files = conf.get("tmpfiles");
assertNotNull("files is null", files);
assertEquals("files option does not match",
localFs.makeQualified(tmpPath).toString(), files);
// pass file as uri
Configuration conf1 = new Configuration();
URI tmpURI = new URI(tmpFile.toURI().toString() + "#link");
args[0] = "-files";
args[1] = tmpURI.toString();
new GenericOptionsParser(conf1, args);
files = conf1.get("tmpfiles");
assertNotNull("files is null", files);
assertEquals("files option does not match",
localFs.makeQualified(new Path(tmpURI)).toString(), files);
// pass a file that does not exist.
// GenericOptionParser should throw exception
Configuration conf2 = new Configuration();
args[0] = "-files";
args[1] = "file:///xyz.txt";
Throwable th = null;
try {
new GenericOptionsParser(conf2, args);
} catch (Exception e) {
th = e;
}
assertNotNull("throwable is null", th);
assertTrue("FileNotFoundException is not thrown",
th instanceof FileNotFoundException);
files = conf2.get("tmpfiles");
assertNull("files is not null", files);
}
/**
* Test the case where the libjars, files and archives arguments
   * contain an empty token, which should result in an IllegalArgumentException.
*/
public void testEmptyFilenames() throws Exception {
List<Pair<String, String>> argsAndConfNames = new ArrayList<Pair<String, String>>();
argsAndConfNames.add(new Pair<String, String>("-libjars", "tmpjars"));
argsAndConfNames.add(new Pair<String, String>("-files", "tmpfiles"));
argsAndConfNames.add(new Pair<String, String>("-archives", "tmparchives"));
for (Pair<String, String> argAndConfName : argsAndConfNames) {
String arg = argAndConfName.getFirst();
String configName = argAndConfName.getSecond();
File tmpFileOne = new File(testDir, "tmpfile1");
Path tmpPathOne = new Path(tmpFileOne.toString());
File tmpFileTwo = new File(testDir, "tmpfile2");
Path tmpPathTwo = new Path(tmpFileTwo.toString());
localFs.create(tmpPathOne);
localFs.create(tmpPathTwo);
String[] args = new String[2];
args[0] = arg;
// create an empty path in between two valid files,
// which prior to HADOOP-10820 used to result in the
// working directory being added to "tmpjars" (or equivalent)
args[1] = String.format("%s,,%s",
tmpFileOne.toURI().toString(), tmpFileTwo.toURI().toString());
try {
new GenericOptionsParser(conf, args);
fail("Expected exception for empty filename");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("File name can't be"
+ " empty string", e);
}
      // test zero-length file list - it should raise an exception
args[1] = ",,";
try {
new GenericOptionsParser(conf, args);
fail("Expected exception for zero file list length");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("File name can't be"
+ " empty string", e);
}
// test filename with space character
      // it should trigger an exception from the URI parser
      // due to a URI syntax error
args[1] = String.format("%s, ,%s",
tmpFileOne.toURI().toString(), tmpFileTwo.toURI().toString());
try {
new GenericOptionsParser(conf, args);
fail("Expected exception for filename with space character");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("URISyntaxException", e);
}
}
}
/**
* Test that options passed to the constructor are used.
*/
@SuppressWarnings("static-access")
public void testCreateWithOptions() throws Exception {
// Create new option newOpt
Option opt = OptionBuilder.withArgName("int")
.hasArg()
.withDescription("A new option")
.create("newOpt");
Options opts = new Options();
opts.addOption(opt);
// Check newOpt is actually used to parse the args
String[] args = new String[2];
args[0] = "--newOpt";
args[1] = "7";
GenericOptionsParser g = new GenericOptionsParser(opts, args);
assertEquals("New option was ignored",
"7", g.getCommandLine().getOptionValues("newOpt")[0]);
}
/**
* Test that multiple conf arguments can be used.
*/
public void testConfWithMultipleOpts() throws Exception {
String[] args = new String[2];
args[0] = "--conf=foo";
args[1] = "--conf=bar";
GenericOptionsParser g = new GenericOptionsParser(args);
assertEquals("1st conf param is incorrect",
"foo", g.getCommandLine().getOptionValues("conf")[0]);
assertEquals("2st conf param is incorrect",
"bar", g.getCommandLine().getOptionValues("conf")[1]);
}
@Override
protected void setUp() throws Exception {
super.setUp();
conf = new Configuration();
localFs = FileSystem.getLocal(conf);
testDir = new File(System.getProperty("test.build.data", "/tmp"), "generic");
if(testDir.exists())
localFs.delete(new Path(testDir.toString()), true);
}
@Override
protected void tearDown() throws Exception {
super.tearDown();
if(testDir.exists()) {
localFs.delete(new Path(testDir.toString()), true);
}
}
/**
   * Tests the -tokenCacheFile option.
* @throws IOException
*/
public void testTokenCacheOption() throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
File tmpFile = new File(testDir, "tokenCacheFile");
if(tmpFile.exists()) {
tmpFile.delete();
}
String[] args = new String[2];
// pass a files option
args[0] = "-tokenCacheFile";
args[1] = tmpFile.toURI().toString();
// test non existing file
Throwable th = null;
try {
new GenericOptionsParser(conf, args);
} catch (Exception e) {
th = e;
}
assertNotNull(th);
assertTrue("FileNotFoundException is not thrown",
th instanceof FileNotFoundException);
// create file
Path tmpPath = localFs.makeQualified(new Path(tmpFile.toString()));
Token<?> token = new Token<AbstractDelegationTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(),
new Text("token-kind"), new Text("token-service"));
Credentials creds = new Credentials();
creds.addToken(new Text("token-alias"), token);
creds.writeTokenStorageFile(tmpPath, conf);
new GenericOptionsParser(conf, args);
String fileName = conf.get("mapreduce.job.credentials.binary");
assertNotNull("files is null", fileName);
assertEquals("files option does not match", tmpPath.toString(), fileName);
Credentials ugiCreds =
UserGroupInformation.getCurrentUser().getCredentials();
assertEquals(1, ugiCreds.numberOfTokens());
Token<?> ugiToken = ugiCreds.getToken(new Text("token-alias"));
assertNotNull(ugiToken);
assertEquals(token, ugiToken);
localFs.delete(new Path(testDir.getAbsolutePath()), true);
}
/** Test -D parsing */
public void testDOptionParsing() throws Exception {
String[] args;
Map<String,String> expectedMap;
String[] expectedRemainingArgs;
args = new String[]{};
expectedRemainingArgs = new String[]{};
expectedMap = Maps.newHashMap();
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-Dkey1=value1"};
expectedRemainingArgs = new String[]{};
expectedMap = Maps.newHashMap();
expectedMap.put("key1", "value1");
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-Dkey1=value1", "arg1"};
expectedRemainingArgs = new String[]{"arg1"};
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1=value1", "arg1"};
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
if (Shell.WINDOWS) {
args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1",
"value1", "arg1"};
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-Dkey1", "value1", "arg1"};
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1", "value1",
"-fs", "someother", "-D", "key2", "value2", "arg1", "arg2"};
expectedRemainingArgs = new String[]{"arg1", "arg2"};
expectedMap = Maps.newHashMap();
expectedMap.put("key1", "value1");
expectedMap.put("key2", "value2");
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1", "value1",
"-fs", "someother", "-D", "key2", "value2"};
expectedRemainingArgs = new String[]{};
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1", "value1",
"-fs", "someother", "-D", "key2"};
expectedMap = Maps.newHashMap();
expectedMap.put("key1", "value1");
expectedMap.put("key2", null); // we expect key2 not set
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
}
args = new String[]{"-fs", "hdfs://somefs/", "-D", "key1=value1",
"-fs", "someother", "-Dkey2"};
expectedRemainingArgs = new String[]{};
expectedMap = Maps.newHashMap();
expectedMap.put("key1", "value1");
expectedMap.put("key2", null); // we expect key2 not set
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
args = new String[]{"-fs", "hdfs://somefs/", "-D"};
expectedMap = Maps.newHashMap();
assertDOptionParsing(args, expectedMap, expectedRemainingArgs);
}
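  // Usage sketch (illustrative only): in production code GenericOptionsParser
  // is usually driven via ToolRunner, which applies the -D/-fs/-conf generic
  // options to the Configuration before handing the leftover arguments to
  // Tool.run(). "SketchTool" is a hypothetical example, not part of Hadoop.
  private static class SketchTool extends org.apache.hadoop.conf.Configured
      implements Tool {
    @Override
    public int run(String[] remainingArgs) throws Exception {
      // getConf() already carries any -Dkey=value generic options.
      return getConf().get("key1") == null ? 1 : 0;
    }
  }
  // e.g. int rc = ToolRunner.run(new Configuration(), new SketchTool(), args);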
private void assertDOptionParsing(String[] args,
Map<String,String> expectedMap, String[] expectedRemainingArgs)
throws Exception {
for (Map.Entry<String, String> entry : expectedMap.entrySet()) {
assertNull(conf.get(entry.getKey()));
}
Configuration conf = new Configuration();
GenericOptionsParser parser = new GenericOptionsParser(conf, args);
String[] remainingArgs = parser.getRemainingArgs();
for (Map.Entry<String, String> entry : expectedMap.entrySet()) {
assertEquals(entry.getValue(), conf.get(entry.getKey()));
}
Assert.assertArrayEquals(
Arrays.toString(remainingArgs) + Arrays.toString(expectedRemainingArgs),
expectedRemainingArgs, remainingArgs);
}
  /** Test passing null as args. Some callers still invoke the
   * Tool interface from Java code passing null.
*/
public void testNullArgs() throws IOException {
GenericOptionsParser parser = new GenericOptionsParser(conf, null);
parser.getRemainingArgs();
}
}
| 13,435 | 36.322222 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringInterner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import static org.apache.hadoop.util.StringInterner.*;
import org.junit.Test;
/**
 * Tests string interning via {@link StringInterner}.
*/
public class TestStringInterner {
/**
   * Test that different references are returned for string
   * instances that are equal to each other but not interned.
*/
@Test
public void testNoIntern() {
String literalABC = "ABC";
String substringABC = "ABCDE".substring(0,3);
String heapABC = new String("ABC");
assertNotSame(literalABC, substringABC);
assertNotSame(literalABC, heapABC);
assertNotSame(substringABC, heapABC);
}
/**
   * Test that the same strongly interned reference is returned
   * for all string instances that are equal to each other.
*/
@Test
public void testStrongIntern() {
String strongInternLiteralABC = strongIntern("ABC");
String strongInternSubstringABC = strongIntern("ABCDE".substring(0,3));
String strongInternHeapABC = strongIntern(new String("ABC"));
assertSame(strongInternLiteralABC, strongInternSubstringABC);
assertSame(strongInternLiteralABC, strongInternHeapABC);
assertSame(strongInternSubstringABC, strongInternHeapABC);
}
/**
   * Test that the same weakly interned reference is returned
   * for all string instances that are equal to each other.
*/
@Test
public void testWeakIntern() {
String weakInternLiteralABC = weakIntern("ABC");
String weakInternSubstringABC = weakIntern("ABCDE".substring(0,3));
String weakInternHeapABC = weakIntern(new String("ABC"));
assertSame(weakInternLiteralABC, weakInternSubstringABC);
assertSame(weakInternLiteralABC, weakInternHeapABC);
assertSame(weakInternSubstringABC, weakInternHeapABC);
}
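  // Minimal sketch of what weakIntern is expected to do (an illustration,
  // not StringInterner's actual implementation): canonicalize through a map
  // of weak references so unused canonical strings can still be collected.
  private static final java.util.Map<String, java.lang.ref.WeakReference<String>>
      SKETCH_POOL = java.util.Collections.synchronizedMap(
          new java.util.WeakHashMap<String, java.lang.ref.WeakReference<String>>());
  private static String sketchWeakIntern(String s) {
    java.lang.ref.WeakReference<String> ref = SKETCH_POOL.get(s);
    String canonical = ref == null ? null : ref.get();
    if (canonical == null) {
      canonical = s;
      SKETCH_POOL.put(s, new java.lang.ref.WeakReference<String>(s));
    }
    return canonical;
  }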
}
| 2,586 | 32.597403 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import junit.framework.TestCase;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import org.apache.hadoop.fs.FileUtil;
public class TestShell extends TestCase {
private static class Command extends Shell {
private int runCount = 0;
private Command(long interval) {
super(interval);
}
@Override
protected String[] getExecString() {
// There is no /bin/echo equivalent on Windows so just launch it as a
// shell built-in.
//
return Shell.WINDOWS ?
(new String[] {"cmd.exe", "/c", "echo", "hello"}) :
(new String[] {"echo", "hello"});
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
++runCount;
}
public int getRunCount() {
return runCount;
}
}
public void testInterval() throws IOException {
testInterval(Long.MIN_VALUE / 60000); // test a negative interval
testInterval(0L); // test a zero interval
    testInterval(10L); // test a small positive interval
testInterval(Time.now() / 60000 + 60); // test a very big interval
}
/**
* Assert that a string has a substring in it
* @param string string to search
   * @param search the substring to search for
*/
private void assertInString(String string, String search) {
assertNotNull("Empty String", string);
if (!string.contains(search)) {
fail("Did not find \"" + search + "\" in " + string);
}
}
public void testShellCommandExecutorToString() throws Throwable {
    Shell.ShellCommandExecutor sce = new Shell.ShellCommandExecutor(
        new String[] { "ls", "..", "arg 2" });
String command = sce.toString();
assertInString(command,"ls");
assertInString(command, " .. ");
assertInString(command, "\"arg 2\"");
}
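  // Basic usage sketch for Shell.ShellCommandExecutor (kept POSIX-only for
  // simplicity): execute() runs the command synchronously and getOutput()
  // returns what the command wrote to stdout.
  public void testShellCommandExecutorBasicUsage() throws Throwable {
    if (Shell.WINDOWS) {
      // "echo" is a cmd.exe built-in on Windows; skip this sketch there.
      return;
    }
    Shell.ShellCommandExecutor sce =
        new Shell.ShellCommandExecutor(new String[] { "echo", "hello" });
    sce.execute();
    assertInString(sce.getOutput(), "hello");
  }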
public void testShellCommandTimeout() throws Throwable {
if(Shell.WINDOWS) {
// setExecutable does not work on Windows
return;
}
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File shellFile = new File(rootDir, "timeout.sh");
String timeoutCommand = "sleep 4; echo \"hello\"";
PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile));
writer.println(timeoutCommand);
writer.close();
FileUtil.setExecutable(shellFile, true);
Shell.ShellCommandExecutor shexc
= new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},
null, null, 100);
try {
shexc.execute();
} catch (Exception e) {
      // An exception is thrown when the command times out.
}
shellFile.delete();
assertTrue("Script didnt not timeout" , shexc.isTimedOut());
}
private static int countTimerThreads() {
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
int count = 0;
ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
for (ThreadInfo info : infos) {
if (info == null) continue;
for (StackTraceElement elem : info.getStackTrace()) {
if (elem.getClassName().contains("Timer")) {
count++;
break;
}
}
}
return count;
}
public void testShellCommandTimerLeak() throws Exception {
    String[] quickCommand = new String[] {"/bin/sleep", "100"};
int timersBefore = countTimerThreads();
System.err.println("before: " + timersBefore);
for (int i = 0; i < 10; i++) {
Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
quickCommand, null, null, 1);
try {
shexec.execute();
fail("Bad command should throw exception");
} catch (Exception e) {
// expected
}
}
Thread.sleep(1000);
int timersAfter = countTimerThreads();
System.err.println("after: " + timersAfter);
assertEquals(timersBefore, timersAfter);
}
private void testInterval(long interval) throws IOException {
Command command = new Command(interval);
command.run();
assertEquals(1, command.getRunCount());
command.run();
if (interval > 0) {
assertEquals(1, command.getRunCount());
} else {
assertEquals(2, command.getRunCount());
}
}
}
| 5,294 | 30.331361 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Random;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
public class TestGSet {
private static final Random ran = new Random();
private static final long starttime = Time.now();
private static void print(Object s) {
System.out.print(s);
System.out.flush();
}
private static void println(Object s) {
System.out.println(s);
}
@Test
public void testExceptionCases() {
{
//test contains
final LightWeightGSet<Integer, Integer> gset
= new LightWeightGSet<Integer, Integer>(16);
try {
//test contains with a null element
gset.contains(null);
Assert.fail();
} catch(NullPointerException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
}
{
//test get
final LightWeightGSet<Integer, Integer> gset
= new LightWeightGSet<Integer, Integer>(16);
try {
//test get with a null element
gset.get(null);
Assert.fail();
} catch(NullPointerException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
}
{
//test put
final LightWeightGSet<Integer, Integer> gset
= new LightWeightGSet<Integer, Integer>(16);
try {
//test put with a null element
gset.put(null);
Assert.fail();
} catch(NullPointerException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
try {
//test putting an element which is not implementing LinkedElement
gset.put(1);
Assert.fail();
} catch(IllegalArgumentException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
}
{
//test iterator
final IntElement[] data = new IntElement[5];
for(int i = 0; i < data.length; i++) {
data[i] = new IntElement(i, i);
}
for(int v = 1; v < data.length-1; v++) {
{
//test remove while iterating
final GSet<IntElement, IntElement> gset = createGSet(data);
for(IntElement i : gset) {
if (i.value == v) {
//okay because data[0] is not in gset
gset.remove(data[0]);
}
}
try {
//exception because data[1] is in gset
for(IntElement i : gset) {
if (i.value == v) {
gset.remove(data[1]);
}
}
Assert.fail();
} catch(ConcurrentModificationException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
}
{
//test put new element while iterating
final GSet<IntElement, IntElement> gset = createGSet(data);
try {
for(IntElement i : gset) {
if (i.value == v) {
gset.put(data[0]);
}
}
Assert.fail();
} catch(ConcurrentModificationException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
}
{
//test put existing element while iterating
final GSet<IntElement, IntElement> gset = createGSet(data);
try {
for(IntElement i : gset) {
if (i.value == v) {
gset.put(data[3]);
}
}
Assert.fail();
} catch(ConcurrentModificationException e) {
LightWeightGSet.LOG.info("GOOD: getting " + e, e);
}
}
}
}
}
private static GSet<IntElement, IntElement> createGSet(final IntElement[] data) {
final GSet<IntElement, IntElement> gset
= new LightWeightGSet<IntElement, IntElement>(8);
for(int i = 1; i < data.length; i++) {
gset.put(data[i]);
}
return gset;
}
@Test
public void testGSet() {
    // The parameters are: table length, data size, modulus.
check(new GSetTestCase(1, 1 << 4, 65537));
check(new GSetTestCase(17, 1 << 16, 17));
check(new GSetTestCase(255, 1 << 10, 65537));
}
/**
* A long running test with various data sets and parameters.
* It may take ~5 hours,
* If you are changing the implementation,
* please un-comment the following line in order to run the test.
*/
//@Test
public void runMultipleTestGSet() {
for(int offset = -2; offset <= 2; offset++) {
runTestGSet(1, offset);
for(int i = 1; i < Integer.SIZE - 1; i++) {
runTestGSet((1 << i) + 1, offset);
}
}
}
private static void runTestGSet(final int modulus, final int offset) {
println("\n\nmodulus=" + modulus + ", offset=" + offset);
for(int i = 0; i <= 16; i += 4) {
final int tablelength = (1 << i) + offset;
final int upper = i + 2;
final int steps = Math.max(1, upper/3);
for(int j = 0; j <= upper; j += steps) {
final int datasize = 1 << j;
check(new GSetTestCase(tablelength, datasize, modulus));
}
}
}
private static void check(final GSetTestCase test) {
//check add
print(" check add .................. ");
for(int i = 0; i < test.data.size()/2; i++) {
test.put(test.data.get(i));
}
for(int i = 0; i < test.data.size(); i++) {
test.put(test.data.get(i));
}
println("DONE " + test.stat());
//check remove and add
print(" check remove & add ......... ");
for(int j = 0; j < 10; j++) {
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.remove(test.data.get(r));
}
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.put(test.data.get(r));
}
}
println("DONE " + test.stat());
//check remove
print(" check remove ............... ");
for(int i = 0; i < test.data.size(); i++) {
test.remove(test.data.get(i));
}
Assert.assertEquals(0, test.gset.size());
println("DONE " + test.stat());
//check remove and add again
print(" check remove & add again ... ");
for(int j = 0; j < 10; j++) {
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.remove(test.data.get(r));
}
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.put(test.data.get(r));
}
}
println("DONE " + test.stat());
final long s = (Time.now() - starttime)/1000L;
println("total time elapsed=" + s + "s\n");
}
/** Test cases */
private static class GSetTestCase implements GSet<IntElement, IntElement> {
final GSet<IntElement, IntElement> expected
= new GSetByHashMap<IntElement, IntElement>(1024, 0.75f);
final GSet<IntElement, IntElement> gset;
final IntData data;
final String info;
final long starttime = Time.now();
/** Determine the probability in {@link #check()}. */
final int denominator;
int iterate_count = 0;
int contain_count = 0;
GSetTestCase(int tablelength, int datasize, int modulus) {
denominator = Math.min((datasize >> 7) + 1, 1 << 16);
info = getClass().getSimpleName()
+ ": tablelength=" + tablelength
+ ", datasize=" + datasize
+ ", modulus=" + modulus
+ ", denominator=" + denominator;
println(info);
data = new IntData(datasize, modulus);
gset = new LightWeightGSet<IntElement, IntElement>(tablelength);
Assert.assertEquals(0, gset.size());
}
private boolean containsTest(IntElement key) {
final boolean e = expected.contains(key);
Assert.assertEquals(e, gset.contains(key));
return e;
}
@Override
public boolean contains(IntElement key) {
final boolean e = containsTest(key);
check();
return e;
}
private IntElement getTest(IntElement key) {
final IntElement e = expected.get(key);
Assert.assertEquals(e.id, gset.get(key).id);
return e;
}
@Override
public IntElement get(IntElement key) {
final IntElement e = getTest(key);
check();
return e;
}
private IntElement putTest(IntElement element) {
final IntElement e = expected.put(element);
if (e == null) {
Assert.assertEquals(null, gset.put(element));
} else {
Assert.assertEquals(e.id, gset.put(element).id);
}
return e;
}
@Override
public IntElement put(IntElement element) {
final IntElement e = putTest(element);
check();
return e;
}
private IntElement removeTest(IntElement key) {
final IntElement e = expected.remove(key);
if (e == null) {
Assert.assertEquals(null, gset.remove(key));
} else {
Assert.assertEquals(e.id, gset.remove(key).id);
}
return e;
}
@Override
public IntElement remove(IntElement key) {
final IntElement e = removeTest(key);
check();
return e;
}
private int sizeTest() {
final int s = expected.size();
Assert.assertEquals(s, gset.size());
return s;
}
@Override
public int size() {
final int s = sizeTest();
check();
return s;
}
@Override
public Iterator<IntElement> iterator() {
throw new UnsupportedOperationException();
}
void check() {
//test size
sizeTest();
if (ran.nextInt(denominator) == 0) {
//test get(..), check content and test iterator
iterate_count++;
for(IntElement i : gset) {
getTest(i);
}
}
if (ran.nextInt(denominator) == 0) {
//test contains(..)
contain_count++;
final int count = Math.min(data.size(), 1000);
if (count == data.size()) {
for(IntElement i : data.integers) {
containsTest(i);
}
} else {
for(int j = 0; j < count; j++) {
containsTest(data.get(ran.nextInt(data.size())));
}
}
}
}
String stat() {
final long t = Time.now() - starttime;
return String.format(" iterate=%5d, contain=%5d, time elapsed=%5d.%03ds",
iterate_count, contain_count, t/1000, t%1000);
}
@Override
public void clear() {
expected.clear();
gset.clear();
Assert.assertEquals(0, size());
}
}
/** Test data set */
private static class IntData {
final IntElement[] integers;
IntData(int size, int modulus) {
integers = new IntElement[size];
for(int i = 0; i < integers.length; i++) {
integers[i] = new IntElement(i, ran.nextInt(modulus));
}
}
IntElement get(int i) {
return integers[i];
}
int size() {
return integers.length;
}
}
/** Elements of {@link LightWeightGSet} in this test */
private static class IntElement implements LightWeightGSet.LinkedElement,
Comparable<IntElement> {
private LightWeightGSet.LinkedElement next;
final int id;
final int value;
IntElement(int id, int value) {
this.id = id;
this.value = value;
}
@Override
public boolean equals(Object obj) {
      return obj instanceof IntElement
          && value == ((IntElement) obj).value;
}
@Override
public int hashCode() {
return value;
}
@Override
public int compareTo(IntElement that) {
return value - that.value;
}
@Override
public String toString() {
return id + "#" + value;
}
@Override
public LightWeightGSet.LinkedElement getNext() {
return next;
}
@Override
public void setNext(LightWeightGSet.LinkedElement e) {
next = e;
}
}
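  // Sketch of how an intrusive GSet bucket lookup works (an illustration of
  // the LinkedElement pattern above, not LightWeightGSet's actual code):
  // each stored element carries its own "next" pointer, so a bucket is just
  // its first element plus a chain walk, with no per-entry node allocation.
  private static IntElement sketchBucketLookup(IntElement head, IntElement key) {
    for (LightWeightGSet.LinkedElement e = head; e != null; e = e.getNext()) {
      if (key.equals(e)) {
        return (IntElement) e;
      }
    }
    return null;
  }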
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
* with invalid percent less than 0.
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testComputeCapacityNegativePercent() {
LightWeightGSet.computeCapacity(1024, -1.0, "testMap");
}
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
* with invalid percent greater than 100.
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testComputeCapacityInvalidPercent() {
LightWeightGSet.computeCapacity(1024, 101.0, "testMap");
}
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
* with invalid negative max memory
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testComputeCapacityInvalidMemory() {
LightWeightGSet.computeCapacity(-1, 50.0, "testMap");
}
private static boolean isPowerOfTwo(int num) {
return num == 0 || (num > 0 && Integer.bitCount(num) == 1);
}
/** Return capacity as percentage of total memory */
private static int getPercent(long total, int capacity) {
// Reference size in bytes
double referenceSize =
System.getProperty("sun.arch.data.model").equals("32") ? 4.0 : 8.0;
return (int)(((capacity * referenceSize)/total) * 100.0);
}
/** Return capacity as percentage of total memory */
private static void testCapacity(long maxMemory, double percent) {
int capacity = LightWeightGSet.computeCapacity(maxMemory, percent, "map");
LightWeightGSet.LOG.info("Validating - total memory " + maxMemory + " percent "
+ percent + " returned capacity " + capacity);
// Returned capacity is zero or power of two
Assert.assertTrue(isPowerOfTwo(capacity));
    // Ensure the capacity returned is the nearest to the requested percentage
int capacityPercent = getPercent(maxMemory, capacity);
if (capacityPercent == percent) {
return;
} else if (capacityPercent > percent) {
Assert.assertTrue(getPercent(maxMemory, capacity * 2) > percent);
} else {
Assert.assertTrue(getPercent(maxMemory, capacity / 2) < percent);
}
}
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
*/
@Test
public void testComputeCapacity() {
// Tests for boundary conditions where percent or memory are zero
testCapacity(0, 0.0);
testCapacity(100, 0.0);
testCapacity(0, 100.0);
    // Compute capacity for 100 random max-memory and percentage combinations
Random r = new Random();
for (int i = 0; i < 100; i++) {
long maxMemory = r.nextInt(Integer.MAX_VALUE);
double percent = r.nextInt(101);
testCapacity(maxMemory, percent);
}
}
}
| 15,497 | 27.806691 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.util.ZKUtil.BadAclFormatException;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.data.ACL;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
public class TestZKUtil {
private static final String TEST_ROOT_DIR = System.getProperty(
"test.build.data", "/tmp") + "/TestZKUtil";
private static final File TEST_FILE = new File(TEST_ROOT_DIR,
"test-file");
/** A path which is expected not to exist */
private static final String BOGUS_FILE =
new File("/xxxx-this-does-not-exist").getPath();
@Test
public void testEmptyACL() {
List<ACL> result = ZKUtil.parseACLs("");
assertTrue(result.isEmpty());
}
@Test
public void testNullACL() {
List<ACL> result = ZKUtil.parseACLs(null);
assertTrue(result.isEmpty());
}
@Test
public void testInvalidACLs() {
badAcl("a:b",
"ACL 'a:b' not of expected form scheme:id:perm"); // not enough parts
badAcl("a",
"ACL 'a' not of expected form scheme:id:perm"); // not enough parts
badAcl("password:foo:rx",
"Invalid permission 'x' in permission string 'rx'");
}
private static void badAcl(String acls, String expectedErr) {
try {
ZKUtil.parseACLs(acls);
fail("Should have failed to parse '" + acls + "'");
} catch (BadAclFormatException e) {
assertEquals(expectedErr, e.getMessage());
}
}
@Test
public void testRemoveSpecificPerms() {
int perms = Perms.ALL;
int remove = Perms.CREATE;
int newPerms = ZKUtil.removeSpecificPerms(perms, remove);
assertEquals("Removal failed", 0, newPerms & Perms.CREATE);
}
@Test
public void testGoodACLs() {
List<ACL> result = ZKUtil.parseACLs(
"sasl:hdfs/[email protected]:cdrwa, sasl:hdfs/[email protected]:ca");
ACL acl0 = result.get(0);
assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ |
Perms.WRITE | Perms.ADMIN, acl0.getPerms());
assertEquals("sasl", acl0.getId().getScheme());
assertEquals("hdfs/[email protected]", acl0.getId().getId());
ACL acl1 = result.get(1);
assertEquals(Perms.CREATE | Perms.ADMIN, acl1.getPerms());
assertEquals("sasl", acl1.getId().getScheme());
assertEquals("hdfs/[email protected]", acl1.getId().getId());
}
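  // Sketch of the "scheme:id:perm" permission parsing exercised above (an
  // illustration only; ZKUtil's real parser is the code under test). Each
  // character of the perm field maps to one ZooKeeper permission bit.
  private static int sketchPermsFromString(String permString) {
    int perms = 0;
    for (char c : permString.toCharArray()) {
      switch (c) {
      case 'c': perms |= Perms.CREATE; break;
      case 'd': perms |= Perms.DELETE; break;
      case 'r': perms |= Perms.READ; break;
      case 'w': perms |= Perms.WRITE; break;
      case 'a': perms |= Perms.ADMIN; break;
      default:
        throw new BadAclFormatException("Invalid permission '" + c
            + "' in permission string '" + permString + "'");
      }
    }
    return perms;
  }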
@Test
public void testEmptyAuth() {
List<ZKAuthInfo> result = ZKUtil.parseAuth("");
assertTrue(result.isEmpty());
}
@Test
public void testNullAuth() {
List<ZKAuthInfo> result = ZKUtil.parseAuth(null);
assertTrue(result.isEmpty());
}
@Test
public void testGoodAuths() {
List<ZKAuthInfo> result = ZKUtil.parseAuth(
"scheme:data,\n scheme2:user:pass");
assertEquals(2, result.size());
ZKAuthInfo auth0 = result.get(0);
assertEquals("scheme", auth0.getScheme());
assertEquals("data", new String(auth0.getAuth()));
ZKAuthInfo auth1 = result.get(1);
assertEquals("scheme2", auth1.getScheme());
assertEquals("user:pass", new String(auth1.getAuth()));
}
@Test
public void testConfIndirection() throws IOException {
assertNull(ZKUtil.resolveConfIndirection(null));
assertEquals("x", ZKUtil.resolveConfIndirection("x"));
TEST_FILE.getParentFile().mkdirs();
Files.write("hello world", TEST_FILE, Charsets.UTF_8);
assertEquals("hello world", ZKUtil.resolveConfIndirection(
"@" + TEST_FILE.getAbsolutePath()));
try {
ZKUtil.resolveConfIndirection("@" + BOGUS_FILE);
fail("Did not throw for non-existent file reference");
} catch (FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE));
}
}
}
| 4,736 | 31.668966 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.*;
import org.junit.Test;
public class TestVersionUtil {
@Test
public void testCompareVersions() {
// Equal versions are equal.
assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
assertEquals(0, VersionUtil.compareVersions(
"2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
assertEquals(0, VersionUtil.compareVersions("1", "1"));
assertEquals(0, VersionUtil.compareVersions("1", "1.0"));
assertEquals(0, VersionUtil.compareVersions("1", "1.0.0"));
assertEquals(0, VersionUtil.compareVersions("1.0", "1"));
assertEquals(0, VersionUtil.compareVersions("1.0", "1.0"));
assertEquals(0, VersionUtil.compareVersions("1.0", "1.0.0"));
assertEquals(0, VersionUtil.compareVersions("1.0.0", "1"));
assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0"));
assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0.0"));
assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-1", "1.0.0-a1"));
assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-2", "1.0.0-a2"));
assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha1", "1.0.0-alpha-1"));
assertEquals(0, VersionUtil.compareVersions("1a0", "1.0.0-alpha-0"));
assertEquals(0, VersionUtil.compareVersions("1a0", "1-a0"));
assertEquals(0, VersionUtil.compareVersions("1.a0", "1-a0"));
assertEquals(0, VersionUtil.compareVersions("1.a0", "1.0.0-alpha-0"));
// Assert that lower versions are lower, and higher versions are higher.
assertExpectedValues("1", "2.0.0");
assertExpectedValues("1.0.0", "2");
assertExpectedValues("1.0.0", "2.0.0");
assertExpectedValues("1.0", "2.0.0");
assertExpectedValues("1.0.0", "2.0.0");
assertExpectedValues("1.0.0", "1.0.0a");
assertExpectedValues("1.0.0.0", "2.0.0");
assertExpectedValues("1.0.0", "1.0.0-dev");
assertExpectedValues("1.0.0", "1.0.1");
assertExpectedValues("1.0.0", "1.0.2");
assertExpectedValues("1.0.0", "1.1.0");
assertExpectedValues("2.0.0", "10.0.0");
assertExpectedValues("1.0.0", "1.0.0a");
assertExpectedValues("1.0.2a", "1.0.10");
assertExpectedValues("1.0.2a", "1.0.2b");
assertExpectedValues("1.0.2a", "1.0.2ab");
assertExpectedValues("1.0.0a1", "1.0.0a2");
assertExpectedValues("1.0.0a2", "1.0.0a10");
// The 'a' in "1.a" is not followed by digit, thus not treated as "alpha",
// and treated larger than "1.0", per maven's ComparableVersion class
// implementation.
assertExpectedValues("1.0", "1.a");
//The 'a' in "1.a0" is followed by digit, thus treated as "alpha-<digit>"
assertExpectedValues("1.a0", "1.0");
assertExpectedValues("1a0", "1.0");
assertExpectedValues("1.0.1-alpha-1", "1.0.1-alpha-2");
assertExpectedValues("1.0.1-beta-1", "1.0.1-beta-2");
// Snapshot builds precede their eventual releases.
assertExpectedValues("1.0-SNAPSHOT", "1.0");
assertExpectedValues("1.0.0-SNAPSHOT", "1.0");
assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
assertExpectedValues("1.0.1-SNAPSHOT", "1.0.2");
assertExpectedValues("1.0.1-alpha-1", "1.0.1-SNAPSHOT");
assertExpectedValues("1.0.1-beta-1", "1.0.1-SNAPSHOT");
assertExpectedValues("1.0.1-beta-2", "1.0.1-SNAPSHOT");
}
private static void assertExpectedValues(String lower, String higher) {
assertTrue(VersionUtil.compareVersions(lower, higher) < 0);
assertTrue(VersionUtil.compareVersions(higher, lower) > 0);
}
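  // Naive numeric dotted-version comparison sketch (an assumption written out
  // for illustration; VersionUtil itself delegates to Maven's
  // ComparableVersion, which also understands qualifiers such as -SNAPSHOT,
  // alpha/beta, and the letter suffixes exercised above).
  private static int sketchCompareNumeric(String v1, String v2) {
    String[] a = v1.split("\\.");
    String[] b = v2.split("\\.");
    int n = Math.max(a.length, b.length);
    for (int i = 0; i < n; i++) {
      // A missing component counts as 0, so "1" == "1.0" == "1.0.0".
      int x = i < a.length ? Integer.parseInt(a[i]) : 0;
      int y = i < b.length ? Integer.parseInt(b[i]) : 0;
      if (x != y) {
        return x < y ? -1 : 1;
      }
    }
    return 0;
  }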
}
| 4,508 | 42.776699 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeLibraryChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import junit.framework.TestCase;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.Test;
public class TestNativeLibraryChecker extends TestCase {
private void expectExit(String [] args) {
try {
// should throw exit exception
NativeLibraryChecker.main(args);
fail("should call exit");
} catch (ExitException e) {
// pass
ExitUtil.resetFirstExitException();
}
}
@Test
public void testNativeLibraryChecker() {
ExitUtil.disableSystemExit();
// help should return normally
NativeLibraryChecker.main(new String[] {"-h"});
    // illegal arguments should exit
expectExit(new String[] {"-a", "-h"});
expectExit(new String[] {"aaa"});
if (NativeCodeLoader.isNativeCodeLoaded()) {
// no argument should return normally
NativeLibraryChecker.main(new String[0]);
} else {
// no argument should exit
expectExit(new String[0]);
}
}
@Test
  public void testNativeLibraryCheckerOutput() {
expectOutput(new String[]{"-a"});
// no argument
expectOutput(new String[0]);
}
private void expectOutput(String [] args) {
ExitUtil.disableSystemExit();
ByteArrayOutputStream outContent = new ByteArrayOutputStream();
PrintStream originalPs = System.out;
System.setOut(new PrintStream(outContent));
try {
NativeLibraryChecker.main(args);
} catch (ExitException e) {
ExitUtil.resetFirstExitException();
} finally {
if (Shell.WINDOWS) {
assertEquals(outContent.toString().indexOf("winutils: true") != -1, true);
}
if (NativeCodeLoader.isNativeCodeLoaded()) {
assertEquals(outContent.toString().indexOf("hadoop: true") != -1, true);
}
System.setOut(originalPs);
}
}
}
| 2,692 | 31.059524 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownThreadsHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Test;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestShutdownThreadsHelper {
private Runnable sampleRunnable = new Runnable() {
@Override
public void run() {
try {
Thread.sleep(2 * ShutdownThreadsHelper.SHUTDOWN_WAIT_MS);
} catch (InterruptedException ie) {
System.out.println("Thread interrupted");
}
}
};
@Test (timeout = 3000)
public void testShutdownThread() {
Thread thread = new Thread(sampleRunnable);
thread.start();
boolean ret = ShutdownThreadsHelper.shutdownThread(thread);
boolean isTerminated = !thread.isAlive();
assertEquals("Incorrect return value", ret, isTerminated);
assertTrue("Thread is not shutdown", isTerminated);
}
@Test
public void testShutdownThreadPool() throws InterruptedException {
ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
executor.execute(sampleRunnable);
boolean ret = ShutdownThreadsHelper.shutdownExecutorService(executor);
boolean isTerminated = executor.isTerminated();
assertEquals("Incorrect return value", ret, isTerminated);
assertTrue("ExecutorService is not shutdown", isTerminated);
}
}
| 2,153 | 35.508475 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckMain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
/**
* Test class used by {@link TestRunJar} to verify that it is loaded by the
* {@link ApplicationClassLoader}.
*/
public class ClassLoaderCheckMain {
public static void main(String[] args) {
// ClassLoaderCheckMain should be loaded by the application classloader
ClassLoaderCheck.checkClassLoader(ClassLoaderCheckMain.class, true);
// ClassLoaderCheckSecond should NOT be loaded by the application
// classloader
ClassLoaderCheck.checkClassLoader(ClassLoaderCheckSecond.class, false);
// ClassLoaderCheckThird should be loaded by the application classloader
ClassLoaderCheck.checkClassLoader(ClassLoaderCheckThird.class, true);
}
}
| 1,508 | 43.382353 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import java.nio.ByteBuffer;
import java.util.List;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestDirectBufferPool {
final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool();
@Test
public void testBasics() {
ByteBuffer a = pool.getBuffer(100);
assertEquals(100, a.capacity());
assertEquals(100, a.remaining());
pool.returnBuffer(a);
// Getting a new buffer should return the same one
ByteBuffer b = pool.getBuffer(100);
assertSame(a, b);
    // Getting a new buffer before returning "b" should
    // not return the same one
ByteBuffer c = pool.getBuffer(100);
assertNotSame(b, c);
pool.returnBuffer(b);
pool.returnBuffer(c);
}
@Test
public void testBuffersAreReset() {
ByteBuffer a = pool.getBuffer(100);
a.putInt(0xdeadbeef);
assertEquals(96, a.remaining());
pool.returnBuffer(a);
// Even though we return the same buffer,
// its position should be reset to 0
ByteBuffer b = pool.getBuffer(100);
assertSame(a, b);
assertEquals(100, a.remaining());
pool.returnBuffer(b);
}
@Test
public void testWeakRefClearing() {
// Allocate and return 10 buffers.
List<ByteBuffer> bufs = Lists.newLinkedList();
for (int i = 0; i < 10; i++) {
ByteBuffer buf = pool.getBuffer(100);
bufs.add(buf);
}
for (ByteBuffer buf : bufs) {
pool.returnBuffer(buf);
}
assertEquals(10, pool.countBuffersOfSize(100));
// Clear out any references to the buffers, and force
// GC. Weak refs should get cleared.
bufs.clear();
bufs = null;
for (int i = 0; i < 3; i++) {
System.gc();
}
ByteBuffer buf = pool.getBuffer(100);
// the act of getting a buffer should clear all the nulled
// references from the pool.
assertEquals(0, pool.countBuffersOfSize(100));
pool.returnBuffer(buf);
}
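  // Minimal sketch of the pooling idea under test (an illustration, not the
  // real DirectBufferPool): buffers are keyed by capacity and held through
  // weak references, so an idle pool never pins direct memory.
  private static final java.util.concurrent.ConcurrentMap<Integer,
      java.util.Queue<java.lang.ref.WeakReference<ByteBuffer>>> SKETCH_POOL =
      new java.util.concurrent.ConcurrentHashMap<Integer,
          java.util.Queue<java.lang.ref.WeakReference<ByteBuffer>>>();
  private static ByteBuffer sketchGetBuffer(int size) {
    java.util.Queue<java.lang.ref.WeakReference<ByteBuffer>> q =
        SKETCH_POOL.get(size);
    if (q != null) {
      java.lang.ref.WeakReference<ByteBuffer> ref;
      while ((ref = q.poll()) != null) {
        ByteBuffer buf = ref.get();
        if (buf != null) {
          buf.clear(); // reset position and limit before reuse
          return buf;
        }
        // A cleared weak reference is dropped, matching testWeakRefClearing.
      }
    }
    return ByteBuffer.allocateDirect(size);
  }
  private static void sketchReturnBuffer(ByteBuffer buf) {
    java.util.Queue<java.lang.ref.WeakReference<ByteBuffer>> q =
        SKETCH_POOL.get(buf.capacity());
    if (q == null) {
      q = new java.util.concurrent.ConcurrentLinkedQueue<java.lang.ref.WeakReference<ByteBuffer>>();
      java.util.Queue<java.lang.ref.WeakReference<ByteBuffer>> prev =
          SKETCH_POOL.putIfAbsent(buf.capacity(), q);
      if (prev != null) {
        q = prev;
      }
    }
    q.add(new java.lang.ref.WeakReference<ByteBuffer>(buf));
  }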
}
| 2,938 | 29.298969 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIndexedSort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
public class TestIndexedSort extends TestCase {
public void sortAllEqual(IndexedSorter sorter) throws Exception {
final int SAMPLE = 500;
int[] values = new int[SAMPLE];
Arrays.fill(values, 10);
SampleSortable s = new SampleSortable(values);
sorter.sort(s, 0, SAMPLE);
int[] check = s.getSorted();
assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
Arrays.toString(check), Arrays.equals(values, check));
// Set random min/max, re-sort.
Random r = new Random();
int min = r.nextInt(SAMPLE);
int max = (min + 1 + r.nextInt(SAMPLE - 2)) % SAMPLE;
values[min] = 9;
values[max] = 11;
System.out.println("testAllEqual setting min/max at " + min + "/" + max +
"(" + sorter.getClass().getName() + ")");
s = new SampleSortable(values);
sorter.sort(s, 0, SAMPLE);
check = s.getSorted();
Arrays.sort(values);
assertTrue(check[0] == 9);
assertTrue(check[SAMPLE - 1] == 11);
assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
Arrays.toString(check), Arrays.equals(values, check));
}
public void sortSorted(IndexedSorter sorter) throws Exception {
final int SAMPLE = 500;
int[] values = new int[SAMPLE];
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("testSorted seed: " + seed +
"(" + sorter.getClass().getName() + ")");
for (int i = 0; i < SAMPLE; ++i) {
values[i] = r.nextInt(100);
}
Arrays.sort(values);
SampleSortable s = new SampleSortable(values);
sorter.sort(s, 0, SAMPLE);
int[] check = s.getSorted();
assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
Arrays.toString(check), Arrays.equals(values, check));
}
public void sortSequential(IndexedSorter sorter) throws Exception {
final int SAMPLE = 500;
int[] values = new int[SAMPLE];
for (int i = 0; i < SAMPLE; ++i) {
values[i] = i;
}
SampleSortable s = new SampleSortable(values);
sorter.sort(s, 0, SAMPLE);
int[] check = s.getSorted();
assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
Arrays.toString(check), Arrays.equals(values, check));
}
public void sortSingleRecord(IndexedSorter sorter) throws Exception {
final int SAMPLE = 1;
SampleSortable s = new SampleSortable(SAMPLE);
int[] values = s.getValues();
sorter.sort(s, 0, SAMPLE);
int[] check = s.getSorted();
assertTrue(Arrays.toString(values) + "\ndoesn't match\n" +
Arrays.toString(check), Arrays.equals(values, check));
}
public void sortRandom(IndexedSorter sorter) throws Exception {
final int SAMPLE = 256 * 1024;
SampleSortable s = new SampleSortable(SAMPLE);
long seed = s.getSeed();
System.out.println("sortRandom seed: " + seed +
"(" + sorter.getClass().getName() + ")");
int[] values = s.getValues();
Arrays.sort(values);
sorter.sort(s, 0, SAMPLE);
int[] check = s.getSorted();
assertTrue("seed: " + seed + "\ndoesn't match\n",
Arrays.equals(values, check));
}
public void sortWritable(IndexedSorter sorter) throws Exception {
final int SAMPLE = 1000;
WritableSortable s = new WritableSortable(SAMPLE);
long seed = s.getSeed();
System.out.println("sortWritable seed: " + seed +
"(" + sorter.getClass().getName() + ")");
String[] values = s.getValues();
Arrays.sort(values);
sorter.sort(s, 0, SAMPLE);
String[] check = s.getSorted();
assertTrue("seed: " + seed + "\ndoesn't match",
Arrays.equals(values, check));
}
public void testQuickSort() throws Exception {
QuickSort sorter = new QuickSort();
sortRandom(sorter);
sortSingleRecord(sorter);
sortSequential(sorter);
sortSorted(sorter);
sortAllEqual(sorter);
sortWritable(sorter);
// test degenerate case for median-of-three partitioning
// a_n, a_1, a_2, ..., a_{n-1}
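    // (a sorted run with the maximum rotated to the front makes
    // median-of-three repeatedly pick a near-extreme pivot, pushing the
    // comparison count toward the (N/2)^2 worst case bounded below)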
final int DSAMPLE = 500;
int[] values = new int[DSAMPLE];
for (int i = 0; i < DSAMPLE; ++i) { values[i] = i; }
values[0] = values[DSAMPLE - 1] + 1;
SampleSortable s = new SampleSortable(values);
values = s.getValues();
final int DSS = (DSAMPLE / 2) * (DSAMPLE / 2);
    // Worst case is (N/2)^2 comparisons, not including those affecting
    // the median-of-three partitioning; impl should handle this case
MeasuredSortable m = new MeasuredSortable(s, DSS);
sorter.sort(m, 0, DSAMPLE);
System.out.println("QuickSort degen cmp/swp: " +
m.getCmp() + "/" + m.getSwp() +
"(" + sorter.getClass().getName() + ")");
Arrays.sort(values);
int[] check = s.getSorted();
assertTrue(Arrays.equals(values, check));
}
public void testHeapSort() throws Exception {
HeapSort sorter = new HeapSort();
sortRandom(sorter);
sortSingleRecord(sorter);
sortSequential(sorter);
sortSorted(sorter);
sortAllEqual(sorter);
sortWritable(sorter);
}
// Sortables //
private static class SampleSortable implements IndexedSortable {
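    // The sorter permutes valindex only; values are never moved.
    // valindirect stays an identity mapping, adding one extra level of
    // indirection between a sort position and its value.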
private int[] valindex;
private int[] valindirect;
private int[] values;
private final long seed;
public SampleSortable() {
this(50);
}
public SampleSortable(int j) {
Random r = new Random();
seed = r.nextLong();
r.setSeed(seed);
values = new int[j];
valindex = new int[j];
valindirect = new int[j];
for (int i = 0; i < j; ++i) {
valindex[i] = valindirect[i] = i;
values[i] = r.nextInt(1000);
}
}
public SampleSortable(int[] values) {
this.values = values;
valindex = new int[values.length];
valindirect = new int[values.length];
for (int i = 0; i < values.length; ++i) {
valindex[i] = valindirect[i] = i;
}
seed = 0;
}
public long getSeed() {
return seed;
}
@Override
public int compare(int i, int j) {
      // values are small and non-negative, so plain subtraction cannot overflow
return
values[valindirect[valindex[i]]] - values[valindirect[valindex[j]]];
}
@Override
public void swap(int i, int j) {
int tmp = valindex[i];
valindex[i] = valindex[j];
valindex[j] = tmp;
}
public int[] getSorted() {
int[] ret = new int[values.length];
for (int i = 0; i < ret.length; ++i) {
ret[i] = values[valindirect[valindex[i]]];
}
return ret;
}
public int[] getValues() {
int[] ret = new int[values.length];
System.arraycopy(values, 0, ret, 0, values.length);
return ret;
}
}
public static class MeasuredSortable implements IndexedSortable {
    private int comparisons;
private int swaps;
private final int maxcmp;
private final int maxswp;
private IndexedSortable s;
public MeasuredSortable(IndexedSortable s) {
this(s, Integer.MAX_VALUE);
}
public MeasuredSortable(IndexedSortable s, int maxcmp) {
this(s, maxcmp, Integer.MAX_VALUE);
}
public MeasuredSortable(IndexedSortable s, int maxcmp, int maxswp) {
this.s = s;
this.maxcmp = maxcmp;
this.maxswp = maxswp;
}
    public int getCmp() { return comparisons; }
public int getSwp() { return swaps; }
@Override
public int compare(int i, int j) {
assertTrue("Expected fewer than " + maxcmp + " comparisons",
++comparisions < maxcmp);
return s.compare(i, j);
}
@Override
public void swap(int i, int j) {
assertTrue("Expected fewer than " + maxswp + " swaps",
++swaps < maxswp);
s.swap(i, j);
}
}
private static class WritableSortable implements IndexedSortable {
private static Random r = new Random();
private final int eob;
private final int[] indices;
private final int[] offsets;
private final byte[] bytes;
private final WritableComparator comparator;
private final String[] check;
private final long seed;
public WritableSortable() throws IOException {
this(100);
}
public WritableSortable(int j) throws IOException {
seed = r.nextLong();
r.setSeed(seed);
Text t = new Text();
StringBuilder sb = new StringBuilder();
indices = new int[j];
offsets = new int[j];
check = new String[j];
DataOutputBuffer dob = new DataOutputBuffer();
for (int i = 0; i < j; ++i) {
indices[i] = i;
offsets[i] = dob.getLength();
genRandom(t, r.nextInt(15) + 1, sb);
t.write(dob);
check[i] = t.toString();
}
eob = dob.getLength();
bytes = dob.getData();
comparator = WritableComparator.get(Text.class);
}
public long getSeed() {
return seed;
}
private static void genRandom(Text t, int len, StringBuilder sb) {
sb.setLength(0);
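      // digits 10..35 in radix 36 render as 'a'..'z', so each append below
      // is one random lowercase letter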
for (int i = 0; i < len; ++i) {
sb.append(Integer.toString(r.nextInt(26) + 10, 36));
}
t.set(sb.toString());
}
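    // Records are serialized back-to-back in 'bytes'; the length of record k
    // is the gap to the next record's offset, or to eob for the last record.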
@Override
public int compare(int i, int j) {
final int ii = indices[i];
final int ij = indices[j];
return comparator.compare(bytes, offsets[ii],
((ii + 1 == indices.length) ? eob : offsets[ii + 1]) - offsets[ii],
bytes, offsets[ij],
((ij + 1 == indices.length) ? eob : offsets[ij + 1]) - offsets[ij]);
}
@Override
public void swap(int i, int j) {
int tmp = indices[i];
indices[i] = indices[j];
indices[j] = tmp;
}
public String[] getValues() {
return check;
}
public String[] getSorted() throws IOException {
String[] ret = new String[indices.length];
Text t = new Text();
DataInputBuffer dib = new DataInputBuffer();
for (int i = 0; i < ret.length; ++i) {
int ii = indices[i];
dib.reset(bytes, offsets[ii],
((ii + 1 == indices.length) ? eob : offsets[ii + 1]) - offsets[ii]);
t.readFields(dib);
ret[i] = t.toString();
}
return ret;
}
}
}
| 11,139 | 29.354223 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
import org.junit.Assert;
import org.junit.Test;
/**
* Unit test to verify that the pure-Java CRC32 algorithm gives
* the same results as the built-in implementation.
*/
public class TestPureJavaCrc32 {
private final CRC32 theirs = new CRC32();
private final PureJavaCrc32 ours = new PureJavaCrc32();
@Test
public void testCorrectness() throws Exception {
checkSame();
theirs.update(104);
ours.update(104);
checkSame();
checkOnBytes(new byte[] {40, 60, 97, -70}, false);
checkOnBytes("hello world!".getBytes("UTF-8"), false);
for (int i = 0; i < 10000; i++) {
      byte[] randomBytes = new byte[new Random().nextInt(2048)];
new Random().nextBytes(randomBytes);
checkOnBytes(randomBytes, false);
}
}
private void checkOnBytes(byte[] bytes, boolean print) {
theirs.reset();
ours.reset();
checkSame();
for (int i = 0; i < bytes.length; i++) {
ours.update(bytes[i]);
theirs.update(bytes[i]);
checkSame();
}
if (print) {
System.out.println("theirs:\t" + Long.toHexString(theirs.getValue())
+ "\nours:\t" + Long.toHexString(ours.getValue()));
}
theirs.reset();
ours.reset();
ours.update(bytes, 0, bytes.length);
theirs.update(bytes, 0, bytes.length);
if (print) {
System.out.println("theirs:\t" + Long.toHexString(theirs.getValue())
+ "\nours:\t" + Long.toHexString(ours.getValue()));
}
checkSame();
if (bytes.length >= 10) {
ours.update(bytes, 5, 5);
theirs.update(bytes, 5, 5);
checkSame();
}
}
private void checkSame() {
Assert.assertEquals(theirs.getValue(), ours.getValue());
}
/**
* Generate a table to perform checksums based on the same CRC-32 polynomial
* that java.util.zip.CRC32 uses.
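 * The polynomial argument is taken in reversed (LSB-first) bit order, as
 * used by the reflected CRC-32 algorithm (0xEDB88320 for CRC32).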
*/
public static class Table {
private final int[][] tables;
private Table(final int nBits, final int nTables,
long polynomial) {
tables = new int[nTables][];
final int size = 1 << nBits;
for(int i = 0; i < tables.length; i++) {
tables[i] = new int[size];
}
//compute the first table
final int[] first = tables[0];
for (int i = 0; i < first.length; i++) {
int crc = i;
for (int j = 0; j < nBits; j++) {
if ((crc & 1) == 1) {
crc >>>= 1;
crc ^= polynomial;
} else {
crc >>>= 1;
}
}
first[i] = crc;
}
//compute the remaining tables
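      // each entry pushes the previous table's remainder through one more
      // lookup step: shift right by nBits and fold the dropped low bits back
      // in via the first table (the standard slicing-by-N construction)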
final int mask = first.length - 1;
for(int j = 1; j < tables.length; j++) {
final int[] previous = tables[j-1];
final int[] current = tables[j];
for (int i = 0; i < current.length; i++) {
current[i] = (previous[i] >>> nBits) ^ first[previous[i] & mask];
}
}
}
String[] toStrings(String nameformat) {
final String[] s = new String[tables.length];
for (int j = 0; j < tables.length; j++) {
final int[] t = tables[j];
final StringBuilder b = new StringBuilder();
b.append(String.format(" /* "+ nameformat +" */", j));
for (int i = 0; i < t.length;) {
b.append("\n ");
for(int k = 0; k < 4; k++) {
b.append(String.format("0x%08X, ", t[i++]));
}
}
s[j] = b.toString();
}
return s;
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder();
final String tableFormat = String.format("T%d_",
Integer.numberOfTrailingZeros(tables[0].length)) + "%d";
final String startFormat = " private static final int "+tableFormat+"_start = %d*256;";
for (int j = 0; j < tables.length; j++) {
b.append(String.format(startFormat, j, j));
b.append("\n");
}
b.append(" private static final int[] T = new int[] {");
for(String s : toStrings(tableFormat)) {
b.append("\n");
b.append(s);
}
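      // replace the trailing comma left by the last entry with a newline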
b.setCharAt(b.length() - 2, '\n');
b.append(" };\n");
return b.toString();
}
/** Generate CRC-32 lookup tables */
public static void main(String[] args) throws FileNotFoundException {
if (args.length != 1) {
System.err.println("Usage: " + Table.class.getName() +
" <polynomial>");
System.exit(1);
}
long polynomial = Long.parseLong(args[0], 16);
int i = 8;
final Table t = new Table(i, 16, polynomial);
final String s = t.toString();
System.out.println(s);
//print to a file
final PrintStream out = new PrintStream(
new FileOutputStream("table" + i + ".txt"), true);
try {
out.println(s);
} finally {
out.close();
}
}
}
/**
* Performance tests to compare performance of the Pure Java implementation
* to the built-in java.util.zip implementation. This can be run from the
* command line with:
*
* java -cp path/to/test/classes:path/to/common/classes \
* 'org.apache.hadoop.util.TestPureJavaCrc32$PerformanceTest'
*
* The output is in JIRA table format.
*/
public static class PerformanceTest {
public static final int MAX_LEN = 32*1024*1024; // up to 32MB chunks
public static final int BYTES_PER_SIZE = MAX_LEN * 4;
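    // every chunk size is benchmarked over the same total volume of data,
    // so the MB/sec figures are comparable across sizes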
static final Class<? extends Checksum> zip = CRC32.class;
static final List<Class<? extends Checksum>> CRCS = new ArrayList<Class<? extends Checksum>>();
static {
CRCS.add(zip);
CRCS.add(PureJavaCrc32.class);
}
public static void main(String args[]) throws Exception {
printSystemProperties(System.out);
doBench(CRCS, System.out);
}
private static void printCell(String s, int width, PrintStream out) {
      final int w = s.length() > width ? s.length() : width;
out.printf(" %" + w + "s |", s);
}
private static void doBench(final List<Class<? extends Checksum>> crcs,
final PrintStream out) throws Exception {
final byte[] bytes = new byte[MAX_LEN];
new Random().nextBytes(bytes);
// Print header
out.printf("\nPerformance Table (The unit is MB/sec; #T = #Theads)\n");
// Warm up implementations to get jit going.
for (Class<? extends Checksum> c : crcs) {
doBench(c, 1, bytes, 2);
doBench(c, 1, bytes, 2101);
}
// Test on a variety of sizes with different number of threads
for (int size = 32; size <= MAX_LEN; size <<= 1) {
doBench(crcs, bytes, size, out);
}
}
private static void doBench(final List<Class<? extends Checksum>> crcs,
final byte[] bytes, final int size, final PrintStream out) throws Exception {
final String numBytesStr = " #Bytes ";
final String numThreadsStr = "#T";
final String diffStr = "% diff";
out.print('|');
printCell(numBytesStr, 0, out);
printCell(numThreadsStr, 0, out);
for (int i = 0; i < crcs.size(); i++) {
final Class<? extends Checksum> c = crcs.get(i);
out.print('|');
printCell(c.getSimpleName(), 8, out);
for(int j = 0; j < i; j++) {
printCell(diffStr, diffStr.length(), out);
}
}
out.printf("\n");
for(int numThreads = 1; numThreads <= 16; numThreads <<= 1) {
out.printf("|");
printCell(String.valueOf(size), numBytesStr.length(), out);
printCell(String.valueOf(numThreads), numThreadsStr.length(), out);
BenchResult expected = null;
final List<BenchResult> previous = new ArrayList<BenchResult>();
for(Class<? extends Checksum> c : crcs) {
System.gc();
final BenchResult result = doBench(c, numThreads, bytes, size);
printCell(String.format("%9.1f", result.mbps),
c.getSimpleName().length()+1, out);
//check result
if(c == zip) {
expected = result;
} else if (expected == null) {
throw new RuntimeException("The first class is "
+ c.getName() + " but not " + zip.getName());
} else if (result.value != expected.value) {
throw new RuntimeException(c + " has bugs!");
}
//compare result with previous
for(BenchResult p : previous) {
final double diff = (result.mbps - p.mbps) / p.mbps * 100;
printCell(String.format("%5.1f%%", diff), diffStr.length(), out);
}
previous.add(result);
}
out.printf("\n");
}
}
private static BenchResult doBench(Class<? extends Checksum> clazz,
final int numThreads, final byte[] bytes, final int size)
throws Exception {
final Thread[] threads = new Thread[numThreads];
final BenchResult[] results = new BenchResult[threads.length];
{
final int trials = BYTES_PER_SIZE / size;
final double mbProcessed = trials * size / 1024.0 / 1024.0;
final Constructor<? extends Checksum> ctor = clazz.getConstructor();
for(int i = 0; i < threads.length; i++) {
final int index = i;
threads[i] = new Thread() {
final Checksum crc = ctor.newInstance();
@Override
public void run() {
final long st = System.nanoTime();
crc.reset();
for (int i = 0; i < trials; i++) {
crc.update(bytes, 0, size);
}
final long et = System.nanoTime();
double secsElapsed = (et - st) / 1000000000.0d;
results[index] = new BenchResult(crc.getValue(), mbProcessed/secsElapsed);
}
};
}
}
for(int i = 0; i < threads.length; i++) {
threads[i].start();
}
for(int i = 0; i < threads.length; i++) {
threads[i].join();
}
final long expected = results[0].value;
double sum = results[0].mbps;
for(int i = 1; i < results.length; i++) {
if (results[i].value != expected) {
throw new AssertionError(clazz.getSimpleName() + " results not matched.");
}
sum += results[i].mbps;
}
return new BenchResult(expected, sum/results.length);
}
private static class BenchResult {
/** CRC value */
final long value;
/** Speed (MB per second) */
final double mbps;
BenchResult(long value, double mbps) {
this.value = value;
this.mbps = mbps;
}
}
private static void printSystemProperties(PrintStream out) {
final String[] names = {
"java.version",
"java.runtime.name",
"java.runtime.version",
"java.vm.version",
"java.vm.vendor",
"java.vm.name",
"java.vm.specification.version",
"java.specification.version",
"os.arch",
"os.name",
"os.version"
};
final Properties p = System.getProperties();
for(String n : names) {
out.println(n + " = " + p.getProperty(n));
}
}
}
}
| 12,334 | 30.14899 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestMachineList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collection;
import org.junit.Test;
import org.mockito.Mockito;
public class TestMachineList {
private static String IP_LIST = "10.119.103.110,10.119.103.112,10.119.103.114";
private static String IP_LIST_SPACES =
" 10.119.103.110 , 10.119.103.112,10.119.103.114 ,10.119.103.110, ";
private static String CIDR_LIST = "10.222.0.0/16,10.241.23.0/24";
private static String CIDR_LIST1 = "10.222.0.0/16";
private static String CIDR_LIST2 = "10.241.23.0/24";
private static String INVALID_CIDR = "10.241/24";
private static String IP_CIDR_LIST =
"10.222.0.0/16,10.119.103.110,10.119.103.112,10.119.103.114,10.241.23.0/24";
private static String HOST_LIST = "host1,host4";
private static String HOSTNAME_IP_CIDR_LIST =
"host1,10.222.0.0/16,10.119.103.110,10.119.103.112,10.119.103.114,10.241.23.0/24,host4,";
@Test
public void testWildCard() {
    //create MachineList with the wildcard entry
MachineList ml = new MachineList("*");
//test for inclusion with any IP
assertTrue(ml.includes("10.119.103.112"));
assertTrue(ml.includes("1.2.3.4"));
}
@Test
public void testIPList() {
    //create MachineList with a list of IPs
MachineList ml = new MachineList(IP_LIST);
    //test for inclusion with a known IP
assertTrue(ml.includes("10.119.103.112"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
}
@Test
public void testIPListSpaces() {
    //create MachineList from an IP string containing duplicates and spaces
MachineList ml = new MachineList(IP_LIST_SPACES);
    //test for inclusion with a known IP
assertTrue(ml.includes("10.119.103.112"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
}
@Test
public void testStaticIPHostNameList()throws UnknownHostException {
    //create MachineList with a list of hostnames
InetAddress addressHost1 = InetAddress.getByName("1.2.3.1");
InetAddress addressHost4 = InetAddress.getByName("1.2.3.4");
MachineList.InetAddressFactory addressFactory =
Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml = new MachineList(
StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory);
    //test for inclusion with a known IP
assertTrue(ml.includes("1.2.3.4"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("1.2.3.5"));
}
@Test
public void testHostNames() throws UnknownHostException {
    //create MachineList with a list of hostnames
InetAddress addressHost1 = InetAddress.getByName("1.2.3.1");
InetAddress addressHost4 = InetAddress.getByName("1.2.3.4");
InetAddress addressMockHost4 = Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("differentName");
InetAddress addressMockHost5 = Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5");
MachineList.InetAddressFactory addressFactory =
Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4);
Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml = new MachineList(
StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory );
    //test for inclusion with a known IP
assertTrue(ml.includes("1.2.3.4"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("1.2.3.5"));
}
@Test
  public void testHostNamesReverseIpMatch() throws UnknownHostException {
    //create MachineList with a list of hostnames
InetAddress addressHost1 = InetAddress.getByName("1.2.3.1");
InetAddress addressHost4 = InetAddress.getByName("1.2.3.4");
InetAddress addressMockHost4 = Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("host4");
InetAddress addressMockHost5 = Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5");
MachineList.InetAddressFactory addressFactory =
Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4);
Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml = new MachineList(
StringUtils.getTrimmedStringCollection(HOST_LIST), addressFactory );
    //test for inclusion with a known IP
assertTrue(ml.includes("1.2.3.4"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("1.2.3.5"));
}
@Test
public void testCIDRs() {
    //create MachineList with a list of IP ranges specified in CIDR format
MachineList ml = new MachineList(CIDR_LIST);
//test for inclusion/exclusion
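    // 10.222.0.0/16 covers 10.222.0.0 - 10.222.255.255;
    // 10.241.23.0/24 covers 10.241.23.0 - 10.241.23.255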
assertFalse(ml.includes("10.221.255.255"));
assertTrue(ml.includes("10.222.0.0"));
assertTrue(ml.includes("10.222.0.1"));
assertTrue(ml.includes("10.222.0.255"));
assertTrue(ml.includes("10.222.255.0"));
assertTrue(ml.includes("10.222.255.254"));
assertTrue(ml.includes("10.222.255.255"));
assertFalse(ml.includes("10.223.0.0"));
assertTrue(ml.includes("10.241.23.0"));
assertTrue(ml.includes("10.241.23.1"));
assertTrue(ml.includes("10.241.23.254"));
assertTrue(ml.includes("10.241.23.255"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
}
@Test
public void testCIDRWith16bitmask() {
    //create MachineList with a list of IP ranges specified in CIDR format
MachineList ml = new MachineList(CIDR_LIST1);
//test for inclusion/exclusion
assertFalse(ml.includes("10.221.255.255"));
assertTrue(ml.includes("10.222.0.0"));
assertTrue(ml.includes("10.222.0.1"));
assertTrue(ml.includes("10.222.0.255"));
assertTrue(ml.includes("10.222.255.0"));
assertTrue(ml.includes("10.222.255.254"));
assertTrue(ml.includes("10.222.255.255"));
assertFalse(ml.includes("10.223.0.0"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
}
@Test
public void testCIDRWith8BitMask() {
    //create MachineList with a list of IP ranges specified in CIDR format
MachineList ml = new MachineList(CIDR_LIST2);
//test for inclusion/exclusion
assertFalse(ml.includes("10.241.22.255"));
assertTrue(ml.includes("10.241.23.0"));
assertTrue(ml.includes("10.241.23.1"));
assertTrue(ml.includes("10.241.23.254"));
assertTrue(ml.includes("10.241.23.255"));
assertFalse(ml.includes("10.241.24.0"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
}
//test invalid cidr
@Test
public void testInvalidCIDR() {
//create MachineList with an Invalid CIDR
try {
new MachineList(INVALID_CIDR);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
//expected Exception
} catch (Throwable t) {
fail ("Expected only IllegalArgumentException");
}
}
@Test
public void testIPandCIDRs() {
    //create MachineList with a list of IP ranges and IP addresses
MachineList ml = new MachineList(IP_CIDR_LIST);
    //test for inclusion with a known IP
assertTrue(ml.includes("10.119.103.112"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
//CIDR Ranges
assertFalse(ml.includes("10.221.255.255"));
assertTrue(ml.includes("10.222.0.0"));
assertTrue(ml.includes("10.222.255.255"));
assertFalse(ml.includes("10.223.0.0"));
assertFalse(ml.includes("10.241.22.255"));
assertTrue(ml.includes("10.241.23.0"));
assertTrue(ml.includes("10.241.23.255"));
assertFalse(ml.includes("10.241.24.0"));
}
@Test
public void testHostNameIPandCIDRs() {
    //create MachineList with a mix of IP addresses, hostnames and IP ranges
MachineList ml = new MachineList(HOSTNAME_IP_CIDR_LIST);
    //test for inclusion with a known IP
assertTrue(ml.includes("10.119.103.112"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
//CIDR Ranges
assertFalse(ml.includes("10.221.255.255"));
assertTrue(ml.includes("10.222.0.0"));
assertTrue(ml.includes("10.222.255.255"));
assertFalse(ml.includes("10.223.0.0"));
assertFalse(ml.includes("10.241.22.255"));
assertTrue(ml.includes("10.241.23.0"));
assertTrue(ml.includes("10.241.23.255"));
assertFalse(ml.includes("10.241.24.0"));
}
@Test
public void testGetCollection() {
    //create MachineList with a mix of IP addresses, hostnames and IP ranges
MachineList ml = new MachineList(HOSTNAME_IP_CIDR_LIST);
Collection<String> col = ml.getCollection();
    //test that getCollection returns the full collection
    assertEquals(7, col.size());
for (String item:StringUtils.getTrimmedStringCollection(HOSTNAME_IP_CIDR_LIST)) {
assertTrue(col.contains(item));
}
}
}
| 10,704 | 35.660959 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNodeHealthScriptRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.TimerTask;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNodeHealthScriptRunner {
protected static File testRootDir = new File("target",
TestNodeHealthScriptRunner.class.getName() +
"-localDir").getAbsoluteFile();
private File nodeHealthscriptFile = new File(testRootDir,
Shell.appendScriptExtension("failingscript"));
@Before
public void setup() {
testRootDir.mkdirs();
}
@After
public void tearDown() throws Exception {
if (testRootDir.exists()) {
FileContext.getLocalFSFileContext().delete(
new Path(testRootDir.getAbsolutePath()), true);
}
}
private void writeNodeHealthScriptFile(String scriptStr, boolean setExecutable)
throws IOException {
    PrintWriter pw = null;
    try {
      FileUtil.setWritable(nodeHealthscriptFile, true);
      FileUtil.setReadable(nodeHealthscriptFile, true);
      pw = new PrintWriter(new FileOutputStream(nodeHealthscriptFile));
      pw.println(scriptStr);
      pw.flush();
    } finally {
      if (pw != null) {
        pw.close();
      }
    }
FileUtil.setExecutable(nodeHealthscriptFile, setExecutable);
}
@Test
public void testNodeHealthScriptShouldRun() throws IOException {
Assert.assertFalse("Node health script should start",
NodeHealthScriptRunner.shouldRun(
nodeHealthscriptFile.getAbsolutePath()));
writeNodeHealthScriptFile("", false);
// Node health script should not start if the node health script is not
// executable.
Assert.assertFalse("Node health script should start",
NodeHealthScriptRunner.shouldRun(
nodeHealthscriptFile.getAbsolutePath()));
writeNodeHealthScriptFile("", true);
Assert.assertTrue("Node health script should start",
NodeHealthScriptRunner.shouldRun(
nodeHealthscriptFile.getAbsolutePath()));
}
@Test
public void testNodeHealthScript() throws Exception {
String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
String normalScript = "echo \"I am all fine\"";
String timeOutScript =
Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
: "sleep 4\necho \"I am fine\"";
Configuration conf = new Configuration();
writeNodeHealthScriptFile(normalScript, true);
NodeHealthScriptRunner nodeHealthScriptRunner = new NodeHealthScriptRunner(
nodeHealthscriptFile.getAbsolutePath(),
500, 1000, new String[] {});
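    // 500ms check interval and 1000ms script timeout (shorter than the 4s
    // delay in timeOutScript, so the timeout case below can trigger);
    // argument meanings inferred from this test's behavior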
nodeHealthScriptRunner.init(conf);
TimerTask timerTask = nodeHealthScriptRunner.getTimerTask();
timerTask.run();
// Normal Script runs successfully
Assert.assertTrue("Node health status reported unhealthy",
nodeHealthScriptRunner.isHealthy());
Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
// Error script.
writeNodeHealthScriptFile(errorScript, true);
// Run timer
timerTask.run();
Assert.assertFalse("Node health status reported healthy",
nodeHealthScriptRunner.isHealthy());
Assert.assertTrue(
nodeHealthScriptRunner.getHealthReport().contains("ERROR"));
// Healthy script.
writeNodeHealthScriptFile(normalScript, true);
timerTask.run();
Assert.assertTrue("Node health status reported unhealthy",
nodeHealthScriptRunner.isHealthy());
Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());
// Timeout script.
writeNodeHealthScriptFile(timeOutScript, true);
timerTask.run();
Assert.assertFalse("Node health status reported healthy even after timeout",
nodeHealthScriptRunner.isHealthy());
Assert.assertEquals(
NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG,
nodeHealthScriptRunner.getHealthReport());
}
}
| 4,927 | 34.970803 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestFileBasedIPList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.util.FileBasedIPList;
import org.apache.hadoop.util.IPList;
import org.junit.After;
import org.junit.Test;
import junit.framework.TestCase;
public class TestFileBasedIPList extends TestCase {
@After
public void tearDown() {
removeFile("ips.txt");
}
/**
   * Add a set of IPs and subnets to the file.
   * Check for inclusion.
   * Check for exclusion.
*/
@Test
public void testSubnetsAndIPs() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertTrue ("10.119.103.112 is not in the list",
ipList.isIn("10.119.103.112"));
assertFalse ("10.119.103.113 is in the list",
ipList.isIn("10.119.103.113"));
assertTrue ("10.221.102.0 is not in the list",
ipList.isIn("10.221.102.0"));
assertTrue ("10.221.102.1 is not in the list",
ipList.isIn("10.221.102.1"));
assertTrue ("10.221.103.1 is not in the list",
ipList.isIn("10.221.103.1"));
assertTrue ("10.221.103.255 is not in the list",
ipList.isIn("10.221.103.255"));
assertFalse("10.221.104.0 is in the list",
ipList.isIn("10.221.104.0"));
assertFalse("10.221.104.1 is in the list",
ipList.isIn("10.221.104.1"));
}
/**
   * Add entries to the file.
   * Check that a null IP is never matched.
*/
@Test
public void testNullIP() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertFalse ("Null Ip is in the list",
ipList.isIn(null));
}
/**
   * Add a set of subnets and IPs to the file.
   * Check for inclusion.
   * Check for exclusion.
*/
@Test
public void testWithMultipleSubnetAndIPs() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.222.0.0/16",
"10.113.221.221"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertTrue ("10.119.103.112 is not in the list",
ipList.isIn("10.119.103.112"));
assertFalse ("10.119.103.113 is in the list",
ipList.isIn("10.119.103.113"));
assertTrue ("10.221.103.121 is not in the list",
ipList.isIn("10.221.103.121"));
assertFalse("10.221.104.0 is in the list",
ipList.isIn("10.221.104.0"));
assertTrue ("10.222.103.121 is not in the list",
ipList.isIn("10.222.103.121"));
assertFalse("10.223.104.0 is in the list",
ipList.isIn("10.223.104.0"));
assertTrue ("10.113.221.221 is not in the list",
ipList.isIn("10.113.221.221"));
assertFalse("10.113.221.222 is in the list",
ipList.isIn("10.113.221.222"));
}
/**
   * Do not specify a file.
   * Test for inclusion: nothing should match,
   * as if the feature were turned off.
*/
public void testFileNotSpecified() {
IPList ipl = new FileBasedIPList(null);
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
/**
   * Specify a non-existent file.
   * Test for inclusion: nothing should match,
   * as if the feature were turned off.
*/
public void testFileMissing() {
IPList ipl = new FileBasedIPList("missingips.txt");
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
/**
   * Specify an existing but empty file.
   * Test for inclusion: nothing should match,
   * as if the feature were turned off.
*/
public void testWithEmptyList() throws IOException {
String[] ips = {};
createFileWithEntries ("ips.txt", ips);
IPList ipl = new FileBasedIPList("ips.txt");
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
/**
   * Specify an existing file whose entry is in the wrong format.
   * Loading the list should fail with an exception.
*/
  public void testForBadFile() throws IOException {
String[] ips = { "10.221.102/23"};
createFileWithEntries ("ips.txt", ips);
try {
new FileBasedIPList("ips.txt");
fail();
} catch (Exception e) {
//expects Exception
}
}
/**
   * Add a set of subnets and IPs to the file, keeping one entry malformed.
   * Loading should fail with an exception even though the
   * remaining entries are valid.
*/
public void testWithAWrongEntry() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102/23", "10.221.204.1/23"};
createFileWithEntries ("ips.txt", ips);
try {
new FileBasedIPList("ips.txt");
fail();
} catch (Exception e) {
//expects Exception
}
}
public static void createFileWithEntries(String fileName, String[] ips)
throws IOException {
FileUtils.writeLines(new File(fileName), Arrays.asList(ips));
}
public static void removeFile(String fileName) {
File file = new File(fileName);
if (file.exists()) {
      file.delete();
}
}
}
| 6,019 | 26.87037 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.ArrayList;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
public class TestGenericsUtil extends TestCase {
public void testToArray() {
//test a list of size 10
List<Integer> list = new ArrayList<Integer>();
for(int i=0; i<10; i++) {
list.add(i);
}
Integer[] arr = GenericsUtil.toArray(list);
for (int i = 0; i < arr.length; i++) {
assertEquals(
"Array has identical elements as input list",
list.get(i), arr[i]);
}
}
public void testWithEmptyList() {
try {
List<String> list = new ArrayList<String>();
String[] arr = GenericsUtil.toArray(list);
fail("Empty array should throw exception");
System.out.println(arr); //use arr so that compiler will not complain
    } catch (IndexOutOfBoundsException ex) {
//test case is successful
}
}
public void testWithEmptyList2() {
List<String> list = new ArrayList<String>();
//this method should not throw IndexOutOfBoundsException
String[] arr = GenericsUtil.<String>toArray(String.class, list);
assertEquals("Assert list creation w/ no elements results in length 0",
0, arr.length);
}
/** This class uses generics */
private class GenericClass<T> {
T dummy;
List<T> list = new ArrayList<T>();
void add(T item) {
list.add(item);
}
T[] funcThatUsesToArray() {
T[] arr = GenericsUtil.toArray(list);
return arr;
}
}
public void testWithGenericClass() {
GenericClass<String> testSubject = new GenericClass<String>();
testSubject.add("test1");
testSubject.add("test2");
try {
      //this cast would fail if we had not used GenericsUtil.toArray, since the
      //method would return Object[] rather than String[]
String[] arr = testSubject.funcThatUsesToArray();
assertEquals("test1", arr[0]);
assertEquals("test2", arr[1]);
    } catch (ClassCastException ex) {
fail("GenericsUtil#toArray() is not working for generic classes");
}
}
public void testGenericOptionsParser() throws Exception {
GenericOptionsParser parser = new GenericOptionsParser(
new Configuration(), new String[] {"-jt"});
assertEquals(0, parser.getRemainingArgs().length);
// test if -D accepts -Dx=y=z
parser =
new GenericOptionsParser(new Configuration(),
new String[] {"-Dx=y=z"});
assertEquals(
"Options parser gets entire ='s expresion",
"y=z", parser.getConfiguration().get("x"));
}
public void testGetClass() {
//test with Integer
Integer x = new Integer(42);
Class<Integer> c = GenericsUtil.getClass(x);
assertEquals("Correct generic type is acquired from object",
Integer.class, c);
//test with GenericClass<Integer>
GenericClass<Integer> testSubject = new GenericClass<Integer>();
Class<GenericClass<Integer>> c2 = GenericsUtil.getClass(testSubject);
assertEquals("Inner generics are acquired from object.",
GenericClass.class, c2);
}
}
| 3,990 | 28.562963 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckSecond.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
/**
* A class {@link ClassLoaderCheckMain} depends on that should be loaded by the
* system classloader.
*/
public class ClassLoaderCheckSecond {}
| 989 | 40.25 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.jar.JarOutputStream;
import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestRunJar extends TestCase {
private File TEST_ROOT_DIR;
private static final String TEST_JAR_NAME="test-runjar.jar";
private static final String TEST_JAR_2_NAME = "test-runjar2.jar";
@Override
@Before
protected void setUp()
throws Exception {
TEST_ROOT_DIR =
new File(System.getProperty("test.build.data", "/tmp"), getClass()
.getSimpleName());
if (!TEST_ROOT_DIR.exists()) {
TEST_ROOT_DIR.mkdirs();
}
makeTestJar();
}
@Override
@After
protected void tearDown() {
FileUtil.fullyDelete(TEST_ROOT_DIR);
}
/**
* Construct a jar with two files in it in our
* test dir.
*/
private void makeTestJar() throws IOException {
File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
JarOutputStream jstream =
new JarOutputStream(new FileOutputStream(jarFile));
jstream.putNextEntry(new ZipEntry("foobar.txt"));
jstream.closeEntry();
jstream.putNextEntry(new ZipEntry("foobaz.txt"));
jstream.closeEntry();
jstream.close();
}
/**
* Test default unjarring behavior - unpack everything
*/
@Test
public void testUnJar() throws Exception {
File unjarDir = new File(TEST_ROOT_DIR, "unjar-all");
assertFalse("unjar dir shouldn't exist at test start",
new File(unjarDir, "foobar.txt").exists());
// Unjar everything
RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
unjarDir);
assertTrue("foobar unpacked",
new File(unjarDir, "foobar.txt").exists());
assertTrue("foobaz unpacked",
new File(unjarDir, "foobaz.txt").exists());
}
/**
* Test unjarring a specific regex
*/
public void testUnJarWithPattern() throws Exception {
File unjarDir = new File(TEST_ROOT_DIR, "unjar-pattern");
assertFalse("unjar dir shouldn't exist at test start",
new File(unjarDir, "foobar.txt").exists());
// Unjar only a regex
RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
unjarDir,
Pattern.compile(".*baz.*"));
assertFalse("foobar not unpacked",
new File(unjarDir, "foobar.txt").exists());
assertTrue("foobaz unpacked",
new File(unjarDir, "foobaz.txt").exists());
}
/**
* Tests the client classloader to verify the main class and its dependent
* class are loaded correctly by the application classloader, and others are
* loaded by the system classloader.
*/
@Test
public void testClientClassLoader() throws Throwable {
RunJar runJar = spy(new RunJar());
// enable the client classloader
when(runJar.useClientClassLoader()).thenReturn(true);
// set the system classes and blacklist the test main class and the test
// third class so they can be loaded by the application classloader
String mainCls = ClassLoaderCheckMain.class.getName();
String thirdCls = ClassLoaderCheckThird.class.getName();
String systemClasses = "-" + mainCls + "," +
"-" + thirdCls + "," +
ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
when(runJar.getSystemClasses()).thenReturn(systemClasses);
// create the test jar
File testJar = makeClassLoaderTestJar(mainCls, thirdCls);
// form the args
String[] args = new String[3];
args[0] = testJar.getAbsolutePath();
args[1] = mainCls;
// run RunJar
runJar.run(args);
// it should not throw an exception
}
private File makeClassLoaderTestJar(String... clsNames) throws IOException {
File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_2_NAME);
JarOutputStream jstream =
new JarOutputStream(new FileOutputStream(jarFile));
for (String clsName: clsNames) {
String name = clsName.replace('.', '/') + ".class";
InputStream entryInputStream = this.getClass().getResourceAsStream(
"/" + name);
ZipEntry entry = new ZipEntry(name);
jstream.putNextEntry(entry);
BufferedInputStream bufInputStream = new BufferedInputStream(
entryInputStream, 2048);
int count;
byte[] data = new byte[2048];
while ((count = bufInputStream.read(data, 0, 2048)) != -1) {
jstream.write(data, 0, count);
}
jstream.closeEntry();
}
jstream.close();
return jarFile;
}
}
| 5,633 | 31.755814 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestSysInfoWindows {
static class SysInfoWindowsMock extends SysInfoWindows {
private long time = SysInfoWindows.REFRESH_INTERVAL_MS + 1;
private String infoStr = null;
void setSysinfoString(String infoStr) {
this.infoStr = infoStr;
}
void advance(long dur) {
time += dur;
}
@Override
String getSystemInfoInfoFromShell() {
return infoStr;
}
@Override
long now() {
return time;
}
}
@Test(timeout = 10000)
public void parseSystemInfoString() {
SysInfoWindowsMock tester = new SysInfoWindowsMock();
tester.setSysinfoString(
"17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n");
// info str derived from windows shell command has \r\n termination
assertEquals(17177038848L, tester.getVirtualMemorySize());
assertEquals(8589467648L, tester.getPhysicalMemorySize());
assertEquals(15232745472L, tester.getAvailableVirtualMemorySize());
assertEquals(6400417792L, tester.getAvailablePhysicalMemorySize());
assertEquals(1, tester.getNumProcessors());
assertEquals(1, tester.getNumCores());
assertEquals(2805000L, tester.getCpuFrequency());
assertEquals(6261812L, tester.getCumulativeCpuTime());
// undef on first call
assertEquals(-1.0, tester.getCpuUsage(), 0.0);
}
@Test(timeout = 10000)
public void refreshAndCpuUsage() throws InterruptedException {
SysInfoWindowsMock tester = new SysInfoWindowsMock();
tester.setSysinfoString(
"17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n");
// info str derived from windows shell command has \r\n termination
tester.getAvailablePhysicalMemorySize();
// verify information has been refreshed
assertEquals(6400417792L, tester.getAvailablePhysicalMemorySize());
assertEquals(-1.0, tester.getCpuUsage(), 0.0);
tester.setSysinfoString(
"17177038848,8589467648,15232745472,5400417792,1,2805000,6263012\r\n");
tester.getAvailablePhysicalMemorySize();
// verify information has not been refreshed
assertEquals(6400417792L, tester.getAvailablePhysicalMemorySize());
assertEquals(-1.0, tester.getCpuUsage(), 0.0);
// advance clock
tester.advance(SysInfoWindows.REFRESH_INTERVAL_MS + 1);
// verify information has been refreshed
assertEquals(5400417792L, tester.getAvailablePhysicalMemorySize());
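    // CPU usage = delta of cumulative CPU time divided by elapsed wall-clock ms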
assertEquals((6263012 - 6261812) / (SysInfoWindows.REFRESH_INTERVAL_MS + 1f),
tester.getCpuUsage(), 0.0);
}
@Test(timeout = 10000)
public void errorInGetSystemInfo() {
SysInfoWindowsMock tester = new SysInfoWindowsMock();
    // simulate the shell command failing to return system info
    tester.setSysinfoString(null);
    // refresh values; a null info string must be handled without throwing
    tester.getAvailablePhysicalMemorySize();
}
}
| 3,745 | 36.089109 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.IdentityHashStore;
import org.apache.hadoop.util.IdentityHashStore.Visitor;
import org.junit.Test;
public class TestIdentityHashStore {
private static final Log LOG = LogFactory.getLog(TestIdentityHashStore.class.getName());
private static class Key {
private final String name;
Key(String name) {
this.name = name;
}
@Override
public int hashCode() {
throw new RuntimeException("should not be used!");
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Key)) {
return false;
}
Key other = (Key)o;
return name.equals(other.name);
}
}
@Test(timeout=60000)
public void testStartingWithZeroCapacity() {
IdentityHashStore<Key, Integer> store =
new IdentityHashStore<Key, Integer>(0);
store.visitAll(new Visitor<Key, Integer>() {
@Override
public void accept(Key k, Integer v) {
Assert.fail("found key " + k + " in empty IdentityHashStore.");
}
});
Assert.assertTrue(store.isEmpty());
final Key key1 = new Key("key1");
Integer value1 = new Integer(100);
store.put(key1, value1);
Assert.assertTrue(!store.isEmpty());
Assert.assertEquals(value1, store.get(key1));
store.visitAll(new Visitor<Key, Integer>() {
@Override
public void accept(Key k, Integer v) {
Assert.assertEquals(key1, k);
}
});
Assert.assertEquals(value1, store.remove(key1));
Assert.assertTrue(store.isEmpty());
}
@Test(timeout=60000)
public void testDuplicateInserts() {
IdentityHashStore<Key, Integer> store =
new IdentityHashStore<Key, Integer>(4);
store.visitAll(new Visitor<Key, Integer>() {
@Override
public void accept(Key k, Integer v) {
Assert.fail("found key " + k + " in empty IdentityHashStore.");
}
});
Assert.assertTrue(store.isEmpty());
Key key1 = new Key("key1");
Integer value1 = new Integer(100);
Integer value2 = new Integer(200);
Integer value3 = new Integer(300);
store.put(key1, value1);
Key equalToKey1 = new Key("key1");
// IdentityHashStore compares by object equality, not equals()
Assert.assertNull(store.get(equalToKey1));
Assert.assertTrue(!store.isEmpty());
Assert.assertEquals(value1, store.get(key1));
store.put(key1, value2);
store.put(key1, value3);
final List<Integer> allValues = new LinkedList<Integer>();
store.visitAll(new Visitor<Key, Integer>() {
@Override
public void accept(Key k, Integer v) {
allValues.add(v);
}
});
Assert.assertEquals(3, allValues.size());
for (int i = 0; i < 3; i++) {
Integer value = store.remove(key1);
Assert.assertTrue(allValues.remove(value));
}
Assert.assertNull(store.remove(key1));
Assert.assertTrue(store.isEmpty());
}
@Test(timeout=60000)
public void testAdditionsAndRemovals() {
IdentityHashStore<Key, Integer> store =
new IdentityHashStore<Key, Integer>(0);
final int NUM_KEYS = 1000;
LOG.debug("generating " + NUM_KEYS + " keys");
final List<Key> keys = new ArrayList<Key>(NUM_KEYS);
for (int i = 0; i < NUM_KEYS; i++) {
keys.add(new Key("key " + i));
}
for (int i = 0; i < NUM_KEYS; i++) {
store.put(keys.get(i), i);
}
store.visitAll(new Visitor<Key, Integer>() {
@Override
public void accept(Key k, Integer v) {
Assert.assertTrue(keys.contains(k));
}
});
for (int i = 0; i < NUM_KEYS; i++) {
Assert.assertEquals(Integer.valueOf(i),
store.remove(keys.get(i)));
}
store.visitAll(new Visitor<Key, Integer>() {
@Override
public void accept(Key k, Integer v) {
Assert.fail("expected all entries to be removed");
}
});
Assert.assertTrue("expected the store to be " +
"empty, but found " + store.numElements() + " elements.",
store.isEmpty());
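    // internal capacity only grows (by doubling) and never shrinks; 1000
    // insertions leave it at the next power of two even after all removals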
Assert.assertEquals(1024, store.capacity());
}
}
| 5,082 | 30.76875 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAsyncDiskService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.AsyncDiskService;
import org.junit.Test;
/**
* A test for AsyncDiskService.
*/
public class TestAsyncDiskService extends TestCase {
public static final Log LOG = LogFactory.getLog(TestAsyncDiskService.class);
// Access by multiple threads from the ThreadPools in AsyncDiskService.
volatile int count;
/** An example task for incrementing a counter.
*/
class ExampleTask implements Runnable {
ExampleTask() {
}
@Override
public void run() {
synchronized (TestAsyncDiskService.this) {
        count++;
}
}
};
/**
* This test creates some ExampleTasks and runs them.
*/
@Test
public void testAsyncDiskService() throws Throwable {
String[] vols = new String[]{"/0", "/1"};
AsyncDiskService service = new AsyncDiskService(vols);
int total = 100;
for (int i = 0; i < total; i++) {
service.execute(vols[i%2], new ExampleTask());
}
Exception e = null;
try {
service.execute("no_such_volume", new ExampleTask());
} catch (RuntimeException ex) {
e = ex;
}
assertNotNull("Executing a task on a non-existing volume should throw an "
+ "Exception.", e);
service.shutdown();
if (!service.awaitTermination(5000)) {
fail("AsyncDiskService didn't shutdown in 5 seconds.");
}
assertEquals(total, count);
}
}
| 2,380 | 27.011765 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProgress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Assert;
import org.junit.Test;
public class TestProgress {
@Test
public void testSet(){
Progress progress = new Progress();
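    // Progress.set is expected to clamp bad input: NaN and any negative
    // value map to 0, and anything above 1 (including +Infinity) maps to 1.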
    progress.set(Float.NaN);
    Assert.assertEquals(0, progress.getProgress(), 0.0);
    progress.set(Float.NEGATIVE_INFINITY);
    Assert.assertEquals(0, progress.getProgress(), 0.0);
    progress.set(-1);
    Assert.assertEquals(0, progress.getProgress(), 0.0);
    progress.set((float) 1.1);
    Assert.assertEquals(1, progress.getProgress(), 0.0);
    progress.set(Float.POSITIVE_INFINITY);
    Assert.assertEquals(1, progress.getProgress(), 0.0);
}
}
| 1,447 | 31.177778 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.Date;
import java.util.Iterator;
import java.util.Random;
import org.junit.Assert;
import org.junit.Test;
/** Testing {@link LightWeightCache} */
public class TestLightWeightCache {
private static final long starttime = Time.now();
private static final long seed = starttime;
private static final Random ran = new Random(seed);
static {
println("Start time = " + new Date(starttime) + ", seed=" + seed);
}
private static void print(Object s) {
System.out.print(s);
System.out.flush();
}
private static void println(Object s) {
System.out.println(s);
}
@Test
public void testLightWeightCache() {
// test randomized creation expiration with zero access expiration
{
final long creationExpiration = ran.nextInt(1024) + 1;
check(1, creationExpiration, 0L, 1 << 10, 65537);
check(17, creationExpiration, 0L, 1 << 16, 17);
check(255, creationExpiration, 0L, 1 << 16, 65537);
}
// test randomized creation/access expiration periods
for(int i = 0; i < 3; i++) {
final long creationExpiration = ran.nextInt(1024) + 1;
final long accessExpiration = ran.nextInt(1024) + 1;
check(1, creationExpiration, accessExpiration, 1 << 10, 65537);
check(17, creationExpiration, accessExpiration, 1 << 16, 17);
check(255, creationExpiration, accessExpiration, 1 << 16, 65537);
}
// test size limit
final int dataSize = 1 << 16;
for(int i = 0; i < 10; i++) {
final int modulus = ran.nextInt(1024) + 1;
final int sizeLimit = ran.nextInt(modulus) + 1;
checkSizeLimit(sizeLimit, dataSize, modulus);
}
}
private static void checkSizeLimit(final int sizeLimit, final int datasize,
final int modulus) {
final LightWeightCacheTestCase test = new LightWeightCacheTestCase(
sizeLimit, sizeLimit, 1L << 32, 1L << 32, datasize, modulus);
// keep putting entries and check size limit
print(" check size ................. ");
for(int i = 0; i < test.data.size(); i++) {
test.cache.put(test.data.get(i));
Assert.assertTrue(test.cache.size() <= sizeLimit);
}
println("DONE " + test.stat());
}
/**
   * Test various creationExpirationPeriod and accessExpirationPeriod.
* It runs ~2 minutes. If you are changing the implementation,
* please un-comment the following line in order to run the test.
*/
// @Test
public void testExpirationPeriods() {
for(int k = -4; k < 10; k += 4) {
final long accessExpirationPeriod = k < 0? 0L: (1L << k);
for(int j = 0; j < 10; j += 4) {
final long creationExpirationPeriod = 1L << j;
runTests(1, creationExpirationPeriod, accessExpirationPeriod);
for(int i = 1; i < Integer.SIZE - 1; i += 8) {
runTests((1 << i) + 1, creationExpirationPeriod, accessExpirationPeriod);
}
}
}
}
/** Run tests with various table lengths. */
private static void runTests(final int modulus,
final long creationExpirationPeriod,
final long accessExpirationPeriod) {
println("\n\n\n*** runTest: modulus=" + modulus
+ ", creationExpirationPeriod=" + creationExpirationPeriod
+ ", accessExpirationPeriod=" + accessExpirationPeriod);
for(int i = 0; i <= 16; i += 4) {
final int tablelength = (1 << i);
final int upper = i + 2;
final int steps = Math.max(1, upper/3);
for(int j = upper; j > 0; j -= steps) {
final int datasize = 1 << j;
check(tablelength, creationExpirationPeriod, accessExpirationPeriod,
datasize, modulus);
}
}
}
private static void check(int tablelength, long creationExpirationPeriod,
long accessExpirationPeriod, int datasize, int modulus) {
check(new LightWeightCacheTestCase(tablelength, -1,
creationExpirationPeriod, accessExpirationPeriod, datasize, modulus));
}
/**
* check the following operations
* (1) put
* (2) remove & put
* (3) remove
* (4) remove & put again
*/
private static void check(final LightWeightCacheTestCase test) {
//check put
print(" check put .................. ");
for(int i = 0; i < test.data.size()/2; i++) {
test.put(test.data.get(i));
}
for(int i = 0; i < test.data.size(); i++) {
test.put(test.data.get(i));
}
println("DONE " + test.stat());
//check remove and put
print(" check remove & put ......... ");
for(int j = 0; j < 10; j++) {
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.remove(test.data.get(r));
}
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.put(test.data.get(r));
}
}
println("DONE " + test.stat());
//check remove
print(" check remove ............... ");
for(int i = 0; i < test.data.size(); i++) {
test.remove(test.data.get(i));
}
Assert.assertEquals(0, test.cache.size());
println("DONE " + test.stat());
//check remove and put again
print(" check remove & put again ... ");
for(int j = 0; j < 10; j++) {
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.remove(test.data.get(r));
}
for(int i = 0; i < test.data.size()/2; i++) {
final int r = ran.nextInt(test.data.size());
test.put(test.data.get(r));
}
}
println("DONE " + test.stat());
final long s = (Time.now() - starttime)/1000L;
println("total time elapsed=" + s + "s\n");
}
/**
* The test case contains two data structures, a cache and a hashMap.
* The hashMap is used to verify the correctness of the cache. Note that
* no automatic eviction is performed in the hashMap. Thus, we have
* (1) If an entry exists in cache, it MUST exist in the hashMap.
* (2) If an entry does not exist in the cache, it may or may not exist in the
* hashMap. If it exists, it must be expired.
*/
private static class LightWeightCacheTestCase implements GSet<IntEntry, IntEntry> {
/** hashMap will not evict entries automatically. */
final GSet<IntEntry, IntEntry> hashMap
= new GSetByHashMap<IntEntry, IntEntry>(1024, 0.75f);
final LightWeightCache<IntEntry, IntEntry> cache;
final IntData data;
final String info;
final long starttime = Time.now();
/** Determine the probability in {@link #check()}. */
final int denominator;
int iterate_count = 0;
int contain_count = 0;
private long currentTestTime = ran.nextInt();
LightWeightCacheTestCase(int tablelength, int sizeLimit,
long creationExpirationPeriod, long accessExpirationPeriod,
int datasize, int modulus) {
denominator = Math.min((datasize >> 7) + 1, 1 << 16);
info = getClass().getSimpleName() + "(" + new Date(starttime)
+ "): tablelength=" + tablelength
+ ", creationExpirationPeriod=" + creationExpirationPeriod
+ ", accessExpirationPeriod=" + accessExpirationPeriod
+ ", datasize=" + datasize
+ ", modulus=" + modulus
+ ", denominator=" + denominator;
println(info);
data = new IntData(datasize, modulus);
cache = new LightWeightCache<IntEntry, IntEntry>(tablelength, sizeLimit,
creationExpirationPeriod, 0, new LightWeightCache.Clock() {
@Override
long currentTime() {
return currentTestTime;
}
});
Assert.assertEquals(0, cache.size());
}
private boolean containsTest(IntEntry key) {
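      // Invariant: a cache hit must also be present in the verifying hashMap;
      // on a cache miss, anything still left in the hashMap must be expired.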
final boolean c = cache.contains(key);
if (c) {
Assert.assertTrue(hashMap.contains(key));
} else {
final IntEntry h = hashMap.remove(key);
if (h != null) {
Assert.assertTrue(cache.isExpired(h, currentTestTime));
}
}
return c;
}
@Override
public boolean contains(IntEntry key) {
final boolean e = containsTest(key);
check();
return e;
}
private IntEntry getTest(IntEntry key) {
final IntEntry c = cache.get(key);
if (c != null) {
Assert.assertEquals(hashMap.get(key).id, c.id);
} else {
final IntEntry h = hashMap.remove(key);
if (h != null) {
Assert.assertTrue(cache.isExpired(h, currentTestTime));
}
}
return c;
}
@Override
public IntEntry get(IntEntry key) {
final IntEntry e = getTest(key);
check();
return e;
}
private IntEntry putTest(IntEntry entry) {
final IntEntry c = cache.put(entry);
if (c != null) {
Assert.assertEquals(hashMap.put(entry).id, c.id);
} else {
final IntEntry h = hashMap.put(entry);
if (h != null && h != entry) {
// if h == entry, its expiration time is already updated
Assert.assertTrue(cache.isExpired(h, currentTestTime));
}
}
return c;
}
@Override
public IntEntry put(IntEntry entry) {
final IntEntry e = putTest(entry);
check();
return e;
}
private IntEntry removeTest(IntEntry key) {
final IntEntry c = cache.remove(key);
if (c != null) {
Assert.assertEquals(c.id, hashMap.remove(key).id);
} else {
final IntEntry h = hashMap.remove(key);
if (h != null) {
Assert.assertTrue(cache.isExpired(h, currentTestTime));
}
}
return c;
}
@Override
public IntEntry remove(IntEntry key) {
final IntEntry e = removeTest(key);
check();
return e;
}
private int sizeTest() {
final int c = cache.size();
Assert.assertTrue(hashMap.size() >= c);
return c;
}
@Override
public int size() {
final int s = sizeTest();
check();
return s;
}
@Override
public Iterator<IntEntry> iterator() {
throw new UnsupportedOperationException();
}
boolean tossCoin() {
return ran.nextInt(denominator) == 0;
}
void check() {
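      // Advance the fake clock by a small random amount (0-3), then run the
      // expensive whole-cache validations with probability 1/denominator each.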
currentTestTime += ran.nextInt() & 0x3;
//test size
sizeTest();
if (tossCoin()) {
//test get(..), check content and test iterator
iterate_count++;
for(IntEntry i : cache) {
getTest(i);
}
}
if (tossCoin()) {
//test contains(..)
contain_count++;
final int count = Math.min(data.size(), 1000);
if (count == data.size()) {
for(IntEntry i : data.integers) {
containsTest(i);
}
} else {
for(int j = 0; j < count; j++) {
containsTest(data.get(ran.nextInt(data.size())));
}
}
}
}
String stat() {
final long t = Time.now() - starttime;
return String.format(" iterate=%5d, contain=%5d, time elapsed=%5d.%03ds",
iterate_count, contain_count, t/1000, t%1000);
}
@Override
public void clear() {
hashMap.clear();
cache.clear();
Assert.assertEquals(0, size());
}
}
private static class IntData {
final IntEntry[] integers;
IntData(int size, int modulus) {
integers = new IntEntry[size];
for(int i = 0; i < integers.length; i++) {
integers[i] = new IntEntry(i, ran.nextInt(modulus));
}
}
IntEntry get(int i) {
return integers[i];
}
int size() {
return integers.length;
}
}
/** Entries of {@link LightWeightCache} in this test */
private static class IntEntry implements LightWeightCache.Entry,
Comparable<IntEntry> {
private LightWeightGSet.LinkedElement next;
final int id;
final int value;
private long expirationTime = 0;
IntEntry(int id, int value) {
this.id = id;
this.value = value;
}
@Override
public boolean equals(Object obj) {
      return obj instanceof IntEntry
&& value == ((IntEntry)obj).value;
}
@Override
public int hashCode() {
return value;
}
@Override
public int compareTo(IntEntry that) {
return value - that.value;
}
@Override
public String toString() {
return id + "#" + value + ",expirationTime=" + expirationTime;
}
@Override
public LightWeightGSet.LinkedElement getNext() {
return next;
}
@Override
public void setNext(LightWeightGSet.LinkedElement e) {
next = e;
}
@Override
public void setExpirationTime(long timeNano) {
this.expirationTime = timeNano;
}
@Override
public long getExpirationTime() {
return expirationTime;
}
}
}
| 13,571 | 28.633188 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
public class TestSignalLogger {
public static final Log LOG = LogFactory.getLog(TestSignalLogger.class);
@Test(timeout=60000)
public void testInstall() throws Exception {
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
SignalLogger.INSTANCE.register(LOG);
try {
SignalLogger.INSTANCE.register(LOG);
Assert.fail("expected IllegalStateException from double registration");
} catch (IllegalStateException e) {
// fall through
}
}
}
| 1,517 | 34.302326 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheckThird.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
/**
* A class {@link ClassLoaderCheckMain} depends on that should be loaded by the
* application classloader.
*/
public class ClassLoaderCheckThird {}
| 993 | 40.416667 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.util;
import com.google.common.base.Preconditions;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLDecoder;
import java.text.MessageFormat;
import java.util.Enumeration;
import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/**
* Finds the Jar for a class. If the class is in a directory in the
* classpath, it creates a Jar on the fly with the contents of the directory
* and returns the path to that Jar. If a Jar is created, it is created in
* the system temporary directory.
*/
public class JarFinder {
private static void copyToZipStream(File file, ZipEntry entry,
ZipOutputStream zos) throws IOException {
InputStream is = new FileInputStream(file);
try {
zos.putNextEntry(entry);
byte[] arr = new byte[4096];
int read = is.read(arr);
while (read > -1) {
zos.write(arr, 0, read);
read = is.read(arr);
}
} finally {
try {
is.close();
} finally {
zos.closeEntry();
}
}
}
public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
throws IOException {
Preconditions.checkNotNull(relativePath, "relativePath");
Preconditions.checkNotNull(zos, "zos");
// by JAR spec, if there is a manifest, it must be the first entry in the
// ZIP.
File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
if (!manifestFile.exists()) {
zos.putNextEntry(manifestEntry);
new Manifest().write(new BufferedOutputStream(zos));
zos.closeEntry();
} else {
copyToZipStream(manifestFile, manifestEntry, zos);
}
zos.closeEntry();
zipDir(dir, relativePath, zos, true);
zos.close();
}
private static void zipDir(File dir, String relativePath, ZipOutputStream zos,
boolean start) throws IOException {
String[] dirList = dir.list();
for (String aDirList : dirList) {
File f = new File(dir, aDirList);
if (!f.isHidden()) {
if (f.isDirectory()) {
if (!start) {
ZipEntry dirEntry = new ZipEntry(relativePath + f.getName() + "/");
zos.putNextEntry(dirEntry);
zos.closeEntry();
}
          zipDir(f, relativePath + f.getName() + "/", zos, false);
}
else {
String path = relativePath + f.getName();
if (!path.equals(JarFile.MANIFEST_NAME)) {
ZipEntry anEntry = new ZipEntry(path);
copyToZipStream(f, anEntry, zos);
}
}
}
}
}
private static void createJar(File dir, File jarFile) throws IOException {
Preconditions.checkNotNull(dir, "dir");
Preconditions.checkNotNull(jarFile, "jarFile");
File jarDir = jarFile.getParentFile();
if (!jarDir.exists()) {
if (!jarDir.mkdirs()) {
throw new IOException(MessageFormat.format("could not create dir [{0}]",
jarDir));
}
}
JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile));
jarDir(dir, "", zos);
}
/**
   * Returns the full path to the Jar containing the class. It always returns a
* JAR.
*
* @param klass class.
*
* @return path to the Jar containing the class.
*/
  public static String getJar(Class<?> klass) {
Preconditions.checkNotNull(klass, "klass");
ClassLoader loader = klass.getClassLoader();
if (loader != null) {
String class_file = klass.getName().replaceAll("\\.", "/") + ".class";
try {
      for (Enumeration<URL> itr = loader.getResources(class_file);
           itr.hasMoreElements(); ) {
        URL url = itr.nextElement();
String path = url.getPath();
if (path.startsWith("file:")) {
path = path.substring("file:".length());
}
path = URLDecoder.decode(path, "UTF-8");
if ("jar".equals(url.getProtocol())) {
path = URLDecoder.decode(path, "UTF-8");
return path.replaceAll("!.*$", "");
}
else if ("file".equals(url.getProtocol())) {
String klassName = klass.getName();
klassName = klassName.replace(".", "/") + ".class";
path = path.substring(0, path.length() - klassName.length());
File baseDir = new File(path);
File testDir = new File(System.getProperty("test.build.dir", "target/test-dir"));
testDir = testDir.getAbsoluteFile();
if (!testDir.exists()) {
testDir.mkdirs();
}
File tempJar = File.createTempFile("hadoop-", "", testDir);
tempJar = new File(tempJar.getAbsolutePath() + ".jar");
createJar(baseDir, tempJar);
tempJar.deleteOnExit();
return tempJar.getAbsolutePath();
}
}
}
catch (IOException e) {
throw new RuntimeException(e);
}
}
return null;
}
}
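// A minimal usage sketch (illustrative only, not part of this file): tests
// that need a real jar on disk can package a class's directory on the fly.
// "MyJob" below is a hypothetical class loaded from a directory.
//
//   String jar = JarFinder.getJar(MyJob.class);
//   System.out.println("packaged jar at " + jar);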
| 5,990 | 33.234286 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.crypto.OpensslCipher;
import org.apache.hadoop.io.compress.Lz4Codec;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
public class TestNativeCodeLoader {
static final Log LOG = LogFactory.getLog(TestNativeCodeLoader.class);
private static boolean requireTestJni() {
String rtj = System.getProperty("require.test.libhadoop");
if (rtj == null) return false;
if (rtj.compareToIgnoreCase("false") == 0) return false;
return true;
}
@Test
public void testNativeCodeLoaded() {
    if (!requireTestJni()) {
LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
return;
}
if (!NativeCodeLoader.isNativeCodeLoaded()) {
fail("TestNativeCodeLoader: libhadoop.so testing was required, but " +
"libhadoop.so was not loaded.");
}
assertFalse(NativeCodeLoader.getLibraryName().isEmpty());
    // library names depend on the platform and build environment,
    // so just check that the names are available
assertFalse(ZlibFactory.getLibraryName().isEmpty());
if (NativeCodeLoader.buildSupportsSnappy()) {
assertFalse(SnappyCodec.getLibraryName().isEmpty());
}
if (NativeCodeLoader.buildSupportsOpenssl()) {
assertFalse(OpensslCipher.getLibraryName().isEmpty());
}
assertFalse(Lz4Codec.getLibraryName().isEmpty());
LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
}
}
| 2,501 | 37.492308 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/FakeTimer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* FakeTimer can be used for test purposes to control the return values
 * from {@link Timer}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class FakeTimer extends Timer {
private long nowMillis;
/** Constructs a FakeTimer with a non-zero value */
public FakeTimer() {
nowMillis = 1000; // Initialize with a non-trivial value.
}
@Override
public long now() {
return nowMillis;
}
@Override
public long monotonicNow() {
return nowMillis;
}
  /** Advances the timer by the given number of milliseconds. */
public void advance(long advMillis) {
nowMillis += advMillis;
}
}
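// A minimal usage sketch (hypothetical test code): inject the FakeTimer into
// the component under test, then advance time deterministically instead of
// sleeping.
//
//   FakeTimer timer = new FakeTimer();
//   long t0 = timer.monotonicNow();
//   timer.advance(5000);
//   assert timer.monotonicNow() - t0 == 5000;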
| 1,582 | 28.867925 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/ClassLoaderCheck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
public class ClassLoaderCheck {
/**
* Verifies the class is loaded by the right classloader.
*/
  public static void checkClassLoader(Class<?> cls,
boolean shouldBeLoadedByAppClassLoader) {
boolean loadedByAppClassLoader =
cls.getClassLoader() instanceof ApplicationClassLoader;
    if (shouldBeLoadedByAppClassLoader != loadedByAppClassLoader) {
throw new RuntimeException("incorrect classloader used");
}
}
}
| 1,360 | 39.029412 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHttpExceptionUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.HttpURLConnection;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class TestHttpExceptionUtils {
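  // The tests below exercise HttpExceptionUtils' JSON error envelope: a
  // single object keyed by ERROR_JSON that wraps the exception's class name
  // (ERROR_CLASSNAME_JSON), simple name (ERROR_EXCEPTION_JSON) and message
  // (ERROR_MESSAGE_JSON). The constants are the source of truth for the
  // literal key names.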
@Test
public void testCreateServletException() throws IOException {
StringWriter writer = new StringWriter();
PrintWriter printWriter = new PrintWriter(writer);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.getWriter()).thenReturn(printWriter);
int status = HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
Exception ex = new IOException("Hello IOEX");
HttpExceptionUtils.createServletExceptionResponse(response, status, ex);
Mockito.verify(response).setStatus(status);
Mockito.verify(response).setContentType(Mockito.eq("application/json"));
ObjectMapper mapper = new ObjectMapper();
Map json = mapper.readValue(writer.toString(), Map.class);
json = (Map) json.get(HttpExceptionUtils.ERROR_JSON);
Assert.assertEquals(IOException.class.getName(),
json.get(HttpExceptionUtils.ERROR_CLASSNAME_JSON));
Assert.assertEquals(IOException.class.getSimpleName(),
json.get(HttpExceptionUtils.ERROR_EXCEPTION_JSON));
Assert.assertEquals("Hello IOEX",
json.get(HttpExceptionUtils.ERROR_MESSAGE_JSON));
}
@Test
public void testCreateJerseyException() throws IOException {
Exception ex = new IOException("Hello IOEX");
Response response = HttpExceptionUtils.createJerseyExceptionResponse(
Response.Status.INTERNAL_SERVER_ERROR, ex);
Assert.assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
response.getStatus());
Assert.assertArrayEquals(
Arrays.asList(MediaType.APPLICATION_JSON_TYPE).toArray(),
response.getMetadata().get("Content-Type").toArray());
Map entity = (Map) response.getEntity();
entity = (Map) entity.get(HttpExceptionUtils.ERROR_JSON);
Assert.assertEquals(IOException.class.getName(),
entity.get(HttpExceptionUtils.ERROR_CLASSNAME_JSON));
Assert.assertEquals(IOException.class.getSimpleName(),
entity.get(HttpExceptionUtils.ERROR_EXCEPTION_JSON));
Assert.assertEquals("Hello IOEX",
entity.get(HttpExceptionUtils.ERROR_MESSAGE_JSON));
}
@Test
public void testValidateResponseOK() throws IOException {
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getResponseCode()).thenReturn(
HttpURLConnection.HTTP_CREATED);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
}
@Test(expected = IOException.class)
public void testValidateResponseFailNoErrorMessage() throws IOException {
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getResponseCode()).thenReturn(
HttpURLConnection.HTTP_BAD_REQUEST);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
}
@Test
public void testValidateResponseNonJsonErrorMessage() throws IOException {
String msg = "stream";
InputStream is = new ByteArrayInputStream(msg.getBytes());
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getErrorStream()).thenReturn(is);
Mockito.when(conn.getResponseMessage()).thenReturn("msg");
Mockito.when(conn.getResponseCode()).thenReturn(
HttpURLConnection.HTTP_BAD_REQUEST);
try {
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
Assert.fail();
} catch (IOException ex) {
Assert.assertTrue(ex.getMessage().contains("msg"));
Assert.assertTrue(ex.getMessage().contains("" +
HttpURLConnection.HTTP_BAD_REQUEST));
}
}
@Test
public void testValidateResponseJsonErrorKnownException() throws IOException {
Map<String, Object> json = new HashMap<String, Object>();
json.put(HttpExceptionUtils.ERROR_EXCEPTION_JSON, IllegalStateException.class.getSimpleName());
json.put(HttpExceptionUtils.ERROR_CLASSNAME_JSON, IllegalStateException.class.getName());
json.put(HttpExceptionUtils.ERROR_MESSAGE_JSON, "EX");
Map<String, Object> response = new HashMap<String, Object>();
response.put(HttpExceptionUtils.ERROR_JSON, json);
ObjectMapper jsonMapper = new ObjectMapper();
String msg = jsonMapper.writeValueAsString(response);
InputStream is = new ByteArrayInputStream(msg.getBytes());
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getErrorStream()).thenReturn(is);
Mockito.when(conn.getResponseMessage()).thenReturn("msg");
Mockito.when(conn.getResponseCode()).thenReturn(
HttpURLConnection.HTTP_BAD_REQUEST);
try {
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
Assert.fail();
} catch (IllegalStateException ex) {
Assert.assertEquals("EX", ex.getMessage());
}
}
@Test
public void testValidateResponseJsonErrorUnknownException()
throws IOException {
Map<String, Object> json = new HashMap<String, Object>();
json.put(HttpExceptionUtils.ERROR_EXCEPTION_JSON, "FooException");
json.put(HttpExceptionUtils.ERROR_CLASSNAME_JSON, "foo.FooException");
json.put(HttpExceptionUtils.ERROR_MESSAGE_JSON, "EX");
Map<String, Object> response = new HashMap<String, Object>();
response.put(HttpExceptionUtils.ERROR_JSON, json);
ObjectMapper jsonMapper = new ObjectMapper();
String msg = jsonMapper.writeValueAsString(response);
InputStream is = new ByteArrayInputStream(msg.getBytes());
HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getErrorStream()).thenReturn(is);
Mockito.when(conn.getResponseMessage()).thenReturn("msg");
Mockito.when(conn.getResponseCode()).thenReturn(
HttpURLConnection.HTTP_BAD_REQUEST);
try {
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
Assert.fail();
} catch (IOException ex) {
Assert.assertTrue(ex.getMessage().contains("EX"));
Assert.assertTrue(ex.getMessage().contains("foo.FooException"));
}
}
}
| 7,339 | 42.690476 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestHostsFileReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import org.junit.*;
import static org.junit.Assert.*;
/*
* Test for HostsFileReader.java
*
*/
public class TestHostsFileReader {
  // Using /test/build/data/tmp directory to store temporary files
final String HOSTS_TEST_DIR = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File EXCLUDES_FILE = new File(HOSTS_TEST_DIR, "dfs.exclude");
File INCLUDES_FILE = new File(HOSTS_TEST_DIR, "dfs.include");
String excludesFile = HOSTS_TEST_DIR + "/dfs.exclude";
String includesFile = HOSTS_TEST_DIR + "/dfs.include";
@Before
public void setUp() throws Exception {
}
@After
public void tearDown() throws Exception {
// Delete test files after running tests
EXCLUDES_FILE.delete();
INCLUDES_FILE.delete();
}
/*
   * 1. Create dfs.exclude and dfs.include files
   * 2. Write host names, one per line
   * 3. Write comments starting with #
   * 4. Close the files
   * 5. Check that the number of hosts reported by HostsFileReader
   *    equals the number of hosts written
*/
@Test
public void testHostsFileReader() throws Exception {
FileWriter efw = new FileWriter(excludesFile);
FileWriter ifw = new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("somehost1\n");
efw.write("#This-is-comment\n");
efw.write("somehost2\n");
efw.write("somehost3 # host3\n");
efw.write("somehost4\n");
efw.write("somehost4 somehost5\n");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("somehost1\n");
ifw.write("somehost2\n");
ifw.write("somehost3\n");
ifw.write("#This-is-comment\n");
ifw.write("somehost4 # host4\n");
ifw.write("somehost4 somehost5\n");
ifw.close();
HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
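    // Expected parsing: '#' starts a comment (so "host3" above is ignored),
    // whitespace-separated names on one line are separate hosts, and
    // duplicates collapse into a single entry.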
int includesLen = hfp.getHosts().size();
int excludesLen = hfp.getExcludedHosts().size();
assertEquals(5, includesLen);
assertEquals(5, excludesLen);
assertTrue(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getHosts().contains("host3"));
assertTrue(hfp.getExcludedHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("host4"));
}
/*
* Test creating a new HostsFileReader with nonexistent files
*/
@Test
public void testCreateHostFileReaderWithNonexistentFile() throws Exception {
try {
new HostsFileReader(
HOSTS_TEST_DIR + "/doesnt-exist",
HOSTS_TEST_DIR + "/doesnt-exist");
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException ex) {
// Exception as expected
}
}
/*
* Test refreshing an existing HostsFileReader with an includes file that no longer exists
*/
@Test
public void testRefreshHostFileReaderWithNonexistentFile() throws Exception {
FileWriter efw = new FileWriter(excludesFile);
FileWriter ifw = new FileWriter(includesFile);
efw.close();
ifw.close();
HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
assertTrue(INCLUDES_FILE.delete());
try {
hfp.refresh();
Assert.fail("Should throw FileNotFoundException");
} catch (FileNotFoundException ex) {
// Exception as expected
}
}
/*
   * Test with empty include and exclude files
*/
@Test
public void testHostFileReaderWithNull() throws Exception {
FileWriter efw = new FileWriter(excludesFile);
FileWriter ifw = new FileWriter(includesFile);
efw.close();
ifw.close();
HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
int includesLen = hfp.getHosts().size();
int excludesLen = hfp.getExcludedHosts().size();
    // Empty include and exclude files should yield no hosts
assertEquals(0, includesLen);
assertEquals(0, excludesLen);
    // Lookups against the empty lists should miss
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
/*
   * Check that files containing only comments yield no hosts
*/
@Test
public void testHostFileReaderWithCommentsOnly() throws Exception {
FileWriter efw = new FileWriter(excludesFile);
FileWriter ifw = new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.close();
HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
int includesLen = hfp.getHosts().size();
int excludesLen = hfp.getExcludedHosts().size();
assertEquals(0, includesLen);
assertEquals(0, excludesLen);
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
/*
   * Test that whitespace-separated host names on one line are parsed separately
*/
@Test
public void testHostFileReaderWithSpaces() throws Exception {
FileWriter efw = new FileWriter(excludesFile);
FileWriter ifw = new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write(" somehost somehost2");
efw.write(" somehost3 # somehost4");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write(" somehost somehost2");
ifw.write(" somehost3 # somehost4");
ifw.close();
HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
int includesLen = hfp.getHosts().size();
int excludesLen = hfp.getExcludedHosts().size();
assertEquals(3, includesLen);
assertEquals(3, excludesLen);
assertTrue(hfp.getHosts().contains("somehost3"));
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getHosts().contains("somehost4"));
assertTrue(hfp.getExcludedHosts().contains("somehost3"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost4"));
}
/*
   * Test that spaces, tabs and newlines are handled as separators
*/
@Test
public void testHostFileReaderWithTabs() throws Exception {
FileWriter efw = new FileWriter(excludesFile);
FileWriter ifw = new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write(" \n");
efw.write(" somehost \t somehost2 \n somehost4");
efw.write(" somehost3 \t # somehost5");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write(" \n");
ifw.write(" somehost \t somehost2 \n somehost4");
ifw.write(" somehost3 \t # somehost5");
ifw.close();
HostsFileReader hfp = new HostsFileReader(includesFile, excludesFile);
int includesLen = hfp.getHosts().size();
int excludesLen = hfp.getExcludedHosts().size();
assertEquals(4, includesLen);
assertEquals(4, excludesLen);
assertTrue(hfp.getHosts().contains("somehost2"));
assertFalse(hfp.getHosts().contains("somehost5"));
assertTrue(hfp.getExcludedHosts().contains("somehost2"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
}
| 7,867 | 28.578947 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.nio.ByteBuffer;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.ChecksumException;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestDataChecksum {
// Set up buffers that have some header and trailer before the
// actual data or checksums, to make sure the code handles
// buffer.position(), limit, etc correctly.
private static final int SUMS_OFFSET_IN_BUFFER = 3;
private static final int DATA_OFFSET_IN_BUFFER = 3;
private static final int DATA_TRAILER_IN_BUFFER = 3;
private static final int BYTES_PER_CHUNK = 512;
  private static final DataChecksum.Type[] CHECKSUM_TYPES = {
DataChecksum.Type.CRC32, DataChecksum.Type.CRC32C
};
@Test
public void testBulkOps() throws Exception {
for (DataChecksum.Type type : CHECKSUM_TYPES) {
System.err.println(
"---- beginning tests with checksum type " + type + "----");
DataChecksum checksum = DataChecksum.newDataChecksum(
type, BYTES_PER_CHUNK);
for (boolean useDirect : new boolean[]{false, true}) {
doBulkTest(checksum, 1023, useDirect);
doBulkTest(checksum, 1024, useDirect);
doBulkTest(checksum, 1025, useDirect);
}
}
}
private static class Harness {
final DataChecksum checksum;
final int dataLength, sumsLength, numSums;
ByteBuffer dataBuf, checksumBuf;
Harness(DataChecksum checksum, int dataLength, boolean useDirect) {
this.checksum = checksum;
this.dataLength = dataLength;
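      // One checksum per bytesPerChecksum chunk, counting a final partial
      // chunk (ceiling division).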
numSums = (dataLength - 1)/checksum.getBytesPerChecksum() + 1;
sumsLength = numSums * checksum.getChecksumSize();
byte data[] = new byte[dataLength +
DATA_OFFSET_IN_BUFFER +
DATA_TRAILER_IN_BUFFER];
new Random().nextBytes(data);
dataBuf = ByteBuffer.wrap(
data, DATA_OFFSET_IN_BUFFER, dataLength);
byte checksums[] = new byte[SUMS_OFFSET_IN_BUFFER + sumsLength];
checksumBuf = ByteBuffer.wrap(
checksums, SUMS_OFFSET_IN_BUFFER, sumsLength);
// Swap out for direct buffers if requested.
if (useDirect) {
dataBuf = directify(dataBuf);
checksumBuf = directify(checksumBuf);
}
}
void testCorrectness() throws ChecksumException {
// calculate real checksum, make sure it passes
checksum.calculateChunkedSums(dataBuf, checksumBuf);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
// Change a byte in the header and in the trailer, make sure
// it doesn't affect checksum result
corruptBufferOffset(checksumBuf, 0);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
corruptBufferOffset(dataBuf, 0);
dataBuf.limit(dataBuf.limit() + 1);
corruptBufferOffset(dataBuf, dataLength + DATA_OFFSET_IN_BUFFER);
dataBuf.limit(dataBuf.limit() - 1);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
// Make sure bad checksums fail - error at beginning of array
corruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER);
try {
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
fail("Did not throw on bad checksums");
} catch (ChecksumException ce) {
assertEquals(0, ce.getPos());
}
// Make sure bad checksums fail - error at end of array
uncorruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER);
corruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER + sumsLength - 1);
try {
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
fail("Did not throw on bad checksums");
} catch (ChecksumException ce) {
int expectedPos = checksum.getBytesPerChecksum() * (numSums - 1);
assertEquals(expectedPos, ce.getPos());
assertTrue(ce.getMessage().contains("fake file"));
}
}
}
private void doBulkTest(DataChecksum checksum, int dataLength,
boolean useDirect) throws Exception {
System.err.println("Testing bulk checksums of length " +
dataLength + " with " +
(useDirect ? "direct" : "array-backed") + " buffers");
new Harness(checksum, dataLength, useDirect).testCorrectness();
}
/**
* Simple performance test for the "common case" checksum usage in HDFS:
* computing and verifying CRC32C with 512 byte chunking on native
* buffers.
*/
@Test
public void commonUsagePerfTest() throws Exception {
final int NUM_RUNS = 5;
final DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
final int dataLength = 512 * 1024 * 1024;
Harness h = new Harness(checksum, dataLength, true);
for (int i = 0; i < NUM_RUNS; i++) {
StopWatch s = new StopWatch().start();
// calculate real checksum, make sure it passes
checksum.calculateChunkedSums(h.dataBuf, h.checksumBuf);
s.stop();
System.err.println("Calculate run #" + i + ": " +
s.now(TimeUnit.MICROSECONDS) + "us");
s = new StopWatch().start();
      // verify the checksums just computed, make sure they pass
checksum.verifyChunkedSums(h.dataBuf, h.checksumBuf, "fake file", 0);
s.stop();
System.err.println("Verify run #" + i + ": " +
s.now(TimeUnit.MICROSECONDS) + "us");
}
}
@Test
public void testEquality() {
assertEquals(
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512),
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
assertFalse(
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).equals(
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 1024)));
assertFalse(
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).equals(
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512)));
}
@Test
public void testToString() {
assertEquals("DataChecksum(type=CRC32, chunkSize=512)",
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).toString());
}
private static void corruptBufferOffset(ByteBuffer buf, int offset) {
buf.put(offset, (byte)(buf.get(offset) + 1));
}
private static void uncorruptBufferOffset(ByteBuffer buf, int offset) {
buf.put(offset, (byte)(buf.get(offset) - 1));
}
private static ByteBuffer directify(ByteBuffer dataBuf) {
ByteBuffer newBuf = ByteBuffer.allocateDirect(dataBuf.capacity());
newBuf.position(dataBuf.position());
newBuf.mark();
newBuf.put(dataBuf);
newBuf.reset();
newBuf.limit(dataBuf.limit());
return newBuf;
}
}
| 7,517 | 36.402985 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.HashMap;
import java.util.List;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
public class TestReflectionUtils {
  private static Class[] toConstruct = { String.class, TestReflectionUtils.class, HashMap.class };
private Throwable failure = null;
@Before
public void setUp() {
ReflectionUtils.clearCache();
}
@Test
public void testCache() throws Exception {
assertEquals(0, cacheSize());
doTestCache();
assertEquals(toConstruct.length, cacheSize());
ReflectionUtils.clearCache();
assertEquals(0, cacheSize());
}
@SuppressWarnings("unchecked")
private void doTestCache() {
for (int i=0; i<toConstruct.length; i++) {
Class cl = toConstruct[i];
Object x = ReflectionUtils.newInstance(cl, null);
Object y = ReflectionUtils.newInstance(cl, null);
assertEquals(cl, x.getClass());
assertEquals(cl, y.getClass());
}
}
@Test
public void testThreadSafe() throws Exception {
Thread[] th = new Thread[32];
for (int i=0; i<th.length; i++) {
th[i] = new Thread() {
@Override
public void run() {
try {
doTestCache();
} catch (Throwable t) {
failure = t;
}
}
};
th[i].start();
}
for (int i=0; i<th.length; i++) {
th[i].join();
}
if (failure != null) {
failure.printStackTrace();
fail(failure.getMessage());
}
}
private int cacheSize() throws Exception {
return ReflectionUtils.getCacheSize();
}
@Test
public void testCantCreate() {
try {
ReflectionUtils.newInstance(NoDefaultCtor.class, null);
fail("invalid call should fail");
} catch (RuntimeException rte) {
assertEquals(NoSuchMethodException.class, rte.getCause().getClass());
}
}
@SuppressWarnings("unchecked")
@Test
public void testCacheDoesntLeak() throws Exception {
int iterations=9999; // very fast, but a bit less reliable - bigger numbers force GC
for (int i=0; i<iterations; i++) {
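      // Each iteration loads the class through a fresh classloader; if the
      // reflection cache held strong references, none of these loaders could
      // ever be collected and the cache would grow without bound.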
URLClassLoader loader = new URLClassLoader(new URL[0], getClass().getClassLoader());
Class cl = Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild", false, loader);
Object o = ReflectionUtils.newInstance(cl, null);
assertEquals(cl, o.getClass());
}
System.gc();
assertTrue(cacheSize()+" too big", cacheSize()<iterations);
}
@Test
public void testGetDeclaredFieldsIncludingInherited() {
Parent child = new Parent() {
private int childField;
@SuppressWarnings("unused")
public int getChildField() { return childField; }
};
List<Field> fields = ReflectionUtils.getDeclaredFieldsIncludingInherited(
child.getClass());
boolean containsParentField = false;
boolean containsChildField = false;
for (Field field : fields) {
if (field.getName().equals("parentField")) {
containsParentField = true;
} else if (field.getName().equals("childField")) {
containsChildField = true;
}
}
List<Method> methods = ReflectionUtils.getDeclaredMethodsIncludingInherited(
child.getClass());
boolean containsParentMethod = false;
boolean containsChildMethod = false;
for (Method method : methods) {
if (method.getName().equals("getParentField")) {
containsParentMethod = true;
} else if (method.getName().equals("getChildField")) {
containsChildMethod = true;
}
}
assertTrue("Missing parent field", containsParentField);
assertTrue("Missing child field", containsChildField);
assertTrue("Missing parent method", containsParentMethod);
assertTrue("Missing child method", containsChildMethod);
}
// Used for testGetDeclaredFieldsIncludingInherited
private class Parent {
private int parentField;
@SuppressWarnings("unused")
public int getParentField() { return parentField; }
}
private static class LoadedInChild {
}
public static class NoDefaultCtor {
public NoDefaultCtor(int x) {}
}
}
| 5,126 | 29.517857 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCacheableIPList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import junit.framework.TestCase;
public class TestCacheableIPList extends TestCase {
  /**
   * Add a bunch of subnets and IPs to the file,
   * set up a low cache refresh interval,
   * test for inclusion and check for exclusion.
   * Then add more subnets and IPs,
   * wait for the cache timeout,
   * and test for inclusion and exclusion again.
   */
public void testAddWithSleepForCacheTimeout() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
Thread.sleep(101);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
  /**
   * Add a bunch of subnets and IPs to the file,
   * set up a low cache refresh interval,
   * test for inclusion and check for exclusion.
   * Then remove some subnets and IPs,
   * wait for the cache timeout,
   * and test for inclusion and exclusion again.
   */
public void testRemovalWithSleepForCacheTimeout() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
Thread.sleep(1005);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
  /**
   * Add a bunch of subnets and IPs to the file,
   * set up a low cache refresh interval,
   * test for inclusion and check for exclusion.
   * Then add more subnets and IPs,
   * do a refresh,
   * and test for inclusion and exclusion again.
   */
public void testAddWithRefresh() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
cipl.refresh();
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
  /**
   * Add a bunch of subnets and IPs to the file,
   * set up a low cache refresh interval,
   * test for inclusion and check for exclusion.
   * Then remove some subnets and IPs,
   * do a refresh,
   * and test for inclusion and exclusion again.
   */
public void testRemovalWithRefresh() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"), 100);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[] ips2 = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries("ips.txt", ips2);
cipl.refresh();
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
}
| 6,028 | 30.899471 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import org.junit.Assert;
import org.apache.log4j.Logger;
import org.junit.Test;
public class TestClassUtil {
@Test(timeout=1000)
public void testFindContainingJar() {
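// Logger lives in the log4j jar on the test classpath, so the containing
// jar should resolve to an existing file whose name matches log4j*.jar.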
String containingJar = ClassUtil.findContainingJar(Logger.class);
Assert.assertNotNull("Containing jar not found for Logger",
containingJar);
File jarFile = new File(containingJar);
Assert.assertTrue("Containing jar does not exist on file system",
jarFile.exists());
Assert.assertTrue("Incorrect jar file" + containingJar,
jarFile.getName().matches("log4j.+[.]jar"));
}
}
| 1,457 | 34.560976 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.apache.hadoop.util.ApplicationClassLoader.constructUrlsFromClasspath;
import static org.apache.hadoop.util.ApplicationClassLoader.isSystemClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.List;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileUtil;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
public class TestApplicationClassLoader {
private static File testDir = new File(System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")), "appclassloader");
@Before
public void setUp() {
FileUtil.fullyDelete(testDir);
testDir.mkdirs();
}
@Test
public void testConstructUrlsFromClasspath() throws Exception {
File file = new File(testDir, "file");
assertTrue("Create file", file.createNewFile());
File dir = new File(testDir, "dir");
assertTrue("Make dir", dir.mkdir());
File jarsDir = new File(testDir, "jarsdir");
assertTrue("Make jarsDir", jarsDir.mkdir());
File nonJarFile = new File(jarsDir, "nonjar");
assertTrue("Create non-jar file", nonJarFile.createNewFile());
File jarFile = new File(jarsDir, "a.jar");
assertTrue("Create jar file", jarFile.createNewFile());
File nofile = new File(testDir, "nofile");
// don't create nofile
StringBuilder cp = new StringBuilder();
cp.append(file.getAbsolutePath()).append(File.pathSeparator)
.append(dir.getAbsolutePath()).append(File.pathSeparator)
.append(jarsDir.getAbsolutePath() + "/*").append(File.pathSeparator)
.append(nofile.getAbsolutePath()).append(File.pathSeparator)
.append(nofile.getAbsolutePath() + "/*").append(File.pathSeparator);
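// Expectation: the plain file and dir entries map straight to URLs, the
// jarsdir/* wildcard expands to a.jar only (non-jar files are skipped),
// and both nonexistent nofile entries are dropped.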
URL[] urls = constructUrlsFromClasspath(cp.toString());
assertEquals(3, urls.length);
assertEquals(file.toURI().toURL(), urls[0]);
assertEquals(dir.toURI().toURL(), urls[1]);
assertEquals(jarFile.toURI().toURL(), urls[2]);
// nofile should be ignored
}
@Test
public void testIsSystemClass() {
testIsSystemClassInternal("");
}
@Test
public void testIsSystemNestedClass() {
testIsSystemClassInternal("$Klass");
}
private void testIsSystemClassInternal(String nestedClass) {
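// As the asserts below show: entries are class names or package prefixes
// (trailing "."), an entry prefixed with "-" excludes matching classes and
// wins over any positive match regardless of ordering, and a leading "/"
// on the class name is tolerated.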
assertFalse(isSystemClass("org.example.Foo" + nestedClass, null));
assertTrue(isSystemClass("org.example.Foo" + nestedClass,
classes("org.example.Foo")));
assertTrue(isSystemClass("/org.example.Foo" + nestedClass,
classes("org.example.Foo")));
assertTrue(isSystemClass("org.example.Foo" + nestedClass,
classes("org.example.")));
assertTrue(isSystemClass("net.example.Foo" + nestedClass,
classes("org.example.,net.example.")));
assertFalse(isSystemClass("org.example.Foo" + nestedClass,
classes("-org.example.Foo,org.example.")));
assertTrue(isSystemClass("org.example.Bar" + nestedClass,
classes("-org.example.Foo.,org.example.")));
assertFalse(isSystemClass("org.example.Foo" + nestedClass,
classes("org.example.,-org.example.Foo")));
assertFalse(isSystemClass("org.example.Foo" + nestedClass,
classes("org.example.Foo,-org.example.Foo")));
}
private List<String> classes(String classes) {
return Lists.newArrayList(Splitter.on(',').split(classes));
}
@Test
public void testGetResource() throws IOException {
URL testJar = makeTestJar().toURI().toURL();
ClassLoader currentClassLoader = getClass().getClassLoader();
ClassLoader appClassloader = new ApplicationClassLoader(
new URL[] { testJar }, currentClassLoader, null);
assertNull("Resource should be null for current classloader",
currentClassLoader.getResourceAsStream("resource.txt"));
InputStream in = appClassloader.getResourceAsStream("resource.txt");
assertNotNull("Resource should not be null for app classloader", in);
assertEquals("hello", IOUtils.toString(in));
}
private File makeTestJar() throws IOException {
File jarFile = new File(testDir, "test.jar");
JarOutputStream out = new JarOutputStream(new FileOutputStream(jarFile));
ZipEntry entry = new ZipEntry("resource.txt");
out.putNextEntry(entry);
out.write("hello".getBytes());
out.closeEntry();
out.close();
return jarFile;
}
}
| 5,587 | 35.522876 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/hash/TestHash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util.hash;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestHash {
static final String LINE = "34563@45kjkksdf/ljfdb9d8fbusd*89uggjsk<dfgjsdfh@sddc2q3esc";
@Test
public void testHash() {
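// Covers three things: parseHashType's name-to-id mapping, selection of
// the Hash instance via the "hadoop.util.hash.type" configuration key
// (murmur when unset), and determinism of repeated hashing of one input.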
int iterations = 30;
assertTrue("testHash jenkins error !!!",
Hash.JENKINS_HASH == Hash.parseHashType("jenkins"));
assertTrue("testHash murmur error !!!",
Hash.MURMUR_HASH == Hash.parseHashType("murmur"));
assertTrue("testHash undefined",
Hash.INVALID_HASH == Hash.parseHashType("undefined"));
Configuration cfg = new Configuration();
cfg.set("hadoop.util.hash.type", "murmur");
assertTrue("testHash", MurmurHash.getInstance() == Hash.getInstance(cfg));
cfg = new Configuration();
cfg.set("hadoop.util.hash.type", "jenkins");
assertTrue("testHash jenkins configuration error !!!",
JenkinsHash.getInstance() == Hash.getInstance(cfg));
cfg = new Configuration();
assertTrue("testHash undefine configuration error !!!",
MurmurHash.getInstance() == Hash.getInstance(cfg));
assertTrue("testHash error jenkin getInstance !!!",
JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH));
assertTrue("testHash error murmur getInstance !!!",
MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH));
assertNull("testHash error invalid getInstance !!!",
Hash.getInstance(Hash.INVALID_HASH));
int murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes());
for (int i = 0; i < iterations; i++) {
assertTrue("multiple evaluation murmur hash error !!!",
murmurHash == Hash.getInstance(Hash.MURMUR_HASH)
.hash(LINE.getBytes()));
}
murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(), 67);
for (int i = 0; i < iterations; i++) {
assertTrue(
"multiple evaluation murmur hash error !!!",
murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(
LINE.getBytes(), 67));
}
int jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes());
for (int i = 0; i < iterations; i++) {
assertTrue(
"multiple evaluation jenkins hash error !!!",
jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
LINE.getBytes()));
}
jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(), 67);
for (int i = 0; i < iterations; i++) {
assertTrue(
"multiple evaluation jenkins hash error !!!",
jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
LINE.getBytes(), 67));
}
}
}
| 3,541 | 38.355556 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
|
package org.apache.hadoop.util.bloom;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.AbstractCollection;
import java.util.Collection;
import java.util.Iterator;
import java.util.Random;
import org.junit.Assert;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.hash.Hash;
import org.apache.log4j.Logger;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
public class BloomFilterCommonTester<T extends Filter> {
private static final double LN2 = Math.log(2);
private static final double LN2_SQUARED = LN2 * LN2;
private final int hashType;
private final int numInsertions;
private final ImmutableList.Builder<T> builder = ImmutableList.builder();
private ImmutableSet<BloomFilterTestStrategy> filterTestStrategies;
private final PreAssertionHelper preAssertionHelper;
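// Standard Bloom filter sizing: m = -n * ln(p) / (ln 2)^2 bits for n
// expected insertions at target false-positive rate p.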
static int optimalNumOfBits(int n, double p) {
return (int) (-n * Math.log(p) / LN2_SQUARED);
}
public static <T extends Filter> BloomFilterCommonTester<T> of(int hashId,
int numInsertions) {
return new BloomFilterCommonTester<T>(hashId, numInsertions);
}
public BloomFilterCommonTester<T> withFilterInstance(T filter) {
builder.add(filter);
return this;
}
private BloomFilterCommonTester(int hashId, int numInsertions) {
this.hashType = hashId;
this.numInsertions = numInsertions;
this.preAssertionHelper = new PreAssertionHelper() {
@Override
public ImmutableSet<Integer> falsePositives(int hashId) {
switch (hashId) {
case Hash.JENKINS_HASH: {
// false positives for odd and even keys under 1000
return ImmutableSet.of(99, 963);
}
case Hash.MURMUR_HASH: {
// false positives for odd and even keys under 1000
return ImmutableSet.of(769, 772, 810, 874);
}
default: {
// fail fast on an unknown hash id
Assert.fail("unknown hash error");
return ImmutableSet.of();
}
}
}
};
}
public BloomFilterCommonTester<T> withTestCases(
ImmutableSet<BloomFilterTestStrategy> filterTestStrategies) {
this.filterTestStrategies = ImmutableSet.copyOf(filterTestStrategies);
return this;
}
@SuppressWarnings("unchecked")
public void test() {
final ImmutableList<T> filtersList = builder.build();
final ImmutableSet<Integer> falsePositives = preAssertionHelper
.falsePositives(hashType);
for (T filter : filtersList) {
for (BloomFilterTestStrategy strategy : filterTestStrategies) {
strategy.getStrategy().assertWhat(filter, numInsertions, hashType, falsePositives);
// create fresh instance for next test iteration
filter = (T) getSymmetricFilter(filter.getClass(), numInsertions, hashType);
}
}
}
interface FilterTesterStrategy {
final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives);
}
private static Filter getSymmetricFilter(Class<?> filterClass,
int numInsertions, int hashType) {
int bitSetSize = optimalNumOfBits(numInsertions, 0.03);
int hashFunctionNumber = 5;
if (filterClass == BloomFilter.class) {
return new BloomFilter(bitSetSize, hashFunctionNumber, hashType);
} else if (filterClass == CountingBloomFilter.class) {
return new CountingBloomFilter(bitSetSize, hashFunctionNumber, hashType);
} else if (filterClass == RetouchedBloomFilter.class) {
return new RetouchedBloomFilter(bitSetSize, hashFunctionNumber, hashType);
} else if (filterClass == DynamicBloomFilter.class) {
return new DynamicBloomFilter(bitSetSize, hashFunctionNumber, hashType, 3);
} else {
// fail fast on an unexpected filter class
Assert.fail("unexpected filterClass");
return null;
}
}
public enum BloomFilterTestStrategy {
ADD_KEYS_STRATEGY(new FilterTesterStrategy() {
private final ImmutableList<Key> keys = ImmutableList.of(new Key(
new byte[] { 49, 48, 48 }), new Key(new byte[] { 50, 48, 48 }));
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
filter.add(keys);
assertTrue(" might contain key error ",
filter.membershipTest(new Key("100".getBytes())));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("200".getBytes())));
filter.add(keys.toArray(new Key[] {}));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("100".getBytes())));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("200".getBytes())));
filter.add(new AbstractCollection<Key>() {
@Override
public Iterator<Key> iterator() {
return keys.iterator();
}
@Override
public int size() {
return keys.size();
}
});
assertTrue(" might contain key error ",
filter.membershipTest(new Key("100".getBytes())));
assertTrue(" might contain key error ",
filter.membershipTest(new Key("200".getBytes())));
}
}),
KEY_TEST_STRATEGY(new FilterTesterStrategy() {
private void checkOnKeyMethods() {
String line = "werabsdbe";
Key key = new Key(line.getBytes());
assertTrue("default key weight error ", key.getWeight() == 1d);
key.set(line.getBytes(), 2d);
assertTrue(" setted key weight error ", key.getWeight() == 2d);
Key sKey = new Key(line.getBytes(), 2d);
assertTrue("equals error", key.equals(sKey));
assertTrue("hashcode error", key.hashCode() == sKey.hashCode());
sKey = new Key(line.concat("a").getBytes(), 2d);
assertFalse("equals error", key.equals(sKey));
assertFalse("hashcode error", key.hashCode() == sKey.hashCode());
sKey = new Key(line.getBytes(), 3d);
assertFalse("equals error", key.equals(sKey));
assertFalse("hashcode error", key.hashCode() == sKey.hashCode());
key.incrementWeight();
assertTrue("weight error", key.getWeight() == 3d);
key.incrementWeight(2d);
assertTrue("weight error", key.getWeight() == 5d);
}
private void checkOnReadWrite() {
String line = "qryqeb354645rghdfvbaq23312fg";
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
Key originKey = new Key(line.getBytes(), 100d);
try {
originKey.write(out);
in.reset(out.getData(), out.getData().length);
Key restoredKey = new Key(new byte[] { 0 });
assertFalse("checkOnReadWrite equals error", restoredKey.equals(originKey));
restoredKey.readFields(in);
assertTrue("checkOnReadWrite equals error", restoredKey.equals(originKey));
out.reset();
} catch (Exception ioe) {
Assert.fail("checkOnReadWrite ex error");
}
}
private void checkSetOnIAE() {
Key key = new Key();
try {
key.set(null, 0);
} catch (IllegalArgumentException ex) {
// expected
} catch (Exception e) {
Assert.fail("checkSetOnIAE ex error");
}
}
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
checkOnKeyMethods();
checkOnReadWrite();
checkSetOnIAE();
}
}),
EXCEPTIONS_CHECK_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
checkAddOnNPE(filter);
checkTestMembershipOnNPE(filter);
checkAndOnIAE(filter);
}
private void checkAndOnIAE(Filter filter) {
Filter tfilter = null;
try {
Collection<Key> keys = null;
filter.add(keys);
} catch (IllegalArgumentException ex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
try {
Key[] keys = null;
filter.add(keys);
} catch (IllegalArgumentException ex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
try {
ImmutableList<Key> keys = null;
filter.add(keys);
} catch (IllegalArgumentException ex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
try {
filter.and(tfilter);
} catch (IllegalArgumentException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
try {
filter.or(tfilter);
} catch (IllegalArgumentException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
try {
filter.xor(tfilter);
} catch (IllegalArgumentException ex) {
// expected
} catch (UnsupportedOperationException unex) {
//
} catch (Exception e) {
Assert.fail("" + e);
}
}
private void checkTestMembershipOnNPE(Filter filter) {
try {
Key nullKey = null;
filter.membershipTest(nullKey);
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
}
private void checkAddOnNPE(Filter filter) {
try {
Key nullKey = null;
filter.add(nullKey);
} catch (NullPointerException ex) {
// expected
} catch (Exception e) {
Assert.fail("" + e);
}
}
}),
ODD_EVEN_ABSENT_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
// add all even keys
for (int i = 0; i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
// check that the even keys are present
for (int i = 0; i < numInsertions; i += 2) {
Assert.assertTrue(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
// check that odd keys are absent (skipping known false positives)
for (int i = 1; i < numInsertions; i += 2) {
if (!falsePositives.contains(i)) {
assertFalse(" filter should not contain " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
}
}),
WRITE_READ_STRATEGY(new FilterTesterStrategy() {
private int slotSize = 10;
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
final Random rnd = new Random();
final DataOutputBuffer out = new DataOutputBuffer();
final DataInputBuffer in = new DataInputBuffer();
try {
Filter tempFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
ImmutableList.Builder<Integer> blist = ImmutableList.builder();
for (int i = 0; i < slotSize; i++) {
blist.add(rnd.nextInt(numInsertions * 2));
}
ImmutableList<Integer> list = blist.build();
// mark bits for later check
for (Integer slot : list) {
filter.add(new Key(String.valueOf(slot).getBytes()));
}
filter.write(out);
in.reset(out.getData(), out.getLength());
tempFilter.readFields(in);
for (Integer slot : list) {
assertTrue("read/write mask check filter error on " + slot,
filter.membershipTest(new Key(String.valueOf(slot).getBytes())));
}
} catch (IOException ex) {
Assert.fail("error ex !!!" + ex);
}
}
}),
FILTER_XOR_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
Filter symmetricFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
try {
// 0 xor 0 -> 0
filter.xor(symmetricFilter);
// check that all keys are absent
for (int i = 0; i < numInsertions; i++) {
Assert.assertFalse(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
// add all even keys
for (int i = 0; i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
// add all odd keys
for (int i = 0; i < numInsertions; i += 2) {
symmetricFilter.add(new Key(Integer.toString(i).getBytes()));
}
filter.xor(symmetricFilter);
// 1 xor 1 -> 0
// check that all keys are absent again
for (int i = 0; i < numInsertions; i++) {
Assert.assertFalse(" filter might not contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
} catch (UnsupportedOperationException ex) {
// not all Filter's implements this method
return;
}
}
}),
FILTER_AND_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
int startIntersection = numInsertions - (numInsertions - 100);
int endIntersection = numInsertions - 100;
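// the full filter gets every key; partialFilter gets exactly the keys in
// [startIntersection, endIntersection], so AND must keep only that range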
Filter partialFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
for (int i = 0; i < numInsertions; i++) {
String digit = Integer.toString(i);
filter.add(new Key(digit.getBytes()));
if (i >= startIntersection && i <= endIntersection) {
partialFilter.add(new Key(digit.getBytes()));
}
}
// do logic AND
filter.and(partialFilter);
for (int i = 0; i < numInsertions; i++) {
if (i >= startIntersection && i <= endIntersection) {
Assert.assertTrue(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
}
}),
FILTER_OR_STRATEGY(new FilterTesterStrategy() {
@Override
public void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet<Integer> falsePositives) {
Filter evenFilter = getSymmetricFilter(filter.getClass(),
numInsertions, hashId);
// add all even
for (int i = 0; i < numInsertions; i += 2) {
evenFilter.add(new Key(Integer.toString(i).getBytes()));
}
// add all odd
for (int i = 1; i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
// union odd with even
filter.or(evenFilter);
// check that all keys are present
for (int i = 0; i < numInsertions; i++) {
Assert.assertTrue(" filter might contains " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
});
private final FilterTesterStrategy testerStrategy;
BloomFilterTestStrategy(FilterTesterStrategy testerStrategy) {
this.testerStrategy = testerStrategy;
}
public FilterTesterStrategy getStrategy() {
return testerStrategy;
}
}
interface PreAssertionHelper {
public ImmutableSet<Integer> falsePositives(int hashId);
}
}
| 16,963 | 30.76779 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
|
package org.apache.hadoop.util.bloom;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.AbstractCollection;
import java.util.BitSet;
import java.util.Iterator;
import org.apache.hadoop.util.bloom.BloomFilterCommonTester.BloomFilterTestStrategy;
import org.apache.hadoop.util.hash.Hash;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
public class TestBloomFilters {
int numInsertions = 1000;
int bitSize = BloomFilterCommonTester.optimalNumOfBits(numInsertions, 0.03);
int hashFunctionNumber = 5;
private static final ImmutableMap<Integer, ? extends AbstractCollection<Key>> FALSE_POSITIVE_UNDER_1000 = ImmutableMap
.of(Hash.JENKINS_HASH, new AbstractCollection<Key>() {
final ImmutableList<Key> falsePositive = ImmutableList.<Key> of(
new Key("99".getBytes()), new Key("963".getBytes()));
@Override
public Iterator<Key> iterator() {
return falsePositive.iterator();
}
@Override
public int size() {
return falsePositive.size();
}
}, Hash.MURMUR_HASH, new AbstractCollection<Key>() {
final ImmutableList<Key> falsePositive = ImmutableList.<Key> of(
new Key("769".getBytes()), new Key("772".getBytes()),
new Key("810".getBytes()), new Key("874".getBytes()));
@Override
public Iterator<Key> iterator() {
return falsePositive.iterator();
}
@Override
public int size() {
return falsePositive.size();
}
});
private enum Digits {
ODD(1), EVEN(0);
int start;
Digits(int start) {
this.start = start;
}
int getStart() {
return start;
}
}
@Test
public void testDynamicBloomFilter() {
int hashId = Hash.JENKINS_HASH;
Filter filter = new DynamicBloomFilter(bitSize, hashFunctionNumber,
Hash.JENKINS_HASH, 3);
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(filter)
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY))
.test();
assertNotNull("testDynamicBloomFilter error ", filter.toString());
}
@Test
public void testCountingBloomFilter() {
int hashId = Hash.JENKINS_HASH;
CountingBloomFilter filter = new CountingBloomFilter(bitSize,
hashFunctionNumber, hashId);
Key key = new Key(new byte[] { 48, 48 });
filter.add(key);
assertTrue("CountingBloomFilter.membership error ",
filter.membershipTest(key));
assertTrue("CountingBloomFilter.approximateCount error",
filter.approximateCount(key) == 1);
filter.add(key);
assertTrue("CountingBloomFilter.approximateCount error",
filter.approximateCount(key) == 2);
filter.delete(key);
assertTrue("CountingBloomFilter.membership error ",
filter.membershipTest(key));
filter.delete(key);
assertFalse("CountingBloomFilter.membership error ",
filter.membershipTest(key));
assertTrue("CountingBloomFilter.approximateCount error",
filter.approximateCount(key) == 0);
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(filter)
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.FILTER_OR_STRATEGY,
BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
@Test
public void testRetouchedBloomFilterSpecific() {
int numInsertions = 1000;
int hashFunctionNumber = 5;
ImmutableSet<Integer> hashes = ImmutableSet.of(Hash.MURMUR_HASH,
Hash.JENKINS_HASH);
for (Integer hashId : hashes) {
RetouchedBloomFilter filter = new RetouchedBloomFilter(bitSize,
hashFunctionNumber, hashId);
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
RemoveScheme.MAXIMUM_FP);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
RemoveScheme.MAXIMUM_FP);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
RemoveScheme.MINIMUM_FN);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
RemoveScheme.MINIMUM_FN);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
RemoveScheme.RATIO);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
RemoveScheme.RATIO);
filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
}
}
private void checkOnAbsentFalsePositive(int hashId, int numInsertions,
final RetouchedBloomFilter filter, Digits digits, short removeSchema) {
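// Register the known false positives, add every other key starting from
// digits.getStart(), selectively clear the false positives with the given
// scheme, then verify the complementary keys really test absent.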
AbstractCollection<Key> falsePositives = FALSE_POSITIVE_UNDER_1000
.get(hashId);
if (falsePositives == null)
Assert.fail(String.format("false positives for hash %d not found",
hashId));
filter.addFalsePositive(falsePositives);
for (int i = digits.getStart(); i < numInsertions; i += 2) {
filter.add(new Key(Integer.toString(i).getBytes()));
}
for (Key key : falsePositives) {
filter.selectiveClearing(key, removeSchema);
}
for (int i = 1 - digits.getStart(); i < numInsertions; i += 2) {
assertFalse(" testRetouchedBloomFilterAddFalsePositive error " + i,
filter.membershipTest(new Key(Integer.toString(i).getBytes())));
}
}
@Test
public void testFiltersWithJenkinsHash() {
int hashId = Hash.JENKINS_HASH;
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(new BloomFilter(bitSize, hashFunctionNumber, hashId))
.withFilterInstance(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId))
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.FILTER_OR_STRATEGY,
BloomFilterTestStrategy.FILTER_AND_STRATEGY,
BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
@Test
public void testFiltersWithMurmurHash() {
int hashId = Hash.MURMUR_HASH;
BloomFilterCommonTester.of(hashId, numInsertions)
.withFilterInstance(new BloomFilter(bitSize, hashFunctionNumber, hashId))
.withFilterInstance(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId))
.withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
BloomFilterTestStrategy.WRITE_READ_STRATEGY,
BloomFilterTestStrategy.FILTER_OR_STRATEGY,
BloomFilterTestStrategy.FILTER_AND_STRATEGY,
BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
@Test
public void testNot() {
BloomFilter bf = new BloomFilter(8, 1, Hash.JENKINS_HASH);
bf.bits = BitSet.valueOf(new byte[] { (byte) 0x95 });
BitSet origBitSet = (BitSet) bf.bits.clone();
bf.not();
assertFalse("BloomFilter#not should have inverted all bits",
bf.bits.intersects(origBitSet));
}
}
| 9,411 | 36.349206 | 120 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestChildReaper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util.curator;
import org.apache.curator.framework.recipes.locks.Reaper;
import org.apache.curator.test.TestingServer;
import org.apache.curator.utils.CloseableUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.Timing;
import org.apache.zookeeper.data.Stat;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.net.BindException;
import java.util.Random;
/**
* This is a copy of Curator 2.7.1's TestChildReaper class, with minor
* modifications to make it work with JUnit (some setup code taken from
* Curator's BaseClassForTests). This is to ensure that the ChildReaper
* class we modified is still correct.
*/
public class TestChildReaper
{
protected TestingServer server;
@Before
public void setup() throws Exception {
while (this.server == null) {
try {
this.server = new TestingServer();
} catch (BindException var2) {
System.err.println("Getting bind exception - retrying to allocate server");
this.server = null;
}
}
}
@After
public void teardown() throws Exception {
this.server.close();
this.server = null;
}
@Test
public void testSomeNodes() throws Exception
{
Timing timing = new Timing();
ChildReaper reaper = null;
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
try
{
client.start();
Random r = new Random();
int nonEmptyNodes = 0;
for ( int i = 0; i < 10; ++i )
{
client.create().creatingParentsIfNeeded().forPath("/test/" + Integer.toString(i));
if ( r.nextBoolean() )
{
client.create().forPath("/test/" + Integer.toString(i) + "/foo");
++nonEmptyNodes;
}
}
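// Only the empty children of /test should be reaped; the nonEmptyNodes
// given a "foo" child above must survive.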
reaper = new ChildReaper(client, "/test", Reaper.Mode.REAP_UNTIL_DELETE, 1);
reaper.start();
timing.forWaiting().sleepABit();
Stat stat = client.checkExists().forPath("/test");
Assert.assertEquals(stat.getNumChildren(), nonEmptyNodes);
}
finally
{
CloseableUtils.closeQuietly(reaper);
CloseableUtils.closeQuietly(client);
}
}
@Test
public void testSimple() throws Exception
{
Timing timing = new Timing();
ChildReaper reaper = null;
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
try
{
client.start();
for ( int i = 0; i < 10; ++i )
{
client.create().creatingParentsIfNeeded().forPath("/test/" + Integer.toString(i));
}
reaper = new ChildReaper(client, "/test", Reaper.Mode.REAP_UNTIL_DELETE, 1);
reaper.start();
timing.forWaiting().sleepABit();
Stat stat = client.checkExists().forPath("/test");
Assert.assertEquals(stat.getNumChildren(), 0);
}
finally
{
CloseableUtils.closeQuietly(reaper);
CloseableUtils.closeQuietly(client);
}
}
@Test
public void testMultiPath() throws Exception
{
Timing timing = new Timing();
ChildReaper reaper = null;
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
try
{
client.start();
for ( int i = 0; i < 10; ++i )
{
client.create().creatingParentsIfNeeded().forPath("/test1/" + Integer.toString(i));
client.create().creatingParentsIfNeeded().forPath("/test2/" + Integer.toString(i));
client.create().creatingParentsIfNeeded().forPath("/test3/" + Integer.toString(i));
}
reaper = new ChildReaper(client, "/test2", Reaper.Mode.REAP_UNTIL_DELETE, 1);
reaper.start();
reaper.addPath("/test1");
timing.forWaiting().sleepABit();
Stat stat = client.checkExists().forPath("/test1");
Assert.assertEquals(stat.getNumChildren(), 0);
stat = client.checkExists().forPath("/test2");
Assert.assertEquals(stat.getNumChildren(), 0);
stat = client.checkExists().forPath("/test3");
Assert.assertEquals(stat.getNumChildren(), 10);
}
finally
{
CloseableUtils.closeQuietly(reaper);
CloseableUtils.closeQuietly(client);
}
}
@Test
public void testNamespace() throws Exception
{
Timing timing = new Timing();
ChildReaper reaper = null;
CuratorFramework client = CuratorFrameworkFactory.builder()
.connectString(server.getConnectString())
.sessionTimeoutMs(timing.session())
.connectionTimeoutMs(timing.connection())
.retryPolicy(new RetryOneTime(1))
.namespace("foo")
.build();
try
{
client.start();
for ( int i = 0; i < 10; ++i )
{
client.create().creatingParentsIfNeeded().forPath("/test/" + Integer.toString(i));
}
reaper = new ChildReaper(client, "/test", Reaper.Mode.REAP_UNTIL_DELETE, 1);
reaper.start();
timing.forWaiting().sleepABit();
Stat stat = client.checkExists().forPath("/test");
Assert.assertEquals(stat.getNumChildren(), 0);
stat = client.usingNamespace(null).checkExists().forPath("/foo/test");
Assert.assertNotNull(stat);
Assert.assertEquals(stat.getNumChildren(), 0);
}
finally
{
CloseableUtils.closeQuietly(reaper);
CloseableUtils.closeQuietly(client);
}
}
}
| 6,642 | 30.784689 | 158 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TimedOutTestsListener.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.management.LockInfo;
import java.lang.management.ManagementFactory;
import java.lang.management.MonitorInfo;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import org.apache.hadoop.util.StringUtils;
import org.junit.runner.notification.Failure;
import org.junit.runner.notification.RunListener;
/**
* JUnit run listener which prints full thread dump into System.err
* in case a test is failed due to timeout.
*/
public class TimedOutTestsListener extends RunListener {
static final String TEST_TIMED_OUT_PREFIX = "test timed out after";
private static final String INDENT = " ";
private final PrintWriter output;
public TimedOutTestsListener() {
this.output = new PrintWriter(System.err);
}
public TimedOutTestsListener(PrintWriter output) {
this.output = output;
}
@Override
public void testFailure(Failure failure) throws Exception {
if (failure != null && failure.getMessage() != null
&& failure.getMessage().startsWith(TEST_TIMED_OUT_PREFIX)) {
output.println("====> TEST TIMED OUT. PRINTING THREAD DUMP. <====");
output.println();
output.print(buildThreadDiagnosticString());
}
}
public static String buildThreadDiagnosticString() {
StringWriter sw = new StringWriter();
PrintWriter output = new PrintWriter(sw);
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss,SSS");
output.println(String.format("Timestamp: %s", dateFormat.format(new Date())));
output.println();
output.println(buildThreadDump());
String deadlocksInfo = buildDeadlockInfo();
if (deadlocksInfo != null) {
output.println("====> DEADLOCKS DETECTED <====");
output.println();
output.println(deadlocksInfo);
}
return sw.toString();
}
static String buildThreadDump() {
StringBuilder dump = new StringBuilder();
Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
for (Map.Entry<Thread, StackTraceElement[]> e : stackTraces.entrySet()) {
Thread thread = e.getKey();
dump.append(String.format(
"\"%s\" %s prio=%d tid=%d %s\njava.lang.Thread.State: %s",
thread.getName(),
(thread.isDaemon() ? "daemon" : ""),
thread.getPriority(),
thread.getId(),
Thread.State.WAITING.equals(thread.getState()) ?
"in Object.wait()" :
StringUtils.toLowerCase(thread.getState().name()),
Thread.State.WAITING.equals(thread.getState()) ?
"WAITING (on object monitor)" : thread.getState()));
for (StackTraceElement stackTraceElement : e.getValue()) {
dump.append("\n at ");
dump.append(stackTraceElement);
}
dump.append("\n");
}
return dump.toString();
}
static String buildDeadlockInfo() {
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
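// findMonitorDeadlockedThreads reports only cycles over object monitors;
// threads deadlocked purely on java.util.concurrent locks would require
// findDeadlockedThreads instead.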
long[] threadIds = threadBean.findMonitorDeadlockedThreads();
if (threadIds != null && threadIds.length > 0) {
StringWriter stringWriter = new StringWriter();
PrintWriter out = new PrintWriter(stringWriter);
ThreadInfo[] infos = threadBean.getThreadInfo(threadIds, true, true);
for (ThreadInfo ti : infos) {
printThreadInfo(ti, out);
printLockInfo(ti.getLockedSynchronizers(), out);
out.println();
}
out.close();
return stringWriter.toString();
} else {
return null;
}
}
private static void printThreadInfo(ThreadInfo ti, PrintWriter out) {
// print thread information
printThread(ti, out);
// print stack trace with locks
StackTraceElement[] stacktrace = ti.getStackTrace();
MonitorInfo[] monitors = ti.getLockedMonitors();
for (int i = 0; i < stacktrace.length; i++) {
StackTraceElement ste = stacktrace[i];
out.println(INDENT + "at " + ste.toString());
for (MonitorInfo mi : monitors) {
if (mi.getLockedStackDepth() == i) {
out.println(INDENT + " - locked " + mi);
}
}
}
out.println();
}
private static void printThread(ThreadInfo ti, PrintWriter out) {
out.print("\"" + ti.getThreadName() + "\"" + " Id="
+ ti.getThreadId() + " in " + ti.getThreadState());
if (ti.getLockName() != null) {
out.print(" on lock=" + ti.getLockName());
}
if (ti.isSuspended()) {
out.print(" (suspended)");
}
if (ti.isInNative()) {
out.print(" (running in native)");
}
out.println();
if (ti.getLockOwnerName() != null) {
out.println(INDENT + " owned by " + ti.getLockOwnerName() + " Id="
+ ti.getLockOwnerId());
}
}
private static void printLockInfo(LockInfo[] locks, PrintWriter out) {
out.println(INDENT + "Locked synchronizers: count = " + locks.length);
for (LockInfo li : locks) {
out.println(INDENT + " - " + li);
}
out.println();
}
}
| 5,985 | 32.629213 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static com.google.common.base.Preconditions.*;
import org.hamcrest.Description;
import org.junit.Assert;
import static org.mockito.AdditionalMatchers.geq;
import static org.mockito.Mockito.*;
import org.mockito.stubbing.Answer;
import org.mockito.internal.matchers.GreaterThan;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.util.Quantile;
import static org.apache.hadoop.metrics2.lib.Interns.*;
/**
* Helpers for metrics source tests
*/
public class MetricsAsserts {
final static Log LOG = LogFactory.getLog(MetricsAsserts.class);
private static final double EPSILON = 0.00001;
public static MetricsSystem mockMetricsSystem() {
MetricsSystem ms = mock(MetricsSystem.class);
DefaultMetricsSystem.setInstance(ms);
return ms;
}
public static MetricsRecordBuilder mockMetricsRecordBuilder() {
final MetricsCollector mc = mock(MetricsCollector.class);
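// The default answer logs each call and keeps the builder fluent: every
// method returns the mock itself, except parent()/endRecord(), which hand
// back the mock collector.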
MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class,
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) {
Object[] args = invocation.getArguments();
StringBuilder sb = new StringBuilder();
for (Object o : args) {
if (sb.length() > 0) sb.append(", ");
sb.append(String.valueOf(o));
}
String methodName = invocation.getMethod().getName();
LOG.debug(methodName +": "+ sb);
return methodName.equals("parent") || methodName.equals("endRecord") ?
mc : invocation.getMock();
}
});
when(mc.addRecord(anyString())).thenReturn(rb);
when(mc.addRecord(anyInfo())).thenReturn(rb);
return rb;
}
/**
* Call getMetrics on source and get a record builder mock to verify
* @param source the metrics source
* @param all if true, return all metrics even if not changed
* @return the record builder mock to verify
*/
public static MetricsRecordBuilder getMetrics(MetricsSource source,
boolean all) {
MetricsRecordBuilder rb = mockMetricsRecordBuilder();
MetricsCollector mc = rb.parent();
source.getMetrics(mc, all);
return rb;
}
public static MetricsRecordBuilder getMetrics(String name) {
return getMetrics(DefaultMetricsSystem.instance().getSource(name));
}
public static MetricsRecordBuilder getMetrics(MetricsSource source) {
return getMetrics(source, true);
}
private static class InfoWithSameName extends ArgumentMatcher<MetricsInfo> {
private final String expected;
InfoWithSameName(MetricsInfo info) {
expected = checkNotNull(info.name(), "info name");
}
@Override public boolean matches(Object info) {
return expected.equals(((MetricsInfo)info).name());
}
@Override public void describeTo(Description desc) {
desc.appendText("Info with name="+ expected);
}
}
/**
* MetricInfo with the same name
* @param info to match
* @return <code>null</code>
*/
public static MetricsInfo eqName(MetricsInfo info) {
return argThat(new InfoWithSameName(info));
}
private static class AnyInfo extends ArgumentMatcher<MetricsInfo> {
@Override public boolean matches(Object info) {
return info instanceof MetricsInfo; // not null as well
}
}
public static MetricsInfo anyInfo() {
return argThat(new AnyInfo());
}
/**
* Assert an int gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, int expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getIntGauge(name, rb));
}
public static int getIntGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert an int counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertCounter(String name, int expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getIntCounter(name, rb));
}
public static int getIntCounter(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(
Integer.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a long gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, long expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getLongGauge(name, rb));
}
public static long getLongGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a double gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, double expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getDoubleGauge(name, rb), EPSILON);
}
public static double getDoubleGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Double> captor = ArgumentCaptor.forClass(Double.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a long counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertCounter(String name, long expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getLongCounter(name, rb));
}
public static long getLongCounter(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Assert a float gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param rb the record builder mock used to getMetrics
*/
public static void assertGauge(String name, float expected,
MetricsRecordBuilder rb) {
Assert.assertEquals("Bad value for metric " + name,
expected, getFloatGauge(name, rb), EPSILON);
}
public static float getFloatGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Float> captor = ArgumentCaptor.forClass(Float.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Check that this metric was captured exactly once.
*/
private static void checkCaptured(ArgumentCaptor<?> captor, String name) {
Assert.assertEquals("Expected exactly one metric for name " + name,
1, captor.getAllValues().size());
}
/**
* Assert an int gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertGauge(String name, int expected,
MetricsSource source) {
assertGauge(name, expected, getMetrics(source));
}
/**
* Assert an int counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertCounter(String name, int expected,
MetricsSource source) {
assertCounter(name, expected, getMetrics(source));
}
/**
* Assert a long gauge metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertGauge(String name, long expected,
MetricsSource source) {
assertGauge(name, expected, getMetrics(source));
}
/**
* Assert a long counter metric as expected
* @param name of the metric
* @param expected value of the metric
* @param source to get metrics from
*/
public static void assertCounter(String name, long expected,
MetricsSource source) {
assertCounter(name, expected, getMetrics(source));
}
/**
* Assert that a long counter metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param rb the record builder mock used to getMetrics
*/
public static void assertCounterGt(String name, long greater,
MetricsRecordBuilder rb) {
Assert.assertThat("Bad value for metric " + name, getLongCounter(name, rb),
new GreaterThan<Long>(greater));
}
/**
* Assert that a long counter metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param source the metrics source
*/
public static void assertCounterGt(String name, long greater,
MetricsSource source) {
assertCounterGt(name, greater, getMetrics(source));
}
/**
* Assert that a double gauge metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param rb the record builder mock used to getMetrics
*/
public static void assertGaugeGt(String name, double greater,
MetricsRecordBuilder rb) {
Assert.assertThat("Bad value for metric " + name, getDoubleGauge(name, rb),
new GreaterThan<Double>(greater));
}
/**
* Assert that a double gauge metric is greater than a value
* @param name of the metric
* @param greater value of the metric should be greater than this
* @param source the metrics source
*/
public static void assertGaugeGt(String name, double greater,
MetricsSource source) {
assertGaugeGt(name, greater, getMetrics(source));
}
/**
* Asserts that the NumOps and percentile-latency gauges for a metric were
* emitted with non-negative values.
*
* @param prefix of the metric
* @param rb MetricsRecordBuilder with the metric
*/
public static void assertQuantileGauges(String prefix,
MetricsRecordBuilder rb) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.quantiles) {
String nameTemplate = prefix + "%dthPercentileLatency";
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
eqName(info(String.format(nameTemplate, percentile), "")),
geq(0L));
}
}
}
| 13,192 | 34.181333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.notification.Failure;
public class TestTimedOutTestsListener {
public static class Deadlock {
private CyclicBarrier barrier = new CyclicBarrier(6);
public Deadlock() {
DeadlockThread[] dThreads = new DeadlockThread[6];
Monitor a = new Monitor("a");
Monitor b = new Monitor("b");
Monitor c = new Monitor("c");
dThreads[0] = new DeadlockThread("MThread-1", a, b);
dThreads[1] = new DeadlockThread("MThread-2", b, c);
dThreads[2] = new DeadlockThread("MThread-3", c, a);
Lock d = new ReentrantLock();
Lock e = new ReentrantLock();
Lock f = new ReentrantLock();
dThreads[3] = new DeadlockThread("SThread-4", d, e);
dThreads[4] = new DeadlockThread("SThread-5", e, f);
dThreads[5] = new DeadlockThread("SThread-6", f, d);
// make them daemon threads so that the test will exit
for (int i = 0; i < 6; i++) {
dThreads[i].setDaemon(true);
dThreads[i].start();
}
}
class DeadlockThread extends Thread {
private Lock lock1 = null;
private Lock lock2 = null;
private Monitor mon1 = null;
private Monitor mon2 = null;
private boolean useSync;
DeadlockThread(String name, Lock lock1, Lock lock2) {
super(name);
this.lock1 = lock1;
this.lock2 = lock2;
this.useSync = true;
}
DeadlockThread(String name, Monitor mon1, Monitor mon2) {
super(name);
this.mon1 = mon1;
this.mon2 = mon2;
this.useSync = false;
}
      @Override
      public void run() {
if (useSync) {
syncLock();
} else {
monitorLock();
}
}
private void syncLock() {
lock1.lock();
try {
try {
barrier.await();
        } catch (Exception e) {
          // ignored: the barrier is only used to line the threads up
        }
goSyncDeadlock();
} finally {
lock1.unlock();
}
}
private void goSyncDeadlock() {
try {
barrier.await();
      } catch (Exception e) {
        // ignored
      }
lock2.lock();
throw new RuntimeException("should not reach here.");
}
private void monitorLock() {
synchronized (mon1) {
try {
barrier.await();
        } catch (Exception e) {
          // ignored
        }
goMonitorDeadlock();
}
}
private void goMonitorDeadlock() {
try {
barrier.await();
      } catch (Exception e) {
        // ignored
      }
synchronized (mon2) {
throw new RuntimeException(getName() + " should not reach here.");
}
}
}
class Monitor {
String name;
Monitor(String name) {
this.name = name;
}
}
}
@Test(timeout=500)
public void testThreadDumpAndDeadlocks() throws Exception {
new Deadlock();
String s = null;
while (true) {
s = TimedOutTestsListener.buildDeadlockInfo();
if (s != null)
break;
Thread.sleep(100);
}
Assert.assertEquals(3, countStringOccurrences(s, "BLOCKED"));
Failure failure = new Failure(
null, new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
StringWriter writer = new StringWriter();
new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
String out = writer.toString();
Assert.assertTrue(out.contains("THREAD DUMP"));
Assert.assertTrue(out.contains("DEADLOCKS DETECTED"));
System.out.println(out);
}
private int countStringOccurrences(String s, String substr) {
int n = 0;
int index = 0;
while ((index = s.indexOf(substr, index) + 1) != 0) {
n++;
}
return n;
}
}
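// Illustrative wiring (a sketch): outside of this self-test, the listener is
// registered with the JUnit runner, e.g. programmatically via JUnitCore
// (assuming the default constructor, which writes to standard error);
// 'MyTest' is a placeholder test class.
//
//   JUnitCore core = new JUnitCore();
//   core.addListener(new TimedOutTestsListener());
//   core.run(MyTest.class);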
| 4,881 | 25.824176 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestMultithreadedTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.util.Time;
public class TestMultithreadedTestUtil {
private static final String FAIL_MSG =
"Inner thread fails an assert";
@Test
public void testNoErrors() throws Exception {
final AtomicInteger threadsRun = new AtomicInteger();
TestContext ctx = new TestContext();
for (int i = 0; i < 3; i++) {
ctx.addThread(new TestingThread(ctx) {
@Override
public void doWork() throws Exception {
threadsRun.incrementAndGet();
}
});
}
assertEquals(0, threadsRun.get());
ctx.startThreads();
long st = Time.now();
ctx.waitFor(30000);
long et = Time.now();
// All threads should have run
assertEquals(3, threadsRun.get());
// Test shouldn't have waited the full 30 seconds, since
// the threads exited faster than that.
assertTrue("Test took " + (et - st) + "ms",
et - st < 5000);
}
@Test
public void testThreadFails() throws Exception {
TestContext ctx = new TestContext();
ctx.addThread(new TestingThread(ctx) {
@Override
public void doWork() throws Exception {
fail(FAIL_MSG);
}
});
ctx.startThreads();
long st = Time.now();
try {
ctx.waitFor(30000);
fail("waitFor did not throw");
} catch (RuntimeException rte) {
// expected
assertEquals(FAIL_MSG, rte.getCause().getMessage());
}
long et = Time.now();
// Test shouldn't have waited the full 30 seconds, since
// the thread throws faster than that
assertTrue("Test took " + (et - st) + "ms",
et - st < 5000);
}
@Test
public void testThreadThrowsCheckedException() throws Exception {
TestContext ctx = new TestContext();
ctx.addThread(new TestingThread(ctx) {
@Override
public void doWork() throws Exception {
throw new IOException("my ioe");
}
});
ctx.startThreads();
long st = Time.now();
try {
ctx.waitFor(30000);
fail("waitFor did not throw");
} catch (RuntimeException rte) {
// expected
assertEquals("my ioe", rte.getCause().getMessage());
}
long et = Time.now();
// Test shouldn't have waited the full 30 seconds, since
// the thread throws faster than that
assertTrue("Test took " + (et - st) + "ms",
et - st < 5000);
}
@Test
public void testRepeatingThread() throws Exception {
final AtomicInteger counter = new AtomicInteger();
TestContext ctx = new TestContext();
ctx.addThread(new RepeatingTestThread(ctx) {
@Override
public void doAnAction() throws Exception {
counter.incrementAndGet();
}
});
ctx.startThreads();
long st = Time.now();
ctx.waitFor(3000);
ctx.stop();
long et = Time.now();
long elapsed = et - st;
// Test should have waited just about 3 seconds
assertTrue("Test took " + (et - st) + "ms",
Math.abs(elapsed - 3000) < 500);
// Counter should have been incremented lots of times in 3 full seconds
assertTrue("Counter value = " + counter.get(),
counter.get() > 1000);
}
}
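// Illustrative pattern (a sketch of how a test typically drives TestContext;
// the workload body is a placeholder):
//
//   TestContext ctx = new TestContext();
//   ctx.addThread(new RepeatingTestThread(ctx) {
//     @Override
//     public void doAnAction() throws Exception {
//       // exercise the code under test here
//     }
//   });
//   ctx.startThreads();
//   ctx.waitFor(5000);  // run the workload for ~5 seconds, failing fast on errors
//   ctx.stop();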
| 4,303 | 29.742857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MockitoMaker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import static org.mockito.Mockito.*;
/**
* Helper class to create one-liner stubs, so that instead of: <pre>
* SomeType someDescriptiveMock = mock(SomeType.class);
* when(someDescriptiveMock.someMethod()).thenReturn(someValue);</pre>
* <p>You can now do: <pre>
* SomeType someDescriptiveMock = make(stub(SomeType.class)
* .returning(someValue).from.someMethod());</pre>
*/
public class MockitoMaker {
/**
* Create a mock object from a mocked method call.
*
* @param <T> type of mocked object
* @param methodCall for mocked object
* @return mocked object
*/
@SuppressWarnings("unchecked")
public static <T> T make(Object methodCall) {
StubBuilder<T> sb = StubBuilder.current();
when(methodCall).thenReturn(sb.firstReturn, sb.laterReturns);
return (T) StubBuilder.current().from;
}
/**
* Create a stub builder of a mocked object.
*
* @param <T> type of the target object to be mocked
* @param target class of the target object to be mocked
* @return the stub builder of the mocked object
*/
public static <T> StubBuilder<T> stub(Class<T> target) {
return new StubBuilder<T>(mock(target));
}
/**
* Builder class for stubs
* @param <T> type of the object to be mocked
*/
public static class StubBuilder<T> {
/**
* The target mock object
*/
public final T from;
// We want to be able to use this even when the tests are run in parallel.
@SuppressWarnings("rawtypes")
private static final ThreadLocal<StubBuilder> tls =
new ThreadLocal<StubBuilder>() {
@Override protected StubBuilder initialValue() {
return new StubBuilder();
}
};
private Object firstReturn = null;
private Object[] laterReturns = {};
/**
* Default constructor for the initial stub builder
*/
public StubBuilder() {
this.from = null;
}
/**
* Construct a stub builder with a mock instance
*
* @param mockInstance the mock object
*/
public StubBuilder(T mockInstance) {
tls.set(this);
this.from = mockInstance;
}
/**
* Get the current stub builder from thread local
*
     * @param <T> type of the mocked object
* @return the stub builder of the mocked object
*/
@SuppressWarnings("unchecked")
public static <T> StubBuilder<T> current() {
return tls.get();
}
/**
* Set the return value for the current stub builder
*
* @param value the return value
* @return the stub builder
*/
public StubBuilder<T> returning(Object value) {
this.firstReturn = value;
return this;
}
/**
* Set the return values for the current stub builder
*
* @param value the first return value
* @param values the return values for later invocations
* @return the stub builder
*/
public StubBuilder<T> returning(Object value, Object... values) {
this.firstReturn = value;
this.laterReturns = values;
return this;
}
}
}
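// Illustrative one-liner (a sketch; the 'Job' interface is hypothetical):
//
//   interface Job { int retries(); }
//
//   Job job = MockitoMaker.make(MockitoMaker.stub(Job.class)
//       .returning(3).from.retries());
//   assert job.retries() == 3;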
| 3,888 | 28.240602 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.File;
import java.io.IOException;
import java.io.StringWriter;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.WriterAppender;
import org.junit.Assert;
import org.junit.Assume;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Joiner;
import com.google.common.base.Supplier;
import com.google.common.collect.Sets;
/**
* Test provides some very generic helpers which might be used across the tests
*/
public abstract class GenericTestUtils {
private static final AtomicInteger sequence = new AtomicInteger();
@SuppressWarnings("unchecked")
public static void disableLog(Log log) {
// We expect that commons-logging is a wrapper around Log4j.
disableLog((Log4JLogger) log);
}
public static Logger toLog4j(org.slf4j.Logger logger) {
return LogManager.getLogger(logger.getName());
}
public static void disableLog(Log4JLogger log) {
log.getLogger().setLevel(Level.OFF);
}
public static void disableLog(Logger logger) {
logger.setLevel(Level.OFF);
}
public static void disableLog(org.slf4j.Logger logger) {
disableLog(toLog4j(logger));
}
@SuppressWarnings("unchecked")
public static void setLogLevel(Log log, Level level) {
// We expect that commons-logging is a wrapper around Log4j.
setLogLevel((Log4JLogger) log, level);
}
public static void setLogLevel(Log4JLogger log, Level level) {
log.getLogger().setLevel(level);
}
public static void setLogLevel(Logger logger, Level level) {
logger.setLevel(level);
}
public static void setLogLevel(org.slf4j.Logger logger, Level level) {
setLogLevel(toLog4j(logger), level);
}
/**
* Extracts the name of the method where the invocation has happened
* @return String name of the invoking method
*/
public static String getMethodName() {
return Thread.currentThread().getStackTrace()[2].getMethodName();
}
/**
* Generates a process-wide unique sequence number.
* @return an unique sequence number
*/
public static int uniqueSequenceId() {
return sequence.incrementAndGet();
}
/**
* Assert that a given file exists.
*/
public static void assertExists(File f) {
Assert.assertTrue("File " + f + " should exist", f.exists());
}
/**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
* @throws IOException if the dir is inaccessible
*/
public static void assertGlobEquals(File dir, String pattern,
String ... expectedMatches) throws IOException {
Set<String> found = Sets.newTreeSet();
for (File f : FileUtil.listFiles(dir)) {
if (f.getName().matches(pattern)) {
found.add(f.getName());
}
}
Set<String> expectedSet = Sets.newTreeSet(
Arrays.asList(expectedMatches));
Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
Joiner.on(",").join(expectedSet),
Joiner.on(",").join(found));
}
public static void assertExceptionContains(String string, Throwable t) {
String msg = t.getMessage();
Assert.assertTrue(
"Expected to find '" + string + "' but got unexpected exception:"
+ StringUtils.stringifyException(t), msg.contains(string));
}
public static void waitFor(Supplier<Boolean> check,
int checkEveryMillis, int waitForMillis)
      throws TimeoutException, InterruptedException {
long st = Time.now();
do {
boolean result = check.get();
if (result) {
return;
}
Thread.sleep(checkEveryMillis);
} while (Time.now() - st < waitForMillis);
throw new TimeoutException("Timed out waiting for condition. " +
"Thread diagnostics:\n" +
TimedOutTestsListener.buildThreadDiagnosticString());
}
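  // Illustrative call (a sketch; 'server' and isStarted() are hypothetical):
  //
  //   GenericTestUtils.waitFor(new Supplier<Boolean>() {
  //     @Override
  //     public Boolean get() {
  //       return server.isStarted();
  //     }
  //   }, 100, 60000);  // poll every 100ms, time out after 60s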
public static class LogCapturer {
private StringWriter sw = new StringWriter();
private WriterAppender appender;
private Logger logger;
public static LogCapturer captureLogs(Log l) {
Logger logger = ((Log4JLogger)l).getLogger();
LogCapturer c = new LogCapturer(logger);
return c;
}
    private LogCapturer(Logger logger) {
      this.logger = logger;
      Layout layout = Logger.getRootLogger().getAppender("stdout").getLayout();
      // keep a handle on the appender so stopCapturing() can remove it
      this.appender = new WriterAppender(layout, sw);
      logger.addAppender(this.appender);
    }
public String getOutput() {
return sw.toString();
}
public void stopCapturing() {
logger.removeAppender(appender);
}
}
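  // Illustrative capture (a sketch; LOG is whatever commons-logging Log the
  // code under test writes to, and doSomethingThatLogs() is hypothetical):
  //
  //   LogCapturer logs = LogCapturer.captureLogs(LOG);
  //   doSomethingThatLogs();
  //   assertTrue(logs.getOutput().contains("expected message"));
  //   logs.stopCapturing();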
/**
* Mockito answer helper that triggers one latch as soon as the
* method is called, then waits on another before continuing.
*/
public static class DelayAnswer implements Answer<Object> {
private final Log LOG;
private final CountDownLatch fireLatch = new CountDownLatch(1);
private final CountDownLatch waitLatch = new CountDownLatch(1);
private final CountDownLatch resultLatch = new CountDownLatch(1);
private final AtomicInteger fireCounter = new AtomicInteger(0);
private final AtomicInteger resultCounter = new AtomicInteger(0);
// Result fields set after proceed() is called.
private volatile Throwable thrown;
private volatile Object returnValue;
public DelayAnswer(Log log) {
this.LOG = log;
}
/**
* Wait until the method is called.
*/
public void waitForCall() throws InterruptedException {
fireLatch.await();
}
/**
* Tell the method to proceed.
* This should only be called after waitForCall()
*/
public void proceed() {
waitLatch.countDown();
}
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
LOG.info("DelayAnswer firing fireLatch");
fireCounter.getAndIncrement();
fireLatch.countDown();
try {
LOG.info("DelayAnswer waiting on waitLatch");
waitLatch.await();
LOG.info("DelayAnswer delay complete");
} catch (InterruptedException ie) {
throw new IOException("Interrupted waiting on latch", ie);
}
return passThrough(invocation);
}
protected Object passThrough(InvocationOnMock invocation) throws Throwable {
try {
Object ret = invocation.callRealMethod();
returnValue = ret;
return ret;
} catch (Throwable t) {
thrown = t;
throw t;
} finally {
resultCounter.incrementAndGet();
resultLatch.countDown();
}
}
/**
* After calling proceed(), this will wait until the call has
* completed and a result has been returned to the caller.
*/
public void waitForResult() throws InterruptedException {
resultLatch.await();
}
/**
* After the call has gone through, return any exception that
* was thrown, or null if no exception was thrown.
*/
public Throwable getThrown() {
return thrown;
}
/**
* After the call has gone through, return the call's return value,
* or null in case it was void or an exception was thrown.
*/
public Object getReturnValue() {
return returnValue;
}
public int getFireCount() {
return fireCounter.get();
}
public int getResultCount() {
return resultCounter.get();
}
}
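  // Illustrative use with Mockito (a sketch; 'spyObj' and flush() are
  // hypothetical):
  //
  //   DelayAnswer delayer = new DelayAnswer(LOG);
  //   Mockito.doAnswer(delayer).when(spyObj).flush();
  //   // ... trigger flush() from another thread ...
  //   delayer.waitForCall();    // blocks until flush() is entered
  //   delayer.proceed();        // let the blocked call continue
  //   delayer.waitForResult();  // blocks until flush() has returned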
/**
* An Answer implementation that simply forwards all calls through
* to a delegate.
*
* This is useful as the default Answer for a mock object, to create
* something like a spy on an RPC proxy. For example:
* <code>
* NamenodeProtocol origNNProxy = secondary.getNameNode();
* NamenodeProtocol spyNNProxy = Mockito.mock(NameNodeProtocol.class,
* new DelegateAnswer(origNNProxy);
* doThrow(...).when(spyNNProxy).getBlockLocations(...);
* ...
* </code>
*/
public static class DelegateAnswer implements Answer<Object> {
private final Object delegate;
private final Log log;
public DelegateAnswer(Object delegate) {
this(null, delegate);
}
public DelegateAnswer(Log log, Object delegate) {
this.log = log;
this.delegate = delegate;
}
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
try {
if (log != null) {
log.info("Call to " + invocation + " on " + delegate,
new Exception("TRACE"));
}
return invocation.getMethod().invoke(
delegate, invocation.getArguments());
} catch (InvocationTargetException ite) {
throw ite.getCause();
}
}
}
/**
* An Answer implementation which sleeps for a random number of milliseconds
* between 0 and a configurable value before delegating to the real
* implementation of the method. This can be useful for drawing out race
* conditions.
*/
public static class SleepAnswer implements Answer<Object> {
private final int maxSleepTime;
private static Random r = new Random();
public SleepAnswer(int maxSleepTime) {
this.maxSleepTime = maxSleepTime;
}
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
boolean interrupted = false;
try {
Thread.sleep(r.nextInt(maxSleepTime));
} catch (InterruptedException ie) {
interrupted = true;
}
try {
return invocation.callRealMethod();
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
}
}
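  // Illustrative use (a sketch; 'spyStore' and put() are hypothetical, and
  // the target must be a spy because SleepAnswer calls the real method):
  //
  //   Mockito.doAnswer(new SleepAnswer(250)).when(spyStore).put(Mockito.any());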
public static void assertDoesNotMatch(String output, String pattern) {
Assert.assertFalse("Expected output to match /" + pattern + "/" +
" but got:\n" + output,
Pattern.compile(pattern).matcher(output).find());
}
public static void assertMatches(String output, String pattern) {
Assert.assertTrue("Expected output to match /" + pattern + "/" +
" but got:\n" + output,
Pattern.compile(pattern).matcher(output).find());
}
public static void assertValueNear(long expected, long actual, long allowedError) {
assertValueWithinRange(expected - allowedError, expected + allowedError, actual);
}
public static void assertValueWithinRange(long expectedMin, long expectedMax,
long actual) {
Assert.assertTrue("Expected " + actual + " to be in range (" + expectedMin + ","
+ expectedMax + ")", expectedMin <= actual && actual <= expectedMax);
}
/**
* Assert that there are no threads running whose name matches the
* given regular expression.
* @param regex the regex to match against
*/
public static void assertNoThreadsMatching(String regex) {
Pattern pattern = Pattern.compile(regex);
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
for (ThreadInfo info : infos) {
if (info == null) continue;
if (pattern.matcher(info.getThreadName()).matches()) {
Assert.fail("Leaked thread: " + info + "\n" +
Joiner.on("\n").join(info.getStackTrace()));
}
}
}
/**
* Skip test if native build profile of Maven is not activated.
* Sub-project using this must set 'runningWithNative' property to true
* in the definition of native profile in pom.xml.
*/
public static void assumeInNativeProfile() {
Assume.assumeTrue(
Boolean.valueOf(System.getProperty("runningWithNative", "false")));
}
}
| 13,221 | 30.037559 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MoreAsserts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.util.Iterator;
import org.junit.Assert;
/**
* A few more asserts
*/
public class MoreAsserts {
/**
* Assert equivalence for array and iterable
* @param <T> the type of the elements
* @param s the name/message for the collection
* @param expected the expected array of elements
* @param actual the actual iterable of elements
*/
public static <T> void assertEquals(String s, T[] expected,
Iterable<T> actual) {
Iterator<T> it = actual.iterator();
int i = 0;
for (; i < expected.length && it.hasNext(); ++i) {
Assert.assertEquals("Element "+ i +" for "+ s, expected[i], it.next());
}
Assert.assertTrue("Expected more elements", i == expected.length);
Assert.assertTrue("Expected less elements", !it.hasNext());
}
/**
* Assert equality for two iterables
* @param <T> the type of the elements
   * @param s the name/message for the comparison
   * @param expected the expected iterable of elements
   * @param actual the actual iterable of elements
*/
public static <T> void assertEquals(String s, Iterable<T> expected,
Iterable<T> actual) {
Iterator<T> ite = expected.iterator();
Iterator<T> ita = actual.iterator();
    int i = 0;
    while (ite.hasNext() && ita.hasNext()) {
      Assert.assertEquals("Element "+ i +" for "+s, ite.next(), ita.next());
      ++i;  // was missing, so failure messages always reported element 0
    }
    Assert.assertTrue("Expected more elements", !ite.hasNext());
    Assert.assertTrue("Expected fewer elements", !ita.hasNext());
}
}
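// Illustrative check (a sketch):
//
//   MoreAsserts.assertEquals("names", new String[] {"a", "b"},
//       java.util.Arrays.asList("a", "b"));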
| 2,305 | 33.41791 | 77 |
java
|