repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 1-191 ⌀ | stringlengths 23-351 | stringlengths 0-5.32M | int64 0-5.32M | float64 0-2.9k | int64 0-288k | stringclasses 1 value
---|---|---|---|---|---|---
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specification of a direct ByteBuffer 'de-compressor'.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface DirectDecompressor {
/*
* This exposes a direct interface for record decompression with direct byte
* buffers.
*
* The decompress() function need not always consume the buffers provided;
* it may need to be called multiple times to decompress an entire buffer,
* and the object will hold the compression context internally.
*
* Codecs such as {@link SnappyCodec} may or may not support partial
* decompression of buffers and will need enough space in the destination
* buffer to decompress an entire block.
*
* The operation is modelled around dst.put(src);
*
* The end result will move src.position() by the bytes-read and
* dst.position() by the bytes-written. It should not modify the src.limit()
* or dst.limit() to maintain consistency of operation between codecs.
*
* @param src Source direct {@link ByteBuffer} for reading from. Requires src
* != null and src.remaining() > 0
*
* @param dst Destination direct {@link ByteBuffer} for storing the results
* into. Requires dst != null and dst.remaining() to be > 0
*
* @throws IOException if decompression fails
*/
public void decompress(ByteBuffer src, ByteBuffer dst) throws IOException;
}
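// Usage sketch (illustrative, not part of the file above): driving the
// DirectDecompressor contract with direct ByteBuffers. It assumes 'src'
// already holds one complete compressed block and 'dst' has enough room for
// the output; the decompressor is obtained from DefaultCodec, whose
// createDirectDecompressor() may return null if no direct implementation
// is available.
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.DirectDecompressor;

class DirectDecompressExample {
  static int decompressBlock(ByteBuffer src, ByteBuffer dst) throws IOException {
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(new Configuration());
    DirectDecompressor dd = codec.createDirectDecompressor();
    if (dd == null) {
      throw new IOException("no direct decompressor available");
    }
    int before = dst.position();
    // Modelled on dst.put(src): advances src.position() by the bytes read and
    // dst.position() by the bytes written; the limits are left untouched.
    dd.decompress(src, dst);
    return dst.position() - before;
  }
}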
| 2,392 | 38.883333 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.lz4.Lz4Compressor;
import org.apache.hadoop.io.compress.lz4.Lz4Decompressor;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* This class creates lz4 compressors/decompressors.
*/
public class Lz4Codec implements Configurable, CompressionCodec {
static {
NativeCodeLoader.isNativeCodeLoaded();
}
Configuration conf;
/**
* Set the configuration to be used by this object.
*
* @param conf the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Return the configuration used by this object.
*
* @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
}
/**
* Are the native lz4 libraries loaded & initialized?
*
* @return true if loaded & initialized, otherwise false
*/
public static boolean isNativeCodeLoaded() {
return NativeCodeLoader.isNativeCodeLoaded();
}
public static String getLibraryName() {
return Lz4Compressor.getLibraryName();
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream}.
*
* @param out the location for the final output stream
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
if (!isNativeCodeLoaded()) {
throw new RuntimeException("native lz4 library not available");
}
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
int compressionOverhead = bufferSize/255 + 16;
return new BlockCompressorStream(out, compressor, bufferSize,
compressionOverhead);
}
/**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
if (!isNativeCodeLoaded()) {
throw new RuntimeException("native lz4 library not available");
}
return Lz4Compressor.class;
}
/**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
if (!isNativeCodeLoaded()) {
throw new RuntimeException("native lz4 library not available");
}
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
boolean useLz4HC = conf.getBoolean(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
return new Lz4Compressor(bufferSize, useLz4HC);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* input stream.
*
* @param in the stream to read compressed bytes from
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in the stream to read compressed bytes from
* @param decompressor decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
if (!isNativeCodeLoaded()) {
throw new RuntimeException("native lz4 library not available");
}
return new BlockDecompressorStream(in, decompressor, conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
}
/**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
@Override
public Class<? extends Decompressor> getDecompressorType() {
if (!isNativeCodeLoaded()) {
throw new RuntimeException("native lz4 library not available");
}
return Lz4Decompressor.class;
}
/**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
if (!isNativeCodeLoaded()) {
throw new RuntimeException("native lz4 library not available");
}
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
return new Lz4Decompressor(bufferSize);
}
/**
* Get the default filename extension for this kind of compression.
*
* @return <code>.lz4</code>.
*/
@Override
public String getDefaultExtension() {
return ".lz4";
}
}
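// Usage sketch (illustrative): writing an .lz4 block stream with this codec.
// It assumes the native lz4 library is loaded (the codec throws otherwise) and
// uses a made-up output filename; the buffer-size key tunes the block size of
// the BlockCompressorStream returned by createOutputStream().
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Lz4Codec;

class Lz4WriteExample {
  public static void main(String[] args) throws Exception {
    if (!Lz4Codec.isNativeCodeLoaded()) {
      System.err.println("native lz4 library not available");
      return;
    }
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
        256 * 1024);
    Lz4Codec codec = new Lz4Codec();
    codec.setConf(conf);
    try (CompressionOutputStream out = codec.createOutputStream(
        new FileOutputStream("data" + codec.getDefaultExtension()))) {
      out.write("hello lz4".getBytes(StandardCharsets.UTF_8));
      out.finish();
    }
  }
}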
| 7,202 | 30.731278 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DoNotPool.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* This is a marker annotation that marks a compressor or decompressor
* type as not to be pooled.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@Documented
public @interface DoNotPool {
}
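// Usage sketch (illustrative): a hypothetical decompressor that keeps
// per-stream state and therefore should not be recycled. Placing the marker
// annotation on the type is all that is needed; the codec pool is expected to
// check for it and skip reuse of such instances.
import org.apache.hadoop.io.compress.DoNotPool;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;

@DoNotPool
class NonReusableSnappyDecompressor extends SnappyDecompressor {
  // per-stream state would live here
}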
| 1,263 | 35.114286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
Configuration conf;
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
return new CompressorStream(out, compressor,
conf.getInt("io.file.buffer.size", 4*1024));
}
@Override
public Class<? extends Compressor> getCompressorType() {
return ZlibFactory.getZlibCompressorType(conf);
}
@Override
public Compressor createCompressor() {
return ZlibFactory.getZlibCompressor(conf);
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
return new DecompressorStream(in, decompressor,
conf.getInt("io.file.buffer.size", 4*1024));
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return ZlibFactory.getZlibDecompressorType(conf);
}
@Override
public Decompressor createDecompressor() {
return ZlibFactory.getZlibDecompressor(conf);
}
/**
* {@inheritDoc}
*/
@Override
public DirectDecompressor createDirectDecompressor() {
return ZlibFactory.getZlibDirectDecompressor(conf);
}
@Override
public String getDefaultExtension() {
return ".deflate";
}
}
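// Usage sketch (illustrative): an in-memory round trip through this codec.
// DefaultCodec can fall back to the JDK zlib implementation, so no native
// library is assumed; the sample text is arbitrary.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

class DefaultCodecRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // ReflectionUtils.newInstance() also injects the Configuration.
    DefaultCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

    // Compress into memory; closing the stream returns the pooled compressor.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (CompressionOutputStream out = codec.createOutputStream(compressed)) {
      out.write("some repetitive text, some repetitive text"
          .getBytes(StandardCharsets.UTF_8));
      out.finish();
    }

    // Decompress and print.
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (CompressionInputStream in = codec.createInputStream(
        new ByteArrayInputStream(compressed.toByteArray()))) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) > 0) {
        restored.write(buf, 0, n);
      }
    }
    System.out.println(restored.toString(StandardCharsets.UTF_8.name()));
  }
}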
| 3,508 | 29.25 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.*;
import java.util.zip.GZIPOutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.zlib.*;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
/**
* This class creates gzip compressors/decompressors.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class GzipCodec extends DefaultCodec {
/**
* A bridge that wraps around a DeflaterOutputStream to make it
* a CompressionOutputStream.
*/
@InterfaceStability.Evolving
protected static class GzipOutputStream extends CompressorStream {
private static class ResetableGZIPOutputStream extends GZIPOutputStream {
private static final int TRAILER_SIZE = 8;
public static final String JVMVersion= System.getProperty("java.version");
private static final boolean HAS_BROKEN_FINISH =
(IBM_JAVA && JVMVersion.contains("1.6.0"));
public ResetableGZIPOutputStream(OutputStream out) throws IOException {
super(out);
}
public void resetState() throws IOException {
def.reset();
}
}
public GzipOutputStream(OutputStream out) throws IOException {
super(new ResetableGZIPOutputStream(out));
}
/**
* Allow subclasses to put a different stream type in here.
* @param out the Deflater stream to use
*/
protected GzipOutputStream(CompressorStream out) {
super(out);
}
@Override
public void close() throws IOException {
out.close();
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void write(int b) throws IOException {
out.write(b);
}
@Override
public void write(byte[] data, int offset, int length)
throws IOException {
out.write(data, offset, length);
}
@Override
public void finish() throws IOException {
((ResetableGZIPOutputStream) out).finish();
}
@Override
public void resetState() throws IOException {
((ResetableGZIPOutputStream) out).resetState();
}
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
if (!ZlibFactory.isNativeZlibLoaded(conf)) {
return new GzipOutputStream(out);
}
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
return (compressor != null) ?
new CompressorStream(out, compressor,
conf.getInt("io.file.buffer.size",
4*1024)) :
createOutputStream(out);
}
@Override
public Compressor createCompressor() {
return (ZlibFactory.isNativeZlibLoaded(conf))
? new GzipZlibCompressor(conf)
: null;
}
@Override
public Class<? extends Compressor> getCompressorType() {
return ZlibFactory.isNativeZlibLoaded(conf)
? GzipZlibCompressor.class
: null;
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
if (decompressor == null) {
decompressor = createDecompressor(); // always succeeds (or throws)
}
return new DecompressorStream(in, decompressor,
conf.getInt("io.file.buffer.size", 4*1024));
}
@Override
public Decompressor createDecompressor() {
return (ZlibFactory.isNativeZlibLoaded(conf))
? new GzipZlibDecompressor()
: new BuiltInGzipDecompressor();
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return ZlibFactory.isNativeZlibLoaded(conf)
? GzipZlibDecompressor.class
: BuiltInGzipDecompressor.class;
}
@Override
public DirectDecompressor createDirectDecompressor() {
return ZlibFactory.isNativeZlibLoaded(conf)
? new ZlibDecompressor.ZlibDirectDecompressor(
ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB, 0) : null;
}
@Override
public String getDefaultExtension() {
return ".gz";
}
static final class GzipZlibCompressor extends ZlibCompressor {
public GzipZlibCompressor() {
super(ZlibCompressor.CompressionLevel.DEFAULT_COMPRESSION,
ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
ZlibCompressor.CompressionHeader.GZIP_FORMAT, 64*1024);
}
public GzipZlibCompressor(Configuration conf) {
super(ZlibFactory.getCompressionLevel(conf),
ZlibFactory.getCompressionStrategy(conf),
ZlibCompressor.CompressionHeader.GZIP_FORMAT,
64 * 1024);
}
}
static final class GzipZlibDecompressor extends ZlibDecompressor {
public GzipZlibDecompressor() {
super(ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB, 64*1024);
}
}
}
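// Usage sketch (illustrative): producing a standard .gz file with this codec.
// The output filename is made up. Without native zlib the codec falls back to
// the GzipOutputStream wrapper defined above; with native zlib it uses the
// pooled GzipZlibCompressor. Either way the result should be readable by
// ordinary gzip tools.
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

class GzipWriteExample {
  public static void main(String[] args) throws Exception {
    GzipCodec codec =
        ReflectionUtils.newInstance(GzipCodec.class, new Configuration());
    try (CompressionOutputStream out = codec.createOutputStream(
        new FileOutputStream("example" + codec.getDefaultExtension()))) {
      out.write("hello gzip".getBytes(StandardCharsets.UTF_8));
      out.finish();
    }
  }
}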
| 6,382 | 30.136585 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor.SnappyDirectDecompressor;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* This class creates snappy compressors/decompressors.
*/
public class SnappyCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
Configuration conf;
/**
* Set the configuration to be used by this object.
*
* @param conf the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Return the configuration used by this object.
*
* @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
}
/**
* Are the native snappy libraries loaded & initialized?
*/
public static void checkNativeCodeLoaded() {
if (!NativeCodeLoader.isNativeCodeLoaded() ||
!NativeCodeLoader.buildSupportsSnappy()) {
throw new RuntimeException("native snappy library not available: " +
"this version of libhadoop was built without " +
"snappy support.");
}
if (!SnappyCompressor.isNativeCodeLoaded()) {
throw new RuntimeException("native snappy library not available: " +
"SnappyCompressor has not been loaded.");
}
if (!SnappyDecompressor.isNativeCodeLoaded()) {
throw new RuntimeException("native snappy library not available: " +
"SnappyDecompressor has not been loaded.");
}
}
public static boolean isNativeCodeLoaded() {
return SnappyCompressor.isNativeCodeLoaded() &&
SnappyDecompressor.isNativeCodeLoaded();
}
public static String getLibraryName() {
return SnappyCompressor.getLibraryName();
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream}.
*
* @param out the location for the final output stream
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
checkNativeCodeLoaded();
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
int compressionOverhead = (bufferSize / 6) + 32;
return new BlockCompressorStream(out, compressor, bufferSize,
compressionOverhead);
}
/**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
checkNativeCodeLoaded();
return SnappyCompressor.class;
}
/**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
checkNativeCodeLoaded();
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
return new SnappyCompressor(bufferSize);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* input stream.
*
* @param in the stream to read compressed bytes from
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in the stream to read compressed bytes from
* @param decompressor decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
checkNativeCodeLoaded();
return new BlockDecompressorStream(in, decompressor, conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT));
}
/**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
@Override
public Class<? extends Decompressor> getDecompressorType() {
checkNativeCodeLoaded();
return SnappyDecompressor.class;
}
/**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
checkNativeCodeLoaded();
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
return new SnappyDecompressor(bufferSize);
}
/**
* {@inheritDoc}
*/
@Override
public DirectDecompressor createDirectDecompressor() {
return isNativeCodeLoaded() ? new SnappyDirectDecompressor() : null;
}
/**
* Get the default filename extension for this kind of compression.
*
* @return <code>.snappy</code>.
*/
@Override
public String getDefaultExtension() {
return ".snappy";
}
}
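// Usage sketch (illustrative): guarding against a missing native library
// before using this codec, and tuning the block size shared by the
// Block(De)CompressorStream pair. The buffer-size value is arbitrary.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.SnappyCodec;

class SnappyCodecSetup {
  public static void main(String[] args) {
    if (!SnappyCodec.isNativeCodeLoaded()) {
      System.err.println("native snappy library not available");
      return;
    }
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
        128 * 1024);
    SnappyCodec codec = new SnappyCodec();
    codec.setConf(conf);
    Compressor compressor = codec.createCompressor();        // SnappyCompressor
    Decompressor decompressor = codec.createDecompressor();  // SnappyDecompressor
    System.out.println(compressor.getClass() + " / " + decompressor.getClass());
  }
}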
| 7,536 | 31.912664 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* This class encapsulates a streaming compression/decompression pair.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CompressionCodec {
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream}.
*
* @param out the location for the final output stream
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
*/
CompressionOutputStream createOutputStream(OutputStream out)
throws IOException;
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException
*/
CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException;
/**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
Class<? extends Compressor> getCompressorType();
/**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
Compressor createCompressor();
/**
* Create a {@link CompressionInputStream} that will read from the given
* input stream.
*
* @param in the stream to read compressed bytes from
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
CompressionInputStream createInputStream(InputStream in) throws IOException;
/**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in the stream to read compressed bytes from
* @param decompressor decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException;
/**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
Class<? extends Decompressor> getDecompressorType();
/**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
Decompressor createDecompressor();
/**
* Get the default filename extension for this kind of compression.
* @return the extension including the '.'
*/
String getDefaultExtension();
static class Util {
/**
* Create an output stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the output stream.
* @param conf The configuration to use if we need to create a new codec.
* @param out The output stream to wrap.
* @return The new output stream
* @throws IOException
*/
static CompressionOutputStream createOutputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, OutputStream out)
throws IOException {
Compressor compressor = CodecPool.getCompressor(codec, conf);
CompressionOutputStream stream = null;
try {
stream = codec.createOutputStream(out, compressor);
} finally {
if (stream == null) {
CodecPool.returnCompressor(compressor);
} else {
stream.setTrackedCompressor(compressor);
}
}
return stream;
}
/**
* Create an input stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the input stream.
* @param conf The configuration to use if we need to create a new codec.
* @param in The input stream to wrap.
* @return The new input stream
* @throws IOException
*/
static CompressionInputStream createInputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, InputStream in)
throws IOException {
Decompressor decompressor = CodecPool.getDecompressor(codec);
CompressionInputStream stream = null;
try {
stream = codec.createInputStream(in, decompressor);
} finally {
if (stream == null) {
CodecPool.returnDecompressor(decompressor);
} else {
stream.setTrackedDecompressor(decompressor);
}
}
return stream;
}
}
}
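// Usage sketch (illustrative): the same borrow-and-return discipline as Util
// above, for code that manages the CodecPool by hand. Unlike Util, the caller
// here must return the compressor to the pool after closing the stream,
// because setTrackedCompressor() is package-private.
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;

class ManualPooling {
  static CompressionOutputStream open(CompressionCodec codec, Configuration conf,
      OutputStream out) throws IOException {
    Compressor compressor = CodecPool.getCompressor(codec, conf);
    try {
      return codec.createOutputStream(out, compressor);
    } catch (IOException | RuntimeException e) {
      // Creation failed: hand the compressor straight back to the pool.
      CodecPool.returnCompressor(compressor);
      throw e;
    }
  }
}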
| 5,872 | 33.145349 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
/**
* A factory that will find the correct codec for a given filename.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CompressionCodecFactory {
public static final Log LOG =
LogFactory.getLog(CompressionCodecFactory.class.getName());
private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
ServiceLoader.load(CompressionCodec.class);
/**
* A map from the reversed filename suffixes to the codecs.
* This is probably overkill, because the maps should be small, but it
* automatically supports finding the longest matching suffix.
*/
private SortedMap<String, CompressionCodec> codecs = null;
/**
* A map from the reversed filename suffixes to the codecs.
* This is probably overkill, because the maps should be small, but it
* automatically supports finding the longest matching suffix.
*/
private Map<String, CompressionCodec> codecsByName = null;
/**
* A map from class names to the codecs
*/
private HashMap<String, CompressionCodec> codecsByClassName = null;
private void addCodec(CompressionCodec codec) {
String suffix = codec.getDefaultExtension();
codecs.put(new StringBuilder(suffix).reverse().toString(), codec);
codecsByClassName.put(codec.getClass().getCanonicalName(), codec);
String codecName = codec.getClass().getSimpleName();
codecsByName.put(StringUtils.toLowerCase(codecName), codec);
if (codecName.endsWith("Codec")) {
codecName = codecName.substring(0, codecName.length() - "Codec".length());
codecsByName.put(StringUtils.toLowerCase(codecName), codec);
}
}
/**
* Print the extension map out as a string.
*/
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
Iterator<Map.Entry<String, CompressionCodec>> itr =
codecs.entrySet().iterator();
buf.append("{ ");
if (itr.hasNext()) {
Map.Entry<String, CompressionCodec> entry = itr.next();
buf.append(entry.getKey());
buf.append(": ");
buf.append(entry.getValue().getClass().getName());
while (itr.hasNext()) {
entry = itr.next();
buf.append(", ");
buf.append(entry.getKey());
buf.append(": ");
buf.append(entry.getValue().getClass().getName());
}
}
buf.append(" }");
return buf.toString();
}
/**
* Get the list of codecs discovered via a Java ServiceLoader, or
* listed in the configuration. Codecs specified in configuration come
* later in the returned list, and are considered to override those
* from the ServiceLoader.
* @param conf the configuration to look in
* @return a list of the {@link CompressionCodec} classes
*/
public static List<Class<? extends CompressionCodec>> getCodecClasses(
Configuration conf) {
List<Class<? extends CompressionCodec>> result
= new ArrayList<Class<? extends CompressionCodec>>();
// Add codec classes discovered via service loading
synchronized (CODEC_PROVIDERS) {
// CODEC_PROVIDERS is a lazy collection. Synchronize so it is
// thread-safe. See HADOOP-8406.
for (CompressionCodec codec : CODEC_PROVIDERS) {
result.add(codec.getClass());
}
}
// Add codec classes from configuration
String codecsString = conf.get(
CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY);
if (codecsString != null) {
StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
while (codecSplit.hasMoreElements()) {
String codecSubstring = codecSplit.nextToken().trim();
if (codecSubstring.length() != 0) {
try {
Class<?> cls = conf.getClassByName(codecSubstring);
if (!CompressionCodec.class.isAssignableFrom(cls)) {
throw new IllegalArgumentException("Class " + codecSubstring +
" is not a CompressionCodec");
}
result.add(cls.asSubclass(CompressionCodec.class));
} catch (ClassNotFoundException ex) {
throw new IllegalArgumentException("Compression codec " +
codecSubstring + " not found.",
ex);
}
}
}
}
return result;
}
/**
* Sets a list of codec classes in the configuration. In addition to any
* classes specified using this method, {@link CompressionCodec} classes on
* the classpath are discovered using a Java ServiceLoader.
* @param conf the configuration to modify
* @param classes the list of classes to set
*/
public static void setCodecClasses(Configuration conf,
List<Class> classes) {
StringBuilder buf = new StringBuilder();
Iterator<Class> itr = classes.iterator();
if (itr.hasNext()) {
Class cls = itr.next();
buf.append(cls.getName());
while(itr.hasNext()) {
buf.append(',');
buf.append(itr.next().getName());
}
}
conf.set(CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY, buf.toString());
}
/**
* Find the codecs specified in the config value io.compression.codecs
* and register them. Defaults to gzip and deflate.
*/
public CompressionCodecFactory(Configuration conf) {
codecs = new TreeMap<String, CompressionCodec>();
codecsByClassName = new HashMap<String, CompressionCodec>();
codecsByName = new HashMap<String, CompressionCodec>();
List<Class<? extends CompressionCodec>> codecClasses =
getCodecClasses(conf);
if (codecClasses == null || codecClasses.isEmpty()) {
addCodec(new GzipCodec());
addCodec(new DefaultCodec());
} else {
for (Class<? extends CompressionCodec> codecClass : codecClasses) {
addCodec(ReflectionUtils.newInstance(codecClass, conf));
}
}
}
/**
* Find the relevant compression codec for the given file based on its
* filename suffix.
* @param file the filename to check
* @return the codec object
*/
public CompressionCodec getCodec(Path file) {
CompressionCodec result = null;
if (codecs != null) {
String filename = file.getName();
String reversedFilename =
new StringBuilder(filename).reverse().toString();
SortedMap<String, CompressionCodec> subMap =
codecs.headMap(reversedFilename);
if (!subMap.isEmpty()) {
String potentialSuffix = subMap.lastKey();
if (reversedFilename.startsWith(potentialSuffix)) {
result = codecs.get(potentialSuffix);
}
}
}
return result;
}
/**
* Find the relevant compression codec for the codec's canonical class name.
* @param classname the canonical class name of the codec
* @return the codec object
*/
public CompressionCodec getCodecByClassName(String classname) {
if (codecsByClassName == null) {
return null;
}
return codecsByClassName.get(classname);
}
/**
* Find the relevant compression codec for the codec's canonical class name
* or by codec alias.
* <p/>
* Codec aliases are case insensitive.
* <p/>
* The codec alias is the short class name (without the package name).
* If the short class name ends with 'Codec', then there are two aliases for
* the codec: the complete short class name and the short class name without
* the 'Codec' ending. For example, for the 'GzipCodec' codec class name the
* aliases are 'gzip' and 'gzipcodec'.
*
* @param codecName the canonical class name of the codec
* @return the codec object
*/
public CompressionCodec getCodecByName(String codecName) {
if (codecsByClassName == null) {
return null;
}
CompressionCodec codec = getCodecByClassName(codecName);
if (codec == null) {
// trying to get the codec by name in case the name was specified
// instead of a class name
codec = codecsByName.get(StringUtils.toLowerCase(codecName));
}
return codec;
}
/**
* Find the relevant compression codec for the codec's canonical class name
* or by codec alias and returns its implementation class.
* <p/>
* Codec aliases are case insensitive.
* <p/>
* The codec alias is the short class name (without the package name).
* If the short class name ends with 'Codec', then there are two aliases for
* the codec: the complete short class name and the short class name without
* the 'Codec' ending. For example, for the 'GzipCodec' codec class name the
* aliases are 'gzip' and 'gzipcodec'.
*
* @param codecName the canonical class name of the codec
* @return the codec class
*/
public Class<? extends CompressionCodec> getCodecClassByName(
String codecName) {
CompressionCodec codec = getCodecByName(codecName);
if (codec == null) {
return null;
}
return codec.getClass();
}
/**
* Removes a suffix from a filename, if it has it.
* @param filename the filename to strip
* @param suffix the suffix to remove
* @return the shortened filename
*/
public static String removeSuffix(String filename, String suffix) {
if (filename.endsWith(suffix)) {
return filename.substring(0, filename.length() - suffix.length());
}
return filename;
}
/**
* A little test program.
* @param args
*/
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
CompressionCodecFactory factory = new CompressionCodecFactory(conf);
boolean encode = false;
for(int i=0; i < args.length; ++i) {
if ("-in".equals(args[i])) {
encode = true;
} else if ("-out".equals(args[i])) {
encode = false;
} else {
CompressionCodec codec = factory.getCodec(new Path(args[i]));
if (codec == null) {
System.out.println("Codec for " + args[i] + " not found.");
} else {
if (encode) {
CompressionOutputStream out = null;
java.io.InputStream in = null;
try {
out = codec.createOutputStream(
new java.io.FileOutputStream(args[i]));
byte[] buffer = new byte[100];
String inFilename = removeSuffix(args[i],
codec.getDefaultExtension());
in = new java.io.FileInputStream(inFilename);
int len = in.read(buffer);
while (len > 0) {
out.write(buffer, 0, len);
len = in.read(buffer);
}
} finally {
if(out != null) { out.close(); }
if(in != null) { in.close(); }
}
} else {
CompressionInputStream in = null;
try {
in = codec.createInputStream(
new java.io.FileInputStream(args[i]));
byte[] buffer = new byte[100];
int len = in.read(buffer);
while (len > 0) {
System.out.write(buffer, 0, len);
len = in.read(buffer);
}
} finally {
if(in != null) { in.close(); }
}
}
}
}
}
}
}
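// Usage sketch (illustrative): looking codecs up by filename suffix and by
// alias. The path "logs/app.log.gz" is made up; "gzip" and "gzipcodec" are
// both aliases for GzipCodec as described in getCodecByName().
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

class CodecLookupExample {
  public static void main(String[] args) {
    CompressionCodecFactory factory =
        new CompressionCodecFactory(new Configuration());

    CompressionCodec bySuffix = factory.getCodec(new Path("logs/app.log.gz"));
    System.out.println(bySuffix == null
        ? "no codec" : bySuffix.getClass().getName());

    CompressionCodec byAlias = factory.getCodecByName("gzip");
    System.out.println(byAlias == null
        ? "no codec" : byAlias.getDefaultExtension());
  }
}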
| 12,644 | 35.232092 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Compressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* Specification of a stream-based 'compressor' which can be
* plugged into a {@link CompressionOutputStream} to compress data.
* This is modelled after {@link java.util.zip.Deflater}
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Compressor {
/**
* Sets input data for compression.
* This should be called whenever #needsInput() returns
* <code>true</code> indicating that more input data is required.
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
public void setInput(byte[] b, int off, int len);
/**
* Returns true if the input data buffer is empty and
* #setInput() should be called to provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
* #setInput() should be called in order to provide more input.
*/
public boolean needsInput();
/**
* Sets preset dictionary for compression. A preset dictionary
* is used when the history buffer can be predetermined.
*
* @param b Dictionary data bytes
* @param off Start offset
* @param len Length
*/
public void setDictionary(byte[] b, int off, int len);
/**
* Return number of uncompressed bytes input so far.
*/
public long getBytesRead();
/**
* Return number of compressed bytes output so far.
*/
public long getBytesWritten();
/**
* When called, indicates that compression should end
* with the current contents of the input buffer.
*/
public void finish();
/**
* Returns true if the end of the compressed
* data output stream has been reached.
* @return <code>true</code> if the end of the compressed
* data output stream has been reached.
*/
public boolean finished();
/**
* Fills specified buffer with compressed data. Returns actual number
* of bytes of compressed data. A return value of 0 indicates that
* needsInput() should be called in order to determine if more input
* data is required.
*
* @param b Buffer for the compressed data
* @param off Start offset of the data
* @param len Size of the buffer
* @return The actual number of bytes of compressed data.
*/
public int compress(byte[] b, int off, int len) throws IOException;
/**
* Resets compressor so that a new set of input data can be processed.
*/
public void reset();
/**
* Closes the compressor and discards any unprocessed input.
*/
public void end();
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration
*
* @param conf Configuration from which new setting are fetched
*/
public void reinit(Configuration conf);
}
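// Usage sketch (illustrative): driving the Compressor interface directly for a
// single input buffer, the same setInput/finish/compress loop that
// CompressorStream uses. Most callers should prefer a CompressionCodec output
// stream instead of calling the compressor by hand.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.compress.Compressor;

class CompressorLoop {
  static byte[] compressAll(Compressor compressor, byte[] input) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buf = new byte[64 * 1024];
    compressor.setInput(input, 0, input.length);
    compressor.finish();                        // no further input will follow
    while (!compressor.finished()) {
      int n = compressor.compress(buf, 0, buf.length);
      out.write(buf, 0, n);
    }
    return out.toByteArray();
  }
}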
| 3,767 | 30.140496 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressorStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CompressorStream extends CompressionOutputStream {
protected Compressor compressor;
protected byte[] buffer;
protected boolean closed = false;
public CompressorStream(OutputStream out, Compressor compressor, int bufferSize) {
super(out);
if (out == null || compressor == null) {
throw new NullPointerException();
} else if (bufferSize <= 0) {
throw new IllegalArgumentException("Illegal bufferSize");
}
this.compressor = compressor;
buffer = new byte[bufferSize];
}
public CompressorStream(OutputStream out, Compressor compressor) {
this(out, compressor, 512);
}
/**
* Allow derived classes to directly set the underlying stream.
*
* @param out Underlying output stream.
*/
protected CompressorStream(OutputStream out) {
super(out);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
// Sanity checks
if (compressor.finished()) {
throw new IOException("write beyond end of stream");
}
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
compressor.setInput(b, off, len);
while (!compressor.needsInput()) {
compress();
}
}
protected void compress() throws IOException {
int len = compressor.compress(buffer, 0, buffer.length);
if (len > 0) {
out.write(buffer, 0, len);
}
}
@Override
public void finish() throws IOException {
if (!compressor.finished()) {
compressor.finish();
while (!compressor.finished()) {
compress();
}
}
}
@Override
public void resetState() throws IOException {
compressor.reset();
}
@Override
public void close() throws IOException {
if (!closed) {
try {
finish();
}
finally {
out.close();
closed = true;
}
}
}
private byte[] oneByte = new byte[1];
@Override
public void write(int b) throws IOException {
oneByte[0] = (byte)(b & 0xff);
write(oneByte, 0, oneByte.length);
}
}
| 3,324 | 26.03252 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A compression output stream.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class CompressionOutputStream extends OutputStream {
/**
* The output stream to be compressed.
*/
protected final OutputStream out;
/**
* If non-null, this is the Compressor object that we should call
* CodecPool#returnCompressor on when this stream is closed.
*/
private Compressor trackedCompressor;
/**
* Create a compression output stream that writes
* the compressed bytes to the given stream.
* @param out the stream to write the compressed bytes to
*/
protected CompressionOutputStream(OutputStream out) {
this.out = out;
}
void setTrackedCompressor(Compressor compressor) {
trackedCompressor = compressor;
}
@Override
public void close() throws IOException {
finish();
out.close();
if (trackedCompressor != null) {
CodecPool.returnCompressor(trackedCompressor);
trackedCompressor = null;
}
}
@Override
public void flush() throws IOException {
out.flush();
}
/**
* Write compressed bytes to the stream.
* Made abstract to prevent leakage to underlying stream.
*/
@Override
public abstract void write(byte[] b, int off, int len) throws IOException;
/**
* Finishes writing compressed data to the output stream
* without closing the underlying stream.
*/
public abstract void finish() throws IOException;
/**
* Reset the compression to the initial state.
* Does not reset the underlying stream.
*/
public abstract void resetState() throws IOException;
}
| 2,606 | 27.336957 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.snappy;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* A {@link Decompressor} based on the snappy compression algorithm.
* http://code.google.com/p/snappy/
*/
public class SnappyDecompressor implements Decompressor {
private static final Log LOG =
LogFactory.getLog(SnappyDecompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
private Buffer compressedDirectBuf = null;
private int compressedDirectBufLen;
private Buffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private boolean finished;
private static boolean nativeSnappyLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded() &&
NativeCodeLoader.buildSupportsSnappy()) {
try {
initIDs();
nativeSnappyLoaded = true;
} catch (Throwable t) {
LOG.error("failed to load SnappyDecompressor", t);
}
}
}
public static boolean isNativeCodeLoaded() {
return nativeSnappyLoaded;
}
/**
* Creates a new decompressor.
*
* @param directBufferSize size of the direct buffer to be used.
*/
public SnappyDecompressor(int directBufferSize) {
this.directBufferSize = directBufferSize;
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
/**
* Creates a new decompressor with the default buffer size.
*/
public SnappyDecompressor() {
this(DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Sets input data for decompression.
* This should be called if and only if {@link #needsInput()} returns
* <code>true</code> indicating that more input data is required.
* (Both native and non-native versions of various Decompressors require
* that the data passed in via <code>b[]</code> remain unmodified until
* the caller is explicitly notified--via {@link #needsInput()}--that the
* buffer may be safely modified. With this requirement, an extra
* buffer-copy can be avoided.)
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
setInputFromSavedData();
// Reinitialize snappy's output direct-buffer
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
/**
* Input that does not fit in the compressed direct buffer is kept in the
* user buffer and loaded by this method as the previously buffered
* compressed data are consumed.
*/
void setInputFromSavedData() {
compressedDirectBufLen = Math.min(userBufLen, directBufferSize);
// Reinitialize snappy's input direct buffer
compressedDirectBuf.rewind();
((ByteBuffer) compressedDirectBuf).put(userBuf, userBufOff,
compressedDirectBufLen);
// Note how much data is being fed to snappy
userBufOff += compressedDirectBufLen;
userBufLen -= compressedDirectBufLen;
}
/**
* Does nothing.
*/
@Override
public void setDictionary(byte[] b, int off, int len) {
// do nothing
}
/**
* Returns true if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called to
* provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called in
* order to provide more input.
*/
@Override
public boolean needsInput() {
// Consume remaining compressed data?
if (uncompressedDirectBuf.remaining() > 0) {
return false;
}
// Check if snappy has consumed all input
if (compressedDirectBufLen <= 0) {
// Check if we have consumed all user-input
if (userBufLen <= 0) {
return true;
} else {
setInputFromSavedData();
}
}
return false;
}
/**
* Returns <code>false</code>.
*
* @return <code>false</code>.
*/
@Override
public boolean needsDictionary() {
return false;
}
/**
* Returns true if the end of the decompressed
* data output stream has been reached.
*
* @return <code>true</code> if the end of the decompressed
* data output stream has been reached.
*/
@Override
public boolean finished() {
return (finished && uncompressedDirectBuf.remaining() == 0);
}
/**
* Fills specified buffer with uncompressed data. Returns actual number
* of bytes of uncompressed data. A return value of 0 indicates that
* {@link #needsInput()} should be called in order to determine if more
* input data is required.
*
   * @param b Buffer for the uncompressed data
   * @param off Start offset of the data
   * @param len Size of the buffer
   * @return The actual number of bytes of uncompressed data.
   * @throws IOException if decompression fails
*/
@Override
public int decompress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
int n = 0;
// Check if there is uncompressed data
n = uncompressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) uncompressedDirectBuf).get(b, off, n);
return n;
}
if (compressedDirectBufLen > 0) {
      // Re-initialize snappy's output direct buffer
uncompressedDirectBuf.rewind();
uncompressedDirectBuf.limit(directBufferSize);
// Decompress data
n = decompressBytesDirect();
uncompressedDirectBuf.limit(n);
if (userBufLen <= 0) {
finished = true;
}
      // Get at most 'len' bytes
n = Math.min(n, len);
((ByteBuffer) uncompressedDirectBuf).get(b, off, n);
}
return n;
}
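  /*
   * A minimal sketch of the drive loop implied by the javadoc above, assuming
   * the native Snappy library is loaded and "compressed" holds a single
   * Snappy block whose compressed and uncompressed sizes both fit in the
   * default 64 KB direct buffers (names are illustrative):
   *
   *   SnappyDecompressor decompressor = new SnappyDecompressor();
   *   byte[] out = new byte[64 * 1024];
   *   decompressor.setInput(compressed, 0, compressed.length);
   *   while (!decompressor.finished()) {
   *     int n = decompressor.decompress(out, 0, out.length);
   *     if (n == 0 && decompressor.needsInput()) {
   *       break;                     // nothing more to offer in this sketch
   *     }
   *     // consume out[0..n)
   *   }
   */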
/**
* Returns <code>0</code>.
*
* @return <code>0</code>.
*/
@Override
public int getRemaining() {
// Never use this function in BlockDecompressorStream.
return 0;
}
@Override
public void reset() {
finished = false;
compressedDirectBufLen = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = userBufLen = 0;
}
/**
* Resets decompressor and input and output buffers so that a new set of
* input data can be processed.
*/
@Override
public void end() {
// do nothing
}
private native static void initIDs();
private native int decompressBytesDirect();
int decompressDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
assert (this instanceof SnappyDirectDecompressor);
ByteBuffer presliced = dst;
if (dst.position() > 0) {
presliced = dst;
dst = dst.slice();
}
Buffer originalCompressed = compressedDirectBuf;
Buffer originalUncompressed = uncompressedDirectBuf;
int originalBufferSize = directBufferSize;
compressedDirectBuf = src.slice();
compressedDirectBufLen = src.remaining();
uncompressedDirectBuf = dst;
directBufferSize = dst.remaining();
int n = 0;
try {
n = decompressBytesDirect();
presliced.position(presliced.position() + n);
// SNAPPY always consumes the whole buffer or throws an exception
src.position(src.limit());
finished = true;
} finally {
compressedDirectBuf = originalCompressed;
uncompressedDirectBuf = originalUncompressed;
compressedDirectBufLen = 0;
directBufferSize = originalBufferSize;
}
return n;
}
public static class SnappyDirectDecompressor extends SnappyDecompressor implements
DirectDecompressor {
@Override
public boolean finished() {
return (endOfInput && super.finished());
}
@Override
public void reset() {
super.reset();
endOfInput = true;
}
private boolean endOfInput;
@Override
public void decompress(ByteBuffer src, ByteBuffer dst)
throws IOException {
assert dst.isDirect() : "dst.isDirect()";
assert src.isDirect() : "src.isDirect()";
assert dst.remaining() > 0 : "dst.remaining() > 0";
this.decompressDirect(src, dst);
endOfInput = !src.hasRemaining();
}
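    /*
     * A minimal sketch of driving this direct decompressor, assuming "src"
     * holds one complete Snappy block and "dst" is a direct buffer large
     * enough for the entire uncompressed result (both names and sizes are
     * illustrative):
     *
     *   SnappyDirectDecompressor d = new SnappyDirectDecompressor();
     *   ByteBuffer src = ByteBuffer.allocateDirect(compressed.length);
     *   ByteBuffer dst = ByteBuffer.allocateDirect(maxUncompressedLen);
     *   src.put(compressed);
     *   src.flip();
     *   d.decompress(src, dst);      // advances src and dst positions
     *   dst.flip();                  // dst now holds the uncompressed bytes
     */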
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
@Override
public int decompress(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
}
}
| 10,134 | 27.629944 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.snappy;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,028 | 41.875 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.snappy;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* A {@link Compressor} based on the snappy compression algorithm.
* http://code.google.com/p/snappy/
*/
public class SnappyCompressor implements Compressor {
private static final Log LOG =
LogFactory.getLog(SnappyCompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
private Buffer compressedDirectBuf = null;
private int uncompressedDirectBufLen;
private Buffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private boolean finish, finished;
private long bytesRead = 0L;
private long bytesWritten = 0L;
private static boolean nativeSnappyLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded() &&
NativeCodeLoader.buildSupportsSnappy()) {
try {
initIDs();
nativeSnappyLoaded = true;
} catch (Throwable t) {
LOG.error("failed to load SnappyCompressor", t);
}
}
}
public static boolean isNativeCodeLoaded() {
return nativeSnappyLoaded;
}
/**
* Creates a new compressor.
*
* @param directBufferSize size of the direct buffer to be used.
*/
public SnappyCompressor(int directBufferSize) {
this.directBufferSize = directBufferSize;
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
/**
* Creates a new compressor with the default buffer size.
*/
public SnappyCompressor() {
this(DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Sets input data for compression.
* This should be called whenever #needsInput() returns
* <code>true</code> indicating that more input data is required.
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
finished = false;
if (len > uncompressedDirectBuf.remaining()) {
// save data; now !needsInput
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
} else {
((ByteBuffer) uncompressedDirectBuf).put(b, off, len);
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
bytesRead += len;
}
/**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
void setInputFromSavedData() {
if (0 >= userBufLen) {
return;
}
finished = false;
uncompressedDirectBufLen = Math.min(userBufLen, directBufferSize);
((ByteBuffer) uncompressedDirectBuf).put(userBuf, userBufOff,
uncompressedDirectBufLen);
// Note how much data is being fed to snappy
userBufOff += uncompressedDirectBufLen;
userBufLen -= uncompressedDirectBufLen;
}
/**
* Does nothing.
*/
@Override
public void setDictionary(byte[] b, int off, int len) {
// do nothing
}
/**
* Returns true if the input data buffer is empty and
* #setInput() should be called to provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
* #setInput() should be called in order to provide more input.
*/
@Override
public boolean needsInput() {
return !(compressedDirectBuf.remaining() > 0
|| uncompressedDirectBuf.remaining() == 0 || userBufLen > 0);
}
/**
* When called, indicates that compression should end
* with the current contents of the input buffer.
*/
@Override
public void finish() {
finish = true;
}
/**
* Returns true if the end of the compressed
* data output stream has been reached.
*
* @return <code>true</code> if the end of the compressed
* data output stream has been reached.
*/
@Override
public boolean finished() {
// Check if all uncompressed data has been consumed
return (finish && finished && compressedDirectBuf.remaining() == 0);
}
/**
* Fills specified buffer with compressed data. Returns actual number
* of bytes of compressed data. A return value of 0 indicates that
* needsInput() should be called in order to determine if more input
* data is required.
*
* @param b Buffer for the compressed data
* @param off Start offset of the data
* @param len Size of the buffer
* @return The actual number of bytes of compressed data.
*/
@Override
public int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) compressedDirectBuf).get(b, off, n);
bytesWritten += n;
return n;
}
    // Re-initialize snappy's output direct buffer
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
if (0 == uncompressedDirectBuf.position()) {
// No compressed data, so we should have !needsInput or !finished
setInputFromSavedData();
if (0 == uncompressedDirectBuf.position()) {
// Called without data; write nothing
finished = true;
return 0;
}
}
// Compress data
n = compressBytesDirect();
compressedDirectBuf.limit(n);
uncompressedDirectBuf.clear(); // snappy consumes all buffer input
    // Set 'finished' if snappy has consumed all user-data
if (0 == userBufLen) {
finished = true;
}
    // Get at most 'len' bytes
n = Math.min(n, len);
bytesWritten += n;
((ByteBuffer) compressedDirectBuf).get(b, off, n);
return n;
}
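  /*
   * A minimal sketch of the compression loop implied by the javadoc above,
   * assuming the native Snappy library is loaded and "data" fits in memory
   * (names and the 64 KB output buffer are arbitrary):
   *
   *   SnappyCompressor compressor = new SnappyCompressor();
   *   byte[] out = new byte[64 * 1024];
   *   compressor.setInput(data, 0, data.length);
   *   compressor.finish();
   *   while (!compressor.finished()) {
   *     int n = compressor.compress(out, 0, out.length);
   *     // write out[0..n) to the destination
   *   }
   */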
/**
* Resets compressor so that a new set of input data can be processed.
*/
@Override
public void reset() {
finish = false;
finished = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufLen = 0;
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
userBufOff = userBufLen = 0;
bytesRead = bytesWritten = 0L;
}
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration
*
   * @param conf Configuration from which new settings are fetched
*/
@Override
public void reinit(Configuration conf) {
reset();
}
/**
* Return number of bytes given to this compressor since last reset.
*/
@Override
public long getBytesRead() {
return bytesRead;
}
/**
* Return number of bytes consumed by callers of compress since last reset.
*/
@Override
public long getBytesWritten() {
return bytesWritten;
}
/**
* Closes the compressor and discards any unprocessed input.
*/
@Override
public void end() {
}
private native static void initIDs();
private native int compressBytesDirect();
public native static String getLibraryName();
}
| 8,415 | 27.053333 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.zlib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,026 | 41.791667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInGzipDecompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import java.io.IOException;
import java.util.zip.Checksum;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DoNotPool;
import org.apache.hadoop.util.DataChecksum;
/**
* A {@link Decompressor} based on the popular gzip compressed file format.
* http://www.gzip.org/
*
*/
@DoNotPool
public class BuiltInGzipDecompressor implements Decompressor {
private static final int GZIP_MAGIC_ID = 0x8b1f; // if read as LE short int
private static final int GZIP_DEFLATE_METHOD = 8;
private static final int GZIP_FLAGBIT_HEADER_CRC = 0x02;
private static final int GZIP_FLAGBIT_EXTRA_FIELD = 0x04;
private static final int GZIP_FLAGBIT_FILENAME = 0x08;
private static final int GZIP_FLAGBIT_COMMENT = 0x10;
private static final int GZIP_FLAGBITS_RESERVED = 0xe0;
// 'true' (nowrap) => Inflater will handle raw deflate stream only
private Inflater inflater = new Inflater(true);
private byte[] userBuf = null;
private int userBufOff = 0;
private int userBufLen = 0;
private byte[] localBuf = new byte[256];
private int localBufOff = 0;
private int headerBytesRead = 0;
private int trailerBytesRead = 0;
private int numExtraFieldBytesRemaining = -1;
private Checksum crc = DataChecksum.newCrc32();
private boolean hasExtraField = false;
private boolean hasFilename = false;
private boolean hasComment = false;
private boolean hasHeaderCRC = false;
private GzipStateLabel state;
/**
* The current state of the gzip decoder, external to the Inflater context.
* (Technically, the private variables localBuf through hasHeaderCRC are
* also part of the state, so this enum is merely the label for it.)
*/
private static enum GzipStateLabel {
/**
* Immediately prior to or (strictly) within the 10-byte basic gzip header.
*/
HEADER_BASIC,
/**
* Immediately prior to or within the optional "extra field."
*/
HEADER_EXTRA_FIELD,
/**
* Immediately prior to or within the optional filename field.
*/
HEADER_FILENAME,
/**
* Immediately prior to or within the optional comment field.
*/
HEADER_COMMENT,
/**
* Immediately prior to or within the optional 2-byte header CRC value.
*/
HEADER_CRC,
/**
* Immediately prior to or within the main compressed (deflate) data stream.
*/
DEFLATE_STREAM,
/**
* Immediately prior to or (strictly) within the 4-byte uncompressed CRC.
*/
TRAILER_CRC,
/**
* Immediately prior to or (strictly) within the 4-byte uncompressed size.
*/
TRAILER_SIZE,
/**
* Immediately after the trailer (and potentially prior to the next gzip
* member/substream header), without reset() having been called.
*/
FINISHED;
}
/**
* Creates a new (pure Java) gzip decompressor.
*/
public BuiltInGzipDecompressor() {
state = GzipStateLabel.HEADER_BASIC;
crc.reset();
// FIXME? Inflater docs say: 'it is also necessary to provide an extra
// "dummy" byte as input. This is required by the ZLIB native
// library in order to support certain optimizations.' However,
// this does not appear to be true, and in any case, it's not
// entirely clear where the byte should go or what its value
// should be. Perhaps it suffices to have some deflated bytes
// in the first buffer load? (But how else would one do it?)
}
@Override
public synchronized boolean needsInput() {
if (state == GzipStateLabel.DEFLATE_STREAM) { // most common case
return inflater.needsInput();
}
// see userBufLen comment at top of decompress(); currently no need to
// verify userBufLen <= 0
return (state != GzipStateLabel.FINISHED);
}
/** {@inheritDoc} */
/*
* In our case, the input data includes both gzip header/trailer bytes (which
* we handle in executeState()) and deflate-stream bytes (which we hand off
* to Inflater).
*
* NOTE: This code assumes the data passed in via b[] remains unmodified
* until _we_ signal that it's safe to modify it (via needsInput()).
* The alternative would require an additional buffer-copy even for
* the bulk deflate stream, which is a performance hit we don't want
* to absorb. (Decompressor now documents this requirement.)
*/
@Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
userBuf = b;
userBufOff = off;
userBufLen = len; // note: might be zero
}
/**
* Decompress the data (gzip header, deflate stream, gzip trailer) in the
* provided buffer.
*
* @return the number of decompressed bytes placed into b
*/
/* From the caller's perspective, this is where the state machine lives.
* The code is written such that we never return from decompress() with
* data remaining in userBuf unless we're in FINISHED state and there was
* data beyond the current gzip member (e.g., we're within a concatenated
* gzip stream). If this ever changes, {@link #needsInput()} will also
* need to be modified (i.e., uncomment the userBufLen condition).
*
* The actual deflate-stream processing (decompression) is handled by
* Java's Inflater class. Unlike the gzip header/trailer code (execute*
* methods below), the deflate stream is never copied; Inflater operates
* directly on the user's buffer.
*/
@Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
int numAvailBytes = 0;
if (state != GzipStateLabel.DEFLATE_STREAM) {
executeHeaderState();
if (userBufLen <= 0) {
return numAvailBytes;
}
}
// "executeDeflateStreamState()"
if (state == GzipStateLabel.DEFLATE_STREAM) {
// hand off user data (or what's left of it) to Inflater--but note that
// Inflater may not have consumed all of previous bufferload (e.g., if
// data highly compressed or output buffer very small), in which case
// userBufLen will be zero
if (userBufLen > 0) {
inflater.setInput(userBuf, userBufOff, userBufLen);
userBufOff += userBufLen;
userBufLen = 0;
}
// now decompress it into b[]
try {
numAvailBytes = inflater.inflate(b, off, len);
} catch (DataFormatException dfe) {
throw new IOException(dfe.getMessage());
}
crc.update(b, off, numAvailBytes); // CRC-32 is on _uncompressed_ data
if (inflater.finished()) {
state = GzipStateLabel.TRAILER_CRC;
int bytesRemaining = inflater.getRemaining();
assert (bytesRemaining >= 0) :
"logic error: Inflater finished; byte-count is inconsistent";
// could save a copy of userBufLen at call to inflater.setInput() and
// verify that bytesRemaining <= origUserBufLen, but would have to
// be a (class) member variable...seems excessive for a sanity check
userBufOff -= bytesRemaining;
userBufLen = bytesRemaining; // or "+=", but guaranteed 0 coming in
} else {
return numAvailBytes; // minor optimization
}
}
executeTrailerState();
return numAvailBytes;
}
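  /*
   * A minimal sketch of a read loop for a single gzip member, following the
   * state machine described above ("in" is an illustrative InputStream over
   * the raw .gz bytes; buffer sizes are arbitrary). Concatenated members
   * would additionally require reset() plus re-feeding the getRemaining()
   * bytes once finished() turns true:
   *
   *   BuiltInGzipDecompressor gunzip = new BuiltInGzipDecompressor();
   *   byte[] buf = new byte[8 * 1024];
   *   byte[] out = new byte[64 * 1024];
   *   while (!gunzip.finished()) {
   *     if (gunzip.needsInput()) {
   *       int len = in.read(buf);
   *       if (len < 0) {
   *         break;                   // truncated stream
   *       }
   *       gunzip.setInput(buf, 0, len);
   *     }
   *     int n = gunzip.decompress(out, 0, out.length);
   *     // consume out[0..n)
   *   }
   */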
/**
* Parse the gzip header (assuming we're in the appropriate state).
* In order to deal with degenerate cases (e.g., user buffer is one byte
* long), we copy (some) header bytes to another buffer. (Filename,
* comment, and extra-field bytes are simply skipped.)</p>
*
* See http://www.ietf.org/rfc/rfc1952.txt for the gzip spec. Note that
* no version of gzip to date (at least through 1.4.0, 2010-01-20) supports
* the FHCRC header-CRC16 flagbit; instead, the implementation treats it
* as a multi-file continuation flag (which it also doesn't support). :-(
* Sun's JDK v6 (1.6) supports the header CRC, however, and so do we.
*/
private void executeHeaderState() throws IOException {
// this can happen because DecompressorStream's decompress() is written
// to call decompress() first, setInput() second:
if (userBufLen <= 0) {
return;
}
// "basic"/required header: somewhere in first 10 bytes
if (state == GzipStateLabel.HEADER_BASIC) {
int n = Math.min(userBufLen, 10-localBufOff); // (or 10-headerBytesRead)
checkAndCopyBytesToLocal(n); // modifies userBufLen, etc.
if (localBufOff >= 10) { // should be strictly ==
processBasicHeader(); // sig, compression method, flagbits
localBufOff = 0; // no further need for basic header
state = GzipStateLabel.HEADER_EXTRA_FIELD;
}
}
if (userBufLen <= 0) {
return;
}
// optional header stuff (extra field, filename, comment, header CRC)
if (state == GzipStateLabel.HEADER_EXTRA_FIELD) {
if (hasExtraField) {
// 2 substates: waiting for 2 bytes => get numExtraFieldBytesRemaining,
// or already have 2 bytes & waiting to finish skipping specified length
if (numExtraFieldBytesRemaining < 0) {
int n = Math.min(userBufLen, 2-localBufOff);
checkAndCopyBytesToLocal(n);
if (localBufOff >= 2) {
numExtraFieldBytesRemaining = readUShortLE(localBuf, 0);
localBufOff = 0;
}
}
if (numExtraFieldBytesRemaining > 0 && userBufLen > 0) {
int n = Math.min(userBufLen, numExtraFieldBytesRemaining);
checkAndSkipBytes(n); // modifies userBufLen, etc.
numExtraFieldBytesRemaining -= n;
}
if (numExtraFieldBytesRemaining == 0) {
state = GzipStateLabel.HEADER_FILENAME;
}
} else {
state = GzipStateLabel.HEADER_FILENAME;
}
}
if (userBufLen <= 0) {
return;
}
if (state == GzipStateLabel.HEADER_FILENAME) {
if (hasFilename) {
boolean doneWithFilename = checkAndSkipBytesUntilNull();
if (!doneWithFilename) {
return; // exit early: used up entire buffer without hitting NULL
}
}
state = GzipStateLabel.HEADER_COMMENT;
}
if (userBufLen <= 0) {
return;
}
if (state == GzipStateLabel.HEADER_COMMENT) {
if (hasComment) {
boolean doneWithComment = checkAndSkipBytesUntilNull();
if (!doneWithComment) {
return; // exit early: used up entire buffer
}
}
state = GzipStateLabel.HEADER_CRC;
}
if (userBufLen <= 0) {
return;
}
if (state == GzipStateLabel.HEADER_CRC) {
if (hasHeaderCRC) {
assert (localBufOff < 2);
int n = Math.min(userBufLen, 2-localBufOff);
copyBytesToLocal(n);
if (localBufOff >= 2) {
long headerCRC = readUShortLE(localBuf, 0);
if (headerCRC != (crc.getValue() & 0xffff)) {
throw new IOException("gzip header CRC failure");
}
localBufOff = 0;
crc.reset();
state = GzipStateLabel.DEFLATE_STREAM;
}
} else {
crc.reset(); // will reuse for CRC-32 of uncompressed data
state = GzipStateLabel.DEFLATE_STREAM; // switching to Inflater now
}
}
}
/**
* Parse the gzip trailer (assuming we're in the appropriate state).
* In order to deal with degenerate cases (e.g., user buffer is one byte
* long), we copy trailer bytes (all 8 of 'em) to a local buffer.</p>
*
* See http://www.ietf.org/rfc/rfc1952.txt for the gzip spec.
*/
private void executeTrailerState() throws IOException {
if (userBufLen <= 0) {
return;
}
// verify that the CRC-32 of the decompressed stream matches the value
// stored in the gzip trailer
if (state == GzipStateLabel.TRAILER_CRC) {
// localBuf was empty before we handed off to Inflater, so we handle this
// exactly like header fields
assert (localBufOff < 4); // initially 0, but may need multiple calls
int n = Math.min(userBufLen, 4-localBufOff);
copyBytesToLocal(n);
if (localBufOff >= 4) {
long streamCRC = readUIntLE(localBuf, 0);
if (streamCRC != crc.getValue()) {
throw new IOException("gzip stream CRC failure");
}
localBufOff = 0;
crc.reset();
state = GzipStateLabel.TRAILER_SIZE;
}
}
if (userBufLen <= 0) {
return;
}
// verify that the mod-2^32 decompressed stream size matches the value
// stored in the gzip trailer
if (state == GzipStateLabel.TRAILER_SIZE) {
assert (localBufOff < 4); // initially 0, but may need multiple calls
int n = Math.min(userBufLen, 4-localBufOff);
copyBytesToLocal(n); // modifies userBufLen, etc.
if (localBufOff >= 4) { // should be strictly ==
long inputSize = readUIntLE(localBuf, 0);
if (inputSize != (inflater.getBytesWritten() & 0xffffffffL)) {
throw new IOException(
"stored gzip size doesn't match decompressed size");
}
localBufOff = 0;
state = GzipStateLabel.FINISHED;
}
}
if (state == GzipStateLabel.FINISHED) {
return;
}
}
/**
* Returns the total number of compressed bytes input so far, including
* gzip header/trailer bytes.</p>
*
* @return the total (non-negative) number of compressed bytes read so far
*/
public synchronized long getBytesRead() {
return headerBytesRead + inflater.getBytesRead() + trailerBytesRead;
}
/**
* Returns the number of bytes remaining in the input buffer; normally
* called when finished() is true to determine amount of post-gzip-stream
* data. Note that, other than the finished state with concatenated data
* after the end of the current gzip stream, this will never return a
* non-zero value unless called after {@link #setInput(byte[] b, int off,
* int len)} and before {@link #decompress(byte[] b, int off, int len)}.
* (That is, after {@link #decompress(byte[] b, int off, int len)} it
* always returns zero, except in finished state with concatenated data.)</p>
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@Override
public synchronized int getRemaining() {
return userBufLen;
}
@Override
public synchronized boolean needsDictionary() {
return inflater.needsDictionary();
}
@Override
public synchronized void setDictionary(byte[] b, int off, int len) {
inflater.setDictionary(b, off, len);
}
/**
* Returns true if the end of the gzip substream (single "member") has been
* reached.</p>
*/
@Override
public synchronized boolean finished() {
return (state == GzipStateLabel.FINISHED);
}
/**
* Resets everything, including the input buffer, regardless of whether the
* current gzip substream is finished.</p>
*/
@Override
public synchronized void reset() {
// could optionally emit INFO message if state != GzipStateLabel.FINISHED
inflater.reset();
state = GzipStateLabel.HEADER_BASIC;
crc.reset();
userBufOff = userBufLen = 0;
localBufOff = 0;
headerBytesRead = 0;
trailerBytesRead = 0;
numExtraFieldBytesRemaining = -1;
hasExtraField = false;
hasFilename = false;
hasComment = false;
hasHeaderCRC = false;
}
@Override
public synchronized void end() {
inflater.end();
}
/**
* Check ID bytes (throw if necessary), compression method (throw if not 8),
* and flag bits (set hasExtraField, hasFilename, hasComment, hasHeaderCRC).
* Ignore MTIME, XFL, OS. Caller must ensure we have at least 10 bytes (at
* the start of localBuf).</p>
*/
/*
* Flag bits (remainder are reserved and must be zero):
* bit 0 FTEXT
* bit 1 FHCRC (never implemented in gzip, at least through version
* 1.4.0; instead interpreted as "continuation of multi-
* part gzip file," which is unsupported through 1.4.0)
* bit 2 FEXTRA
* bit 3 FNAME
* bit 4 FCOMMENT
* [bit 5 encrypted]
*/
private void processBasicHeader() throws IOException {
if (readUShortLE(localBuf, 0) != GZIP_MAGIC_ID) {
throw new IOException("not a gzip file");
}
if (readUByte(localBuf, 2) != GZIP_DEFLATE_METHOD) {
throw new IOException("gzip data not compressed with deflate method");
}
int flg = readUByte(localBuf, 3);
if ((flg & GZIP_FLAGBITS_RESERVED) != 0) {
throw new IOException("unknown gzip format (reserved flagbits set)");
}
hasExtraField = ((flg & GZIP_FLAGBIT_EXTRA_FIELD) != 0);
hasFilename = ((flg & GZIP_FLAGBIT_FILENAME) != 0);
hasComment = ((flg & GZIP_FLAGBIT_COMMENT) != 0);
hasHeaderCRC = ((flg & GZIP_FLAGBIT_HEADER_CRC) != 0);
}
private void checkAndCopyBytesToLocal(int len) {
System.arraycopy(userBuf, userBufOff, localBuf, localBufOff, len);
localBufOff += len;
// alternatively, could call checkAndSkipBytes(len) for rest...
crc.update(userBuf, userBufOff, len);
userBufOff += len;
userBufLen -= len;
headerBytesRead += len;
}
private void checkAndSkipBytes(int len) {
crc.update(userBuf, userBufOff, len);
userBufOff += len;
userBufLen -= len;
headerBytesRead += len;
}
// returns true if saw NULL, false if ran out of buffer first; called _only_
// during gzip-header processing (not trailer)
// (caller can check before/after state of userBufLen to compute num bytes)
private boolean checkAndSkipBytesUntilNull() {
boolean hitNull = false;
if (userBufLen > 0) {
do {
hitNull = (userBuf[userBufOff] == 0);
crc.update(userBuf[userBufOff]);
++userBufOff;
--userBufLen;
++headerBytesRead;
} while (userBufLen > 0 && !hitNull);
}
return hitNull;
}
// this one doesn't update the CRC and does support trailer processing but
// otherwise is same as its "checkAnd" sibling
private void copyBytesToLocal(int len) {
System.arraycopy(userBuf, userBufOff, localBuf, localBufOff, len);
localBufOff += len;
userBufOff += len;
userBufLen -= len;
if (state == GzipStateLabel.TRAILER_CRC ||
state == GzipStateLabel.TRAILER_SIZE) {
trailerBytesRead += len;
} else {
headerBytesRead += len;
}
}
private int readUByte(byte[] b, int off) {
return ((int)b[off] & 0xff);
}
// caller is responsible for not overrunning buffer
private int readUShortLE(byte[] b, int off) {
return ((((b[off+1] & 0xff) << 8) |
((b[off] & 0xff) )) & 0xffff);
}
// caller is responsible for not overrunning buffer
private long readUIntLE(byte[] b, int off) {
return ((((long)(b[off+3] & 0xff) << 24) |
((long)(b[off+2] & 0xff) << 16) |
((long)(b[off+1] & 0xff) << 8) |
((long)(b[off] & 0xff) )) & 0xffffffffL);
}
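  /*
   * Worked example of the little-endian helpers above: an input beginning
   * with the bytes {0x1f, 0x8b} gives readUShortLE(b, 0) ==
   * ((0x8b << 8) | 0x1f) == 0x8b1f == GZIP_MAGIC_ID, while trailer bytes
   * {0x0a, 0x00, 0x00, 0x00} give readUIntLE(b, 0) == 10L, the modulo-2^32
   * uncompressed size.
   */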
}
| 20,079 | 33.680484 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* A collection of factories to create the right
* zlib/gzip compressor/decompressor instances.
*
*/
public class ZlibFactory {
private static final Log LOG =
LogFactory.getLog(ZlibFactory.class);
private static boolean nativeZlibLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
nativeZlibLoaded = ZlibCompressor.isNativeZlibLoaded() &&
ZlibDecompressor.isNativeZlibLoaded();
if (nativeZlibLoaded) {
LOG.info("Successfully loaded & initialized native-zlib library");
} else {
LOG.warn("Failed to load/initialize native-zlib library");
}
}
}
/**
* Check if native-zlib code is loaded & initialized correctly and
* can be loaded for this job.
*
* @param conf configuration
* @return <code>true</code> if native-zlib is loaded & initialized
* and can be loaded for this job, else <code>false</code>
*/
public static boolean isNativeZlibLoaded(Configuration conf) {
return nativeZlibLoaded && conf.getBoolean(
CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,
CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT);
}
public static String getLibraryName() {
return ZlibCompressor.getLibraryName();
}
/**
* Return the appropriate type of the zlib compressor.
*
* @param conf configuration
* @return the appropriate type of the zlib compressor.
*/
public static Class<? extends Compressor>
getZlibCompressorType(Configuration conf) {
return (isNativeZlibLoaded(conf)) ?
ZlibCompressor.class : BuiltInZlibDeflater.class;
}
/**
* Return the appropriate implementation of the zlib compressor.
*
* @param conf configuration
* @return the appropriate implementation of the zlib compressor.
*/
public static Compressor getZlibCompressor(Configuration conf) {
return (isNativeZlibLoaded(conf)) ?
new ZlibCompressor(conf) :
new BuiltInZlibDeflater(ZlibFactory.getCompressionLevel(conf).compressionLevel());
}
/**
* Return the appropriate type of the zlib decompressor.
*
* @param conf configuration
* @return the appropriate type of the zlib decompressor.
*/
public static Class<? extends Decompressor>
getZlibDecompressorType(Configuration conf) {
return (isNativeZlibLoaded(conf)) ?
ZlibDecompressor.class : BuiltInZlibInflater.class;
}
/**
* Return the appropriate implementation of the zlib decompressor.
*
* @param conf configuration
* @return the appropriate implementation of the zlib decompressor.
*/
public static Decompressor getZlibDecompressor(Configuration conf) {
return (isNativeZlibLoaded(conf)) ?
new ZlibDecompressor() : new BuiltInZlibInflater();
}
/**
* Return the appropriate implementation of the zlib direct decompressor.
*
* @param conf configuration
* @return the appropriate implementation of the zlib decompressor.
*/
public static DirectDecompressor getZlibDirectDecompressor(Configuration conf) {
return (isNativeZlibLoaded(conf)) ?
new ZlibDecompressor.ZlibDirectDecompressor() : null;
}
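  /*
   * A minimal sketch of how these factories are typically used, assuming a
   * job Configuration named "conf" (the selection silently falls back to the
   * pure-Java implementations when native zlib is unavailable):
   *
   *   Compressor compressor = ZlibFactory.getZlibCompressor(conf);
   *   Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
   *   DirectDecompressor direct = ZlibFactory.getZlibDirectDecompressor(conf);
   *   // 'direct' is null when the native library is not loaded
   */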
public static void setCompressionStrategy(Configuration conf,
CompressionStrategy strategy) {
conf.setEnum("zlib.compress.strategy", strategy);
}
public static CompressionStrategy getCompressionStrategy(Configuration conf) {
return conf.getEnum("zlib.compress.strategy",
CompressionStrategy.DEFAULT_STRATEGY);
}
public static void setCompressionLevel(Configuration conf,
CompressionLevel level) {
conf.setEnum("zlib.compress.level", level);
}
public static CompressionLevel getCompressionLevel(Configuration conf) {
return conf.getEnum("zlib.compress.level",
CompressionLevel.DEFAULT_COMPRESSION);
}
}
| 5,293 | 33.601307 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import java.io.IOException;
import java.util.zip.Deflater;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A wrapper around java.util.zip.Deflater to make it conform
* to org.apache.hadoop.io.compress.Compressor interface.
*
*/
public class BuiltInZlibDeflater extends Deflater implements Compressor {
private static final Log LOG = LogFactory.getLog(BuiltInZlibDeflater.class);
public BuiltInZlibDeflater(int level, boolean nowrap) {
super(level, nowrap);
}
public BuiltInZlibDeflater(int level) {
super(level);
}
public BuiltInZlibDeflater() {
super();
}
@Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
return super.deflate(b, off, len);
}
/**
   * Reinitialize the compressor with the given configuration. This resets
   * the compressor's compression level and compression strategy. Unlike
   * <tt>ZlibCompressor</tt>, <tt>BuiltInZlibDeflater</tt> supports only three
   * compression strategies: FILTERED, HUFFMAN_ONLY and DEFAULT_STRATEGY.
   * DEFAULT_STRATEGY is used as the fallback if the configured compression
   * strategy is not supported.
*/
@Override
public void reinit(Configuration conf) {
reset();
if (conf == null) {
return;
}
setLevel(ZlibFactory.getCompressionLevel(conf).compressionLevel());
final ZlibCompressor.CompressionStrategy strategy =
ZlibFactory.getCompressionStrategy(conf);
try {
setStrategy(strategy.compressionStrategy());
} catch (IllegalArgumentException ill) {
LOG.warn(strategy + " not supported by BuiltInZlibDeflater.");
setStrategy(DEFAULT_STRATEGY);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Reinit compressor with new compression configuration");
}
}
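  /*
   * A minimal sketch of reconfiguring this deflater through ZlibFactory, as
   * described in the javadoc above ("conf" and "deflater" are illustrative):
   *
   *   ZlibFactory.setCompressionLevel(conf,
   *       ZlibCompressor.CompressionLevel.BEST_SPEED);
   *   ZlibFactory.setCompressionStrategy(conf,
   *       ZlibCompressor.CompressionStrategy.FILTERED);
   *   deflater.reinit(conf);   // FILTERED is one of the supported strategies
   */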
}
| 2,782 | 31.741176 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import org.apache.hadoop.io.compress.Decompressor;
/**
* A wrapper around java.util.zip.Inflater to make it conform
* to org.apache.hadoop.io.compress.Decompressor interface.
*
*/
public class BuiltInZlibInflater extends Inflater implements Decompressor {
public BuiltInZlibInflater(boolean nowrap) {
super(nowrap);
}
public BuiltInZlibInflater() {
super();
}
@Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
try {
return super.inflate(b, off, len);
} catch (DataFormatException dfe) {
throw new IOException(dfe.getMessage());
}
}
}
| 1,593 | 29.653846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* A {@link Decompressor} based on the popular
* zlib compression algorithm.
* http://www.zlib.net/
*
*/
public class ZlibDecompressor implements Decompressor {
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
private long stream;
private CompressionHeader header;
private int directBufferSize;
private Buffer compressedDirectBuf = null;
private int compressedDirectBufOff, compressedDirectBufLen;
private Buffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private boolean finished;
private boolean needDict;
/**
* The headers to detect from compressed data.
*/
public static enum CompressionHeader {
/**
* No headers/trailers/checksums.
*/
NO_HEADER (-15),
/**
* Default headers/trailers/checksums.
*/
DEFAULT_HEADER (15),
/**
* Simple gzip headers/trailers.
*/
GZIP_FORMAT (31),
/**
* Autodetect gzip/zlib headers/trailers.
*/
AUTODETECT_GZIP_ZLIB (47);
private final int windowBits;
CompressionHeader(int windowBits) {
this.windowBits = windowBits;
}
public int windowBits() {
return windowBits;
}
}
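  /*
   * A minimal sketch of choosing a header mode, assuming the native library
   * is loaded: a decompressor that accepts either zlib- or gzip-framed input
   * can be built as
   *
   *   new ZlibDecompressor(CompressionHeader.AUTODETECT_GZIP_ZLIB, 64 * 1024)
   *
   * whereas NO_HEADER (windowBits -15) expects a raw deflate stream with no
   * framing or checksum.
   */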
private static boolean nativeZlibLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
// Initialize the native library
initIDs();
nativeZlibLoaded = true;
} catch (Throwable t) {
// Ignore failure to load/initialize native-zlib
}
}
}
static boolean isNativeZlibLoaded() {
return nativeZlibLoaded;
}
/**
* Creates a new decompressor.
*/
public ZlibDecompressor(CompressionHeader header, int directBufferSize) {
this.header = header;
this.directBufferSize = directBufferSize;
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
stream = init(this.header.windowBits());
}
public ZlibDecompressor() {
this(CompressionHeader.DEFAULT_HEADER, DEFAULT_DIRECT_BUFFER_SIZE);
}
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
setInputFromSavedData();
// Reinitialize zlib's output direct buffer
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
void setInputFromSavedData() {
compressedDirectBufOff = 0;
compressedDirectBufLen = userBufLen;
if (compressedDirectBufLen > directBufferSize) {
compressedDirectBufLen = directBufferSize;
}
// Reinitialize zlib's input direct buffer
compressedDirectBuf.rewind();
((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff,
compressedDirectBufLen);
// Note how much data is being fed to zlib
userBufOff += compressedDirectBufLen;
userBufLen -= compressedDirectBufLen;
}
@Override
public void setDictionary(byte[] b, int off, int len) {
if (stream == 0 || b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
setDictionary(stream, b, off, len);
needDict = false;
}
@Override
public boolean needsInput() {
// Consume remaining compressed data?
if (uncompressedDirectBuf.remaining() > 0) {
return false;
}
// Check if zlib has consumed all input
if (compressedDirectBufLen <= 0) {
// Check if we have consumed all user-input
if (userBufLen <= 0) {
return true;
} else {
setInputFromSavedData();
}
}
return false;
}
@Override
public boolean needsDictionary() {
return needDict;
}
@Override
public boolean finished() {
// Check if 'zlib' says it's 'finished' and
// all compressed data has been consumed
return (finished && uncompressedDirectBuf.remaining() == 0);
}
@Override
public int decompress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
int n = 0;
// Check if there is uncompressed data
n = uncompressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
return n;
}
    // Re-initialize zlib's output direct buffer
uncompressedDirectBuf.rewind();
uncompressedDirectBuf.limit(directBufferSize);
// Decompress data
n = inflateBytesDirect();
uncompressedDirectBuf.limit(n);
// Get at most 'len' bytes
n = Math.min(n, len);
((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
return n;
}
/**
* Returns the total number of uncompressed bytes output so far.
*
* @return the total (non-negative) number of uncompressed bytes output so far
*/
public long getBytesWritten() {
checkStream();
return getBytesWritten(stream);
}
/**
* Returns the total number of compressed bytes input so far.</p>
*
* @return the total (non-negative) number of compressed bytes input so far
*/
public long getBytesRead() {
checkStream();
return getBytesRead(stream);
}
/**
* Returns the number of bytes remaining in the input buffers; normally
* called when finished() is true to determine amount of post-gzip-stream
* data.</p>
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@Override
public int getRemaining() {
checkStream();
return userBufLen + getRemaining(stream); // userBuf + compressedDirectBuf
}
/**
* Resets everything including the input buffers (user and direct).</p>
*/
@Override
public void reset() {
checkStream();
reset(stream);
finished = false;
needDict = false;
compressedDirectBufOff = compressedDirectBufLen = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = userBufLen = 0;
}
@Override
public void end() {
if (stream != 0) {
end(stream);
stream = 0;
}
}
@Override
protected void finalize() {
end();
}
private void checkStream() {
if (stream == 0)
throw new NullPointerException();
}
private native static void initIDs();
private native static long init(int windowBits);
private native static void setDictionary(long strm, byte[] b, int off,
int len);
private native int inflateBytesDirect();
private native static long getBytesRead(long strm);
private native static long getBytesWritten(long strm);
private native static int getRemaining(long strm);
private native static void reset(long strm);
private native static void end(long strm);
int inflateDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
assert (this instanceof ZlibDirectDecompressor);
ByteBuffer presliced = dst;
if (dst.position() > 0) {
presliced = dst;
dst = dst.slice();
}
Buffer originalCompressed = compressedDirectBuf;
Buffer originalUncompressed = uncompressedDirectBuf;
int originalBufferSize = directBufferSize;
compressedDirectBuf = src;
compressedDirectBufOff = src.position();
compressedDirectBufLen = src.remaining();
uncompressedDirectBuf = dst;
directBufferSize = dst.remaining();
int n = 0;
try {
n = inflateBytesDirect();
presliced.position(presliced.position() + n);
if (compressedDirectBufLen > 0) {
src.position(compressedDirectBufOff);
} else {
src.position(src.limit());
}
} finally {
compressedDirectBuf = originalCompressed;
uncompressedDirectBuf = originalUncompressed;
compressedDirectBufOff = 0;
compressedDirectBufLen = 0;
directBufferSize = originalBufferSize;
}
return n;
}
public static class ZlibDirectDecompressor
extends ZlibDecompressor implements DirectDecompressor {
public ZlibDirectDecompressor() {
super(CompressionHeader.DEFAULT_HEADER, 0);
}
public ZlibDirectDecompressor(CompressionHeader header, int directBufferSize) {
super(header, directBufferSize);
}
@Override
public boolean finished() {
return (endOfInput && super.finished());
}
@Override
public void reset() {
super.reset();
endOfInput = true;
}
private boolean endOfInput;
@Override
public void decompress(ByteBuffer src, ByteBuffer dst)
throws IOException {
assert dst.isDirect() : "dst.isDirect()";
assert src.isDirect() : "src.isDirect()";
assert dst.remaining() > 0 : "dst.remaining() > 0";
this.inflateDirect(src, dst);
endOfInput = !src.hasRemaining();
}
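    /*
     * A minimal sketch of using this direct decompressor, assuming "src" and
     * "dst" are direct buffers and dst has room for the entire result (names
     * are illustrative):
     *
     *   ZlibDirectDecompressor d = new ZlibDirectDecompressor();
     *   while (!d.finished() && dst.hasRemaining()) {
     *     d.decompress(src, dst);    // advances both buffer positions
     *   }
     *   dst.flip();
     */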
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
@Override
public int decompress(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
}
}
| 10,839 | 26.51269 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zlib;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A {@link Compressor} based on the popular
* zlib compression algorithm.
* http://www.zlib.net/
*
*/
public class ZlibCompressor implements Compressor {
private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
private long stream;
private CompressionLevel level;
private CompressionStrategy strategy;
private final CompressionHeader windowBits;
private int directBufferSize;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private Buffer uncompressedDirectBuf = null;
private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0;
private boolean keepUncompressedBuf = false;
private Buffer compressedDirectBuf = null;
private boolean finish, finished;
/**
* The compression level for zlib library.
*/
public static enum CompressionLevel {
/**
* Compression level for no compression.
*/
NO_COMPRESSION (0),
/**
* Compression level for fastest compression.
*/
BEST_SPEED (1),
/**
* Compression level for best compression.
*/
BEST_COMPRESSION (9),
/**
* Default compression level.
*/
DEFAULT_COMPRESSION (-1);
private final int compressionLevel;
CompressionLevel(int level) {
compressionLevel = level;
}
int compressionLevel() {
return compressionLevel;
}
};
/**
   * The compression strategy for the zlib library.
*/
public static enum CompressionStrategy {
/**
* Compression strategy best used for data consisting mostly of small
* values with a somewhat random distribution. Forces more Huffman coding
* and less string matching.
*/
FILTERED (1),
/**
* Compression strategy for Huffman coding only.
*/
HUFFMAN_ONLY (2),
/**
* Compression strategy to limit match distances to one
* (run-length encoding).
*/
RLE (3),
/**
* Compression strategy to prevent the use of dynamic Huffman codes,
* allowing for a simpler decoder for special applications.
*/
FIXED (4),
/**
* Default compression strategy.
*/
DEFAULT_STRATEGY (0);
private final int compressionStrategy;
CompressionStrategy(int strategy) {
compressionStrategy = strategy;
}
int compressionStrategy() {
return compressionStrategy;
}
};
/**
* The type of header for compressed data.
*/
public static enum CompressionHeader {
/**
* No headers/trailers/checksums.
*/
NO_HEADER (-15),
/**
* Default headers/trailers/checksums.
*/
DEFAULT_HEADER (15),
/**
* Simple gzip headers/trailers.
*/
GZIP_FORMAT (31);
private final int windowBits;
CompressionHeader(int windowBits) {
this.windowBits = windowBits;
}
public int windowBits() {
return windowBits;
}
}
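  /*
   * A minimal sketch combining the three enums above, assuming the native
   * library is loaded: a compressor that emits gzip-framed output at the
   * fastest level could be constructed as
   *
   *   new ZlibCompressor(CompressionLevel.BEST_SPEED,
   *       CompressionStrategy.DEFAULT_STRATEGY,
   *       CompressionHeader.GZIP_FORMAT, 64 * 1024)
   */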
private static boolean nativeZlibLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
// Initialize the native library
initIDs();
nativeZlibLoaded = true;
} catch (Throwable t) {
// Ignore failure to load/initialize native-zlib
}
}
}
static boolean isNativeZlibLoaded() {
return nativeZlibLoaded;
}
protected final void construct(CompressionLevel level, CompressionStrategy strategy,
CompressionHeader header, int directBufferSize) {
}
/**
* Creates a new compressor with the default compression level.
* Compressed data will be generated in ZLIB format.
*/
public ZlibCompressor() {
this(CompressionLevel.DEFAULT_COMPRESSION,
CompressionStrategy.DEFAULT_STRATEGY,
CompressionHeader.DEFAULT_HEADER,
DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Creates a new compressor, taking settings from the configuration.
*/
public ZlibCompressor(Configuration conf) {
this(ZlibFactory.getCompressionLevel(conf),
ZlibFactory.getCompressionStrategy(conf),
CompressionHeader.DEFAULT_HEADER,
DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Creates a new compressor using the specified compression level.
* Compressed data will be generated in ZLIB format.
*
* @param level Compression level #CompressionLevel
* @param strategy Compression strategy #CompressionStrategy
* @param header Compression header #CompressionHeader
* @param directBufferSize Size of the direct buffer to be used.
*/
public ZlibCompressor(CompressionLevel level, CompressionStrategy strategy,
CompressionHeader header, int directBufferSize) {
this.level = level;
this.strategy = strategy;
this.windowBits = header;
stream = init(this.level.compressionLevel(),
this.strategy.compressionStrategy(),
this.windowBits.windowBits());
this.directBufferSize = directBufferSize;
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration. It will reset the compressor's compression level
* and compression strategy.
*
* @param conf Configuration storing new settings
*/
@Override
public void reinit(Configuration conf) {
reset();
if (conf == null) {
return;
}
end(stream);
level = ZlibFactory.getCompressionLevel(conf);
strategy = ZlibFactory.getCompressionStrategy(conf);
stream = init(level.compressionLevel(),
strategy.compressionStrategy(),
windowBits.windowBits());
if(LOG.isDebugEnabled()) {
LOG.debug("Reinit compressor with new compression configuration");
}
}
@Override
public void setInput(byte[] b, int off, int len) {
    if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
uncompressedDirectBufOff = 0;
setInputFromSavedData();
// Reinitialize zlib's output direct buffer
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
//copy enough data from userBuf to uncompressedDirectBuf
void setInputFromSavedData() {
int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff, len);
userBufLen -= len;
userBufOff += len;
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
@Override
public void setDictionary(byte[] b, int off, int len) {
if (stream == 0 || b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
setDictionary(stream, b, off, len);
}
@Override
public boolean needsInput() {
// Consume remaining compressed data?
if (compressedDirectBuf.remaining() > 0) {
return false;
}
    // Check if zlib has consumed all input;
    // compress() must be invoked to drain it while keepUncompressedBuf is true
if (keepUncompressedBuf && uncompressedDirectBufLen > 0)
return false;
if (uncompressedDirectBuf.remaining() > 0) {
// Check if we have consumed all user-input
if (userBufLen <= 0) {
return true;
} else {
// copy enough data from userBuf to uncompressedDirectBuf
setInputFromSavedData();
        // more input is needed only while uncompressedDirectBuf is not full
        return uncompressedDirectBuf.remaining() > 0;
}
}
return false;
}
@Override
public void finish() {
finish = true;
}
@Override
public boolean finished() {
// Check if 'zlib' says its 'finished' and
// all compressed data has been consumed
return (finished && compressedDirectBuf.remaining() == 0);
}
@Override
public int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
int n = 0;
// Check if there is compressed data
n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer)compressedDirectBuf).get(b, off, n);
return n;
}
// Re-initialize the zlib's output direct buffer
compressedDirectBuf.rewind();
compressedDirectBuf.limit(directBufferSize);
// Compress data
n = deflateBytesDirect();
compressedDirectBuf.limit(n);
// Check if zlib consumed all input buffer
// set keepUncompressedBuf properly
if (uncompressedDirectBufLen <= 0) { // zlib consumed all input buffer
keepUncompressedBuf = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufOff = 0;
uncompressedDirectBufLen = 0;
} else { // zlib did not consume all input buffer
keepUncompressedBuf = true;
}
    // Get at most 'len' bytes
n = Math.min(n, len);
((ByteBuffer)compressedDirectBuf).get(b, off, n);
return n;
}
/**
* Returns the total number of compressed bytes output so far.
*
* @return the total (non-negative) number of compressed bytes output so far
*/
@Override
public long getBytesWritten() {
checkStream();
return getBytesWritten(stream);
}
/**
   * Returns the total number of uncompressed bytes input so far.
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
@Override
public long getBytesRead() {
checkStream();
return getBytesRead(stream);
}
@Override
public void reset() {
checkStream();
reset(stream);
finish = false;
finished = false;
uncompressedDirectBuf.rewind();
uncompressedDirectBufOff = uncompressedDirectBufLen = 0;
keepUncompressedBuf = false;
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
userBufOff = userBufLen = 0;
}
@Override
public void end() {
if (stream != 0) {
end(stream);
stream = 0;
}
}
private void checkStream() {
if (stream == 0)
throw new NullPointerException();
}
private native static void initIDs();
private native static long init(int level, int strategy, int windowBits);
private native static void setDictionary(long strm, byte[] b, int off,
int len);
private native int deflateBytesDirect();
private native static long getBytesRead(long strm);
private native static long getBytesWritten(long strm);
private native static void reset(long strm);
private native static void end(long strm);
public native static String getLibraryName();
}
| 12,363 | 26.659955 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.lz4;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,025 | 41.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Compressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.lz4;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* A {@link Compressor} based on the lz4 compression algorithm.
* http://code.google.com/p/lz4/
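 * <p>
 * Illustrative sketch of the raw {@link Compressor} call sequence (normally
 * this class is driven by a compressor stream rather than used directly):
 * <pre>{@code
 * Lz4Compressor compressor = new Lz4Compressor();
 * compressor.setInput(data, 0, data.length);
 * compressor.finish();
 * byte[] buf = new byte[64 * 1024];
 * while (!compressor.finished()) {
 *   int n = compressor.compress(buf, 0, buf.length);
 *   // write buf[0..n) to the destination
 * }
 * }</pre>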
*/
public class Lz4Compressor implements Compressor {
private static final Log LOG =
LogFactory.getLog(Lz4Compressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
private Buffer compressedDirectBuf = null;
private int uncompressedDirectBufLen;
private Buffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private boolean finish, finished;
private long bytesRead = 0L;
private long bytesWritten = 0L;
private final boolean useLz4HC;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
// Initialize the native library
try {
initIDs();
} catch (Throwable t) {
// Ignore failure to load/initialize lz4
LOG.warn(t.toString());
}
} else {
LOG.error("Cannot load " + Lz4Compressor.class.getName() +
" without native hadoop library!");
}
}
/**
* Creates a new compressor.
*
* @param directBufferSize size of the direct buffer to be used.
* @param useLz4HC use high compression ratio version of lz4,
* which trades CPU for compression ratio.
*/
public Lz4Compressor(int directBufferSize, boolean useLz4HC) {
this.useLz4HC = useLz4HC;
this.directBufferSize = directBufferSize;
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
/**
* Creates a new compressor.
*
* @param directBufferSize size of the direct buffer to be used.
*/
public Lz4Compressor(int directBufferSize) {
this(directBufferSize, false);
}
/**
* Creates a new compressor with the default buffer size.
*/
public Lz4Compressor() {
this(DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Sets input data for compression.
   * This should be called whenever {@link #needsInput()} returns
* <code>true</code> indicating that more input data is required.
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
@Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
finished = false;
if (len > uncompressedDirectBuf.remaining()) {
// save data; now !needsInput
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
} else {
((ByteBuffer) uncompressedDirectBuf).put(b, off, len);
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
bytesRead += len;
}
/**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
synchronized void setInputFromSavedData() {
if (0 >= userBufLen) {
return;
}
finished = false;
uncompressedDirectBufLen = Math.min(userBufLen, directBufferSize);
((ByteBuffer) uncompressedDirectBuf).put(userBuf, userBufOff,
uncompressedDirectBufLen);
// Note how much data is being fed to lz4
userBufOff += uncompressedDirectBufLen;
userBufLen -= uncompressedDirectBufLen;
}
/**
* Does nothing.
*/
@Override
public synchronized void setDictionary(byte[] b, int off, int len) {
// do nothing
}
/**
* Returns true if the input data buffer is empty and
   * {@link #setInput(byte[], int, int)} should be called to provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
   * {@link #setInput(byte[], int, int)} should be called in order to provide more input.
*/
@Override
public synchronized boolean needsInput() {
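    // More input is wanted only when there is no compressed output waiting to
    // be drained, the uncompressed direct buffer still has room, and no user
    // bytes have been set aside in userBuf.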
return !(compressedDirectBuf.remaining() > 0
|| uncompressedDirectBuf.remaining() == 0 || userBufLen > 0);
}
/**
* When called, indicates that compression should end
* with the current contents of the input buffer.
*/
@Override
public synchronized void finish() {
finish = true;
}
/**
* Returns true if the end of the compressed
* data output stream has been reached.
*
* @return <code>true</code> if the end of the compressed
* data output stream has been reached.
*/
@Override
public synchronized boolean finished() {
// Check if all uncompressed data has been consumed
return (finish && finished && compressedDirectBuf.remaining() == 0);
}
/**
* Fills specified buffer with compressed data. Returns actual number
* of bytes of compressed data. A return value of 0 indicates that
* needsInput() should be called in order to determine if more input
* data is required.
*
* @param b Buffer for the compressed data
* @param off Start offset of the data
* @param len Size of the buffer
* @return The actual number of bytes of compressed data.
*/
@Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) compressedDirectBuf).get(b, off, n);
bytesWritten += n;
return n;
}
// Re-initialize the lz4's output direct-buffer
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
if (0 == uncompressedDirectBuf.position()) {
// No compressed data, so we should have !needsInput or !finished
setInputFromSavedData();
if (0 == uncompressedDirectBuf.position()) {
// Called without data; write nothing
finished = true;
return 0;
}
}
// Compress data
n = useLz4HC ? compressBytesDirectHC() : compressBytesDirect();
compressedDirectBuf.limit(n);
uncompressedDirectBuf.clear(); // lz4 consumes all buffer input
    // Set 'finished' if lz4 has consumed all user-data
if (0 == userBufLen) {
finished = true;
}
    // Get at most 'len' bytes
n = Math.min(n, len);
bytesWritten += n;
((ByteBuffer) compressedDirectBuf).get(b, off, n);
return n;
}
/**
* Resets compressor so that a new set of input data can be processed.
*/
@Override
public synchronized void reset() {
finish = false;
finished = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufLen = 0;
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
userBufOff = userBufLen = 0;
bytesRead = bytesWritten = 0L;
}
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration
*
* @param conf Configuration from which new setting are fetched
*/
@Override
public synchronized void reinit(Configuration conf) {
reset();
}
/**
* Return number of bytes given to this compressor since last reset.
*/
@Override
public synchronized long getBytesRead() {
return bytesRead;
}
/**
* Return number of bytes consumed by callers of compress since last reset.
*/
@Override
public synchronized long getBytesWritten() {
return bytesWritten;
}
/**
* Closes the compressor and discards any unprocessed input.
*/
@Override
public synchronized void end() {
}
private native static void initIDs();
private native int compressBytesDirect();
private native int compressBytesDirectHC();
public native static String getLibraryName();
}
| 8,995 | 27.741214 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.lz4;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* A {@link Decompressor} based on the lz4 compression algorithm.
* http://code.google.com/p/lz4/
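 * <p>
 * Illustrative sketch of the raw {@link Decompressor} call sequence (a
 * simplified sketch only; in practice this class is driven by a block
 * decompressor stream, and the input must be a complete block produced by
 * the matching {@code Lz4Compressor}):
 * <pre>{@code
 * Lz4Decompressor decompressor = new Lz4Decompressor();
 * decompressor.setInput(compressed, 0, compressed.length);
 * byte[] buf = new byte[64 * 1024];
 * while (!decompressor.finished()) {
 *   int n = decompressor.decompress(buf, 0, buf.length);
 *   // consume buf[0..n)
 * }
 * }</pre>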
*/
public class Lz4Decompressor implements Decompressor {
private static final Log LOG =
      LogFactory.getLog(Lz4Decompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;
private Buffer compressedDirectBuf = null;
private int compressedDirectBufLen;
private Buffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private boolean finished;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
// Initialize the native library
try {
initIDs();
} catch (Throwable t) {
// Ignore failure to load/initialize lz4
LOG.warn(t.toString());
}
} else {
LOG.error("Cannot load " + Lz4Compressor.class.getName() +
" without native hadoop library!");
}
}
/**
   * Creates a new decompressor.
*
* @param directBufferSize size of the direct buffer to be used.
*/
public Lz4Decompressor(int directBufferSize) {
this.directBufferSize = directBufferSize;
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
/**
* Creates a new decompressor with the default buffer size.
*/
public Lz4Decompressor() {
this(DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Sets input data for decompression.
* This should be called if and only if {@link #needsInput()} returns
* <code>true</code> indicating that more input data is required.
* (Both native and non-native versions of various Decompressors require
* that the data passed in via <code>b[]</code> remain unmodified until
* the caller is explicitly notified--via {@link #needsInput()}--that the
* buffer may be safely modified. With this requirement, an extra
* buffer-copy can be avoided.)
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
@Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
setInputFromSavedData();
// Reinitialize lz4's output direct-buffer
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
/**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
synchronized void setInputFromSavedData() {
compressedDirectBufLen = Math.min(userBufLen, directBufferSize);
// Reinitialize lz4's input direct buffer
compressedDirectBuf.rewind();
((ByteBuffer) compressedDirectBuf).put(userBuf, userBufOff,
compressedDirectBufLen);
// Note how much data is being fed to lz4
userBufOff += compressedDirectBufLen;
userBufLen -= compressedDirectBufLen;
}
/**
* Does nothing.
*/
@Override
public synchronized void setDictionary(byte[] b, int off, int len) {
// do nothing
}
/**
* Returns true if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called to
* provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called in
* order to provide more input.
*/
@Override
public synchronized boolean needsInput() {
// Consume remaining compressed data?
if (uncompressedDirectBuf.remaining() > 0) {
return false;
}
// Check if lz4 has consumed all input
if (compressedDirectBufLen <= 0) {
// Check if we have consumed all user-input
if (userBufLen <= 0) {
return true;
} else {
setInputFromSavedData();
}
}
return false;
}
/**
* Returns <code>false</code>.
*
* @return <code>false</code>.
*/
@Override
public synchronized boolean needsDictionary() {
return false;
}
/**
* Returns true if the end of the decompressed
* data output stream has been reached.
*
* @return <code>true</code> if the end of the decompressed
* data output stream has been reached.
*/
@Override
public synchronized boolean finished() {
return (finished && uncompressedDirectBuf.remaining() == 0);
}
/**
* Fills specified buffer with uncompressed data. Returns actual number
* of bytes of uncompressed data. A return value of 0 indicates that
* {@link #needsInput()} should be called in order to determine if more
* input data is required.
*
   * @param b Buffer for the uncompressed data
   * @param off Start offset of the data
   * @param len Size of the buffer
   * @return The actual number of bytes of uncompressed data.
* @throws IOException
*/
@Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
int n = 0;
// Check if there is uncompressed data
n = uncompressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) uncompressedDirectBuf).get(b, off, n);
return n;
}
if (compressedDirectBufLen > 0) {
// Re-initialize the lz4's output direct buffer
uncompressedDirectBuf.rewind();
uncompressedDirectBuf.limit(directBufferSize);
// Decompress data
n = decompressBytesDirect();
uncompressedDirectBuf.limit(n);
if (userBufLen <= 0) {
finished = true;
}
      // Get at most 'len' bytes
n = Math.min(n, len);
((ByteBuffer) uncompressedDirectBuf).get(b, off, n);
}
return n;
}
/**
* Returns <code>0</code>.
*
* @return <code>0</code>.
*/
@Override
public synchronized int getRemaining() {
// Never use this function in BlockDecompressorStream.
return 0;
}
@Override
public synchronized void reset() {
finished = false;
compressedDirectBufLen = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = userBufLen = 0;
}
/**
   * Does nothing. This implementation holds no native resources that need
   * to be released when the decompressor is no longer used.
*/
@Override
public synchronized void end() {
// do nothing
}
private native static void initIDs();
private native int decompressBytesDirect();
}
| 8,017 | 27.738351 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.bzip2;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,027 | 41.833333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CRC.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* This package is based on the work done by Keiron Liddle, Aftex Software
* <[email protected]> to whom the Ant project is very grateful for his
* great code.
*/
package org.apache.hadoop.io.compress.bzip2;
/**
 * A simple class to hold and calculate the CRC for sanity checking of the
* data.
*
*/
final class CRC {
static final int crc32Table[] = { 0x00000000, 0x04c11db7, 0x09823b6e,
0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64,
0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2,
0x52568b75, 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, 0x9823b6e0,
0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b,
0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef, 0xb7a96036,
0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c,
0xc3f706fb, 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f,
0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a,
0xec7dd02d, 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, 0x128e9dcf,
0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4,
0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069,
0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063,
0x495a2dd4, 0x44190b0d, 0x40d816ba, 0xaca5c697, 0xa864db20,
0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25,
0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7,
0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c,
0xfa325055, 0xfef34de2, 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31,
0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632,
0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4,
0x51435d53, 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, 0x0315d626,
0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d,
0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60,
0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a,
0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9,
0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc,
0xa379dd7b, 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, 0x5d8a9099,
0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f,
0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35,
0x065e2082, 0x0b1d065b, 0x0fdc1bec, 0x3793a651, 0x3352bbe6,
0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3,
0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1,
0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa,
0xf9278673, 0xfde69bc4, 0x89b8fd09, 0x8d79e0be, 0x803ac667,
0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d,
0xb8757bda, 0xb5365d03, 0xb1f740b4 };
CRC() {
initialiseCRC();
}
void initialiseCRC() {
globalCrc = 0xffffffff;
}
int getFinalCRC() {
return ~globalCrc;
}
int getGlobalCRC() {
return globalCrc;
}
void setGlobalCRC(int newCrc) {
globalCrc = newCrc;
}
void updateCRC(int inCh) {
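    // Table-driven, MSB-first CRC-32 step: fold the incoming byte into the
    // top byte of the running CRC and replace that byte with the table entry
    // for the resulting index.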
int temp = (globalCrc >> 24) ^ inCh;
if (temp < 0) {
temp = 256 + temp;
}
globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp];
}
void updateCRC(int inCh, int repeat) {
int globalCrcShadow = this.globalCrc;
while (repeat-- > 0) {
int temp = (globalCrcShadow >> 24) ^ inCh;
globalCrcShadow = (globalCrcShadow << 8)
^ crc32Table[(temp >= 0) ? temp : (temp + 256)];
}
this.globalCrc = globalCrcShadow;
}
int globalCrc;
}
| 5,339 | 41.380952 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/BZip2Constants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* This package is based on the work done by Keiron Liddle, Aftex Software
* <[email protected]> to whom the Ant project is very grateful for his
* great code.
*/
package org.apache.hadoop.io.compress.bzip2;
/**
 * Constants shared by both the compress and decompress classes. Holds common
 * arrays and static data.
* <p>
* This interface is public for historical purposes. You should have no need to
* use it.
* </p>
*/
public interface BZip2Constants {
int baseBlockSize = 100000;
int MAX_ALPHA_SIZE = 258;
int MAX_CODE_LEN = 23;
int RUNA = 0;
int RUNB = 1;
int N_GROUPS = 6;
int G_SIZE = 50;
int N_ITERS = 4;
int MAX_SELECTORS = (2 + (900000 / G_SIZE));
int NUM_OVERSHOOT_BYTES = 20;
/**
* End of a BZip2 block
*/
public static final int END_OF_BLOCK = -2;
/**
* End of BZip2 stream.
*/
public static final int END_OF_STREAM = -1;
/**
* This array really shouldn't be here. Again, for historical purposes it
* is.
*
* <p>
* FIXME: This array should be in a private or package private location,
* since it could be modified by malicious code.
* </p>
*/
final int[] rNums = { 619, 720, 127, 481, 931, 816, 813, 233, 566, 247,
985, 724, 205, 454, 863, 491, 741, 242, 949, 214, 733, 859, 335,
708, 621, 574, 73, 654, 730, 472, 419, 436, 278, 496, 867, 210,
399, 680, 480, 51, 878, 465, 811, 169, 869, 675, 611, 697, 867,
561, 862, 687, 507, 283, 482, 129, 807, 591, 733, 623, 150, 238,
59, 379, 684, 877, 625, 169, 643, 105, 170, 607, 520, 932, 727,
476, 693, 425, 174, 647, 73, 122, 335, 530, 442, 853, 695, 249,
445, 515, 909, 545, 703, 919, 874, 474, 882, 500, 594, 612, 641,
801, 220, 162, 819, 984, 589, 513, 495, 799, 161, 604, 958, 533,
221, 400, 386, 867, 600, 782, 382, 596, 414, 171, 516, 375, 682,
485, 911, 276, 98, 553, 163, 354, 666, 933, 424, 341, 533, 870,
227, 730, 475, 186, 263, 647, 537, 686, 600, 224, 469, 68, 770,
919, 190, 373, 294, 822, 808, 206, 184, 943, 795, 384, 383, 461,
404, 758, 839, 887, 715, 67, 618, 276, 204, 918, 873, 777, 604,
560, 951, 160, 578, 722, 79, 804, 96, 409, 713, 940, 652, 934, 970,
447, 318, 353, 859, 672, 112, 785, 645, 863, 803, 350, 139, 93,
354, 99, 820, 908, 609, 772, 154, 274, 580, 184, 79, 626, 630, 742,
653, 282, 762, 623, 680, 81, 927, 626, 789, 125, 411, 521, 938,
300, 821, 78, 343, 175, 128, 250, 170, 774, 972, 275, 999, 639,
495, 78, 352, 126, 857, 956, 358, 619, 580, 124, 737, 594, 701,
612, 669, 112, 134, 694, 363, 992, 809, 743, 168, 974, 944, 375,
748, 52, 600, 747, 642, 182, 862, 81, 344, 805, 988, 739, 511, 655,
814, 334, 249, 515, 897, 955, 664, 981, 649, 113, 974, 459, 893,
228, 433, 837, 553, 268, 926, 240, 102, 654, 459, 51, 686, 754,
806, 760, 493, 403, 415, 394, 687, 700, 946, 670, 656, 610, 738,
392, 760, 799, 887, 653, 978, 321, 576, 617, 626, 502, 894, 679,
243, 440, 680, 879, 194, 572, 640, 724, 926, 56, 204, 700, 707,
151, 457, 449, 797, 195, 791, 558, 945, 679, 297, 59, 87, 824, 713,
663, 412, 693, 342, 606, 134, 108, 571, 364, 631, 212, 174, 643,
304, 329, 343, 97, 430, 751, 497, 314, 983, 374, 822, 928, 140,
206, 73, 263, 980, 736, 876, 478, 430, 305, 170, 514, 364, 692,
829, 82, 855, 953, 676, 246, 369, 970, 294, 750, 807, 827, 150,
790, 288, 923, 804, 378, 215, 828, 592, 281, 565, 555, 710, 82,
896, 831, 547, 261, 524, 462, 293, 465, 502, 56, 661, 821, 976,
991, 658, 869, 905, 758, 745, 193, 768, 550, 608, 933, 378, 286,
215, 979, 792, 961, 61, 688, 793, 644, 986, 403, 106, 366, 905,
644, 372, 567, 466, 434, 645, 210, 389, 550, 919, 135, 780, 773,
635, 389, 707, 100, 626, 958, 165, 504, 920, 176, 193, 713, 857,
265, 203, 50, 668, 108, 645, 990, 626, 197, 510, 357, 358, 850,
858, 364, 936, 638 };
}
| 4,660 | 42.971698 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/BZip2DummyCompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.io.compress.bzip2;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
/**
* This is a dummy compressor for BZip2.
*/
public class BZip2DummyCompressor implements Compressor {
@Override
public int compress(byte[] b, int off, int len) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void end() {
throw new UnsupportedOperationException();
}
@Override
public void finish() {
throw new UnsupportedOperationException();
}
@Override
public boolean finished() {
throw new UnsupportedOperationException();
}
@Override
public long getBytesRead() {
throw new UnsupportedOperationException();
}
@Override
public long getBytesWritten() {
throw new UnsupportedOperationException();
}
@Override
public boolean needsInput() {
throw new UnsupportedOperationException();
}
@Override
public void reset() {
// do nothing
}
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException();
}
@Override
public void setInput(byte[] b, int off, int len) {
throw new UnsupportedOperationException();
}
@Override
public void reinit(Configuration conf) {
// do nothing
}
}
| 2,167 | 23.91954 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/BZip2DummyDecompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.io.compress.bzip2;
import java.io.IOException;
import org.apache.hadoop.io.compress.Decompressor;
/**
* This is a dummy decompressor for BZip2.
*/
public class BZip2DummyDecompressor implements Decompressor {
@Override
public int decompress(byte[] b, int off, int len) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void end() {
throw new UnsupportedOperationException();
}
@Override
public boolean finished() {
throw new UnsupportedOperationException();
}
@Override
public boolean needsDictionary() {
throw new UnsupportedOperationException();
}
@Override
public boolean needsInput() {
throw new UnsupportedOperationException();
}
@Override
public int getRemaining() {
throw new UnsupportedOperationException();
}
@Override
public void reset() {
// do nothing
}
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException();
}
@Override
public void setInput(byte[] b, int off, int len) {
throw new UnsupportedOperationException();
}
}
| 1,967 | 24.894737 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* This package is based on the work done by Keiron Liddle, Aftex Software
* <[email protected]> to whom the Ant project is very grateful for his
* great code.
*/
package org.apache.hadoop.io.compress.bzip2;
import java.io.OutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;
/**
 * An output stream that compresses data in the BZip2 format (without the file
 * header chars) and writes it to another stream.
*
* <p>
* The compression requires large amounts of memory. Thus you should call the
* {@link #close() close()} method as soon as possible, to force
* <tt>CBZip2OutputStream</tt> to release the allocated memory.
* </p>
*
* <p>
* You can shrink the amount of allocated memory and maybe raise the compression
* speed by choosing a lower blocksize, which in turn may cause a lower
* compression ratio. You can avoid unnecessary memory allocation by avoiding
* using a blocksize which is bigger than the size of the input.
* </p>
*
* <p>
* You can compute the memory usage for compressing by the following formula:
* </p>
*
* <pre>
* <code>400k + (9 * blocksize)</code>.
* </pre>
*
* <p>
* To get the memory required for decompression by {@link CBZip2InputStream
* CBZip2InputStream} use
* </p>
*
* <pre>
* <code>65k + (5 * blocksize)</code>.
* </pre>
*
* <table width="100%" border="1">
* <colgroup> <col width="33%" /> <col width="33%" /> <col width="33%" />
* </colgroup>
* <tr>
* <th colspan="3">Memory usage by blocksize</th>
* </tr>
* <tr>
* <th align="right">Blocksize</th> <th align="right">Compression<br>
* memory usage</th> <th align="right">Decompression<br>
* memory usage</th>
* </tr>
* <tr>
* <td align="right">100k</td>
* <td align="right">1300k</td>
* <td align="right">565k</td>
* </tr>
* <tr>
* <td align="right">200k</td>
* <td align="right">2200k</td>
* <td align="right">1065k</td>
* </tr>
* <tr>
* <td align="right">300k</td>
* <td align="right">3100k</td>
* <td align="right">1565k</td>
* </tr>
* <tr>
* <td align="right">400k</td>
* <td align="right">4000k</td>
* <td align="right">2065k</td>
* </tr>
* <tr>
* <td align="right">500k</td>
* <td align="right">4900k</td>
* <td align="right">2565k</td>
* </tr>
* <tr>
* <td align="right">600k</td>
* <td align="right">5800k</td>
* <td align="right">3065k</td>
* </tr>
* <tr>
* <td align="right">700k</td>
* <td align="right">6700k</td>
* <td align="right">3565k</td>
* </tr>
* <tr>
* <td align="right">800k</td>
* <td align="right">7600k</td>
* <td align="right">4065k</td>
* </tr>
* <tr>
* <td align="right">900k</td>
* <td align="right">8500k</td>
* <td align="right">4565k</td>
* </tr>
* </table>
*
* <p>
* For decompression <tt>CBZip2InputStream</tt> allocates less memory if the
* bzipped input is smaller than one block.
* </p>
*
* <p>
* Instances of this class are not threadsafe.
* </p>
*
* <p>
* TODO: Update to BZip2 1.0.1
* </p>
*
*/
public class CBZip2OutputStream extends OutputStream implements BZip2Constants {
/**
* The minimum supported blocksize <tt> == 1</tt>.
*/
public static final int MIN_BLOCKSIZE = 1;
/**
* The maximum supported blocksize <tt> == 9</tt>.
*/
public static final int MAX_BLOCKSIZE = 9;
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int SETMASK = (1 << 21);
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int CLEARMASK = (~SETMASK);
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int GREATER_ICOST = 15;
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int LESSER_ICOST = 0;
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int SMALL_THRESH = 20;
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int DEPTH_THRESH = 10;
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
*/
protected static final int WORK_FACTOR = 30;
/**
* This constant is accessible by subclasses for historical purposes. If you
* don't know what it means then you don't need it.
* <p>
* If you are ever unlucky/improbable enough to get a stack overflow whilst
* sorting, increase the following constant and try again. In practice I
* have never seen the stack go above 27 elems, so the following limit seems
* very generous.
* </p>
*/
protected static final int QSORT_STACK_SIZE = 1000;
/**
* Knuth's increments seem to work better than Incerpi-Sedgewick here.
* Possibly because the number of elems to sort is usually small, typically
* <= 20.
*/
private static final int[] INCS = { 1, 4, 13, 40, 121, 364, 1093, 3280,
9841, 29524, 88573, 265720, 797161, 2391484 };
/**
* This method is accessible by subclasses for historical purposes. If you
* don't know what it does then you don't need it.
*/
protected static void hbMakeCodeLengths(char[] len, int[] freq,
int alphaSize, int maxLen) {
/*
* Nodes and heap entries run from 1. Entry 0 for both the heap and
* nodes is a sentinel.
*/
final int[] heap = new int[MAX_ALPHA_SIZE * 2];
final int[] weight = new int[MAX_ALPHA_SIZE * 2];
final int[] parent = new int[MAX_ALPHA_SIZE * 2];
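    // weight[] packs each node's frequency into the upper 24 bits and its
    // current depth into the low 8 bits; if any resulting code length exceeds
    // maxLen, the frequencies are roughly halved and the tree is rebuilt.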
for (int i = alphaSize; --i >= 0;) {
weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
}
for (boolean tooLong = true; tooLong;) {
tooLong = false;
int nNodes = alphaSize;
int nHeap = 0;
heap[0] = 0;
weight[0] = 0;
parent[0] = -2;
for (int i = 1; i <= alphaSize; i++) {
parent[i] = -1;
nHeap++;
heap[nHeap] = i;
int zz = nHeap;
int tmp = heap[zz];
while (weight[tmp] < weight[heap[zz >> 1]]) {
heap[zz] = heap[zz >> 1];
zz >>= 1;
}
heap[zz] = tmp;
}
// assert (nHeap < (MAX_ALPHA_SIZE + 2)) : nHeap;
while (nHeap > 1) {
int n1 = heap[1];
heap[1] = heap[nHeap];
nHeap--;
int yy = 0;
int zz = 1;
int tmp = heap[1];
while (true) {
yy = zz << 1;
if (yy > nHeap) {
break;
}
if ((yy < nHeap)
&& (weight[heap[yy + 1]] < weight[heap[yy]])) {
yy++;
}
if (weight[tmp] < weight[heap[yy]]) {
break;
}
heap[zz] = heap[yy];
zz = yy;
}
heap[zz] = tmp;
int n2 = heap[1];
heap[1] = heap[nHeap];
nHeap--;
yy = 0;
zz = 1;
tmp = heap[1];
while (true) {
yy = zz << 1;
if (yy > nHeap) {
break;
}
if ((yy < nHeap)
&& (weight[heap[yy + 1]] < weight[heap[yy]])) {
yy++;
}
if (weight[tmp] < weight[heap[yy]]) {
break;
}
heap[zz] = heap[yy];
zz = yy;
}
heap[zz] = tmp;
nNodes++;
parent[n1] = parent[n2] = nNodes;
final int weight_n1 = weight[n1];
final int weight_n2 = weight[n2];
weight[nNodes] = (((weight_n1 & 0xffffff00) + (weight_n2 & 0xffffff00)) | (1 + (((weight_n1 & 0x000000ff) > (weight_n2 & 0x000000ff)) ? (weight_n1 & 0x000000ff)
: (weight_n2 & 0x000000ff))));
parent[nNodes] = -1;
nHeap++;
heap[nHeap] = nNodes;
tmp = 0;
zz = nHeap;
tmp = heap[zz];
final int weight_tmp = weight[tmp];
while (weight_tmp < weight[heap[zz >> 1]]) {
heap[zz] = heap[zz >> 1];
zz >>= 1;
}
heap[zz] = tmp;
}
// assert (nNodes < (MAX_ALPHA_SIZE * 2)) : nNodes;
for (int i = 1; i <= alphaSize; i++) {
int j = 0;
int k = i;
for (int parent_k; (parent_k = parent[k]) >= 0;) {
k = parent_k;
j++;
}
len[i - 1] = (char) j;
if (j > maxLen) {
tooLong = true;
}
}
if (tooLong) {
for (int i = 1; i < alphaSize; i++) {
int j = weight[i] >> 8;
j = 1 + (j >> 1);
weight[i] = j << 8;
}
}
}
}
private static void hbMakeCodeLengths(final byte[] len, final int[] freq,
final Data dat, final int alphaSize, final int maxLen) {
/*
* Nodes and heap entries run from 1. Entry 0 for both the heap and
* nodes is a sentinel.
*/
final int[] heap = dat.heap;
final int[] weight = dat.weight;
final int[] parent = dat.parent;
for (int i = alphaSize; --i >= 0;) {
weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
}
for (boolean tooLong = true; tooLong;) {
tooLong = false;
int nNodes = alphaSize;
int nHeap = 0;
heap[0] = 0;
weight[0] = 0;
parent[0] = -2;
for (int i = 1; i <= alphaSize; i++) {
parent[i] = -1;
nHeap++;
heap[nHeap] = i;
int zz = nHeap;
int tmp = heap[zz];
while (weight[tmp] < weight[heap[zz >> 1]]) {
heap[zz] = heap[zz >> 1];
zz >>= 1;
}
heap[zz] = tmp;
}
while (nHeap > 1) {
int n1 = heap[1];
heap[1] = heap[nHeap];
nHeap--;
int yy = 0;
int zz = 1;
int tmp = heap[1];
while (true) {
yy = zz << 1;
if (yy > nHeap) {
break;
}
if ((yy < nHeap)
&& (weight[heap[yy + 1]] < weight[heap[yy]])) {
yy++;
}
if (weight[tmp] < weight[heap[yy]]) {
break;
}
heap[zz] = heap[yy];
zz = yy;
}
heap[zz] = tmp;
int n2 = heap[1];
heap[1] = heap[nHeap];
nHeap--;
yy = 0;
zz = 1;
tmp = heap[1];
while (true) {
yy = zz << 1;
if (yy > nHeap) {
break;
}
if ((yy < nHeap)
&& (weight[heap[yy + 1]] < weight[heap[yy]])) {
yy++;
}
if (weight[tmp] < weight[heap[yy]]) {
break;
}
heap[zz] = heap[yy];
zz = yy;
}
heap[zz] = tmp;
nNodes++;
parent[n1] = parent[n2] = nNodes;
final int weight_n1 = weight[n1];
final int weight_n2 = weight[n2];
weight[nNodes] = ((weight_n1 & 0xffffff00) + (weight_n2 & 0xffffff00))
| (1 + (((weight_n1 & 0x000000ff) > (weight_n2 & 0x000000ff)) ? (weight_n1 & 0x000000ff)
: (weight_n2 & 0x000000ff)));
parent[nNodes] = -1;
nHeap++;
heap[nHeap] = nNodes;
tmp = 0;
zz = nHeap;
tmp = heap[zz];
final int weight_tmp = weight[tmp];
while (weight_tmp < weight[heap[zz >> 1]]) {
heap[zz] = heap[zz >> 1];
zz >>= 1;
}
heap[zz] = tmp;
}
for (int i = 1; i <= alphaSize; i++) {
int j = 0;
int k = i;
for (int parent_k; (parent_k = parent[k]) >= 0;) {
k = parent_k;
j++;
}
len[i - 1] = (byte) j;
if (j > maxLen) {
tooLong = true;
}
}
if (tooLong) {
for (int i = 1; i < alphaSize; i++) {
int j = weight[i] >> 8;
j = 1 + (j >> 1);
weight[i] = j << 8;
}
}
}
}
/**
* Index of the last char in the block, so the block size == last + 1.
*/
private int last;
/**
* Index in fmap[] of original string after sorting.
*/
private int origPtr;
/**
* Always: in the range 0 .. 9. The current block size is 100000 * this
* number.
*/
private final int blockSize100k;
private boolean blockRandomised;
private int bsBuff;
private int bsLive;
private final CRC crc = new CRC();
private int nInUse;
private int nMTF;
/*
* Used when sorting. If too many long comparisons happen, we stop sorting,
* randomise the block slightly, and try again.
*/
private int workDone;
private int workLimit;
private boolean firstAttempt;
private int currentChar = -1;
private int runLength = 0;
private int blockCRC;
private int combinedCRC;
private int allowableBlockSize;
/**
* All memory intensive stuff.
*/
private CBZip2OutputStream.Data data;
private OutputStream out;
/**
* Chooses a blocksize based on the given length of the data to compress.
*
* @return The blocksize, between {@link #MIN_BLOCKSIZE} and
* {@link #MAX_BLOCKSIZE} both inclusive. For a negative
* <tt>inputLength</tt> this method returns <tt>MAX_BLOCKSIZE</tt>
* always.
*
* @param inputLength
* The length of the data which will be compressed by
* <tt>CBZip2OutputStream</tt>.
*/
public static int chooseBlockSize(long inputLength) {
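    // Roughly one extra 100k block unit per 132000 input bytes, clamped to
    // the 1..9 range; e.g. a 500000-byte input yields 500000 / 132000 + 1 = 4.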
return (inputLength > 0) ? (int) Math
.min((inputLength / 132000) + 1, 9) : MAX_BLOCKSIZE;
}
/**
* Constructs a new <tt>CBZip2OutputStream</tt> with a blocksize of 900k.
*
* <p>
   * <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
* bytes <tt>"BZ"</tt> to the specified stream prior to calling this
* constructor.
* </p>
*
   * @param out
* the destination stream.
*
* @throws IOException
* if an I/O error occurs in the specified stream.
* @throws NullPointerException
* if <code>out == null</code>.
*/
public CBZip2OutputStream(final OutputStream out) throws IOException {
this(out, MAX_BLOCKSIZE);
}
/**
* Constructs a new <tt>CBZip2OutputStream</tt> with specified blocksize.
*
* <p>
   * <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
* bytes <tt>"BZ"</tt> to the specified stream prior to calling this
* constructor.
* </p>
*
*
* @param out
* the destination stream.
* @param blockSize
* the blockSize as 100k units.
*
* @throws IOException
* if an I/O error occurs in the specified stream.
* @throws IllegalArgumentException
* if <code>(blockSize < 1) || (blockSize > 9)</code>.
* @throws NullPointerException
* if <code>out == null</code>.
*
* @see #MIN_BLOCKSIZE
* @see #MAX_BLOCKSIZE
*/
public CBZip2OutputStream(final OutputStream out, final int blockSize)
throws IOException {
super();
if (blockSize < 1) {
throw new IllegalArgumentException("blockSize(" + blockSize
+ ") < 1");
}
if (blockSize > 9) {
throw new IllegalArgumentException("blockSize(" + blockSize
+ ") > 9");
}
this.blockSize100k = blockSize;
this.out = out;
init();
}
@Override
public void write(final int b) throws IOException {
if (this.out != null) {
write0(b);
} else {
throw new IOException("closed");
}
}
private void writeRun() throws IOException {
final int lastShadow = this.last;
if (lastShadow < this.allowableBlockSize) {
final int currentCharShadow = this.currentChar;
final Data dataShadow = this.data;
dataShadow.inUse[currentCharShadow] = true;
final byte ch = (byte) currentCharShadow;
int runLengthShadow = this.runLength;
this.crc.updateCRC(currentCharShadow, runLengthShadow);
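      // BZip2's initial RLE stage: runs of one to three bytes are stored
      // literally; runs of four or more are stored as four literal copies
      // followed by a byte holding (runLength - 4).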
switch (runLengthShadow) {
case 1:
dataShadow.block[lastShadow + 2] = ch;
this.last = lastShadow + 1;
break;
case 2:
dataShadow.block[lastShadow + 2] = ch;
dataShadow.block[lastShadow + 3] = ch;
this.last = lastShadow + 2;
break;
case 3: {
final byte[] block = dataShadow.block;
block[lastShadow + 2] = ch;
block[lastShadow + 3] = ch;
block[lastShadow + 4] = ch;
this.last = lastShadow + 3;
}
break;
default: {
runLengthShadow -= 4;
dataShadow.inUse[runLengthShadow] = true;
final byte[] block = dataShadow.block;
block[lastShadow + 2] = ch;
block[lastShadow + 3] = ch;
block[lastShadow + 4] = ch;
block[lastShadow + 5] = ch;
block[lastShadow + 6] = (byte) runLengthShadow;
this.last = lastShadow + 5;
}
break;
}
} else {
endBlock();
initBlock();
writeRun();
}
}
/**
   * Overridden to close the stream.
*/
@Override
protected void finalize() throws Throwable {
finish();
super.finalize();
}
public void finish() throws IOException {
if (out != null) {
try {
if (this.runLength > 0) {
writeRun();
}
this.currentChar = -1;
endBlock();
endCompression();
} finally {
this.out = null;
this.data = null;
}
}
}
@Override
public void close() throws IOException {
if (out != null) {
OutputStream outShadow = this.out;
try {
finish();
outShadow.close();
outShadow = null;
} finally {
IOUtils.closeStream(outShadow);
}
}
}
@Override
public void flush() throws IOException {
OutputStream outShadow = this.out;
if (outShadow != null) {
outShadow.flush();
}
}
private void init() throws IOException {
// write magic: done by caller who created this stream
// this.out.write('B');
// this.out.write('Z');
this.data = new Data(this.blockSize100k);
/*
* Write `magic' bytes h indicating file-format == huffmanised, followed
* by a digit indicating blockSize100k.
*/
bsPutUByte('h');
bsPutUByte('0' + this.blockSize100k);
this.combinedCRC = 0;
initBlock();
}
private void initBlock() {
// blockNo++;
this.crc.initialiseCRC();
this.last = -1;
// ch = 0;
boolean[] inUse = this.data.inUse;
for (int i = 256; --i >= 0;) {
inUse[i] = false;
}
/* 20 is just a paranoia constant */
this.allowableBlockSize = (this.blockSize100k * BZip2Constants.baseBlockSize) - 20;
}
private void endBlock() throws IOException {
this.blockCRC = this.crc.getFinalCRC();
this.combinedCRC = (this.combinedCRC << 1) | (this.combinedCRC >>> 31);
this.combinedCRC ^= this.blockCRC;
// empty block at end of file
if (this.last == -1) {
return;
}
/* sort the block and establish posn of original string */
blockSort();
/*
* A 6-byte block header, the value chosen arbitrarily as 0x314159265359
* :-). A 32 bit value does not really give a strong enough guarantee
* that the value will not appear by chance in the compressed
* datastream. Worst-case probability of this event, for a 900k block,
* is about 2.0e-3 for 32 bits, 1.0e-5 for 40 bits and 4.0e-8 for 48
* bits. For a compressed file of size 100Gb -- about 100000 blocks --
* only a 48-bit marker will do. NB: normal compression/ decompression
* donot rely on these statistical properties. They are only important
* when trying to recover blocks from damaged files.
*/
bsPutUByte(0x31);
bsPutUByte(0x41);
bsPutUByte(0x59);
bsPutUByte(0x26);
bsPutUByte(0x53);
bsPutUByte(0x59);
/* Now the block's CRC, so it is in a known place. */
bsPutInt(this.blockCRC);
/* Now a single bit indicating randomisation. */
if (this.blockRandomised) {
bsW(1, 1);
} else {
bsW(1, 0);
}
/* Finally, block's contents proper. */
moveToFrontCodeAndSend();
}
private void endCompression() throws IOException {
/*
* Now another magic 48-bit number, 0x177245385090, to indicate the end
* of the last block. (sqrt(pi), if you want to know. I did want to use
* e, but it contains too much repetition -- 27 18 28 18 28 46 -- for me
* to feel statistically comfortable. Call me paranoid.)
*/
bsPutUByte(0x17);
bsPutUByte(0x72);
bsPutUByte(0x45);
bsPutUByte(0x38);
bsPutUByte(0x50);
bsPutUByte(0x90);
bsPutInt(this.combinedCRC);
bsFinishedWithStream();
}
/**
* Returns the blocksize parameter specified at construction time.
*/
public final int getBlockSize() {
return this.blockSize100k;
}
@Override
public void write(final byte[] buf, int offs, final int len)
throws IOException {
if (offs < 0) {
throw new IndexOutOfBoundsException("offs(" + offs + ") < 0.");
}
if (len < 0) {
throw new IndexOutOfBoundsException("len(" + len + ") < 0.");
}
if (offs + len > buf.length) {
throw new IndexOutOfBoundsException("offs(" + offs + ") + len("
+ len + ") > buf.length(" + buf.length + ").");
}
if (this.out == null) {
throw new IOException("stream closed");
}
for (int hi = offs + len; offs < hi;) {
write0(buf[offs++]);
}
}
private void write0(int b) throws IOException {
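    // Accumulate runs of identical bytes; a pending run is flushed by
    // writeRun() when a different byte arrives or when the run length
    // reaches 255.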
if (this.currentChar != -1) {
b &= 0xff;
if (this.currentChar == b) {
if (++this.runLength > 254) {
writeRun();
this.currentChar = -1;
this.runLength = 0;
}
// else nothing to do
} else {
writeRun();
this.runLength = 1;
this.currentChar = b;
}
} else {
this.currentChar = b & 0xff;
this.runLength++;
}
}
private static void hbAssignCodes(final int[] code, final byte[] length,
final int minLen, final int maxLen, final int alphaSize) {
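    // Canonical Huffman code assignment: walk the code lengths from shortest
    // to longest, hand out consecutive code values within each length, and
    // left-shift the running value by one when moving to the next length.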
int vec = 0;
for (int n = minLen; n <= maxLen; n++) {
for (int i = 0; i < alphaSize; i++) {
if ((length[i] & 0xff) == n) {
code[i] = vec;
vec++;
}
}
vec <<= 1;
}
}
private void bsFinishedWithStream() throws IOException {
while (this.bsLive > 0) {
int ch = this.bsBuff >> 24;
this.out.write(ch); // write 8-bit
this.bsBuff <<= 8;
this.bsLive -= 8;
}
}
private void bsW(final int n, final int v) throws IOException {
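    // Append the low n bits of v to the bit buffer, most significant bit
    // first, after flushing any whole bytes already accumulated at the top
    // of bsBuff.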
final OutputStream outShadow = this.out;
int bsLiveShadow = this.bsLive;
int bsBuffShadow = this.bsBuff;
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24); // write 8-bit
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
this.bsBuff = bsBuffShadow | (v << (32 - bsLiveShadow - n));
this.bsLive = bsLiveShadow + n;
}
private void bsPutUByte(final int c) throws IOException {
bsW(8, c);
}
private void bsPutInt(final int u) throws IOException {
bsW(8, (u >> 24) & 0xff);
bsW(8, (u >> 16) & 0xff);
bsW(8, (u >> 8) & 0xff);
bsW(8, u & 0xff);
}
private void sendMTFValues() throws IOException {
final byte[][] len = this.data.sendMTFValues_len;
final int alphaSize = this.nInUse + 2;
for (int t = N_GROUPS; --t >= 0;) {
byte[] len_t = len[t];
for (int v = alphaSize; --v >= 0;) {
len_t[v] = GREATER_ICOST;
}
}
/* Decide how many coding tables to use */
// assert (this.nMTF > 0) : this.nMTF;
final int nGroups = (this.nMTF < 200) ? 2 : (this.nMTF < 600) ? 3
: (this.nMTF < 1200) ? 4 : (this.nMTF < 2400) ? 5 : 6;
/* Generate an initial set of coding tables */
sendMTFValues0(nGroups, alphaSize);
/*
* Iterate up to N_ITERS times to improve the tables.
*/
final int nSelectors = sendMTFValues1(nGroups, alphaSize);
/* Compute MTF values for the selectors. */
sendMTFValues2(nGroups, nSelectors);
/* Assign actual codes for the tables. */
sendMTFValues3(nGroups, alphaSize);
/* Transmit the mapping table. */
sendMTFValues4();
/* Now the selectors. */
sendMTFValues5(nGroups, nSelectors);
/* Now the coding tables. */
sendMTFValues6(nGroups, alphaSize);
/* And finally, the block data proper */
sendMTFValues7(nSelectors);
}
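  /*
   * Builds the initial coding tables: the MTF symbol frequencies are split
   * into nGroups contiguous ranges of roughly equal total frequency, and
   * each table is seeded with a low cost (LESSER_ICOST) inside its range
   * and a high cost (GREATER_ICOST) outside it.
   */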
private void sendMTFValues0(final int nGroups, final int alphaSize) {
final byte[][] len = this.data.sendMTFValues_len;
final int[] mtfFreq = this.data.mtfFreq;
int remF = this.nMTF;
int gs = 0;
for (int nPart = nGroups; nPart > 0; nPart--) {
final int tFreq = remF / nPart;
int ge = gs - 1;
int aFreq = 0;
for (final int a = alphaSize - 1; (aFreq < tFreq) && (ge < a);) {
aFreq += mtfFreq[++ge];
}
if ((ge > gs) && (nPart != nGroups) && (nPart != 1)
&& (((nGroups - nPart) & 1) != 0)) {
aFreq -= mtfFreq[ge--];
}
final byte[] len_np = len[nPart - 1];
for (int v = alphaSize; --v >= 0;) {
if ((v >= gs) && (v <= ge)) {
len_np[v] = LESSER_ICOST;
} else {
len_np[v] = GREATER_ICOST;
}
}
gs = ge + 1;
remF -= aFreq;
}
}
private int sendMTFValues1(final int nGroups, final int alphaSize) {
final Data dataShadow = this.data;
final int[][] rfreq = dataShadow.sendMTFValues_rfreq;
final int[] fave = dataShadow.sendMTFValues_fave;
final short[] cost = dataShadow.sendMTFValues_cost;
final char[] sfmap = dataShadow.sfmap;
final byte[] selector = dataShadow.selector;
final byte[][] len = dataShadow.sendMTFValues_len;
final byte[] len_0 = len[0];
final byte[] len_1 = len[1];
final byte[] len_2 = len[2];
final byte[] len_3 = len[3];
final byte[] len_4 = len[4];
final byte[] len_5 = len[5];
final int nMTFShadow = this.nMTF;
int nSelectors = 0;
for (int iter = 0; iter < N_ITERS; iter++) {
for (int t = nGroups; --t >= 0;) {
fave[t] = 0;
int[] rfreqt = rfreq[t];
for (int i = alphaSize; --i >= 0;) {
rfreqt[i] = 0;
}
}
nSelectors = 0;
for (int gs = 0; gs < this.nMTF;) {
/* Set group start & end marks. */
/*
* Calculate the cost of this group as coded by each of the
* coding tables.
*/
final int ge = Math.min(gs + G_SIZE - 1, nMTFShadow - 1);
if (nGroups == N_GROUPS) {
// unrolled version of the else-block
short cost0 = 0;
short cost1 = 0;
short cost2 = 0;
short cost3 = 0;
short cost4 = 0;
short cost5 = 0;
for (int i = gs; i <= ge; i++) {
final int icv = sfmap[i];
cost0 += len_0[icv] & 0xff;
cost1 += len_1[icv] & 0xff;
cost2 += len_2[icv] & 0xff;
cost3 += len_3[icv] & 0xff;
cost4 += len_4[icv] & 0xff;
cost5 += len_5[icv] & 0xff;
}
cost[0] = cost0;
cost[1] = cost1;
cost[2] = cost2;
cost[3] = cost3;
cost[4] = cost4;
cost[5] = cost5;
} else {
for (int t = nGroups; --t >= 0;) {
cost[t] = 0;
}
for (int i = gs; i <= ge; i++) {
final int icv = sfmap[i];
for (int t = nGroups; --t >= 0;) {
cost[t] += len[t][icv] & 0xff;
}
}
}
/*
* Find the coding table which is best for this group, and
* record its identity in the selector table.
*/
int bt = -1;
for (int t = nGroups, bc = 999999999; --t >= 0;) {
final int cost_t = cost[t];
if (cost_t < bc) {
bc = cost_t;
bt = t;
}
}
fave[bt]++;
selector[nSelectors] = (byte) bt;
nSelectors++;
/*
* Increment the symbol frequencies for the selected table.
*/
final int[] rfreq_bt = rfreq[bt];
for (int i = gs; i <= ge; i++) {
rfreq_bt[sfmap[i]]++;
}
gs = ge + 1;
}
/*
* Recompute the tables based on the accumulated frequencies.
*/
for (int t = 0; t < nGroups; t++) {
hbMakeCodeLengths(len[t], rfreq[t], this.data, alphaSize, 20);
}
}
return nSelectors;
}
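  /*
   * Move-to-front encodes the selector array: each selector is replaced by
   * its current position in the pos list, and that entry is then moved to
   * the front.
   */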
private void sendMTFValues2(final int nGroups, final int nSelectors) {
// assert (nGroups < 8) : nGroups;
final Data dataShadow = this.data;
byte[] pos = dataShadow.sendMTFValues2_pos;
for (int i = nGroups; --i >= 0;) {
pos[i] = (byte) i;
}
for (int i = 0; i < nSelectors; i++) {
final byte ll_i = dataShadow.selector[i];
byte tmp = pos[0];
int j = 0;
while (ll_i != tmp) {
j++;
byte tmp2 = tmp;
tmp = pos[j];
pos[j] = tmp2;
}
pos[0] = tmp;
dataShadow.selectorMtf[i] = (byte) j;
}
}
private void sendMTFValues3(final int nGroups, final int alphaSize) {
int[][] code = this.data.sendMTFValues_code;
byte[][] len = this.data.sendMTFValues_len;
for (int t = 0; t < nGroups; t++) {
int minLen = 32;
int maxLen = 0;
final byte[] len_t = len[t];
for (int i = alphaSize; --i >= 0;) {
final int l = len_t[i] & 0xff;
if (l > maxLen) {
maxLen = l;
}
if (l < minLen) {
minLen = l;
}
}
// assert (maxLen <= 20) : maxLen;
// assert (minLen >= 1) : minLen;
hbAssignCodes(code[t], len[t], minLen, maxLen, alphaSize);
}
}
private void sendMTFValues4() throws IOException {
final boolean[] inUse = this.data.inUse;
final boolean[] inUse16 = this.data.sentMTFValues4_inUse16;
for (int i = 16; --i >= 0;) {
inUse16[i] = false;
final int i16 = i * 16;
for (int j = 16; --j >= 0;) {
if (inUse[i16 + j]) {
inUse16[i] = true;
}
}
}
for (int i = 0; i < 16; i++) {
bsW(1, inUse16[i] ? 1 : 0);
}
final OutputStream outShadow = this.out;
int bsLiveShadow = this.bsLive;
int bsBuffShadow = this.bsBuff;
for (int i = 0; i < 16; i++) {
if (inUse16[i]) {
final int i16 = i * 16;
for (int j = 0; j < 16; j++) {
// inlined: bsW(1, inUse[i16 + j] ? 1 : 0);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24); // write 8-bit
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
if (inUse[i16 + j]) {
bsBuffShadow |= 1 << (32 - bsLiveShadow - 1);
}
bsLiveShadow++;
}
}
}
this.bsBuff = bsBuffShadow;
this.bsLive = bsLiveShadow;
}
private void sendMTFValues5(final int nGroups, final int nSelectors)
throws IOException {
bsW(3, nGroups);
bsW(15, nSelectors);
final OutputStream outShadow = this.out;
final byte[] selectorMtf = this.data.selectorMtf;
int bsLiveShadow = this.bsLive;
int bsBuffShadow = this.bsBuff;
for (int i = 0; i < nSelectors; i++) {
for (int j = 0, hj = selectorMtf[i] & 0xff; j < hj; j++) {
// inlined: bsW(1, 1);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24);
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
bsBuffShadow |= 1 << (32 - bsLiveShadow - 1);
bsLiveShadow++;
}
// inlined: bsW(1, 0);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24);
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
// bsBuffShadow |= 0 << (32 - bsLiveShadow - 1);
bsLiveShadow++;
}
this.bsBuff = bsBuffShadow;
this.bsLive = bsLiveShadow;
}
private void sendMTFValues6(final int nGroups, final int alphaSize)
throws IOException {
final byte[][] len = this.data.sendMTFValues_len;
final OutputStream outShadow = this.out;
int bsLiveShadow = this.bsLive;
int bsBuffShadow = this.bsBuff;
for (int t = 0; t < nGroups; t++) {
byte[] len_t = len[t];
int curr = len_t[0] & 0xff;
// inlined: bsW(5, curr);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24); // write 8-bit
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
bsBuffShadow |= curr << (32 - bsLiveShadow - 5);
bsLiveShadow += 5;
for (int i = 0; i < alphaSize; i++) {
int lti = len_t[i] & 0xff;
while (curr < lti) {
// inlined: bsW(2, 2);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24); // write 8-bit
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
bsBuffShadow |= 2 << (32 - bsLiveShadow - 2);
bsLiveShadow += 2;
curr++; /* 10 */
}
while (curr > lti) {
// inlined: bsW(2, 3);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24); // write 8-bit
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
bsBuffShadow |= 3 << (32 - bsLiveShadow - 2);
bsLiveShadow += 2;
curr--; /* 11 */
}
// inlined: bsW(1, 0);
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24); // write 8-bit
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
// bsBuffShadow |= 0 << (32 - bsLiveShadow - 1);
bsLiveShadow++;
}
}
this.bsBuff = bsBuffShadow;
this.bsLive = bsLiveShadow;
}
private void sendMTFValues7(final int nSelectors) throws IOException {
final Data dataShadow = this.data;
final byte[][] len = dataShadow.sendMTFValues_len;
final int[][] code = dataShadow.sendMTFValues_code;
final OutputStream outShadow = this.out;
final byte[] selector = dataShadow.selector;
final char[] sfmap = dataShadow.sfmap;
final int nMTFShadow = this.nMTF;
int selCtr = 0;
int bsLiveShadow = this.bsLive;
int bsBuffShadow = this.bsBuff;
for (int gs = 0; gs < nMTFShadow;) {
final int ge = Math.min(gs + G_SIZE - 1, nMTFShadow - 1);
final int selector_selCtr = selector[selCtr] & 0xff;
final int[] code_selCtr = code[selector_selCtr];
final byte[] len_selCtr = len[selector_selCtr];
while (gs <= ge) {
final int sfmap_i = sfmap[gs];
//
// inlined: bsW(len_selCtr[sfmap_i] & 0xff,
// code_selCtr[sfmap_i]);
//
while (bsLiveShadow >= 8) {
outShadow.write(bsBuffShadow >> 24);
bsBuffShadow <<= 8;
bsLiveShadow -= 8;
}
final int n = len_selCtr[sfmap_i] & 0xFF;
bsBuffShadow |= code_selCtr[sfmap_i] << (32 - bsLiveShadow - n);
bsLiveShadow += n;
gs++;
}
gs = ge + 1;
selCtr++;
}
this.bsBuff = bsBuffShadow;
this.bsLive = bsLiveShadow;
}
private void moveToFrontCodeAndSend() throws IOException {
bsW(24, this.origPtr);
generateMTFValues();
sendMTFValues();
}
/**
* This is the most hammered method of this class.
*
* <p>
* This is the version using unrolled loops. Normally I never use such ones
   * in Java code. The unrolling has shown a noticeable performance improvement
* on JRE 1.4.2 (Linux i586 / HotSpot Client). Of course it depends on the
* JIT compiler of the vm.
* </p>
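   * <p>
   * The method shell-sorts the <code>fmap</code> pointers in the range
   * <code>[lo, hi]</code> using the increments in <code>INCS</code>,
   * comparing suffixes starting at depth <code>d</code>; the inlined
   * <code>HAMMER</code> loop is an unrolled version of the suffix
   * comparison <code>mainGtU</code>.
   * </p>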
*/
private boolean mainSimpleSort(final Data dataShadow, final int lo,
final int hi, final int d) {
final int bigN = hi - lo + 1;
if (bigN < 2) {
return this.firstAttempt && (this.workDone > this.workLimit);
}
int hp = 0;
while (INCS[hp] < bigN) {
hp++;
}
final int[] fmap = dataShadow.fmap;
final char[] quadrant = dataShadow.quadrant;
final byte[] block = dataShadow.block;
final int lastShadow = this.last;
final int lastPlus1 = lastShadow + 1;
final boolean firstAttemptShadow = this.firstAttempt;
final int workLimitShadow = this.workLimit;
int workDoneShadow = this.workDone;
// Following block contains unrolled code which could be shortened by
// coding it in additional loops.
HP: while (--hp >= 0) {
final int h = INCS[hp];
final int mj = lo + h - 1;
for (int i = lo + h; i <= hi;) {
// copy
for (int k = 3; (i <= hi) && (--k >= 0); i++) {
final int v = fmap[i];
final int vd = v + d;
int j = i;
// for (int a;
// (j > mj) && mainGtU((a = fmap[j - h]) + d, vd,
// block, quadrant, lastShadow);
// j -= h) {
// fmap[j] = a;
// }
//
// unrolled version:
// start inline mainGTU
boolean onceRunned = false;
int a = 0;
HAMMER: while (true) {
if (onceRunned) {
fmap[j] = a;
if ((j -= h) <= mj) {
break HAMMER;
}
} else {
onceRunned = true;
}
a = fmap[j - h];
int i1 = a + d;
int i2 = vd;
// following could be done in a loop, but
// unrolled it for performance:
if (block[i1 + 1] == block[i2 + 1]) {
if (block[i1 + 2] == block[i2 + 2]) {
if (block[i1 + 3] == block[i2 + 3]) {
if (block[i1 + 4] == block[i2 + 4]) {
if (block[i1 + 5] == block[i2 + 5]) {
if (block[(i1 += 6)] == block[(i2 += 6)]) {
int x = lastShadow;
X: while (x > 0) {
x -= 4;
if (block[i1 + 1] == block[i2 + 1]) {
if (quadrant[i1] == quadrant[i2]) {
if (block[i1 + 2] == block[i2 + 2]) {
if (quadrant[i1 + 1] == quadrant[i2 + 1]) {
if (block[i1 + 3] == block[i2 + 3]) {
if (quadrant[i1 + 2] == quadrant[i2 + 2]) {
if (block[i1 + 4] == block[i2 + 4]) {
if (quadrant[i1 + 3] == quadrant[i2 + 3]) {
if ((i1 += 4) >= lastPlus1) {
i1 -= lastPlus1;
}
if ((i2 += 4) >= lastPlus1) {
i2 -= lastPlus1;
}
workDoneShadow++;
continue X;
} else if ((quadrant[i1 + 3] > quadrant[i2 + 3])) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((quadrant[i1 + 2] > quadrant[i2 + 2])) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((quadrant[i1 + 1] > quadrant[i2 + 1])) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((quadrant[i1] > quadrant[i2])) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
}
break HAMMER;
} // while x > 0
else {
if ((block[i1] & 0xff) > (block[i2] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
}
} else if ((block[i1 + 5] & 0xff) > (block[i2 + 5] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
continue HAMMER;
} else {
break HAMMER;
}
} // HAMMER
// end inline mainGTU
fmap[j] = v;
}
if (firstAttemptShadow && (i <= hi)
&& (workDoneShadow > workLimitShadow)) {
break HP;
}
}
}
this.workDone = workDoneShadow;
return firstAttemptShadow && (workDoneShadow > workLimitShadow);
}
private static void vswap(int[] fmap, int p1, int p2, int n) {
n += p1;
while (p1 < n) {
int t = fmap[p1];
fmap[p1++] = fmap[p2];
fmap[p2++] = t;
}
}
private static byte med3(byte a, byte b, byte c) {
return (a < b) ? (b < c ? b : a < c ? c : a) : (b > c ? b : a > c ? c
: a);
}
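  /*
   * Performs the Burrows-Wheeler block sort. The first attempt is bounded
   * by workLimit; if the input proves pathological (workDone exceeds the
   * limit), the block is randomised and sorted again without a limit.
   * origPtr is set to the position of the original string in the sorted
   * order.
   */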
private void blockSort() {
this.workLimit = WORK_FACTOR * this.last;
this.workDone = 0;
this.blockRandomised = false;
this.firstAttempt = true;
mainSort();
if (this.firstAttempt && (this.workDone > this.workLimit)) {
randomiseBlock();
this.workLimit = this.workDone = 0;
this.firstAttempt = false;
mainSort();
}
int[] fmap = this.data.fmap;
this.origPtr = -1;
for (int i = 0, lastShadow = this.last; i <= lastShadow; i++) {
if (fmap[i] == 0) {
this.origPtr = i;
break;
}
}
// assert (this.origPtr != -1) : this.origPtr;
}
/**
* Method "mainQSort3", file "blocksort.c", BZip2 1.0.2
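   *
   * An iterative three-way quicksort over the fmap pointers, driven by the
   * explicit stacks stack_ll, stack_hh and stack_dd; small or deep ranges
   * fall back to mainSimpleSort().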
*/
private void mainQSort3(final Data dataShadow, final int loSt,
final int hiSt, final int dSt) {
final int[] stack_ll = dataShadow.stack_ll;
final int[] stack_hh = dataShadow.stack_hh;
final int[] stack_dd = dataShadow.stack_dd;
final int[] fmap = dataShadow.fmap;
final byte[] block = dataShadow.block;
stack_ll[0] = loSt;
stack_hh[0] = hiSt;
stack_dd[0] = dSt;
for (int sp = 1; --sp >= 0;) {
final int lo = stack_ll[sp];
final int hi = stack_hh[sp];
final int d = stack_dd[sp];
if ((hi - lo < SMALL_THRESH) || (d > DEPTH_THRESH)) {
if (mainSimpleSort(dataShadow, lo, hi, d)) {
return;
}
} else {
final int d1 = d + 1;
final int med = med3(block[fmap[lo] + d1],
block[fmap[hi] + d1], block[fmap[(lo + hi) >>> 1] + d1]) & 0xff;
int unLo = lo;
int unHi = hi;
int ltLo = lo;
int gtHi = hi;
while (true) {
while (unLo <= unHi) {
final int n = ((int) block[fmap[unLo] + d1] & 0xff)
- med;
if (n == 0) {
final int temp = fmap[unLo];
fmap[unLo++] = fmap[ltLo];
fmap[ltLo++] = temp;
} else if (n < 0) {
unLo++;
} else {
break;
}
}
while (unLo <= unHi) {
final int n = ((int) block[fmap[unHi] + d1] & 0xff)
- med;
if (n == 0) {
final int temp = fmap[unHi];
fmap[unHi--] = fmap[gtHi];
fmap[gtHi--] = temp;
} else if (n > 0) {
unHi--;
} else {
break;
}
}
if (unLo <= unHi) {
final int temp = fmap[unLo];
fmap[unLo++] = fmap[unHi];
fmap[unHi--] = temp;
} else {
break;
}
}
if (gtHi < ltLo) {
stack_ll[sp] = lo;
stack_hh[sp] = hi;
stack_dd[sp] = d1;
sp++;
} else {
int n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo)
: (unLo - ltLo);
vswap(fmap, lo, unLo - n, n);
int m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi)
: (gtHi - unHi);
vswap(fmap, unLo, hi - m + 1, m);
n = lo + unLo - ltLo - 1;
m = hi - (gtHi - unHi) + 1;
stack_ll[sp] = lo;
stack_hh[sp] = n;
stack_dd[sp] = d;
sp++;
stack_ll[sp] = n + 1;
stack_hh[sp] = m - 1;
stack_dd[sp] = d1;
sp++;
stack_ll[sp] = m;
stack_hh[sp] = hi;
stack_dd[sp] = d;
sp++;
}
}
}
}
private void mainSort() {
final Data dataShadow = this.data;
final int[] runningOrder = dataShadow.mainSort_runningOrder;
final int[] copy = dataShadow.mainSort_copy;
final boolean[] bigDone = dataShadow.mainSort_bigDone;
final int[] ftab = dataShadow.ftab;
final byte[] block = dataShadow.block;
final int[] fmap = dataShadow.fmap;
final char[] quadrant = dataShadow.quadrant;
final int lastShadow = this.last;
final int workLimitShadow = this.workLimit;
final boolean firstAttemptShadow = this.firstAttempt;
// Set up the 2-byte frequency table
for (int i = 65537; --i >= 0;) {
ftab[i] = 0;
}
/*
* In the various block-sized structures, live data runs from 0 to
* last+NUM_OVERSHOOT_BYTES inclusive. First, set up the overshoot area
* for block.
*/
for (int i = 0; i < NUM_OVERSHOOT_BYTES; i++) {
block[lastShadow + i + 2] = block[(i % (lastShadow + 1)) + 1];
}
    for (int i = lastShadow + NUM_OVERSHOOT_BYTES + 1; --i >= 0;) {
quadrant[i] = 0;
}
block[0] = block[lastShadow + 1];
// Complete the initial radix sort:
int c1 = block[0] & 0xff;
for (int i = 0; i <= lastShadow; i++) {
final int c2 = block[i + 1] & 0xff;
ftab[(c1 << 8) + c2]++;
c1 = c2;
}
for (int i = 1; i <= 65536; i++)
ftab[i] += ftab[i - 1];
c1 = block[1] & 0xff;
for (int i = 0; i < lastShadow; i++) {
final int c2 = block[i + 2] & 0xff;
fmap[--ftab[(c1 << 8) + c2]] = i;
c1 = c2;
}
fmap[--ftab[((block[lastShadow + 1] & 0xff) << 8) + (block[1] & 0xff)]] = lastShadow;
/*
* Now ftab contains the first loc of every small bucket. Calculate the
* running order, from smallest to largest big bucket.
*/
for (int i = 256; --i >= 0;) {
bigDone[i] = false;
runningOrder[i] = i;
}
for (int h = 364; h != 1;) {
h /= 3;
for (int i = h; i <= 255; i++) {
final int vv = runningOrder[i];
final int a = ftab[(vv + 1) << 8] - ftab[vv << 8];
final int b = h - 1;
int j = i;
for (int ro = runningOrder[j - h]; (ftab[(ro + 1) << 8] - ftab[ro << 8]) > a; ro = runningOrder[j
- h]) {
runningOrder[j] = ro;
j -= h;
if (j <= b) {
break;
}
}
runningOrder[j] = vv;
}
}
/*
* The main sorting loop.
*/
for (int i = 0; i <= 255; i++) {
/*
* Process big buckets, starting with the least full.
*/
final int ss = runningOrder[i];
// Step 1:
/*
* Complete the big bucket [ss] by quicksorting any unsorted small
* buckets [ss, j]. Hopefully previous pointer-scanning phases have
* already completed many of the small buckets [ss, j], so we don't
* have to sort them at all.
*/
for (int j = 0; j <= 255; j++) {
final int sb = (ss << 8) + j;
final int ftab_sb = ftab[sb];
if ((ftab_sb & SETMASK) != SETMASK) {
final int lo = ftab_sb & CLEARMASK;
final int hi = (ftab[sb + 1] & CLEARMASK) - 1;
if (hi > lo) {
mainQSort3(dataShadow, lo, hi, 2);
if (firstAttemptShadow
&& (this.workDone > workLimitShadow)) {
return;
}
}
ftab[sb] = ftab_sb | SETMASK;
}
}
// Step 2:
// Now scan this big bucket so as to synthesise the
// sorted order for small buckets [t, ss] for all t != ss.
for (int j = 0; j <= 255; j++) {
copy[j] = ftab[(j << 8) + ss] & CLEARMASK;
}
for (int j = ftab[ss << 8] & CLEARMASK, hj = (ftab[(ss + 1) << 8] & CLEARMASK); j < hj; j++) {
final int fmap_j = fmap[j];
c1 = block[fmap_j] & 0xff;
if (!bigDone[c1]) {
fmap[copy[c1]] = (fmap_j == 0) ? lastShadow : (fmap_j - 1);
copy[c1]++;
}
}
for (int j = 256; --j >= 0;)
ftab[(j << 8) + ss] |= SETMASK;
// Step 3:
/*
* The ss big bucket is now done. Record this fact, and update the
* quadrant descriptors. Remember to update quadrants in the
* overshoot area too, if necessary. The "if (i < 255)" test merely
* skips this updating for the last bucket processed, since updating
* for the last bucket is pointless.
*/
bigDone[ss] = true;
if (i < 255) {
final int bbStart = ftab[ss << 8] & CLEARMASK;
final int bbSize = (ftab[(ss + 1) << 8] & CLEARMASK) - bbStart;
int shifts = 0;
while ((bbSize >> shifts) > 65534) {
shifts++;
}
for (int j = 0; j < bbSize; j++) {
final int a2update = fmap[bbStart + j];
final char qVal = (char) (j >> shifts);
quadrant[a2update] = qVal;
if (a2update < NUM_OVERSHOOT_BYTES) {
quadrant[a2update + lastShadow + 1] = qVal;
}
}
}
}
}
private void randomiseBlock() {
final boolean[] inUse = this.data.inUse;
final byte[] block = this.data.block;
final int lastShadow = this.last;
for (int i = 256; --i >= 0;)
inUse[i] = false;
int rNToGo = 0;
int rTPos = 0;
for (int i = 0, j = 1; i <= lastShadow; i = j, j++) {
if (rNToGo == 0) {
rNToGo = (char) BZip2Constants.rNums[rTPos];
if (++rTPos == 512) {
rTPos = 0;
}
}
rNToGo--;
block[j] ^= ((rNToGo == 1) ? 1 : 0);
// handle 16 bit signed numbers
inUse[block[j] & 0xff] = true;
}
this.blockRandomised = true;
}
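  /*
   * Applies the move-to-front transform to the block-sorted data and
   * run-length encodes runs of zeros with the RUNA/RUNB symbols, writing
   * the resulting symbol stream into sfmap and its frequencies into
   * mtfFreq. The stream is terminated with the EOB symbol.
   */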
private void generateMTFValues() {
final int lastShadow = this.last;
final Data dataShadow = this.data;
final boolean[] inUse = dataShadow.inUse;
final byte[] block = dataShadow.block;
final int[] fmap = dataShadow.fmap;
final char[] sfmap = dataShadow.sfmap;
final int[] mtfFreq = dataShadow.mtfFreq;
final byte[] unseqToSeq = dataShadow.unseqToSeq;
final byte[] yy = dataShadow.generateMTFValues_yy;
// make maps
int nInUseShadow = 0;
for (int i = 0; i < 256; i++) {
if (inUse[i]) {
unseqToSeq[i] = (byte) nInUseShadow;
nInUseShadow++;
}
}
this.nInUse = nInUseShadow;
final int eob = nInUseShadow + 1;
for (int i = eob; i >= 0; i--) {
mtfFreq[i] = 0;
}
for (int i = nInUseShadow; --i >= 0;) {
yy[i] = (byte) i;
}
int wr = 0;
int zPend = 0;
for (int i = 0; i <= lastShadow; i++) {
final byte ll_i = unseqToSeq[block[fmap[i]] & 0xff];
byte tmp = yy[0];
int j = 0;
while (ll_i != tmp) {
j++;
byte tmp2 = tmp;
tmp = yy[j];
yy[j] = tmp2;
}
yy[0] = tmp;
if (j == 0) {
zPend++;
} else {
if (zPend > 0) {
zPend--;
while (true) {
if ((zPend & 1) == 0) {
sfmap[wr] = RUNA;
wr++;
mtfFreq[RUNA]++;
} else {
sfmap[wr] = RUNB;
wr++;
mtfFreq[RUNB]++;
}
if (zPend >= 2) {
zPend = (zPend - 2) >> 1;
} else {
break;
}
}
zPend = 0;
}
sfmap[wr] = (char) (j + 1);
wr++;
mtfFreq[j + 1]++;
}
}
if (zPend > 0) {
zPend--;
while (true) {
if ((zPend & 1) == 0) {
sfmap[wr] = RUNA;
wr++;
mtfFreq[RUNA]++;
} else {
sfmap[wr] = RUNB;
wr++;
mtfFreq[RUNB]++;
}
if (zPend >= 2) {
zPend = (zPend - 2) >> 1;
} else {
break;
}
}
}
sfmap[wr] = (char) eob;
mtfFreq[eob]++;
this.nMTF = wr + 1;
}
private static final class Data extends Object {
// with blockSize 900k
final boolean[] inUse = new boolean[256]; // 256 byte
final byte[] unseqToSeq = new byte[256]; // 256 byte
final int[] mtfFreq = new int[MAX_ALPHA_SIZE]; // 1032 byte
final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte
final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte
final byte[] generateMTFValues_yy = new byte[256]; // 256 byte
final byte[][] sendMTFValues_len = new byte[N_GROUPS][MAX_ALPHA_SIZE]; // 1548
// byte
final int[][] sendMTFValues_rfreq = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192
// byte
final int[] sendMTFValues_fave = new int[N_GROUPS]; // 24 byte
final short[] sendMTFValues_cost = new short[N_GROUPS]; // 12 byte
final int[][] sendMTFValues_code = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192
// byte
final byte[] sendMTFValues2_pos = new byte[N_GROUPS]; // 6 byte
final boolean[] sentMTFValues4_inUse16 = new boolean[16]; // 16 byte
final int[] stack_ll = new int[QSORT_STACK_SIZE]; // 4000 byte
final int[] stack_hh = new int[QSORT_STACK_SIZE]; // 4000 byte
final int[] stack_dd = new int[QSORT_STACK_SIZE]; // 4000 byte
final int[] mainSort_runningOrder = new int[256]; // 1024 byte
final int[] mainSort_copy = new int[256]; // 1024 byte
final boolean[] mainSort_bigDone = new boolean[256]; // 256 byte
final int[] heap = new int[MAX_ALPHA_SIZE + 2]; // 1040 byte
final int[] weight = new int[MAX_ALPHA_SIZE * 2]; // 2064 byte
final int[] parent = new int[MAX_ALPHA_SIZE * 2]; // 2064 byte
final int[] ftab = new int[65537]; // 262148 byte
// ------------
// 333408 byte
final byte[] block; // 900021 byte
final int[] fmap; // 3600000 byte
final char[] sfmap; // 3600000 byte
// ------------
// 8433529 byte
// ============
/**
* Array instance identical to sfmap, both are used only temporarily and
     * independently, so we do not need to allocate additional memory.
*/
final char[] quadrant;
Data(int blockSize100k) {
super();
final int n = blockSize100k * BZip2Constants.baseBlockSize;
this.block = new byte[(n + 1 + NUM_OVERSHOOT_BYTES)];
this.fmap = new int[n];
this.sfmap = new char[2 * n];
this.quadrant = this.sfmap;
}
}
}
| 57,764 | 26.58596 | 168 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* This package is based on the work done by Keiron Liddle, Aftex Software
* <[email protected]> to whom the Ant project is very grateful for his
* great code.
*/
package org.apache.hadoop.io.compress.bzip2;
import java.io.BufferedInputStream;
import java.io.InputStream;
import java.io.IOException;
import org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE;
/**
* An input stream that decompresses from the BZip2 format (without the file
* header chars) to be read as any other stream.
*
* <p>
* The decompression requires large amounts of memory. Thus you should call the
* {@link #close() close()} method as soon as possible, to force
* <tt>CBZip2InputStream</tt> to release the allocated memory. See
* {@link CBZip2OutputStream CBZip2OutputStream} for information about memory
* usage.
* </p>
*
* <p>
* <tt>CBZip2InputStream</tt> reads bytes from the compressed source stream via
* the single byte {@link java.io.InputStream#read() read()} method exclusively.
 * Thus you should consider using a buffered source stream.
* </p>
*
* <p>
* This Ant code was enhanced so that it can de-compress blocks of bzip2 data.
* Current position in the stream is an important statistic for Hadoop. For
 * example in LineRecordReader, we solely depend on the current position in the
 * stream to know about the progress. The notion of position becomes complicated
 * for compressed files. The Hadoop splitting is done in terms of the compressed
 * file, but a compressed file expands to a much larger amount of data. So we
 * have handled this problem in the following way.
*
 * At object creation time, we find the next block start delimiter. Once such a
* marker is found, the stream stops there (we discard any read compressed data
* in this process) and the position is updated (i.e. the caller of this class
* will find out the stream location). At this point we are ready for actual
* reading (i.e. decompression) of data.
*
 * The subsequent read calls give out data. The position is updated when the
 * caller of this class has read off the current block + 1 bytes. In between
 * block reads, the position is not updated. (We can only update the position
 * on block boundaries.)
* </p>
*
* <p>
* Instances of this class are not threadsafe.
* </p>
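 *
 * <p>
 * A minimal usage sketch (illustrative only, not part of the original
 * documentation; the file name is hypothetical, and the two magic bytes
 * 'B' and 'Z' are assumed to have already been consumed by the caller):
 * </p>
 *
 * <pre>
 * InputStream raw = new java.io.FileInputStream("data.bz2");
 * raw.read(); // skip 'B'
 * raw.read(); // skip 'Z'
 * CBZip2InputStream bzIn = new CBZip2InputStream(raw, READ_MODE.CONTINUOUS);
 * int b;
 * while ((b = bzIn.read()) >= 0) {
 *   // consume the decompressed byte b
 * }
 * bzIn.close();
 * </pre>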
*/
public class CBZip2InputStream extends InputStream implements BZip2Constants {
public static final long BLOCK_DELIMITER = 0X314159265359L;// start of block
public static final long EOS_DELIMITER = 0X177245385090L;// end of bzip2 stream
private static final int DELIMITER_BIT_LENGTH = 48;
READ_MODE readMode = READ_MODE.CONTINUOUS;
// The variable records the current advertised position of the stream.
private long reportedBytesReadFromCompressedStream = 0L;
  // The following variable keeps a record of compressed bytes read.
private long bytesReadFromCompressedStream = 0L;
private boolean lazyInitialization = false;
private byte array[] = new byte[1];
/**
* Index of the last char in the block, so the block size == last + 1.
*/
private int last;
/**
* Index in zptr[] of original string after sorting.
*/
private int origPtr;
/**
* always: in the range 0 .. 9. The current block size is 100000 * this
* number.
*/
private int blockSize100k;
private boolean blockRandomised = false;
private long bsBuff;
private long bsLive;
private final CRC crc = new CRC();
private int nInUse;
private BufferedInputStream in;
private int currentChar = -1;
/**
* A state machine to keep track of current state of the de-coder
*
*/
public enum STATE {
EOF, START_BLOCK_STATE, RAND_PART_A_STATE, RAND_PART_B_STATE, RAND_PART_C_STATE, NO_RAND_PART_A_STATE, NO_RAND_PART_B_STATE, NO_RAND_PART_C_STATE, NO_PROCESS_STATE
};
private STATE currentState = STATE.START_BLOCK_STATE;
private int storedBlockCRC, storedCombinedCRC;
private int computedBlockCRC, computedCombinedCRC;
private boolean skipResult = false;// used by skipToNextMarker
private boolean skipDecompression = false;
// Variables used by setup* methods exclusively
private int su_count;
private int su_ch2;
private int su_chPrev;
private int su_i2;
private int su_j2;
private int su_rNToGo;
private int su_rTPos;
private int su_tPos;
private char su_z;
/**
* All memory intensive stuff. This field is initialized by initBlock().
*/
private CBZip2InputStream.Data data;
/**
* This method reports the processed bytes so far. Please note that this
* statistic is only updated on block boundaries and only when the stream is
* initiated in BYBLOCK mode.
*/
public long getProcessedByteCount() {
return reportedBytesReadFromCompressedStream;
}
/**
* This method keeps track of raw processed compressed
* bytes.
*
* @param count count is the number of bytes to be
* added to raw processed bytes
*/
protected void updateProcessedByteCount(int count) {
this.bytesReadFromCompressedStream += count;
}
/**
* This method is called by the client of this
* class in case there are any corrections in
* the stream position. One common example is
* when client of this code removes starting BZ
* characters from the compressed stream.
*
* @param count count bytes are added to the reported bytes
*
*/
public void updateReportedByteCount(int count) {
this.reportedBytesReadFromCompressedStream += count;
this.updateProcessedByteCount(count);
}
/**
* This method reads a Byte from the compressed stream. Whenever we need to
* read from the underlying compressed stream, this method should be called
* instead of directly calling the read method of the underlying compressed
   * stream. This method does important record keeping to track how many
   * bytes have been read off the compressed stream.
*/
private int readAByte(InputStream inStream) throws IOException {
int read = inStream.read();
if (read >= 0) {
this.updateProcessedByteCount(1);
}
return read;
}
/**
* This method tries to find the marker (passed to it as the first parameter)
* in the stream. It can find bit patterns of length <= 63 bits. Specifically
* this method is used in CBZip2InputStream to find the end of block (EOB)
* delimiter in the stream, starting from the current position of the stream.
* If marker is found, the stream position will be right after marker at the
* end of this call.
*
* @param marker The bit pattern to be found in the stream
* @param markerBitLength No of bits in the marker
*
* @throws IOException
   * @throws IllegalArgumentException if markerBitLength is greater than 63
*/
public boolean skipToNextMarker(long marker, int markerBitLength)
throws IOException, IllegalArgumentException {
try {
if (markerBitLength > 63) {
throw new IllegalArgumentException(
"skipToNextMarker can not find patterns greater than 63 bits");
}
      // pick next markerBitLength bits in the stream
long bytes = 0;
bytes = this.bsR(markerBitLength);
if (bytes == -1) {
return false;
}
while (true) {
if (bytes == marker) {
return true;
} else {
bytes = bytes << 1;
bytes = bytes & ((1L << markerBitLength) - 1);
int oneBit = (int) this.bsR(1);
if (oneBit != -1) {
bytes = bytes | oneBit;
} else
return false;
}
}
} catch (IOException ex) {
return false;
}
}
protected void reportCRCError() throws IOException {
throw new IOException("crc error");
}
private void makeMaps() {
final boolean[] inUse = this.data.inUse;
final byte[] seqToUnseq = this.data.seqToUnseq;
int nInUseShadow = 0;
for (int i = 0; i < 256; i++) {
if (inUse[i])
seqToUnseq[nInUseShadow++] = (byte) i;
}
this.nInUse = nInUseShadow;
}
/**
* Constructs a new CBZip2InputStream which decompresses bytes read from the
* specified stream.
*
* <p>
   * Although BZip2 headers are marked with the magic <tt>"BZ"</tt> this
* constructor expects the next byte in the stream to be the first one after
* the magic. Thus callers have to skip the first two bytes. Otherwise this
* constructor will throw an exception.
* </p>
*
* @throws IOException
* if the stream content is malformed or an I/O error occurs.
* @throws NullPointerException
* if <tt>in == null</tt>
*/
public CBZip2InputStream(final InputStream in, READ_MODE readMode)
throws IOException {
this(in, readMode, false);
}
private CBZip2InputStream(final InputStream in, READ_MODE readMode, boolean skipDecompression)
throws IOException {
super();
    int blockSize = 0X39; // i.e. the character '9'
this.blockSize100k = blockSize - '0';
    this.in = new BufferedInputStream(in, 1024 * 9); // 9 KB buffer
this.readMode = readMode;
this.skipDecompression = skipDecompression;
if (readMode == READ_MODE.CONTINUOUS) {
currentState = STATE.START_BLOCK_STATE;
      lazyInitialization = (in.available() == 0);
if(!lazyInitialization){
init();
}
} else if (readMode == READ_MODE.BYBLOCK) {
this.currentState = STATE.NO_PROCESS_STATE;
skipResult = this.skipToNextMarker(CBZip2InputStream.BLOCK_DELIMITER,DELIMITER_BIT_LENGTH);
this.reportedBytesReadFromCompressedStream = this.bytesReadFromCompressedStream;
if(!skipDecompression){
changeStateToProcessABlock();
}
}
}
/**
* Returns the number of bytes between the current stream position
* and the immediate next BZip2 block marker.
*
* @param in
* The InputStream
*
* @return long Number of bytes between current stream position and the
* next BZip2 block start marker.
* @throws IOException
*
*/
public static long numberOfBytesTillNextMarker(final InputStream in) throws IOException{
CBZip2InputStream anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK, true);
return anObject.getProcessedByteCount();
}
public CBZip2InputStream(final InputStream in) throws IOException {
this(in, READ_MODE.CONTINUOUS);
}
private void changeStateToProcessABlock() throws IOException {
    if (skipResult) {
initBlock();
setupBlock();
} else {
this.currentState = STATE.EOF;
}
}
@Override
public int read() throws IOException {
if (this.in != null) {
int result = this.read(array, 0, 1);
int value = 0XFF & array[0];
return (result > 0 ? value : result);
} else {
throw new IOException("stream closed");
}
}
/**
   * In CONTINUOUS reading mode, this read method starts at the
   * beginning of the compressed stream and ends at the end of file,
   * emitting un-compressed data. In this mode stream positioning
* is not announced and should be ignored.
*
* In BYBLOCK reading mode, this read method informs about the end
* of a BZip2 block by returning EOB. At this event, the compressed
* stream position is also announced. This announcement tells that
* how much of the compressed stream has been de-compressed and read
* out of this class. In between EOB events, the stream position is
* not updated.
*
*
* @throws IOException
* if the stream content is malformed or an I/O error occurs.
*
   * @return int A return value greater than 0 is the number of bytes read. A
   *         value of -1 means end of stream, while -2 represents end of block.
*/
@Override
public int read(final byte[] dest, final int offs, final int len)
throws IOException {
if (offs < 0) {
throw new IndexOutOfBoundsException("offs(" + offs + ") < 0.");
}
if (len < 0) {
throw new IndexOutOfBoundsException("len(" + len + ") < 0.");
}
if (offs + len > dest.length) {
throw new IndexOutOfBoundsException("offs(" + offs + ") + len("
+ len + ") > dest.length(" + dest.length + ").");
}
if (this.in == null) {
throw new IOException("stream closed");
}
if(lazyInitialization){
this.init();
this.lazyInitialization = false;
}
if(skipDecompression){
changeStateToProcessABlock();
skipDecompression = false;
}
final int hi = offs + len;
int destOffs = offs;
int b = 0;
for (; ((destOffs < hi) && ((b = read0())) >= 0);) {
dest[destOffs++] = (byte) b;
}
int result = destOffs - offs;
if (result == 0) {
//report 'end of block' or 'end of stream'
result = b;
skipResult = this.skipToNextMarker(CBZip2InputStream.BLOCK_DELIMITER, DELIMITER_BIT_LENGTH);
//Exactly when we are about to start a new block, we advertise the stream position.
this.reportedBytesReadFromCompressedStream = this.bytesReadFromCompressedStream;
changeStateToProcessABlock();
}
return result;
}
private int read0() throws IOException {
final int retChar = this.currentChar;
switch (this.currentState) {
case EOF:
return END_OF_STREAM;// return -1
case NO_PROCESS_STATE:
return END_OF_BLOCK;// return -2
case START_BLOCK_STATE:
throw new IllegalStateException();
case RAND_PART_A_STATE:
throw new IllegalStateException();
case RAND_PART_B_STATE:
setupRandPartB();
break;
case RAND_PART_C_STATE:
setupRandPartC();
break;
case NO_RAND_PART_A_STATE:
throw new IllegalStateException();
case NO_RAND_PART_B_STATE:
setupNoRandPartB();
break;
case NO_RAND_PART_C_STATE:
setupNoRandPartC();
break;
default:
throw new IllegalStateException();
}
return retChar;
}
private void init() throws IOException {
int magic2 = this.readAByte(in);
if (magic2 != 'h') {
throw new IOException("Stream is not BZip2 formatted: expected 'h'"
+ " as first byte but got '" + (char) magic2 + "'");
}
int blockSize = this.readAByte(in);
if ((blockSize < '1') || (blockSize > '9')) {
throw new IOException("Stream is not BZip2 formatted: illegal "
+ "blocksize " + (char) blockSize);
}
this.blockSize100k = blockSize - '0';
initBlock();
setupBlock();
}
private void initBlock() throws IOException {
if (this.readMode == READ_MODE.BYBLOCK) {
// this.checkBlockIntegrity();
this.storedBlockCRC = bsGetInt();
this.blockRandomised = bsR(1) == 1;
/**
* Allocate data here instead in constructor, so we do not allocate
* it if the input file is empty.
*/
if (this.data == null) {
this.data = new Data(this.blockSize100k);
}
// currBlockNo++;
getAndMoveToFrontDecode();
this.crc.initialiseCRC();
this.currentState = STATE.START_BLOCK_STATE;
return;
}
char magic0 = bsGetUByte();
char magic1 = bsGetUByte();
char magic2 = bsGetUByte();
char magic3 = bsGetUByte();
char magic4 = bsGetUByte();
char magic5 = bsGetUByte();
if (magic0 == 0x17 && magic1 == 0x72 && magic2 == 0x45
&& magic3 == 0x38 && magic4 == 0x50 && magic5 == 0x90) {
complete(); // end of file
} else if (magic0 != 0x31 || // '1'
        magic1 != 0x41 || // 'A'
magic2 != 0x59 || // 'Y'
magic3 != 0x26 || // '&'
magic4 != 0x53 || // 'S'
magic5 != 0x59 // 'Y'
) {
this.currentState = STATE.EOF;
throw new IOException("bad block header");
} else {
this.storedBlockCRC = bsGetInt();
this.blockRandomised = bsR(1) == 1;
/**
* Allocate data here instead in constructor, so we do not allocate
* it if the input file is empty.
*/
if (this.data == null) {
this.data = new Data(this.blockSize100k);
}
// currBlockNo++;
getAndMoveToFrontDecode();
this.crc.initialiseCRC();
this.currentState = STATE.START_BLOCK_STATE;
}
}
private void endBlock() throws IOException {
this.computedBlockCRC = this.crc.getFinalCRC();
// A bad CRC is considered a fatal error.
if (this.storedBlockCRC != this.computedBlockCRC) {
// make next blocks readable without error
// (repair feature, not yet documented, not tested)
this.computedCombinedCRC = (this.storedCombinedCRC << 1)
| (this.storedCombinedCRC >>> 31);
this.computedCombinedCRC ^= this.storedBlockCRC;
reportCRCError();
}
this.computedCombinedCRC = (this.computedCombinedCRC << 1)
| (this.computedCombinedCRC >>> 31);
this.computedCombinedCRC ^= this.computedBlockCRC;
}
private void complete() throws IOException {
this.storedCombinedCRC = bsGetInt();
this.currentState = STATE.EOF;
this.data = null;
if (this.storedCombinedCRC != this.computedCombinedCRC) {
reportCRCError();
}
}
@Override
public void close() throws IOException {
InputStream inShadow = this.in;
if (inShadow != null) {
try {
if (inShadow != System.in) {
inShadow.close();
}
} finally {
this.data = null;
this.in = null;
}
}
}
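  /*
   * Reads the next n bits from the compressed stream, refilling the 64-bit
   * bit buffer one byte at a time as needed.
   */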
private long bsR(final long n) throws IOException {
long bsLiveShadow = this.bsLive;
long bsBuffShadow = this.bsBuff;
if (bsLiveShadow < n) {
final InputStream inShadow = this.in;
do {
int thech = readAByte(inShadow);
if (thech < 0) {
throw new IOException("unexpected end of stream");
}
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
} while (bsLiveShadow < n);
this.bsBuff = bsBuffShadow;
}
this.bsLive = bsLiveShadow - n;
return (bsBuffShadow >> (bsLiveShadow - n)) & ((1L << n) - 1);
}
private boolean bsGetBit() throws IOException {
long bsLiveShadow = this.bsLive;
long bsBuffShadow = this.bsBuff;
if (bsLiveShadow < 1) {
int thech = this.readAByte(in);
if (thech < 0) {
throw new IOException("unexpected end of stream");
}
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
this.bsBuff = bsBuffShadow;
}
this.bsLive = bsLiveShadow - 1;
return ((bsBuffShadow >> (bsLiveShadow - 1)) & 1) != 0;
}
private char bsGetUByte() throws IOException {
return (char) bsR(8);
}
private int bsGetInt() throws IOException {
return (int) ((((((bsR(8) << 8) | bsR(8)) << 8) | bsR(8)) << 8) | bsR(8));
}
/**
* Called by createHuffmanDecodingTables() exclusively.
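   *
   * Builds canonical Huffman decoding tables for one group: perm lists the
   * symbols ordered by code length, limit[len] holds the largest code value
   * of each bit length, and base[len] is the offset subtracted from a code
   * of that length to index perm.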
*/
private static void hbCreateDecodeTables(final int[] limit,
final int[] base, final int[] perm, final char[] length,
final int minLen, final int maxLen, final int alphaSize) {
for (int i = minLen, pp = 0; i <= maxLen; i++) {
for (int j = 0; j < alphaSize; j++) {
if (length[j] == i) {
perm[pp++] = j;
}
}
}
for (int i = MAX_CODE_LEN; --i > 0;) {
base[i] = 0;
limit[i] = 0;
}
for (int i = 0; i < alphaSize; i++) {
base[length[i] + 1]++;
}
for (int i = 1, b = base[0]; i < MAX_CODE_LEN; i++) {
b += base[i];
base[i] = b;
}
for (int i = minLen, vec = 0, b = base[i]; i <= maxLen; i++) {
final int nb = base[i + 1];
vec += nb - b;
b = nb;
limit[i] = vec - 1;
vec <<= 1;
}
for (int i = minLen + 1; i <= maxLen; i++) {
base[i] = ((limit[i - 1] + 1) << 1) - base[i];
}
}
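  /*
   * Reads the per-block coding metadata from the bit stream: the 16x16
   * symbol-usage map, the number of Huffman groups and selectors, the
   * MTF-coded selectors, and the delta-coded code lengths for each group,
   * then builds the corresponding decoding tables.
   */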
private void recvDecodingTables() throws IOException {
final Data dataShadow = this.data;
final boolean[] inUse = dataShadow.inUse;
final byte[] pos = dataShadow.recvDecodingTables_pos;
final byte[] selector = dataShadow.selector;
final byte[] selectorMtf = dataShadow.selectorMtf;
int inUse16 = 0;
/* Receive the mapping table */
for (int i = 0; i < 16; i++) {
if (bsGetBit()) {
inUse16 |= 1 << i;
}
}
for (int i = 256; --i >= 0;) {
inUse[i] = false;
}
for (int i = 0; i < 16; i++) {
if ((inUse16 & (1 << i)) != 0) {
final int i16 = i << 4;
for (int j = 0; j < 16; j++) {
if (bsGetBit()) {
inUse[i16 + j] = true;
}
}
}
}
makeMaps();
final int alphaSize = this.nInUse + 2;
/* Now the selectors */
final int nGroups = (int) bsR(3);
final int nSelectors = (int) bsR(15);
for (int i = 0; i < nSelectors; i++) {
int j = 0;
while (bsGetBit()) {
j++;
}
selectorMtf[i] = (byte) j;
}
/* Undo the MTF values for the selectors. */
for (int v = nGroups; --v >= 0;) {
pos[v] = (byte) v;
}
for (int i = 0; i < nSelectors; i++) {
int v = selectorMtf[i] & 0xff;
final byte tmp = pos[v];
while (v > 0) {
// nearly all times v is zero, 4 in most other cases
pos[v] = pos[v - 1];
v--;
}
pos[0] = tmp;
selector[i] = tmp;
}
final char[][] len = dataShadow.temp_charArray2d;
/* Now the coding tables */
for (int t = 0; t < nGroups; t++) {
int curr = (int) bsR(5);
final char[] len_t = len[t];
for (int i = 0; i < alphaSize; i++) {
while (bsGetBit()) {
curr += bsGetBit() ? -1 : 1;
}
len_t[i] = (char) curr;
}
}
// finally create the Huffman tables
createHuffmanDecodingTables(alphaSize, nGroups);
}
/**
* Called by recvDecodingTables() exclusively.
*/
private void createHuffmanDecodingTables(final int alphaSize,
final int nGroups) {
final Data dataShadow = this.data;
final char[][] len = dataShadow.temp_charArray2d;
final int[] minLens = dataShadow.minLens;
final int[][] limit = dataShadow.limit;
final int[][] base = dataShadow.base;
final int[][] perm = dataShadow.perm;
for (int t = 0; t < nGroups; t++) {
int minLen = 32;
int maxLen = 0;
final char[] len_t = len[t];
for (int i = alphaSize; --i >= 0;) {
final char lent = len_t[i];
if (lent > maxLen) {
maxLen = lent;
}
if (lent < minLen) {
minLen = lent;
}
}
hbCreateDecodeTables(limit[t], base[t], perm[t], len[t], minLen,
maxLen, alphaSize);
minLens[t] = minLen;
}
}
private void getAndMoveToFrontDecode() throws IOException {
this.origPtr = (int) bsR(24);
recvDecodingTables();
final InputStream inShadow = this.in;
final Data dataShadow = this.data;
final byte[] ll8 = dataShadow.ll8;
final int[] unzftab = dataShadow.unzftab;
final byte[] selector = dataShadow.selector;
final byte[] seqToUnseq = dataShadow.seqToUnseq;
final char[] yy = dataShadow.getAndMoveToFrontDecode_yy;
final int[] minLens = dataShadow.minLens;
final int[][] limit = dataShadow.limit;
final int[][] base = dataShadow.base;
final int[][] perm = dataShadow.perm;
final int limitLast = this.blockSize100k * 100000;
/*
* Setting up the unzftab entries here is not strictly necessary, but it
* does save having to do it later in a separate pass, and so saves a
* block's worth of cache misses.
*/
for (int i = 256; --i >= 0;) {
yy[i] = (char) i;
unzftab[i] = 0;
}
int groupNo = 0;
int groupPos = G_SIZE - 1;
final int eob = this.nInUse + 1;
int nextSym = getAndMoveToFrontDecode0(0);
int bsBuffShadow = (int) this.bsBuff;
int bsLiveShadow = (int) this.bsLive;
int lastShadow = -1;
int zt = selector[groupNo] & 0xff;
int[] base_zt = base[zt];
int[] limit_zt = limit[zt];
int[] perm_zt = perm[zt];
int minLens_zt = minLens[zt];
while (nextSym != eob) {
if ((nextSym == RUNA) || (nextSym == RUNB)) {
int s = -1;
for (int n = 1; true; n <<= 1) {
if (nextSym == RUNA) {
s += n;
} else if (nextSym == RUNB) {
s += n << 1;
} else {
break;
}
if (groupPos == 0) {
groupPos = G_SIZE - 1;
zt = selector[++groupNo] & 0xff;
base_zt = base[zt];
limit_zt = limit[zt];
perm_zt = perm[zt];
minLens_zt = minLens[zt];
} else {
groupPos--;
}
int zn = minLens_zt;
while (bsLiveShadow < zn) {
final int thech = readAByte(inShadow);
if (thech >= 0) {
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
continue;
} else {
throw new IOException("unexpected end of stream");
}
}
long zvec = (bsBuffShadow >> (bsLiveShadow - zn))
& ((1 << zn) - 1);
bsLiveShadow -= zn;
while (zvec > limit_zt[zn]) {
zn++;
while (bsLiveShadow < 1) {
final int thech = readAByte(inShadow);
if (thech >= 0) {
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
continue;
} else {
throw new IOException(
"unexpected end of stream");
}
}
bsLiveShadow--;
zvec = (zvec << 1)
| ((bsBuffShadow >> bsLiveShadow) & 1);
}
nextSym = perm_zt[(int) (zvec - base_zt[zn])];
}
final byte ch = seqToUnseq[yy[0]];
unzftab[ch & 0xff] += s + 1;
while (s-- >= 0) {
ll8[++lastShadow] = ch;
}
if (lastShadow >= limitLast) {
throw new IOException("block overrun");
}
} else {
if (++lastShadow >= limitLast) {
throw new IOException("block overrun");
}
final char tmp = yy[nextSym - 1];
unzftab[seqToUnseq[tmp] & 0xff]++;
ll8[lastShadow] = seqToUnseq[tmp];
/*
* This loop is hammered during decompression, hence avoid
* native method call overhead of System.arraycopy for very
* small ranges to copy.
*/
if (nextSym <= 16) {
for (int j = nextSym - 1; j > 0;) {
yy[j] = yy[--j];
}
} else {
System.arraycopy(yy, 0, yy, 1, nextSym - 1);
}
yy[0] = tmp;
if (groupPos == 0) {
groupPos = G_SIZE - 1;
zt = selector[++groupNo] & 0xff;
base_zt = base[zt];
limit_zt = limit[zt];
perm_zt = perm[zt];
minLens_zt = minLens[zt];
} else {
groupPos--;
}
int zn = minLens_zt;
while (bsLiveShadow < zn) {
final int thech = readAByte(inShadow);
if (thech >= 0) {
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
continue;
} else {
throw new IOException("unexpected end of stream");
}
}
int zvec = (bsBuffShadow >> (bsLiveShadow - zn))
& ((1 << zn) - 1);
bsLiveShadow -= zn;
while (zvec > limit_zt[zn]) {
zn++;
while (bsLiveShadow < 1) {
final int thech = readAByte(inShadow);
if (thech >= 0) {
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
continue;
} else {
throw new IOException("unexpected end of stream");
}
}
bsLiveShadow--;
zvec = ((zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1));
}
nextSym = perm_zt[zvec - base_zt[zn]];
}
}
this.last = lastShadow;
this.bsLive = bsLiveShadow;
this.bsBuff = bsBuffShadow;
}
private int getAndMoveToFrontDecode0(final int groupNo) throws IOException {
final InputStream inShadow = this.in;
final Data dataShadow = this.data;
final int zt = dataShadow.selector[groupNo] & 0xff;
final int[] limit_zt = dataShadow.limit[zt];
int zn = dataShadow.minLens[zt];
int zvec = (int) bsR(zn);
int bsLiveShadow = (int) this.bsLive;
int bsBuffShadow = (int) this.bsBuff;
while (zvec > limit_zt[zn]) {
zn++;
while (bsLiveShadow < 1) {
final int thech = readAByte(inShadow);
if (thech >= 0) {
bsBuffShadow = (bsBuffShadow << 8) | thech;
bsLiveShadow += 8;
continue;
} else {
throw new IOException("unexpected end of stream");
}
}
bsLiveShadow--;
zvec = (zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1);
}
this.bsLive = bsLiveShadow;
this.bsBuff = bsBuffShadow;
return dataShadow.perm[zt][zvec - dataShadow.base[zt][zn]];
}
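  /*
   * Prepares the inverse Burrows-Wheeler transform: cftab accumulates the
   * character counts from unzftab, and tt is filled so that following the
   * chain tt[...] starting at origPtr reproduces the original block from
   * ll8.
   */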
private void setupBlock() throws IOException {
if (this.data == null) {
return;
}
final int[] cftab = this.data.cftab;
final int[] tt = this.data.initTT(this.last + 1);
final byte[] ll8 = this.data.ll8;
cftab[0] = 0;
System.arraycopy(this.data.unzftab, 0, cftab, 1, 256);
for (int i = 1, c = cftab[0]; i <= 256; i++) {
c += cftab[i];
cftab[i] = c;
}
for (int i = 0, lastShadow = this.last; i <= lastShadow; i++) {
tt[cftab[ll8[i] & 0xff]++] = i;
}
if ((this.origPtr < 0) || (this.origPtr >= tt.length)) {
throw new IOException("stream corrupted");
}
this.su_tPos = tt[this.origPtr];
this.su_count = 0;
this.su_i2 = 0;
this.su_ch2 = 256; /* not a char and not EOF */
if (this.blockRandomised) {
this.su_rNToGo = 0;
this.su_rTPos = 0;
setupRandPartA();
} else {
setupNoRandPartA();
}
}
private void setupRandPartA() throws IOException {
if (this.su_i2 <= this.last) {
this.su_chPrev = this.su_ch2;
int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff;
this.su_tPos = this.data.tt[this.su_tPos];
if (this.su_rNToGo == 0) {
this.su_rNToGo = BZip2Constants.rNums[this.su_rTPos] - 1;
if (++this.su_rTPos == 512) {
this.su_rTPos = 0;
}
} else {
this.su_rNToGo--;
}
this.su_ch2 = su_ch2Shadow ^= (this.su_rNToGo == 1) ? 1 : 0;
this.su_i2++;
this.currentChar = su_ch2Shadow;
this.currentState = STATE.RAND_PART_B_STATE;
this.crc.updateCRC(su_ch2Shadow);
} else {
endBlock();
if (readMode == READ_MODE.CONTINUOUS) {
initBlock();
setupBlock();
} else if (readMode == READ_MODE.BYBLOCK) {
this.currentState = STATE.NO_PROCESS_STATE;
}
}
}
private void setupNoRandPartA() throws IOException {
if (this.su_i2 <= this.last) {
this.su_chPrev = this.su_ch2;
int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff;
this.su_ch2 = su_ch2Shadow;
this.su_tPos = this.data.tt[this.su_tPos];
this.su_i2++;
this.currentChar = su_ch2Shadow;
this.currentState = STATE.NO_RAND_PART_B_STATE;
this.crc.updateCRC(su_ch2Shadow);
} else {
this.currentState = STATE.NO_RAND_PART_A_STATE;
endBlock();
if (readMode == READ_MODE.CONTINUOUS) {
initBlock();
setupBlock();
} else if (readMode == READ_MODE.BYBLOCK) {
this.currentState = STATE.NO_PROCESS_STATE;
}
}
}
private void setupRandPartB() throws IOException {
if (this.su_ch2 != this.su_chPrev) {
this.currentState = STATE.RAND_PART_A_STATE;
this.su_count = 1;
setupRandPartA();
} else if (++this.su_count >= 4) {
this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff);
this.su_tPos = this.data.tt[this.su_tPos];
if (this.su_rNToGo == 0) {
this.su_rNToGo = BZip2Constants.rNums[this.su_rTPos] - 1;
if (++this.su_rTPos == 512) {
this.su_rTPos = 0;
}
} else {
this.su_rNToGo--;
}
this.su_j2 = 0;
this.currentState = STATE.RAND_PART_C_STATE;
if (this.su_rNToGo == 1) {
this.su_z ^= 1;
}
setupRandPartC();
} else {
this.currentState = STATE.RAND_PART_A_STATE;
setupRandPartA();
}
}
private void setupRandPartC() throws IOException {
if (this.su_j2 < this.su_z) {
this.currentChar = this.su_ch2;
this.crc.updateCRC(this.su_ch2);
this.su_j2++;
} else {
this.currentState = STATE.RAND_PART_A_STATE;
this.su_i2++;
this.su_count = 0;
setupRandPartA();
}
}
private void setupNoRandPartB() throws IOException {
if (this.su_ch2 != this.su_chPrev) {
this.su_count = 1;
setupNoRandPartA();
} else if (++this.su_count >= 4) {
this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff);
this.su_tPos = this.data.tt[this.su_tPos];
this.su_j2 = 0;
setupNoRandPartC();
} else {
setupNoRandPartA();
}
}
private void setupNoRandPartC() throws IOException {
if (this.su_j2 < this.su_z) {
int su_ch2Shadow = this.su_ch2;
this.currentChar = su_ch2Shadow;
this.crc.updateCRC(su_ch2Shadow);
this.su_j2++;
this.currentState = STATE.NO_RAND_PART_C_STATE;
} else {
this.su_i2++;
this.su_count = 0;
setupNoRandPartA();
}
}
private static final class Data extends Object {
// (with blockSize 900k)
final boolean[] inUse = new boolean[256]; // 256 byte
final byte[] seqToUnseq = new byte[256]; // 256 byte
final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte
final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte
/**
* Freq table collected to save a pass over the data during
* decompression.
*/
final int[] unzftab = new int[256]; // 1024 byte
final int[][] limit = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
final int[][] base = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
final int[][] perm = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte
final int[] minLens = new int[N_GROUPS]; // 24 byte
final int[] cftab = new int[257]; // 1028 byte
final char[] getAndMoveToFrontDecode_yy = new char[256]; // 512 byte
final char[][] temp_charArray2d = new char[N_GROUPS][MAX_ALPHA_SIZE]; // 3096
// byte
final byte[] recvDecodingTables_pos = new byte[N_GROUPS]; // 6 byte
// ---------------
// 60798 byte
int[] tt; // 3600000 byte
byte[] ll8; // 900000 byte
// ---------------
// 4560782 byte
// ===============
Data(int blockSize100k) {
super();
this.ll8 = new byte[blockSize100k * BZip2Constants.baseBlockSize];
}
/**
* Initializes the {@link #tt} array.
*
* This method is called when the required length of the array is known.
     * I don't initialize it at construction time to avoid unnecessary
* memory allocation when compressing small files.
*/
final int[] initTT(int length) {
int[] ttShadow = this.tt;
// tt.length should always be >= length, but theoretically
      // it can be smaller if the compressor mixed small and large
// blocks. Normally only the last block will be smaller
// than others.
if ((ttShadow == null) || (ttShadow.length < length)) {
this.tt = ttShadow = new int[length];
}
return ttShadow;
}
}
}
| 36,368 | 28.306205 | 167 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Compressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.bzip2;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A {@link Compressor} based on the popular
* bzip2 compression algorithm.
* http://www.bzip2.org/
*
*/
public class Bzip2Compressor implements Compressor {
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
  // The default values for the block size and work factor are the same as
// those in Julian Seward's original bzip2 implementation.
static final int DEFAULT_BLOCK_SIZE = 9;
static final int DEFAULT_WORK_FACTOR = 30;
private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
private long stream;
private int blockSize;
private int workFactor;
private int directBufferSize;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private Buffer uncompressedDirectBuf = null;
private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0;
private boolean keepUncompressedBuf = false;
private Buffer compressedDirectBuf = null;
private boolean finish, finished;
/**
   * Creates a new compressor with default values for the
* compression block size and work factor. Compressed data will be
* generated in bzip2 format.
*/
public Bzip2Compressor() {
this(DEFAULT_BLOCK_SIZE, DEFAULT_WORK_FACTOR, DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Creates a new compressor, taking settings from the configuration.
*/
public Bzip2Compressor(Configuration conf) {
this(Bzip2Factory.getBlockSize(conf),
Bzip2Factory.getWorkFactor(conf),
DEFAULT_DIRECT_BUFFER_SIZE);
}
/**
* Creates a new compressor using the specified block size.
* Compressed data will be generated in bzip2 format.
*
* @param blockSize The block size to be used for compression. This is
* an integer from 1 through 9, which is multiplied by 100,000 to
* obtain the actual block size in bytes.
* @param workFactor This parameter is a threshold that determines when a
* fallback algorithm is used for pathological data. It ranges from
* 0 to 250.
* @param directBufferSize Size of the direct buffer to be used.
*/
public Bzip2Compressor(int blockSize, int workFactor,
int directBufferSize) {
this.blockSize = blockSize;
this.workFactor = workFactor;
this.directBufferSize = directBufferSize;
stream = init(blockSize, workFactor);
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration. It will reset the compressor's block size and
   * work factor.
*
* @param conf Configuration storing new settings
*/
@Override
public synchronized void reinit(Configuration conf) {
reset();
end(stream);
if (conf == null) {
stream = init(blockSize, workFactor);
return;
}
blockSize = Bzip2Factory.getBlockSize(conf);
workFactor = Bzip2Factory.getWorkFactor(conf);
stream = init(blockSize, workFactor);
if(LOG.isDebugEnabled()) {
LOG.debug("Reinit compressor with new compression configuration");
}
}
@Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
uncompressedDirectBufOff = 0;
setInputFromSavedData();
// Reinitialize bzip2's output direct buffer.
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
// Copy enough data from userBuf to uncompressedDirectBuf.
synchronized void setInputFromSavedData() {
int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff, len);
userBufLen -= len;
userBufOff += len;
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
@Override
public synchronized void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException();
}
@Override
public synchronized boolean needsInput() {
// Compressed data still available?
if (compressedDirectBuf.remaining() > 0) {
return false;
}
// Uncompressed data available in either the direct buffer or user buffer?
if (keepUncompressedBuf && uncompressedDirectBufLen > 0)
return false;
if (uncompressedDirectBuf.remaining() > 0) {
// Check if we have consumed all data in the user buffer.
if (userBufLen <= 0) {
return true;
} else {
// Copy enough data from userBuf to uncompressedDirectBuf.
setInputFromSavedData();
return uncompressedDirectBuf.remaining() > 0;
}
}
return false;
}
@Override
public synchronized void finish() {
finish = true;
}
@Override
public synchronized boolean finished() {
// Check if bzip2 says it has finished and
// all compressed data has been consumed.
return (finished && compressedDirectBuf.remaining() == 0);
}
@Override
public synchronized int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data.
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer)compressedDirectBuf).get(b, off, n);
return n;
}
// Re-initialize bzip2's output direct buffer.
compressedDirectBuf.rewind();
compressedDirectBuf.limit(directBufferSize);
// Compress the data.
n = deflateBytesDirect();
compressedDirectBuf.limit(n);
// Check if bzip2 has consumed the entire input buffer.
// Set keepUncompressedBuf properly.
if (uncompressedDirectBufLen <= 0) { // bzip2 consumed all input
keepUncompressedBuf = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufOff = 0;
uncompressedDirectBufLen = 0;
} else {
keepUncompressedBuf = true;
}
// Get at most 'len' bytes.
n = Math.min(n, len);
((ByteBuffer)compressedDirectBuf).get(b, off, n);
return n;
}
/**
* Returns the total number of compressed bytes output so far.
*
* @return the total (non-negative) number of compressed bytes output so far
*/
@Override
public synchronized long getBytesWritten() {
checkStream();
return getBytesWritten(stream);
}
/**
   * Returns the total number of uncompressed bytes input so far.
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
@Override
public synchronized long getBytesRead() {
checkStream();
return getBytesRead(stream);
}
@Override
public synchronized void reset() {
checkStream();
end(stream);
stream = init(blockSize, workFactor);
finish = false;
finished = false;
uncompressedDirectBuf.rewind();
uncompressedDirectBufOff = uncompressedDirectBufLen = 0;
keepUncompressedBuf = false;
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
userBufOff = userBufLen = 0;
}
@Override
public synchronized void end() {
if (stream != 0) {
end(stream);
stream = 0;
}
}
static void initSymbols(String libname) {
initIDs(libname);
}
private void checkStream() {
if (stream == 0)
throw new NullPointerException();
}
private native static void initIDs(String libname);
private native static long init(int blockSize, int workFactor);
private native int deflateBytesDirect();
private native static long getBytesRead(long strm);
private native static long getBytesWritten(long strm);
private native static void end(long strm);
public native static String getLibraryName();
}
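// ---------------------------------------------------------------------
// Illustrative usage sketch, not part of Hadoop: drives the standard
// Compressor protocol (setInput / finish / compress until finished)
// against a compressor such as Bzip2Compressor. The class name
// CompressExample and the buffer size are hypothetical, and the sketch
// assumes the whole input fits in the compressor's internal direct
// buffer (64 KB by default); larger inputs need a needsInput() loop.
// ---------------------------------------------------------------------
class CompressExample {
  static byte[] compressAll(Compressor compressor, byte[] input)
      throws IOException {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    byte[] buffer = new byte[64 * 1024];
    compressor.setInput(input, 0, input.length);
    compressor.finish();
    // Drain compressed output until the compressor reports completion.
    while (!compressor.finished()) {
      int n = compressor.compress(buffer, 0, buffer.length);
      out.write(buffer, 0, n);
    }
    return out.toByteArray();
  }
}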
| 9,312 | 29.940199 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Factory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.bzip2;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Compressor;
import org.apache.hadoop.io.compress.bzip2.Bzip2Decompressor;
import org.apache.hadoop.io.compress.bzip2.BZip2DummyCompressor;
import org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor;
/**
* A collection of factories to create the right
* bzip2 compressor/decompressor instances.
*
*/
public class Bzip2Factory {
private static final Log LOG = LogFactory.getLog(Bzip2Factory.class);
private static String bzip2LibraryName = "";
private static boolean nativeBzip2Loaded;
/**
* Check if native-bzip2 code is loaded & initialized correctly and
* can be loaded for this job.
*
* @param conf configuration
* @return <code>true</code> if native-bzip2 is loaded & initialized
* and can be loaded for this job, else <code>false</code>
*/
public static synchronized boolean isNativeBzip2Loaded(Configuration conf) {
String libname = conf.get("io.compression.codec.bzip2.library",
"system-native");
if (!bzip2LibraryName.equals(libname)) {
nativeBzip2Loaded = false;
bzip2LibraryName = libname;
if (libname.equals("java-builtin")) {
LOG.info("Using pure-Java version of bzip2 library");
} else if (conf.getBoolean(
CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,
CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT) &&
NativeCodeLoader.isNativeCodeLoaded()) {
try {
// Initialize the native library.
Bzip2Compressor.initSymbols(libname);
Bzip2Decompressor.initSymbols(libname);
nativeBzip2Loaded = true;
LOG.info("Successfully loaded & initialized native-bzip2 library " +
libname);
} catch (Throwable t) {
LOG.warn("Failed to load/initialize native-bzip2 library " +
libname + ", will use pure-Java version");
}
}
}
return nativeBzip2Loaded;
}
public static String getLibraryName(Configuration conf) {
if (isNativeBzip2Loaded(conf)) {
return Bzip2Compressor.getLibraryName();
} else {
return bzip2LibraryName;
}
}
/**
* Return the appropriate type of the bzip2 compressor.
*
* @param conf configuration
* @return the appropriate type of the bzip2 compressor.
*/
public static Class<? extends Compressor>
getBzip2CompressorType(Configuration conf) {
return isNativeBzip2Loaded(conf) ?
Bzip2Compressor.class : BZip2DummyCompressor.class;
}
/**
* Return the appropriate implementation of the bzip2 compressor.
*
* @param conf configuration
* @return the appropriate implementation of the bzip2 compressor.
*/
public static Compressor getBzip2Compressor(Configuration conf) {
return isNativeBzip2Loaded(conf)?
new Bzip2Compressor(conf) : new BZip2DummyCompressor();
}
/**
* Return the appropriate type of the bzip2 decompressor.
*
* @param conf configuration
* @return the appropriate type of the bzip2 decompressor.
*/
public static Class<? extends Decompressor>
getBzip2DecompressorType(Configuration conf) {
return isNativeBzip2Loaded(conf) ?
Bzip2Decompressor.class : BZip2DummyDecompressor.class;
}
/**
* Return the appropriate implementation of the bzip2 decompressor.
*
* @param conf configuration
* @return the appropriate implementation of the bzip2 decompressor.
*/
public static Decompressor getBzip2Decompressor(Configuration conf) {
return isNativeBzip2Loaded(conf) ?
new Bzip2Decompressor() : new BZip2DummyDecompressor();
}
public static void setBlockSize(Configuration conf, int blockSize) {
conf.setInt("bzip2.compress.blocksize", blockSize);
}
public static int getBlockSize(Configuration conf) {
return conf.getInt("bzip2.compress.blocksize",
Bzip2Compressor.DEFAULT_BLOCK_SIZE);
}
public static void setWorkFactor(Configuration conf, int workFactor) {
conf.setInt("bzip2.compress.workfactor", workFactor);
}
public static int getWorkFactor(Configuration conf) {
return conf.getInt("bzip2.compress.workfactor",
Bzip2Compressor.DEFAULT_WORK_FACTOR);
}
}
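// ---------------------------------------------------------------------
// Illustrative sketch, not part of Hadoop: a job might tune the bzip2
// settings on a Configuration and then ask the factory for whichever
// implementation (native Bzip2Compressor or the pure-Java dummy) is
// actually loadable. Bzip2FactoryExample is a hypothetical class name.
// ---------------------------------------------------------------------
class Bzip2FactoryExample {
  static Compressor newTunedCompressor(Configuration conf) {
    Bzip2Factory.setBlockSize(conf, 9);    // 9 x 100k block size
    Bzip2Factory.setWorkFactor(conf, 30);  // fallback threshold, 0..250
    return Bzip2Factory.getBzip2Compressor(conf);
  }
}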
| 5,493 | 34.675325 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/Bzip2Decompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.bzip2;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A {@link Decompressor} based on the popular
* bzip2 compression algorithm.
* http://www.bzip2.org/
*
*/
public class Bzip2Decompressor implements Decompressor {
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
private long stream;
private boolean conserveMemory;
private int directBufferSize;
private Buffer compressedDirectBuf = null;
private int compressedDirectBufOff, compressedDirectBufLen;
private Buffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private boolean finished;
/**
* Creates a new decompressor.
*/
public Bzip2Decompressor(boolean conserveMemory, int directBufferSize) {
this.conserveMemory = conserveMemory;
this.directBufferSize = directBufferSize;
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
stream = init(conserveMemory ? 1 : 0);
}
public Bzip2Decompressor() {
this(false, DEFAULT_DIRECT_BUFFER_SIZE);
}
@Override
public synchronized void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
setInputFromSavedData();
// Reinitialize bzip2's output direct buffer.
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
synchronized void setInputFromSavedData() {
compressedDirectBufOff = 0;
compressedDirectBufLen = userBufLen;
if (compressedDirectBufLen > directBufferSize) {
compressedDirectBufLen = directBufferSize;
}
// Reinitialize bzip2's input direct buffer.
compressedDirectBuf.rewind();
((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff,
compressedDirectBufLen);
// Note how much data is being fed to bzip2.
userBufOff += compressedDirectBufLen;
userBufLen -= compressedDirectBufLen;
}
@Override
public synchronized void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException();
}
@Override
public synchronized boolean needsInput() {
// Consume remaining compressed data?
if (uncompressedDirectBuf.remaining() > 0) {
return false;
}
// Check if bzip2 has consumed all input.
if (compressedDirectBufLen <= 0) {
// Check if we have consumed all user-input.
if (userBufLen <= 0) {
return true;
} else {
setInputFromSavedData();
}
}
return false;
}
@Override
public synchronized boolean needsDictionary() {
return false;
}
@Override
public synchronized boolean finished() {
// Check if bzip2 says it has finished and
// all compressed data has been consumed.
return (finished && uncompressedDirectBuf.remaining() == 0);
}
@Override
public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is uncompressed data.
int n = uncompressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
return n;
}
// Re-initialize bzip2's output direct buffer.
uncompressedDirectBuf.rewind();
uncompressedDirectBuf.limit(directBufferSize);
// Decompress the data.
n = finished ? 0 : inflateBytesDirect();
uncompressedDirectBuf.limit(n);
// Get at most 'len' bytes.
n = Math.min(n, len);
((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
return n;
}
/**
* Returns the total number of uncompressed bytes output so far.
*
* @return the total (non-negative) number of uncompressed bytes output so far
*/
public synchronized long getBytesWritten() {
checkStream();
return getBytesWritten(stream);
}
/**
   * Returns the total number of compressed bytes input so far.
*
* @return the total (non-negative) number of compressed bytes input so far
*/
public synchronized long getBytesRead() {
checkStream();
return getBytesRead(stream);
}
/**
* Returns the number of bytes remaining in the input buffers; normally
   * called when finished() is true to determine amount of post-stream
   * data.
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@Override
public synchronized int getRemaining() {
checkStream();
return userBufLen + getRemaining(stream); // userBuf + compressedDirectBuf
}
/**
   * Resets everything including the input buffers (user and direct).
*/
@Override
public synchronized void reset() {
checkStream();
end(stream);
stream = init(conserveMemory ? 1 : 0);
finished = false;
compressedDirectBufOff = compressedDirectBufLen = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = userBufLen = 0;
}
@Override
public synchronized void end() {
if (stream != 0) {
end(stream);
stream = 0;
}
}
static void initSymbols(String libname) {
initIDs(libname);
}
private void checkStream() {
if (stream == 0)
throw new NullPointerException();
}
private native static void initIDs(String libname);
private native static long init(int conserveMemory);
private native int inflateBytesDirect();
private native static long getBytesRead(long strm);
private native static long getBytesWritten(long strm);
private native static int getRemaining(long strm);
private native static void end(long strm);
}
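// ---------------------------------------------------------------------
// Illustrative usage sketch, not part of Hadoop: the mirror image of the
// compress loop - feed compressed bytes with setInput() and drain with
// decompress() until finished(). DecompressExample is a hypothetical
// class name, and the sketch assumes the compressed input fits in the
// decompressor's internal direct buffer (64 KB by default).
// ---------------------------------------------------------------------
class DecompressExample {
  static byte[] decompressAll(Decompressor decompressor, byte[] compressed)
      throws IOException {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    byte[] buffer = new byte[64 * 1024];
    decompressor.setInput(compressed, 0, compressed.length);
    while (!decompressor.finished()) {
      int n = decompressor.decompress(buffer, 0, buffer.length);
      if (n == 0 && decompressor.needsInput()) {
        break; // nothing more to offer in this single-shot sketch
      }
      out.write(buffer, 0, n);
    }
    return out.toByteArray();
  }
}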
| 7,228 | 28.149194 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.service;
import org.apache.hadoop.classification.InterfaceAudience;
| 927 | 41.181818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LifecycleEvent.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* A serializable lifecycle event: the time a state
* transition occurred, and what state was entered.
*/
@Public
@Evolving
public class LifecycleEvent implements Serializable {
private static final long serialVersionUID = 1648576996238247836L;
/**
* Local time in milliseconds when the event occurred
*/
public long time;
/**
* new state
*/
public Service.STATE state;
}
| 1,412 | 31.113636 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
/**
* This is the base implementation class for services.
*/
@Public
@Evolving
public abstract class AbstractService implements Service {
private static final Log LOG = LogFactory.getLog(AbstractService.class);
/**
* Service name.
*/
private final String name;
/** service state */
private final ServiceStateModel stateModel;
/**
* Service start time. Will be zero until the service is started.
*/
private long startTime;
/**
* The configuration. Will be null until the service is initialized.
*/
private volatile Configuration config;
/**
* List of state change listeners; it is final to ensure
* that it will never be null.
*/
private final ServiceOperations.ServiceListeners listeners
= new ServiceOperations.ServiceListeners();
/**
* Static listeners to all events across all services
*/
private static ServiceOperations.ServiceListeners globalListeners
= new ServiceOperations.ServiceListeners();
/**
   * The cause of any failure -will be null
   * if a service did not stop due to a failure.
*/
private Exception failureCause;
/**
* the state in which the service was when it failed.
* Only valid when the service is stopped due to a failure
*/
private STATE failureState = null;
/**
* object used to co-ordinate {@link #waitForServiceToStop(long)}
* across threads.
*/
private final AtomicBoolean terminationNotification =
new AtomicBoolean(false);
/**
* History of lifecycle transitions
*/
private final List<LifecycleEvent> lifecycleHistory
= new ArrayList<LifecycleEvent>(5);
/**
* Map of blocking dependencies
*/
private final Map<String,String> blockerMap = new HashMap<String, String>();
private final Object stateChangeLock = new Object();
/**
* Construct the service.
* @param name service name
*/
public AbstractService(String name) {
this.name = name;
stateModel = new ServiceStateModel(name);
}
@Override
public final STATE getServiceState() {
return stateModel.getState();
}
@Override
public final synchronized Throwable getFailureCause() {
return failureCause;
}
@Override
public synchronized STATE getFailureState() {
return failureState;
}
/**
* Set the configuration for this service.
* This method is called during {@link #init(Configuration)}
* and should only be needed if for some reason a service implementation
* needs to override that initial setting -for example replacing
* it with a new subclass of {@link Configuration}
* @param conf new configuration.
*/
protected void setConfig(Configuration conf) {
this.config = conf;
}
/**
* {@inheritDoc}
* This invokes {@link #serviceInit}
* @param conf the configuration of the service. This must not be null
* @throws ServiceStateException if the configuration was null,
* the state change not permitted, or something else went wrong
*/
@Override
public void init(Configuration conf) {
if (conf == null) {
throw new ServiceStateException("Cannot initialize service "
+ getName() + ": null configuration");
}
if (isInState(STATE.INITED)) {
return;
}
synchronized (stateChangeLock) {
if (enterState(STATE.INITED) != STATE.INITED) {
setConfig(conf);
try {
serviceInit(config);
if (isInState(STATE.INITED)) {
//if the service ended up here during init,
//notify the listeners
notifyListeners();
}
} catch (Exception e) {
noteFailure(e);
ServiceOperations.stopQuietly(LOG, this);
throw ServiceStateException.convert(e);
}
}
}
}
/**
* {@inheritDoc}
* @throws ServiceStateException if the current service state does not permit
* this action
*/
@Override
public void start() {
if (isInState(STATE.STARTED)) {
return;
}
//enter the started state
synchronized (stateChangeLock) {
if (stateModel.enterState(STATE.STARTED) != STATE.STARTED) {
try {
startTime = System.currentTimeMillis();
serviceStart();
if (isInState(STATE.STARTED)) {
//if the service started (and isn't now in a later state), notify
if (LOG.isDebugEnabled()) {
LOG.debug("Service " + getName() + " is started");
}
notifyListeners();
}
} catch (Exception e) {
noteFailure(e);
ServiceOperations.stopQuietly(LOG, this);
throw ServiceStateException.convert(e);
}
}
}
}
/**
* {@inheritDoc}
*/
@Override
public void stop() {
if (isInState(STATE.STOPPED)) {
return;
}
synchronized (stateChangeLock) {
if (enterState(STATE.STOPPED) != STATE.STOPPED) {
try {
serviceStop();
} catch (Exception e) {
//stop-time exceptions are logged if they are the first one,
noteFailure(e);
throw ServiceStateException.convert(e);
} finally {
//report that the service has terminated
terminationNotification.set(true);
synchronized (terminationNotification) {
terminationNotification.notifyAll();
}
//notify anything listening for events
notifyListeners();
}
} else {
//already stopped: note it
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring re-entrant call to stop()");
}
}
}
}
/**
* Relay to {@link #stop()}
* @throws IOException
*/
@Override
public final void close() throws IOException {
stop();
}
/**
* Failure handling: record the exception
* that triggered it -if there was not one already.
* Services are free to call this themselves.
* @param exception the exception
*/
protected final void noteFailure(Exception exception) {
if (LOG.isDebugEnabled()) {
LOG.debug("noteFailure " + exception, null);
}
if (exception == null) {
//make sure failure logic doesn't itself cause problems
return;
}
//record the failure details, and log it
synchronized (this) {
if (failureCause == null) {
failureCause = exception;
failureState = getServiceState();
LOG.info("Service " + getName()
+ " failed in state " + failureState
+ "; cause: " + exception,
exception);
}
}
}
@Override
public final boolean waitForServiceToStop(long timeout) {
boolean completed = terminationNotification.get();
while (!completed) {
try {
synchronized(terminationNotification) {
terminationNotification.wait(timeout);
}
// here there has been a timeout, the object has terminated,
// or there has been a spurious wakeup (which we ignore)
completed = true;
} catch (InterruptedException e) {
// interrupted; have another look at the flag
completed = terminationNotification.get();
}
}
return terminationNotification.get();
}
/* ===================================================================== */
/* Override Points */
/* ===================================================================== */
/**
* All initialization code needed by a service.
*
* This method will only ever be called once during the lifecycle of
* a specific service instance.
*
* Implementations do not need to be synchronized as the logic
* in {@link #init(Configuration)} prevents re-entrancy.
*
* The base implementation checks to see if the subclass has created
* a new configuration instance, and if so, updates the base class value
* @param conf configuration
* @throws Exception on a failure -these will be caught,
   * possibly wrapped, and will trigger a service stop
*/
protected void serviceInit(Configuration conf) throws Exception {
if (conf != config) {
LOG.debug("Config has been overridden during init");
setConfig(conf);
}
}
/**
* Actions called during the INITED to STARTED transition.
*
* This method will only ever be called once during the lifecycle of
* a specific service instance.
*
* Implementations do not need to be synchronized as the logic
* in {@link #start()} prevents re-entrancy.
*
* @throws Exception if needed -these will be caught,
* wrapped, and trigger a service stop
*/
protected void serviceStart() throws Exception {
}
/**
* Actions called during the transition to the STOPPED state.
*
* This method will only ever be called once during the lifecycle of
* a specific service instance.
*
* Implementations do not need to be synchronized as the logic
* in {@link #stop()} prevents re-entrancy.
*
* Implementations MUST write this to be robust against failures, including
* checks for null references -and for the first failure to not stop other
* attempts to shut down parts of the service.
*
* @throws Exception if needed -these will be caught and logged.
*/
protected void serviceStop() throws Exception {
}
@Override
public void registerServiceListener(ServiceStateChangeListener l) {
listeners.add(l);
}
@Override
public void unregisterServiceListener(ServiceStateChangeListener l) {
listeners.remove(l);
}
/**
* Register a global listener, which receives notifications
* from the state change events of all services in the JVM
* @param l listener
*/
public static void registerGlobalListener(ServiceStateChangeListener l) {
globalListeners.add(l);
}
/**
* unregister a global listener.
* @param l listener to unregister
* @return true if the listener was found (and then deleted)
*/
public static boolean unregisterGlobalListener(ServiceStateChangeListener l) {
return globalListeners.remove(l);
}
/**
* Package-scoped method for testing -resets the global listener list
*/
@VisibleForTesting
static void resetGlobalListeners() {
globalListeners.reset();
}
@Override
public String getName() {
return name;
}
@Override
public synchronized Configuration getConfig() {
return config;
}
@Override
public long getStartTime() {
return startTime;
}
/**
* Notify local and global listeners of state changes.
* Exceptions raised by listeners are NOT passed up.
*/
private void notifyListeners() {
try {
listeners.notifyListeners(this);
globalListeners.notifyListeners(this);
} catch (Throwable e) {
LOG.warn("Exception while notifying listeners of " + this + ": " + e,
e);
}
}
/**
* Add a state change event to the lifecycle history
*/
private void recordLifecycleEvent() {
LifecycleEvent event = new LifecycleEvent();
event.time = System.currentTimeMillis();
event.state = getServiceState();
lifecycleHistory.add(event);
}
@Override
public synchronized List<LifecycleEvent> getLifecycleHistory() {
return new ArrayList<LifecycleEvent>(lifecycleHistory);
}
/**
   * Enter a state; if the service was not already in that state, record the
   * transition via {@link #recordLifecycleEvent} and log it.
   * The state model permits state re-entrancy.
   * @param newState the proposed new state
   * @return the original state
*/
private STATE enterState(STATE newState) {
assert stateModel != null : "null state in " + name + " " + this.getClass();
STATE oldState = stateModel.enterState(newState);
if (oldState != newState) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Service: " + getName() + " entered state " + getServiceState());
}
recordLifecycleEvent();
}
return oldState;
}
@Override
public final boolean isInState(Service.STATE expected) {
return stateModel.isInState(expected);
}
@Override
public String toString() {
return "Service " + name + " in state " + stateModel;
}
/**
* Put a blocker to the blocker map -replacing any
* with the same name.
* @param name blocker name
* @param details any specifics on the block. This must be non-null.
*/
protected void putBlocker(String name, String details) {
synchronized (blockerMap) {
blockerMap.put(name, details);
}
}
/**
* Remove a blocker from the blocker map -
* this is a no-op if the blocker is not present
* @param name the name of the blocker
*/
public void removeBlocker(String name) {
synchronized (blockerMap) {
blockerMap.remove(name);
}
}
@Override
public Map<String, String> getBlockers() {
synchronized (blockerMap) {
Map<String, String> map = new HashMap<String, String>(blockerMap);
return map;
}
}
}
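// ---------------------------------------------------------------------
// Illustrative sketch, not part of Hadoop: a minimal AbstractService
// subclass exercising the three override points above. EchoService and
// its worker thread are hypothetical; a real service would do actual
// work in serviceStart()/serviceStop().
// ---------------------------------------------------------------------
class EchoService extends AbstractService {
  private volatile Thread worker;

  EchoService() {
    super("EchoService");
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    // read any settings needed from conf here
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    worker = new Thread(new Runnable() {
      @Override
      public void run() {
        // the service's work would go here
      }
    }, getName());
    worker.start();
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    // must be robust against stop() before start(): worker may be null
    if (worker != null) {
      worker.interrupt();
    }
    super.serviceStop();
  }
}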
| 14,267 | 27.479042 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/Service.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
* Service LifeCycle.
*/
@Public
@Evolving
public interface Service extends Closeable {
/**
* Service states
*/
public enum STATE {
/** Constructed but not initialized */
NOTINITED(0, "NOTINITED"),
/** Initialized but not started or stopped */
INITED(1, "INITED"),
/** started and not stopped */
STARTED(2, "STARTED"),
/** stopped. No further state transitions are permitted */
STOPPED(3, "STOPPED");
/**
* An integer value for use in array lookup and JMX interfaces.
* Although {@link Enum#ordinal()} could do this, explicitly
     * identifying the numbers gives more stability guarantees over time.
*/
private final int value;
/**
* A name of the state that can be used in messages
*/
private final String statename;
private STATE(int value, String name) {
this.value = value;
this.statename = name;
}
/**
* Get the integer value of a state
* @return the numeric value of the state
*/
public int getValue() {
return value;
}
/**
* Get the name of a state
* @return the state's name
*/
@Override
public String toString() {
return statename;
}
}
/**
* Initialize the service.
*
* The transition MUST be from {@link STATE#NOTINITED} to {@link STATE#INITED}
* unless the operation failed and an exception was raised, in which case
* {@link #stop()} MUST be invoked and the service enter the state
* {@link STATE#STOPPED}.
* @param config the configuration of the service
* @throws RuntimeException on any failure during the operation
*/
void init(Configuration config);
/**
* Start the service.
*
* The transition MUST be from {@link STATE#INITED} to {@link STATE#STARTED}
* unless the operation failed and an exception was raised, in which case
* {@link #stop()} MUST be invoked and the service enter the state
* {@link STATE#STOPPED}.
* @throws RuntimeException on any failure during the operation
*/
void start();
/**
* Stop the service. This MUST be a no-op if the service is already
* in the {@link STATE#STOPPED} state. It SHOULD be a best-effort attempt
* to stop all parts of the service.
*
* The implementation must be designed to complete regardless of the service
* state, including the initialized/uninitialized state of all its internal
* fields.
* @throws RuntimeException on any failure during the stop operation
*/
void stop();
/**
* A version of stop() that is designed to be usable in Java7 closure
* clauses.
* Implementation classes MUST relay this directly to {@link #stop()}
* @throws IOException never
* @throws RuntimeException on any failure during the stop operation
*/
void close() throws IOException;
/**
* Register a listener to the service state change events.
* If the supplied listener is already listening to this service,
* this method is a no-op.
* @param listener a new listener
*/
void registerServiceListener(ServiceStateChangeListener listener);
/**
* Unregister a previously registered listener of the service state
* change events. No-op if the listener is already unregistered.
* @param listener the listener to unregister.
*/
void unregisterServiceListener(ServiceStateChangeListener listener);
/**
* Get the name of this service.
* @return the service name
*/
String getName();
/**
* Get the configuration of this service.
* This is normally not a clone and may be manipulated, though there are no
* guarantees as to what the consequences of such actions may be
   * @return the current configuration, unless a specific implementation chooses
* otherwise.
*/
Configuration getConfig();
/**
* Get the current service state
* @return the state of the service
*/
STATE getServiceState();
/**
* Get the service start time
* @return the start time of the service. This will be zero if the service
* has not yet been started.
*/
long getStartTime();
/**
* Query to see if the service is in a specific state.
* In a multi-threaded system, the state may not hold for very long.
* @param state the expected state
* @return true if, at the time of invocation, the service was in that state.
*/
boolean isInState(STATE state);
/**
* Get the first exception raised during the service failure. If null,
* no exception was logged
* @return the failure logged during a transition to the stopped state
*/
Throwable getFailureCause();
/**
* Get the state in which the failure in {@link #getFailureCause()} occurred.
* @return the state or null if there was no failure
*/
STATE getFailureState();
/**
* Block waiting for the service to stop; uses the termination notification
* object to do so.
*
* This method will only return after all the service stop actions
* have been executed (to success or failure), or the timeout elapsed
* This method can be called before the service is inited or started; this is
* to eliminate any race condition with the service stopping before
* this event occurs.
* @param timeout timeout in milliseconds. A value of zero means "forever"
* @return true iff the service stopped in the time period
*/
boolean waitForServiceToStop(long timeout);
/**
* Get a snapshot of the lifecycle history; it is a static list
* @return a possibly empty but never null list of lifecycle events.
*/
public List<LifecycleEvent> getLifecycleHistory();
/**
* Get the blockers on a service -remote dependencies
* that are stopping the service from being <i>live</i>.
* @return a (snapshotted) map of blocker name->description values
*/
public Map<String, String> getBlockers();
}
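// ---------------------------------------------------------------------
// Illustrative sketch, not part of Hadoop: driving the lifecycle defined
// above. Because Service extends Closeable and close() relays to stop(),
// try-with-resources guarantees the STOPPED transition even on failure.
// ServiceDriver is a hypothetical class name; any concrete Service can
// be passed in.
// ---------------------------------------------------------------------
class ServiceDriver {
  static void runOnce(Service service, Configuration conf)
      throws IOException {
    try (Service s = service) {
      s.init(conf);   // NOTINITED -> INITED
      s.start();      // INITED -> STARTED
      // ... interact with the running service here ...
    }                 // close() relays to stop(): -> STOPPED
  }
}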
| 6,953 | 29.769912 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceStateChangeListener.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
/**
* Interface to notify state changes of a service.
*/
@Public
@Stable
public interface ServiceStateChangeListener {
/**
* Callback to notify of a state change. The service will already
* have changed state before this callback is invoked.
*
* This operation is invoked on the thread that initiated the state change,
   * while the service itself is in a synchronized section.
* <ol>
* <li>Any long-lived operation here will prevent the service state
* change from completing in a timely manner.</li>
* <li>If another thread is somehow invoked from the listener, and
* that thread invokes the methods of the service (including
* subclass-specific methods), there is a risk of a deadlock.</li>
* </ol>
*
*
* @param service the service that has changed.
*/
void stateChanged(Service service);
}
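// ---------------------------------------------------------------------
// Illustrative sketch, not part of Hadoop: a listener that only records
// the transitions it observes. Per the javadoc above it does no blocking
// work and never calls back into the service. RecordingListener is a
// hypothetical name (Hadoop ships LoggingStateChangeListener separately).
// ---------------------------------------------------------------------
class RecordingListener implements ServiceStateChangeListener {
  private final java.util.List<String> transitions =
      java.util.Collections.synchronizedList(new java.util.ArrayList<String>());

  @Override
  public void stateChanged(Service service) {
    // keep this cheap: note the new state and return immediately
    transitions.add(service.getName() + " -> " + service.getServiceState());
  }

  java.util.List<String> getTransitions() {
    return transitions;
  }
}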
| 1,819 | 34.686275 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceStateModel.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Implements the service state model.
*/
@Public
@Evolving
public class ServiceStateModel {
/**
* Map of all valid state transitions
* [current] [proposed1, proposed2, ...]
*/
private static final boolean[][] statemap =
{
// uninited inited started stopped
/* uninited */ {false, true, false, true},
/* inited */ {false, true, true, true},
/* started */ {false, false, true, true},
/* stopped */ {false, false, false, true},
};
/**
* The state of the service
*/
private volatile Service.STATE state;
/**
* The name of the service: used in exceptions
*/
private String name;
/**
* Create the service state model in the {@link Service.STATE#NOTINITED}
* state.
*/
public ServiceStateModel(String name) {
this(name, Service.STATE.NOTINITED);
}
/**
* Create a service state model instance in the chosen state
* @param state the starting state
*/
public ServiceStateModel(String name, Service.STATE state) {
this.state = state;
this.name = name;
}
/**
* Query the service state. This is a non-blocking operation.
* @return the state
*/
public Service.STATE getState() {
return state;
}
/**
* Query that the state is in a specific state
* @param proposed proposed new state
* @return the state
*/
public boolean isInState(Service.STATE proposed) {
return state.equals(proposed);
}
/**
   * Verify that a service is in a given state.
* @param expectedState the desired state
* @throws ServiceStateException if the service state is different from
* the desired state
*/
public void ensureCurrentState(Service.STATE expectedState) {
if (state != expectedState) {
throw new ServiceStateException(name+ ": for this operation, the " +
"current service state must be "
+ expectedState
+ " instead of " + state);
}
}
/**
* Enter a state -thread safe.
*
* @param proposed proposed new state
* @return the original state
* @throws ServiceStateException if the transition is not permitted
*/
public synchronized Service.STATE enterState(Service.STATE proposed) {
checkStateTransition(name, state, proposed);
Service.STATE oldState = state;
//atomic write of the new state
state = proposed;
return oldState;
}
/**
   * Check that a state transition is valid and
* throw an exception if not
* @param name name of the service (can be null)
* @param state current state
* @param proposed proposed new state
*/
public static void checkStateTransition(String name,
Service.STATE state,
Service.STATE proposed) {
if (!isValidStateTransition(state, proposed)) {
throw new ServiceStateException(name + " cannot enter state "
+ proposed + " from state " + state);
}
}
/**
* Is a state transition valid?
* There are no checks for current==proposed
* as that is considered a non-transition.
*
* using an array kills off all branch misprediction costs, at the expense
* of cache line misses.
*
* @param current current state
* @param proposed proposed new state
* @return true if the transition to a new state is valid
*/
public static boolean isValidStateTransition(Service.STATE current,
Service.STATE proposed) {
boolean[] row = statemap[current.getValue()];
return row[proposed.getValue()];
}
/**
* return the state text as the toString() value
* @return the current state's description
*/
@Override
public String toString() {
return (name.isEmpty() ? "" : ((name) + ": "))
+ state.toString();
}
}
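// ---------------------------------------------------------------------
// Illustrative sketch, not part of Hadoop: probing the transition table
// above without triggering ServiceStateException. StateModelExample is
// a hypothetical class name.
// ---------------------------------------------------------------------
class StateModelExample {
  static void demo() {
    ServiceStateModel model = new ServiceStateModel("demo");
    // Valid path: NOTINITED -> INITED -> STARTED -> STOPPED
    model.enterState(Service.STATE.INITED);
    model.enterState(Service.STATE.STARTED);
    // Ask first instead of catching the exception from enterState().
    if (ServiceStateModel.isValidStateTransition(model.getState(),
                                                 Service.STATE.STOPPED)) {
      model.enterState(Service.STATE.STOPPED);
    }
  }
}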
| 4,958 | 29.423313 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
/**
* Composition of services.
*/
@Public
@Evolving
public class CompositeService extends AbstractService {
private static final Log LOG = LogFactory.getLog(CompositeService.class);
/**
* Policy on shutdown: attempt to close everything (purest) or
* only try to close started services (which assumes
* that the service implementations may not handle the stop() operation
   * except when started).
* Irrespective of this policy, if a child service fails during
* its init() or start() operations, it will have stop() called on it.
*/
protected static final boolean STOP_ONLY_STARTED_SERVICES = false;
private final List<Service> serviceList = new ArrayList<Service>();
public CompositeService(String name) {
super(name);
}
/**
* Get a cloned list of services
* @return a list of child services at the time of invocation -
* added services will not be picked up.
*/
public List<Service> getServices() {
synchronized (serviceList) {
return new ArrayList<Service>(serviceList);
}
}
/**
* Add the passed {@link Service} to the list of services managed by this
* {@link CompositeService}
* @param service the {@link Service} to be added
*/
protected void addService(Service service) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding service " + service.getName());
}
synchronized (serviceList) {
serviceList.add(service);
}
}
/**
* If the passed object is an instance of {@link Service},
* add it to the list of services managed by this {@link CompositeService}
   * @param object the object to add if it is a {@link Service}
* @return true if a service is added, false otherwise.
*/
protected boolean addIfService(Object object) {
if (object instanceof Service) {
addService((Service) object);
return true;
} else {
return false;
}
}
protected synchronized boolean removeService(Service service) {
synchronized (serviceList) {
return serviceList.remove(service);
}
}
protected void serviceInit(Configuration conf) throws Exception {
List<Service> services = getServices();
if (LOG.isDebugEnabled()) {
LOG.debug(getName() + ": initing services, size=" + services.size());
}
for (Service service : services) {
service.init(conf);
}
super.serviceInit(conf);
}
protected void serviceStart() throws Exception {
List<Service> services = getServices();
if (LOG.isDebugEnabled()) {
LOG.debug(getName() + ": starting services, size=" + services.size());
}
for (Service service : services) {
// start the service. If this fails that service
// will be stopped and an exception raised
service.start();
}
super.serviceStart();
}
protected void serviceStop() throws Exception {
//stop all services that were started
int numOfServicesToStop = serviceList.size();
if (LOG.isDebugEnabled()) {
LOG.debug(getName() + ": stopping services, size=" + numOfServicesToStop);
}
stop(numOfServicesToStop, STOP_ONLY_STARTED_SERVICES);
super.serviceStop();
}
/**
* Stop the services in reverse order
*
* @param numOfServicesStarted index from where the stop should work
   * @param stopOnlyStartedServices flag to say "only stop services that are
   *   started, not those that are NOTINITED or INITED".
* @throws RuntimeException the first exception raised during the
* stop process -<i>after all services are stopped</i>
*/
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
// stop in reverse order of start
Exception firstException = null;
List<Service> services = getServices();
for (int i = numOfServicesStarted - 1; i >= 0; i--) {
Service service = services.get(i);
if (LOG.isDebugEnabled()) {
LOG.debug("Stopping service #" + i + ": " + service);
}
STATE state = service.getServiceState();
      //depending on the stop policy
if (state == STATE.STARTED
|| (!stopOnlyStartedServices && state == STATE.INITED)) {
Exception ex = ServiceOperations.stopQuietly(LOG, service);
if (ex != null && firstException == null) {
firstException = ex;
}
}
}
//after stopping all services, rethrow the first exception raised
if (firstException != null) {
throw ServiceStateException.convert(firstException);
}
}
/**
   * JVM Shutdown hook for CompositeService which will stop the given
* CompositeService gracefully in case of JVM shutdown.
*/
public static class CompositeServiceShutdownHook implements Runnable {
private CompositeService compositeService;
public CompositeServiceShutdownHook(CompositeService compositeService) {
this.compositeService = compositeService;
}
@Override
public void run() {
ServiceOperations.stopQuietly(compositeService);
}
}
}
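// ---------------------------------------------------------------------
// Illustrative sketch, not part of Hadoop: a parent service owning two
// children. Registering them with addService() in serviceInit() is
// enough - the CompositeService relays init/start to them in order and
// stop in reverse order. ParentService and the child names are
// hypothetical.
// ---------------------------------------------------------------------
class ParentService extends CompositeService {
  ParentService() {
    super("ParentService");
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    addService(new AbstractService("child-1") { });
    addService(new AbstractService("child-2") { });
    super.serviceInit(conf);  // inits the children just added
  }
}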
| 6,081 | 31.351064 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* This class contains a set of methods to work with services, especially
* to walk them through their lifecycle.
*/
@Public
@Evolving
public final class ServiceOperations {
private static final Log LOG = LogFactory.getLog(AbstractService.class);
private ServiceOperations() {
}
/**
* Stop a service.
* <p/>Do nothing if the service is null or not
* in a state in which it can be/needs to be stopped.
* <p/>
* The service state is checked <i>before</i> the operation begins.
* This process is <i>not</i> thread safe.
* @param service a service or null
*/
public static void stop(Service service) {
if (service != null) {
service.stop();
}
}
/**
* Stop a service; if it is null do nothing. Exceptions are caught and
   * logged at warn level (but not Throwables). This operation is intended to
* be used in cleanup operations
*
* @param service a service; may be null
* @return any exception that was caught; null if none was.
*/
public static Exception stopQuietly(Service service) {
return stopQuietly(LOG, service);
}
/**
* Stop a service; if it is null do nothing. Exceptions are caught and
   * logged at warn level (but not Throwables). This operation is intended to
* be used in cleanup operations
*
* @param log the log to warn at
* @param service a service; may be null
* @return any exception that was caught; null if none was.
* @see ServiceOperations#stopQuietly(Service)
*/
public static Exception stopQuietly(Log log, Service service) {
try {
stop(service);
} catch (Exception e) {
log.warn("When stopping the service " + service.getName()
+ " : " + e,
e);
return e;
}
return null;
}
/**
* Class to manage a list of {@link ServiceStateChangeListener} instances,
* including a notification loop that is robust against changes to the list
* during the notification process.
*/
public static class ServiceListeners {
/**
* List of state change listeners; it is final to guarantee
* that it will never be null.
*/
private final List<ServiceStateChangeListener> listeners =
new ArrayList<ServiceStateChangeListener>();
/**
* Thread-safe addition of a new listener to the end of a list.
* Attempts to re-register a listener that is already registered
* will be ignored.
* @param l listener
*/
public synchronized void add(ServiceStateChangeListener l) {
if(!listeners.contains(l)) {
listeners.add(l);
}
}
/**
* Remove any registration of a listener from the listener list.
* @param l listener
* @return true if the listener was found (and then removed)
*/
public synchronized boolean remove(ServiceStateChangeListener l) {
return listeners.remove(l);
}
/**
* Reset the listener list
*/
public synchronized void reset() {
listeners.clear();
}
/**
* Change to a new state and notify all listeners.
* This method will block until all notifications have been issued.
* It caches the list of listeners before the notification begins,
* so additions or removal of listeners will not be visible.
* @param service the service that has changed state
*/
public void notifyListeners(Service service) {
//take a very fast snapshot of the callback list
//very much like CopyOnWriteArrayList, only more minimal
ServiceStateChangeListener[] callbacks;
synchronized (this) {
callbacks = listeners.toArray(new ServiceStateChangeListener[listeners.size()]);
}
//iterate through the listeners outside the synchronized method,
//ensuring that listener registration/unregistration doesn't break anything
for (ServiceStateChangeListener l : callbacks) {
l.stateChanged(service);
}
}
}
}
| 5,062 | 31.664516 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceStateException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Exception that is raised on state change operations.
*/
@Public
@Evolving
public class ServiceStateException extends RuntimeException {
private static final long serialVersionUID = 1110000352259232646L;
public ServiceStateException(String message) {
super(message);
}
public ServiceStateException(String message, Throwable cause) {
super(message, cause);
}
public ServiceStateException(Throwable cause) {
super(cause);
}
/**
* Convert any exception into a {@link RuntimeException}.
* If the caught exception is already of that type, it is typecast to a
* {@link RuntimeException} and returned.
*
* All other exception types are wrapped in a new instance of
* ServiceStateException
* @param fault exception or throwable
* @return a ServiceStateException to rethrow
*/
public static RuntimeException convert(Throwable fault) {
if (fault instanceof RuntimeException) {
return (RuntimeException) fault;
} else {
return new ServiceStateException(fault);
}
}
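  // Illustrative usage (added for clarity; not part of the original class):
  // convert() is meant for rethrowing checked exceptions out of lifecycle
  // methods, e.g.
  //
  //   try {
  //     doStart();                                // hypothetical method
  //   } catch (IOException e) {
  //     throw ServiceStateException.convert(e);   // wraps only if needed
  //   }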
/**
* Convert any exception into a {@link RuntimeException}.
* If the caught exception is already of that type, it is typecast to a
* {@link RuntimeException} and returned.
*
* All other exception types are wrapped in a new instance of
* ServiceStateException
* @param text text to use if a new exception is created
* @param fault exception or throwable
* @return a ServiceStateException to rethrow
*/
public static RuntimeException convert(String text, Throwable fault) {
if (fault instanceof RuntimeException) {
return (RuntimeException) fault;
} else {
return new ServiceStateException(text, fault);
}
}
}
| 2,711 | 32.073171 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/LoggingStateChangeListener.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* This is a state change listener that logs events at INFO level
*/
@Public
@Evolving
public class LoggingStateChangeListener implements ServiceStateChangeListener {
private static final Log LOG = LogFactory.getLog(LoggingStateChangeListener.class);
private final Log log;
/**
* Log events to the given log
* @param log destination for events
*/
public LoggingStateChangeListener(Log log) {
//force an NPE if a null log came in
log.isDebugEnabled();
this.log = log;
}
/**
* Log events to the static log for this class
*/
public LoggingStateChangeListener() {
this(LOG);
}
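  // Illustrative usage (added for clarity; not part of the original class):
  // register the listener on a service so that every state transition is
  // logged, e.g.
  //
  //   service.registerServiceListener(new LoggingStateChangeListener());
  //
  // where "service" is an assumed Service instance.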
/**
* Callback for a state change event: log it
* @param service the service that has changed.
*/
@Override
public void stateChanged(Service service) {
log.info("Entry to state " + service.getServiceState()
+ " for " + service.getName());
}
}
| 1,989 | 30.09375 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A wrapper class around Maven's ComparableVersion class, used to compare
 * version name strings that follow Maven's version naming convention.
*/
@InterfaceAudience.Private
public abstract class VersionUtil {
/**
* Compares two version name strings using maven's ComparableVersion class.
*
* @param version1
* the first version to compare
* @param version2
* the second version to compare
* @return a negative integer if version1 precedes version2, a positive
* integer if version2 precedes version1, and 0 if and only if the two
* versions are equal.
*/
public static int compareVersions(String version1, String version2) {
ComparableVersion v1 = new ComparableVersion(version1);
ComparableVersion v2 = new ComparableVersion(version2);
return v1.compareTo(v2);
}
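  // Illustrative examples (added for clarity; not part of the original
  // class), following Maven's ComparableVersion ordering rules:
  //
  //   compareVersions("2.7.1", "2.7.10") < 0     // 10 orders after 1
  //   compareVersions("1.0", "1.0.0") == 0       // trailing zeros are normalized
  //   compareVersions("1.0-alpha-1", "1.0") < 0  // alpha precedes the release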
}
| 1,723 | 37.311111 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
/**
* Plugin to calculate resource information on Windows systems.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SysInfoWindows extends SysInfo {
private static final Log LOG = LogFactory.getLog(SysInfoWindows.class);
private long vmemSize;
private long memSize;
private long vmemAvailable;
private long memAvailable;
private int numProcessors;
private long cpuFrequencyKhz;
private long cumulativeCpuTimeMs;
private float cpuUsage;
private long lastRefreshTime;
static final int REFRESH_INTERVAL_MS = 1000;
public SysInfoWindows() {
lastRefreshTime = 0;
reset();
}
@VisibleForTesting
long now() {
    // Return milliseconds rather than raw nanoseconds so that the comparison
    // against REFRESH_INTERVAL_MS (and the CPU usage calculation) uses
    // consistent units.
    return System.nanoTime() / 1000000L;
}
void reset() {
vmemSize = -1;
memSize = -1;
vmemAvailable = -1;
memAvailable = -1;
numProcessors = -1;
cpuFrequencyKhz = -1;
cumulativeCpuTimeMs = -1;
cpuUsage = -1;
}
String getSystemInfoInfoFromShell() {
ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
new String[] {Shell.WINUTILS, "systeminfo" });
try {
shellExecutor.execute();
return shellExecutor.getOutput();
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
}
return null;
}
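  // Note added for clarity (not part of the original source): the first line
  // of the winutils "systeminfo" output is expected to be a single CSV record
  // of seven numeric fields, in this order:
  //   vmemSize,memSize,vmemAvailable,memAvailable,numProcessors,
  //   cpuFrequencyKhz,cumulativeCpuTimeMs
  // refreshIfNeeded() below parses exactly that shape.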
void refreshIfNeeded() {
long now = now();
if (now - lastRefreshTime > REFRESH_INTERVAL_MS) {
long refreshInterval = now - lastRefreshTime;
lastRefreshTime = now;
long lastCumCpuTimeMs = cumulativeCpuTimeMs;
reset();
String sysInfoStr = getSystemInfoInfoFromShell();
if (sysInfoStr != null) {
final int sysInfoSplitCount = 7;
String[] sysInfo = sysInfoStr.substring(0, sysInfoStr.indexOf("\r\n"))
.split(",");
if (sysInfo.length == sysInfoSplitCount) {
try {
vmemSize = Long.parseLong(sysInfo[0]);
memSize = Long.parseLong(sysInfo[1]);
vmemAvailable = Long.parseLong(sysInfo[2]);
memAvailable = Long.parseLong(sysInfo[3]);
numProcessors = Integer.parseInt(sysInfo[4]);
cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
if (lastCumCpuTimeMs != -1) {
cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
/ (refreshInterval * 1.0f);
}
} catch (NumberFormatException nfe) {
LOG.warn("Error parsing sysInfo", nfe);
}
} else {
LOG.warn("Expected split length of sysInfo to be "
+ sysInfoSplitCount + ". Got " + sysInfo.length);
}
}
}
}
/** {@inheritDoc} */
@Override
public long getVirtualMemorySize() {
refreshIfNeeded();
return vmemSize;
}
/** {@inheritDoc} */
@Override
public long getPhysicalMemorySize() {
refreshIfNeeded();
return memSize;
}
/** {@inheritDoc} */
@Override
public long getAvailableVirtualMemorySize() {
refreshIfNeeded();
return vmemAvailable;
}
/** {@inheritDoc} */
@Override
public long getAvailablePhysicalMemorySize() {
refreshIfNeeded();
return memAvailable;
}
/** {@inheritDoc} */
@Override
public int getNumProcessors() {
refreshIfNeeded();
return numProcessors;
}
/** {@inheritDoc} */
@Override
public int getNumCores() {
return getNumProcessors();
}
/** {@inheritDoc} */
@Override
public long getCpuFrequency() {
refreshIfNeeded();
return cpuFrequencyKhz;
}
/** {@inheritDoc} */
@Override
public long getCumulativeCpuTime() {
refreshIfNeeded();
return cumulativeCpuTimeMs;
}
/** {@inheritDoc} */
@Override
public float getCpuUsage() {
refreshIfNeeded();
return cpuUsage;
}
/** {@inheritDoc} */
@Override
public long getNetworkBytesRead() {
// TODO unimplemented
return 0L;
}
/** {@inheritDoc} */
@Override
public long getNetworkBytesWritten() {
// TODO unimplemented
return 0L;
}
@Override
public long getStorageBytesRead() {
// TODO unimplemented
return 0L;
}
@Override
public long getStorageBytesWritten() {
// TODO unimplemented
return 0L;
}
}
| 5,398 | 24.832536 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
|
// Code source of this file:
// http://grepcode.com/file/repo1.maven.org/maven2/
// org.apache.maven/maven-artifact/3.1.1/
// org/apache/maven/artifact/versioning/ComparableVersion.java/
//
// Modifications made on top of the source:
// 1. Changed
// package org.apache.maven.artifact.versioning;
// to
// package org.apache.hadoop.util;
// 2. Removed author tags to clear hadoop author tag warning
//
package org.apache.hadoop.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Properties;
import java.util.Stack;
/**
* Generic implementation of version comparison.
*
* <p>Features:
* <ul>
* <li>mixing of '<code>-</code>' (dash) and '<code>.</code>' (dot) separators,</li>
* <li>transition between characters and digits also constitutes a separator:
* <code>1.0alpha1 => [1, 0, alpha, 1]</code></li>
* <li>unlimited number of version components,</li>
* <li>version components in the text can be digits or strings,</li>
* <li>strings are checked for well-known qualifiers and the qualifier ordering is used for version ordering.
* Well-known qualifiers (case insensitive) are:<ul>
* <li><code>alpha</code> or <code>a</code></li>
* <li><code>beta</code> or <code>b</code></li>
* <li><code>milestone</code> or <code>m</code></li>
* <li><code>rc</code> or <code>cr</code></li>
* <li><code>snapshot</code></li>
* <li><code>(the empty string)</code> or <code>ga</code> or <code>final</code></li>
* <li><code>sp</code></li>
* </ul>
* Unknown qualifiers are considered after known qualifiers, with lexical order (always case insensitive),
* </li>
* <li>a dash usually precedes a qualifier, and is always less important than something preceded with a dot.</li>
* </ul></p>
*
* @see <a href="https://cwiki.apache.org/confluence/display/MAVENOLD/Versioning">"Versioning" on Maven Wiki</a>
*/
public class ComparableVersion
implements Comparable<ComparableVersion>
{
private String value;
private String canonical;
private ListItem items;
private interface Item
{
int INTEGER_ITEM = 0;
int STRING_ITEM = 1;
int LIST_ITEM = 2;
int compareTo( Item item );
int getType();
boolean isNull();
}
/**
* Represents a numeric item in the version item list.
*/
private static class IntegerItem
implements Item
{
private static final BigInteger BIG_INTEGER_ZERO = new BigInteger( "0" );
private final BigInteger value;
public static final IntegerItem ZERO = new IntegerItem();
private IntegerItem()
{
this.value = BIG_INTEGER_ZERO;
}
public IntegerItem( String str )
{
this.value = new BigInteger( str );
}
public int getType()
{
return INTEGER_ITEM;
}
public boolean isNull()
{
return BIG_INTEGER_ZERO.equals( value );
}
public int compareTo( Item item )
{
if ( item == null )
{
return BIG_INTEGER_ZERO.equals( value ) ? 0 : 1; // 1.0 == 1, 1.1 > 1
}
switch ( item.getType() )
{
case INTEGER_ITEM:
return value.compareTo( ( (IntegerItem) item ).value );
case STRING_ITEM:
return 1; // 1.1 > 1-sp
case LIST_ITEM:
return 1; // 1.1 > 1-1
default:
throw new RuntimeException( "invalid item: " + item.getClass() );
}
}
public String toString()
{
return value.toString();
}
}
/**
* Represents a string in the version item list, usually a qualifier.
*/
private static class StringItem
implements Item
{
private static final String[] QUALIFIERS = { "alpha", "beta", "milestone", "rc", "snapshot", "", "sp" };
private static final List<String> _QUALIFIERS = Arrays.asList( QUALIFIERS );
private static final Properties ALIASES = new Properties();
static
{
ALIASES.put( "ga", "" );
ALIASES.put( "final", "" );
ALIASES.put( "cr", "rc" );
}
/**
* A comparable value for the empty-string qualifier. This one is used to determine if a given qualifier makes
* the version older than one without a qualifier, or more recent.
*/
private static final String RELEASE_VERSION_INDEX = String.valueOf( _QUALIFIERS.indexOf( "" ) );
private String value;
public StringItem( String value, boolean followedByDigit )
{
if ( followedByDigit && value.length() == 1 )
{
// a1 = alpha-1, b1 = beta-1, m1 = milestone-1
switch ( value.charAt( 0 ) )
{
case 'a':
value = "alpha";
break;
case 'b':
value = "beta";
break;
case 'm':
value = "milestone";
break;
default:
break;
}
}
this.value = ALIASES.getProperty( value , value );
}
public int getType()
{
return STRING_ITEM;
}
public boolean isNull()
{
return ( comparableQualifier( value ).compareTo( RELEASE_VERSION_INDEX ) == 0 );
}
/**
* Returns a comparable value for a qualifier.
*
* This method takes into account the ordering of known qualifiers then unknown qualifiers with lexical ordering.
*
* just returning an Integer with the index here is faster, but requires a lot of if/then/else to check for -1
* or QUALIFIERS.size and then resort to lexical ordering. Most comparisons are decided by the first character,
* so this is still fast. If more characters are needed then it requires a lexical sort anyway.
*
* @param qualifier
* @return an equivalent value that can be used with lexical comparison
*/
public static String comparableQualifier( String qualifier )
{
int i = _QUALIFIERS.indexOf( qualifier );
return i == -1 ? ( _QUALIFIERS.size() + "-" + qualifier ) : String.valueOf( i );
}
public int compareTo( Item item )
{
if ( item == null )
{
// 1-rc < 1, 1-ga > 1
return comparableQualifier( value ).compareTo( RELEASE_VERSION_INDEX );
}
switch ( item.getType() )
{
case INTEGER_ITEM:
return -1; // 1.any < 1.1 ?
case STRING_ITEM:
return comparableQualifier( value ).compareTo( comparableQualifier( ( (StringItem) item ).value ) );
case LIST_ITEM:
return -1; // 1.any < 1-1
default:
throw new RuntimeException( "invalid item: " + item.getClass() );
}
}
public String toString()
{
return value;
}
}
/**
* Represents a version list item. This class is used both for the global item list and for sub-lists (which start
* with '-(number)' in the version specification).
*/
private static class ListItem
extends ArrayList<Item>
implements Item
{
public int getType()
{
return LIST_ITEM;
}
public boolean isNull()
{
return ( size() == 0 );
}
void normalize()
{
for ( ListIterator<Item> iterator = listIterator( size() ); iterator.hasPrevious(); )
{
Item item = iterator.previous();
if ( item.isNull() )
{
iterator.remove(); // remove null trailing items: 0, "", empty list
}
else
{
break;
}
}
}
public int compareTo( Item item )
{
if ( item == null )
{
if ( size() == 0 )
{
return 0; // 1-0 = 1- (normalize) = 1
}
Item first = get( 0 );
return first.compareTo( null );
}
switch ( item.getType() )
{
case INTEGER_ITEM:
return -1; // 1-1 < 1.0.x
case STRING_ITEM:
return 1; // 1-1 > 1-sp
case LIST_ITEM:
Iterator<Item> left = iterator();
Iterator<Item> right = ( (ListItem) item ).iterator();
while ( left.hasNext() || right.hasNext() )
{
Item l = left.hasNext() ? left.next() : null;
Item r = right.hasNext() ? right.next() : null;
// if this is shorter, then invert the compare and mul with -1
int result = l == null ? -1 * r.compareTo( l ) : l.compareTo( r );
if ( result != 0 )
{
return result;
}
}
return 0;
default:
throw new RuntimeException( "invalid item: " + item.getClass() );
}
}
public String toString()
{
StringBuilder buffer = new StringBuilder( "(" );
for ( Iterator<Item> iter = iterator(); iter.hasNext(); )
{
buffer.append( iter.next() );
if ( iter.hasNext() )
{
buffer.append( ',' );
}
}
buffer.append( ')' );
return buffer.toString();
}
}
public ComparableVersion( String version )
{
parseVersion( version );
}
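    // Illustrative examples (added for clarity; not part of the original
    // class) of the ordering rules described in the class javadoc:
    //
    //   new ComparableVersion( "1.0-alpha-1" ).compareTo( new ComparableVersion( "1.0" ) ) < 0
    //   new ComparableVersion( "1.0-SNAPSHOT" ).compareTo( new ComparableVersion( "1.0" ) ) < 0
    //   new ComparableVersion( "1.0" ).compareTo( new ComparableVersion( "1.0.0" ) ) == 0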
public final void parseVersion( String version )
{
this.value = version;
items = new ListItem();
version = StringUtils.toLowerCase(version);
ListItem list = items;
Stack<Item> stack = new Stack<Item>();
stack.push( list );
boolean isDigit = false;
int startIndex = 0;
for ( int i = 0; i < version.length(); i++ )
{
char c = version.charAt( i );
if ( c == '.' )
{
if ( i == startIndex )
{
list.add( IntegerItem.ZERO );
}
else
{
list.add( parseItem( isDigit, version.substring( startIndex, i ) ) );
}
startIndex = i + 1;
}
else if ( c == '-' )
{
if ( i == startIndex )
{
list.add( IntegerItem.ZERO );
}
else
{
list.add( parseItem( isDigit, version.substring( startIndex, i ) ) );
}
startIndex = i + 1;
if ( isDigit )
{
list.normalize(); // 1.0-* = 1-*
if ( ( i + 1 < version.length() ) && Character.isDigit( version.charAt( i + 1 ) ) )
{
// new ListItem only if previous were digits and new char is a digit,
// ie need to differentiate only 1.1 from 1-1
list.add( list = new ListItem() );
stack.push( list );
}
}
}
else if ( Character.isDigit( c ) )
{
if ( !isDigit && i > startIndex )
{
list.add( new StringItem( version.substring( startIndex, i ), true ) );
startIndex = i;
}
isDigit = true;
}
else
{
if ( isDigit && i > startIndex )
{
list.add( parseItem( true, version.substring( startIndex, i ) ) );
startIndex = i;
}
isDigit = false;
}
}
if ( version.length() > startIndex )
{
list.add( parseItem( isDigit, version.substring( startIndex ) ) );
}
while ( !stack.isEmpty() )
{
list = (ListItem) stack.pop();
list.normalize();
}
canonical = items.toString();
}
private static Item parseItem( boolean isDigit, String buf )
{
return isDigit ? new IntegerItem( buf ) : new StringItem( buf, false );
}
public int compareTo( ComparableVersion o )
{
return items.compareTo( o.items );
}
public String toString()
{
return value;
}
public boolean equals( Object o )
{
return ( o instanceof ComparableVersion ) && canonical.equals( ( (ComparableVersion) o ).canonical );
}
public int hashCode()
{
return canonical.hashCode();
}
}
| 14,516 | 29.306889 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
/**
* The IdentityHashStore stores (key, value) mappings in an array.
* It is similar to java.util.HashTable, but much more lightweight.
* Neither inserting nor removing an element ever leads to any garbage
* getting created (assuming the array doesn't need to be enlarged).
*
* Unlike HashTable, it compares keys using
* {@link System#identityHashCode(Object)} and the identity operator.
* This is useful for types like ByteBuffer which have expensive hashCode
* and equals operators.
*
* We use linear probing to resolve collisions. This avoids the need for
* the overhead of linked list data structures. It also means that it is
* expensive to attempt to remove an element that isn't there, since we
* have to look at the entire array to be sure that it doesn't exist.
*
* @param <K> The key type to use.
 * @param <V> The value type to use.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@SuppressWarnings("unchecked")
public final class IdentityHashStore<K, V> {
/**
* Even elements are keys; odd elements are values.
   * The array has 4 * capacity slots: two slots per entry at a load factor
   * of 0.50, where capacity is always a power of two.
*/
private Object buffer[];
private int numInserted = 0;
private int capacity;
/**
* The default maxCapacity value to use.
*/
private static final int DEFAULT_MAX_CAPACITY = 2;
public IdentityHashStore(int capacity) {
Preconditions.checkArgument(capacity >= 0);
if (capacity == 0) {
this.capacity = 0;
this.buffer = null;
} else {
// Round the capacity we need up to a power of 2.
realloc((int)Math.pow(2,
Math.ceil(Math.log(capacity) / Math.log(2))));
}
}
private void realloc(int newCapacity) {
Preconditions.checkArgument(newCapacity > 0);
Object prevBuffer[] = buffer;
this.capacity = newCapacity;
// Each element takes two array slots -- one for the key,
// and another for the value. We also want a load factor
// of 0.50. Combine those together and you get 4 * newCapacity.
this.buffer = new Object[4 * newCapacity];
this.numInserted = 0;
if (prevBuffer != null) {
for (int i = 0; i < prevBuffer.length; i += 2) {
if (prevBuffer[i] != null) {
putInternal(prevBuffer[i], prevBuffer[i + 1]);
}
}
}
}
private void putInternal(Object k, Object v) {
final int hash = System.identityHashCode(k);
final int numEntries = buffer.length >> 1;
//computing modulo with the assumption buffer.length is power of 2
int index = hash & (numEntries-1);
while (true) {
if (buffer[2 * index] == null) {
buffer[2 * index] = k;
buffer[1 + (2 * index)] = v;
numInserted++;
return;
}
index = (index + 1) % numEntries;
}
}
/**
* Add a new (key, value) mapping.
*
* Inserting a new (key, value) never overwrites a previous one.
* In other words, you can insert the same key multiple times and it will
* lead to multiple entries.
*/
public void put(K k, V v) {
Preconditions.checkNotNull(k);
if (buffer == null) {
realloc(DEFAULT_MAX_CAPACITY);
} else if (numInserted + 1 > capacity) {
realloc(capacity * 2);
}
putInternal(k, v);
}
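  // Illustrative usage (added for clarity; not part of the original class):
  // keys are compared by reference, so two distinct-but-equal objects get
  // separate entries.
  //
  //   IdentityHashStore<ByteBuffer, String> store =
  //       new IdentityHashStore<ByteBuffer, String>(4);
  //   ByteBuffer a = ByteBuffer.allocate(8);
  //   ByteBuffer b = ByteBuffer.allocate(8);  // equals(a), but a different object
  //   store.put(a, "first");
  //   store.put(b, "second");
  //   store.get(a);     // "first"  -- identity match
  //   store.remove(b);  // "second" -- removes only b's entry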
private int getElementIndex(K k) {
if (buffer == null) {
return -1;
}
final int numEntries = buffer.length >> 1;
final int hash = System.identityHashCode(k);
//computing modulo with the assumption buffer.length is power of 2
int index = hash & (numEntries -1);
int firstIndex = index;
do {
if (buffer[2 * index] == k) {
return index;
}
index = (index + 1) % numEntries;
} while (index != firstIndex);
return -1;
}
/**
* Retrieve a value associated with a given key.
*/
public V get(K k) {
int index = getElementIndex(k);
if (index < 0) {
return null;
}
return (V)buffer[1 + (2 * index)];
}
/**
* Retrieve a value associated with a given key, and delete the
* relevant entry.
*/
public V remove(K k) {
int index = getElementIndex(k);
if (index < 0) {
return null;
}
V val = (V)buffer[1 + (2 * index)];
buffer[2 * index] = null;
buffer[1 + (2 * index)] = null;
numInserted--;
return val;
}
public boolean isEmpty() {
return numInserted == 0;
}
public int numElements() {
return numInserted;
}
public int capacity() {
return capacity;
}
public interface Visitor<K, V> {
void accept(K k, V v);
}
/**
* Visit all key, value pairs in the IdentityHashStore.
*/
public void visitAll(Visitor<K, V> visitor) {
int length = buffer == null ? 0 : buffer.length;
for (int i = 0; i < length; i += 2) {
if (buffer[i] != null) {
visitor.accept((K)buffer[i], (V)buffer[i + 1]);
}
}
}
}
| 5,851 | 28.26 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServicePlugin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.Closeable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Service plug-in interface.
*
* Service plug-ins may be used to expose functionality of datanodes or
* namenodes using arbitrary RPC protocols. Plug-ins are instantiated by the
* service instance, and are notified of service life-cycle events using the
* methods defined by this class.
*
* Service plug-ins are started after the service instance is started, and
* stopped before the service instance is stopped.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface ServicePlugin extends Closeable {
/**
* This method is invoked when the service instance has been started.
*
* @param service The service instance invoking this method
*/
void start(Object service);
/**
* This method is invoked when the service instance is about to be shut down.
*/
void stop();
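  // Illustrative sketch (added for clarity; not part of the original
  // interface): a plug-in that brings up some endpoint when the service
  // starts. The endpoint itself is hypothetical and only stands in for real
  // plug-in work; close() simply delegates to stop().
  //
  //   class MyPlugin implements ServicePlugin {
  //     public void start(Object service) { /* bring up endpoint for service */ }
  //     public void stop()                { /* shut the endpoint down */ }
  //     public void close()               { stop(); }
  //   }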
}
| 1,820 | 34.019231 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* A {@link GSet} implementation by {@link HashMap}.
*/
@InterfaceAudience.Private
public class GSetByHashMap<K, E extends K> implements GSet<K, E> {
private final HashMap<K, E> m;
public GSetByHashMap(int initialCapacity, float loadFactor) {
m = new HashMap<K, E>(initialCapacity, loadFactor);
}
@Override
public int size() {
return m.size();
}
@Override
public boolean contains(K k) {
return m.containsKey(k);
}
@Override
public E get(K k) {
return m.get(k);
}
@Override
public E put(E element) {
if (element == null) {
throw new UnsupportedOperationException("Null element is not supported.");
}
return m.put(element, element);
}
@Override
public E remove(K k) {
return m.remove(k);
}
@Override
public Iterator<E> iterator() {
return m.values().iterator();
}
@Override
public void clear() {
m.clear();
}
}
| 1,872 | 24.310811 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.*;
import java.util.Calendar;
import javax.servlet.*;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.httpclient.URIException;
import org.apache.commons.httpclient.util.URIUtil;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ServletUtil {
/**
* Initial HTML header
*/
public static PrintWriter initHTML(ServletResponse response, String title
) throws IOException {
response.setContentType("text/html");
PrintWriter out = response.getWriter();
out.println("<html>\n"
+ "<link rel='stylesheet' type='text/css' href='/static/hadoop.css'>\n"
+ "<title>" + title + "</title>\n"
+ "<body>\n"
+ "<h1>" + title + "</h1>\n");
return out;
}
/**
* Get a parameter from a ServletRequest.
* Return null if the parameter contains only white spaces.
*/
public static String getParameter(ServletRequest request, String name) {
String s = request.getParameter(name);
if (s == null) {
return null;
}
s = s.trim();
return s.length() == 0? null: s;
}
/**
* @return a long value as passed in the given parameter, throwing
* an exception if it is not present or if it is not a valid number.
*/
public static long parseLongParam(ServletRequest request, String param)
throws IOException {
String paramStr = request.getParameter(param);
if (paramStr == null) {
throw new IOException("Invalid request has no " + param + " parameter");
}
return Long.parseLong(paramStr);
}
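  // Illustrative usage (added for clarity; not part of the original class):
  //
  //   // for a request such as  GET /servlet?offset=1024
  //   long offset = ServletUtil.parseLongParam(request, "offset");  // 1024
  //   // a missing value raises IOException; a non-numeric one,
  //   // NumberFormatException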
public static final String HTML_TAIL = "<hr />\n"
+ "<a href='http://hadoop.apache.org/core'>Hadoop</a>, "
+ Calendar.getInstance().get(Calendar.YEAR) + ".\n"
+ "</body></html>";
/**
* HTML footer to be added in the jsps.
* @return the HTML footer.
*/
public static String htmlFooter() {
return HTML_TAIL;
}
/**
* Escape and encode a string regarded as within the query component of an URI.
* @param value the value to encode
   * @return encoded query value; never null (an AssertionError is raised in
   *         the impossible case that UTF-8 is unsupported)
*/
public static String encodeQueryValue(final String value) {
try {
return URIUtil.encodeWithinQuery(value, "UTF-8");
} catch (URIException e) {
throw new AssertionError("JVM does not support UTF-8"); // should never happen!
}
}
/**
* Escape and encode a string regarded as the path component of an URI.
* @param path the path component to encode
   * @return encoded path; never null (an AssertionError is raised in the
   *         impossible case that UTF-8 is unsupported)
*/
public static String encodePath(final String path) {
try {
return URIUtil.encodePath(path, "UTF-8");
} catch (URIException e) {
throw new AssertionError("JVM does not support UTF-8"); // should never happen!
}
}
/**
* Parse and decode the path component from the given request.
* @param request Http request to parse
* @param servletName the name of servlet that precedes the path
   * @return decoded path component; never null (an AssertionError is raised
   *         in the impossible case that UTF-8 is unsupported)
*/
public static String getDecodedPath(final HttpServletRequest request, String servletName) {
try {
return URIUtil.decode(getRawPath(request, servletName), "UTF-8");
} catch (URIException e) {
throw new AssertionError("JVM does not support UTF-8"); // should never happen!
}
}
/**
* Parse the path component from the given request and return w/o decoding.
* @param request Http request to parse
* @param servletName the name of servlet that precedes the path
   * @return raw, undecoded path component
*/
public static String getRawPath(final HttpServletRequest request, String servletName) {
Preconditions.checkArgument(request.getRequestURI().startsWith(servletName+"/"));
return request.getRequestURI().substring(servletName.length());
}
}
| 4,889 | 33.928571 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IndexedSortable.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface for collections capable of being sorted by {@link IndexedSorter}
* algorithms.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public interface IndexedSortable {
/**
* Compare items at the given addresses consistent with the semantics of
* {@link java.util.Comparator#compare(Object, Object)}.
*/
int compare(int i, int j);
/**
* Swap items at the given addresses.
*/
void swap(int i, int j);
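  // Illustrative sketch (added for clarity; not part of the original
  // interface): a minimal implementation backed by a plain int array, usable
  // with an IndexedSorter such as HeapSort via sorter.sort(sortable, 0, arr.length).
  //
  //   class IntArraySortable implements IndexedSortable {
  //     private final int[] arr;
  //     IntArraySortable(int[] arr) { this.arr = arr; }
  //     public int compare(int i, int j) { return Integer.compare(arr[i], arr[j]); }
  //     public void swap(int i, int j) { int t = arr[i]; arr[i] = arr[j]; arr[j] = t; }
  //   }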
}
| 1,431 | 33.095238 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/MachineList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.net.InetAddresses;
/**
* Container class which holds a list of ip/host addresses and
* answers membership queries.
*
 * Accepts a list of ip addresses, ip addresses in CIDR format and/or
 * host addresses.
*/
public class MachineList {
public static final Log LOG = LogFactory.getLog(MachineList.class);
public static final String WILDCARD_VALUE = "*";
/**
* InetAddressFactory is used to obtain InetAddress from host.
* This class makes it easy to simulate host to ip mappings during testing.
   */
public static class InetAddressFactory {
static final InetAddressFactory S_INSTANCE = new InetAddressFactory();
public InetAddress getByName (String host) throws UnknownHostException {
return InetAddress.getByName(host);
}
}
private final boolean all;
private final Set<String> ipAddresses;
private final List<SubnetUtils.SubnetInfo> cidrAddresses;
private final Set<String> hostNames;
private final InetAddressFactory addressFactory;
/**
*
* @param hostEntries comma separated ip/cidr/host addresses
*/
public MachineList(String hostEntries) {
this(StringUtils.getTrimmedStringCollection(hostEntries));
}
/**
*
   * @param hostEntries collection of ip/cidr/host addresses
*/
public MachineList(Collection<String> hostEntries) {
this(hostEntries, InetAddressFactory.S_INSTANCE);
}
/**
* Accepts a collection of ip/cidr/host addresses
*
* @param hostEntries
* @param addressFactory addressFactory to convert host to InetAddress
*/
public MachineList(Collection<String> hostEntries, InetAddressFactory addressFactory) {
this.addressFactory = addressFactory;
if (hostEntries != null) {
if ((hostEntries.size() == 1) && (hostEntries.contains(WILDCARD_VALUE))) {
all = true;
ipAddresses = null;
hostNames = null;
cidrAddresses = null;
} else {
all = false;
Set<String> ips = new HashSet<String>();
List<SubnetUtils.SubnetInfo> cidrs = new LinkedList<SubnetUtils.SubnetInfo>();
Set<String> hosts = new HashSet<String>();
for (String hostEntry : hostEntries) {
//ip address range
if (hostEntry.indexOf("/") > -1) {
try {
SubnetUtils subnet = new SubnetUtils(hostEntry);
subnet.setInclusiveHostCount(true);
cidrs.add(subnet.getInfo());
} catch (IllegalArgumentException e) {
LOG.warn("Invalid CIDR syntax : " + hostEntry);
throw e;
}
} else if (InetAddresses.isInetAddress(hostEntry)) { //ip address
ips.add(hostEntry);
} else { //hostname
hosts.add(hostEntry);
}
}
ipAddresses = (ips.size() > 0) ? ips : null;
cidrAddresses = (cidrs.size() > 0) ? cidrs : null;
hostNames = (hosts.size() > 0) ? hosts : null;
}
} else {
all = false;
ipAddresses = null;
hostNames = null;
cidrAddresses = null;
}
}
/**
* Accepts an ip address and return true if ipAddress is in the list
* @param ipAddress
* @return true if ipAddress is part of the list
*/
public boolean includes(String ipAddress) {
if (all) {
return true;
}
//check in the set of ipAddresses
if ((ipAddresses != null) && ipAddresses.contains(ipAddress)) {
return true;
}
//iterate through the ip ranges for inclusion
if (cidrAddresses != null) {
for(SubnetUtils.SubnetInfo cidrAddress : cidrAddresses) {
if(cidrAddress.isInRange(ipAddress)) {
return true;
}
}
}
//check if the ipAddress matches one of hostnames
if (hostNames != null) {
//convert given ipAddress to hostname and look for a match
InetAddress hostAddr;
try {
hostAddr = addressFactory.getByName(ipAddress);
if ((hostAddr != null) && hostNames.contains(hostAddr.getCanonicalHostName())) {
return true;
}
} catch (UnknownHostException e) {
//ignore the exception and proceed to resolve the list of hosts
}
//loop through host addresses and convert them to ip and look for a match
for (String host : hostNames) {
try {
hostAddr = addressFactory.getByName(host);
} catch (UnknownHostException e) {
continue;
}
if (hostAddr.getHostAddress().equals(ipAddress)) {
return true;
}
}
}
return false;
}
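  // Illustrative usage (added for clarity; not part of the original class);
  // the addresses below are made up for the example:
  //
  //   MachineList ml =
  //       new MachineList("10.0.0.1,192.168.1.0/24,host1.example.com");
  //   ml.includes("10.0.0.1");      // true  -- exact IP match
  //   ml.includes("192.168.1.17");  // true  -- inside the CIDR range
  //   ml.includes("10.0.0.2");      // true only if it maps to host1.example.com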
/**
   * Returns the contents of the MachineList as a {@code Collection<String>}.
   * This can be used for testing.
* @return contents of the MachineList
*/
@VisibleForTesting
public Collection<String> getCollection() {
Collection<String> list = new ArrayList<String>();
if (all) {
list.add("*");
} else {
if (ipAddresses != null) {
list.addAll(ipAddresses);
}
if (hostNames != null) {
list.addAll(hostNames);
}
if (cidrAddresses != null) {
for(SubnetUtils.SubnetInfo cidrAddress : cidrAddresses) {
list.add(cidrAddress.getCidrSignature());
}
}
}
return list;
}
}
| 6,535 | 29.830189 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
* Class which sets up a simple thread which runs in a loop sleeping
* for a short interval of time. If the sleep takes significantly longer
* than its target time, it implies that the JVM or host machine has
* paused processing, which may cause other problems. If such a pause is
* detected, the thread logs a message.
*/
@InterfaceAudience.Private
public class JvmPauseMonitor {
private static final Log LOG = LogFactory.getLog(
JvmPauseMonitor.class);
/** The target sleep time */
private static final long SLEEP_INTERVAL_MS = 500;
/** log WARN if we detect a pause longer than this threshold */
private final long warnThresholdMs;
private static final String WARN_THRESHOLD_KEY =
"jvm.pause.warn-threshold.ms";
private static final long WARN_THRESHOLD_DEFAULT = 10000;
/** log INFO if we detect a pause longer than this threshold */
private final long infoThresholdMs;
private static final String INFO_THRESHOLD_KEY =
"jvm.pause.info-threshold.ms";
private static final long INFO_THRESHOLD_DEFAULT = 1000;
private long numGcWarnThresholdExceeded = 0;
private long numGcInfoThresholdExceeded = 0;
private long totalGcExtraSleepTime = 0;
private Thread monitorThread;
private volatile boolean shouldRun = true;
public JvmPauseMonitor(Configuration conf) {
this.warnThresholdMs = conf.getLong(WARN_THRESHOLD_KEY, WARN_THRESHOLD_DEFAULT);
this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT);
}
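  // Illustrative usage (added for clarity; not part of the original class):
  // a daemon typically owns one monitor for its whole lifetime, where "conf"
  // is the daemon's Configuration.
  //
  //   JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);
  //   pauseMonitor.start();
  //   // ... run the daemon ...
  //   pauseMonitor.stop();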
public void start() {
Preconditions.checkState(monitorThread == null,
"Already started");
monitorThread = new Daemon(new Monitor());
monitorThread.start();
}
public void stop() {
shouldRun = false;
monitorThread.interrupt();
try {
monitorThread.join();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
public boolean isStarted() {
return monitorThread != null;
}
public long getNumGcWarnThreadholdExceeded() {
return numGcWarnThresholdExceeded;
}
public long getNumGcInfoThresholdExceeded() {
return numGcInfoThresholdExceeded;
}
public long getTotalGcExtraSleepTime() {
return totalGcExtraSleepTime;
}
private String formatMessage(long extraSleepTime,
Map<String, GcTimes> gcTimesAfterSleep,
Map<String, GcTimes> gcTimesBeforeSleep) {
Set<String> gcBeanNames = Sets.intersection(
gcTimesAfterSleep.keySet(),
gcTimesBeforeSleep.keySet());
List<String> gcDiffs = Lists.newArrayList();
for (String name : gcBeanNames) {
GcTimes diff = gcTimesAfterSleep.get(name).subtract(
gcTimesBeforeSleep.get(name));
if (diff.gcCount != 0) {
gcDiffs.add("GC pool '" + name + "' had collection(s): " +
diff.toString());
}
}
String ret = "Detected pause in JVM or host machine (eg GC): " +
"pause of approximately " + extraSleepTime + "ms\n";
if (gcDiffs.isEmpty()) {
ret += "No GCs detected";
} else {
ret += Joiner.on("\n").join(gcDiffs);
}
return ret;
}
private Map<String, GcTimes> getGcTimes() {
Map<String, GcTimes> map = Maps.newHashMap();
List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
for (GarbageCollectorMXBean gcBean : gcBeans) {
map.put(gcBean.getName(), new GcTimes(gcBean));
}
return map;
}
private static class GcTimes {
private GcTimes(GarbageCollectorMXBean gcBean) {
gcCount = gcBean.getCollectionCount();
gcTimeMillis = gcBean.getCollectionTime();
}
private GcTimes(long count, long time) {
this.gcCount = count;
this.gcTimeMillis = time;
}
private GcTimes subtract(GcTimes other) {
return new GcTimes(this.gcCount - other.gcCount,
this.gcTimeMillis - other.gcTimeMillis);
}
@Override
public String toString() {
return "count=" + gcCount + " time=" + gcTimeMillis + "ms";
}
private long gcCount;
private long gcTimeMillis;
}
private class Monitor implements Runnable {
@Override
public void run() {
StopWatch sw = new StopWatch();
Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
while (shouldRun) {
sw.reset().start();
try {
Thread.sleep(SLEEP_INTERVAL_MS);
} catch (InterruptedException ie) {
return;
}
long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
if (extraSleepTime > warnThresholdMs) {
++numGcWarnThresholdExceeded;
LOG.warn(formatMessage(
extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
} else if (extraSleepTime > infoThresholdMs) {
++numGcInfoThresholdExceeded;
LOG.info(formatMessage(
extraSleepTime, gcTimesAfterSleep, gcTimesBeforeSleep));
}
totalGcExtraSleepTime += extraSleepTime;
gcTimesBeforeSleep = gcTimesAfterSleep;
}
}
}
/**
* Simple 'main' to facilitate manual testing of the pause monitor.
*
* This main function just leaks memory into a list. Running this class
* with a 1GB heap will very quickly go into "GC hell" and result in
* log messages about the GC pauses.
*/
public static void main(String []args) throws Exception {
new JvmPauseMonitor(new Configuration()).start();
List<String> list = Lists.newArrayList();
int i = 0;
while (true) {
list.add(String.valueOf(i++));
}
}
}
| 7,096 | 31.555046 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HeapSort.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An implementation of the core algorithm of HeapSort.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class HeapSort implements IndexedSorter {
public HeapSort() { }
private static void downHeap(final IndexedSortable s, final int b,
int i, final int N) {
for (int idx = i << 1; idx < N; idx = i << 1) {
if (idx + 1 < N && s.compare(b + idx, b + idx + 1) < 0) {
if (s.compare(b + i, b + idx + 1) < 0) {
s.swap(b + i, b + idx + 1);
} else return;
i = idx + 1;
} else if (s.compare(b + i, b + idx) < 0) {
s.swap(b + i, b + idx);
i = idx;
} else return;
}
}
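  // Note added for clarity (not part of the original source): downHeap is a
  // sift-down on a 1-based max-heap. Callers pass b = p - 1 so that heap
  // index 1 maps to element p of the underlying IndexedSortable, and the N
  // argument is one past the last valid heap index.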
/**
* Sort the given range of items using heap sort.
* {@inheritDoc}
*/
@Override
public void sort(IndexedSortable s, int p, int r) {
sort(s, p, r, null);
}
@Override
public void sort(final IndexedSortable s, final int p, final int r,
final Progressable rep) {
final int N = r - p;
// build heap w/ reverse comparator, then write in-place from end
final int t = Integer.highestOneBit(N);
for (int i = t; i > 1; i >>>= 1) {
for (int j = i >>> 1; j < i; ++j) {
downHeap(s, p-1, j, N + 1);
}
if (null != rep) {
rep.progress();
}
}
for (int i = r - 1; i > p; --i) {
s.swap(p, i);
downHeap(s, p - 1, 1, i - p + 1);
}
}
}
| 2,365 | 30.131579 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformationProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import com.google.protobuf.ByteString;
public abstract class ProtoUtil {
/**
* Read a variable length integer in the same format that ProtoBufs encodes.
* @param in the input stream to read from
* @return the integer
* @throws IOException if it is malformed or EOF.
*/
public static int readRawVarint32(DataInput in) throws IOException {
byte tmp = in.readByte();
if (tmp >= 0) {
return tmp;
}
int result = tmp & 0x7f;
if ((tmp = in.readByte()) >= 0) {
result |= tmp << 7;
} else {
result |= (tmp & 0x7f) << 7;
if ((tmp = in.readByte()) >= 0) {
result |= tmp << 14;
} else {
result |= (tmp & 0x7f) << 14;
if ((tmp = in.readByte()) >= 0) {
result |= tmp << 21;
} else {
result |= (tmp & 0x7f) << 21;
result |= (tmp = in.readByte()) << 28;
if (tmp < 0) {
// Discard upper 32 bits.
for (int i = 0; i < 5; i++) {
if (in.readByte() >= 0) {
return result;
}
}
throw new IOException("Malformed varint");
}
}
}
}
return result;
}
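  // Worked example (added for clarity; not part of the original source): the
  // varint encoding of 150 is the two bytes 0x96 0x01.
  //   0x96 -> sign bit set, so result = 0x96 & 0x7f = 22, keep reading
  //   0x01 -> non-negative, so result |= 0x01 << 7, giving 22 + 128 = 150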
/**
   * This method creates the connection context using exactly the same logic
   * as the old Writable-based connection context: the effective and real
   * users are set according to the auth method.
*
*/
public static IpcConnectionContextProto makeIpcConnectionContext(
final String protocol,
final UserGroupInformation ugi, final AuthMethod authMethod) {
IpcConnectionContextProto.Builder result = IpcConnectionContextProto.newBuilder();
if (protocol != null) {
result.setProtocol(protocol);
}
UserInformationProto.Builder ugiProto = UserInformationProto.newBuilder();
if (ugi != null) {
/*
* In the connection context we send only additional user info that
* is not derived from the authentication done during connection setup.
*/
if (authMethod == AuthMethod.KERBEROS) {
// Real user was established as part of the connection.
// Send effective user only.
ugiProto.setEffectiveUser(ugi.getUserName());
} else if (authMethod == AuthMethod.TOKEN) {
// With token, the connection itself establishes
// both real and effective user. Hence send none in header.
} else { // Simple authentication
// No user info is established as part of the connection.
// Send both effective user and real user
ugiProto.setEffectiveUser(ugi.getUserName());
if (ugi.getRealUser() != null) {
ugiProto.setRealUser(ugi.getRealUser().getUserName());
}
}
}
result.setUserInfo(ugiProto);
return result.build();
}
public static UserGroupInformation getUgi(IpcConnectionContextProto context) {
if (context.hasUserInfo()) {
UserInformationProto userInfo = context.getUserInfo();
return getUgi(userInfo);
} else {
return null;
}
}
public static UserGroupInformation getUgi(UserInformationProto userInfo) {
UserGroupInformation ugi = null;
String effectiveUser = userInfo.hasEffectiveUser() ? userInfo
.getEffectiveUser() : null;
String realUser = userInfo.hasRealUser() ? userInfo.getRealUser() : null;
if (effectiveUser != null) {
if (realUser != null) {
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(realUser);
ugi = UserGroupInformation
.createProxyUser(effectiveUser, realUserUgi);
} else {
ugi = org.apache.hadoop.security.UserGroupInformation
.createRemoteUser(effectiveUser);
}
}
return ugi;
}
static RpcKindProto convert(RPC.RpcKind kind) {
switch (kind) {
case RPC_BUILTIN: return RpcKindProto.RPC_BUILTIN;
case RPC_WRITABLE: return RpcKindProto.RPC_WRITABLE;
case RPC_PROTOCOL_BUFFER: return RpcKindProto.RPC_PROTOCOL_BUFFER;
}
return null;
}
public static RPC.RpcKind convert( RpcKindProto kind) {
switch (kind) {
case RPC_BUILTIN: return RPC.RpcKind.RPC_BUILTIN;
case RPC_WRITABLE: return RPC.RpcKind.RPC_WRITABLE;
case RPC_PROTOCOL_BUFFER: return RPC.RpcKind.RPC_PROTOCOL_BUFFER;
}
return null;
}
public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
RpcRequestHeaderProto.OperationProto operation, int callId,
int retryCount, byte[] uuid) {
RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
.setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
// Add tracing info if we are currently tracing.
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
result.setTraceInfo(RPCTraceInfoProto.newBuilder()
.setParentId(s.getSpanId())
.setTraceId(s.getTraceId()).build());
}
return result.build();
}
}
| 6,421 | 34.285714 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.PrintStream;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* A low memory footprint {@link GSet} implementation,
* which uses an array for storing the elements
* and linked lists for collision resolution.
*
* No rehash will be performed.
* Therefore, the internal array will never be resized.
*
 * This class does not support null elements.
*
* This class is not thread safe.
*
* @param <K> Key type for looking up the elements
* @param <E> Element type, which must be
* (1) a subclass of K, and
* (2) implementing {@link LinkedElement} interface.
*/
@InterfaceAudience.Private
public class LightWeightGSet<K, E extends K> implements GSet<K, E> {
/**
* Elements of {@link LightWeightGSet}.
*/
public static interface LinkedElement {
/** Set the next element. */
public void setNext(LinkedElement next);
/** Get the next element. */
public LinkedElement getNext();
}
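  // Illustrative sketch (not part of the original class): a minimal element
  // type that can be stored in this set, plus basic usage. The name
  // "ExampleEntry" is hypothetical.
  //
  //   class ExampleEntry implements LightWeightGSet.LinkedElement {
  //     final long key;
  //     LightWeightGSet.LinkedElement next;
  //     ExampleEntry(long key) { this.key = key; }
  //     @Override public void setNext(LightWeightGSet.LinkedElement n) { next = n; }
  //     @Override public LightWeightGSet.LinkedElement getNext() { return next; }
  //     @Override public int hashCode() { return (int) (key ^ (key >>> 32)); }
  //     @Override public boolean equals(Object o) {
  //       return o instanceof ExampleEntry && ((ExampleEntry) o).key == key;
  //     }
  //   }
  //
  //   LightWeightGSet<ExampleEntry, ExampleEntry> set =
  //       new LightWeightGSet<ExampleEntry, ExampleEntry>(1 << 10);
  //   set.put(new ExampleEntry(42L));
  //   boolean found = set.contains(new ExampleEntry(42L));  // true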
static final int MAX_ARRAY_LENGTH = 1 << 30; //prevent int overflow problem
static final int MIN_ARRAY_LENGTH = 1;
/**
* An internal array of entries, which are the rows of the hash table.
* The size must be a power of two.
*/
private final LinkedElement[] entries;
/** A mask for computing the array index from the hash value of an element. */
private final int hash_mask;
/** The size of the set (not the entry array). */
private int size = 0;
/** Modification version for fail-fast.
* @see ConcurrentModificationException
*/
private int modification = 0;
/**
* @param recommended_length Recommended size of the internal array.
*/
public LightWeightGSet(final int recommended_length) {
final int actual = actualArrayLength(recommended_length);
if (LOG.isDebugEnabled()) {
LOG.debug("recommended=" + recommended_length + ", actual=" + actual);
}
entries = new LinkedElement[actual];
hash_mask = entries.length - 1;
}
//compute actual length
private static int actualArrayLength(int recommended) {
if (recommended > MAX_ARRAY_LENGTH) {
return MAX_ARRAY_LENGTH;
} else if (recommended < MIN_ARRAY_LENGTH) {
return MIN_ARRAY_LENGTH;
} else {
final int a = Integer.highestOneBit(recommended);
return a == recommended? a: a << 1;
}
}
@Override
public int size() {
return size;
}
private int getIndex(final K key) {
return key.hashCode() & hash_mask;
}
private E convert(final LinkedElement e){
@SuppressWarnings("unchecked")
final E r = (E)e;
return r;
}
@Override
public E get(final K key) {
//validate key
if (key == null) {
throw new NullPointerException("key == null");
}
//find element
final int index = getIndex(key);
for(LinkedElement e = entries[index]; e != null; e = e.getNext()) {
if (e.equals(key)) {
return convert(e);
}
}
//element not found
return null;
}
@Override
public boolean contains(final K key) {
return get(key) != null;
}
@Override
public E put(final E element) {
//validate element
if (element == null) {
throw new NullPointerException("Null element is not supported.");
}
if (!(element instanceof LinkedElement)) {
throw new HadoopIllegalArgumentException(
"!(element instanceof LinkedElement), element.getClass()="
+ element.getClass());
}
final LinkedElement e = (LinkedElement)element;
//find index
final int index = getIndex(element);
//remove if it already exists
final E existing = remove(index, element);
//insert the element to the head of the linked list
modification++;
size++;
e.setNext(entries[index]);
entries[index] = e;
return existing;
}
/**
* Remove the element corresponding to the key,
   * given that getIndex(key) == index.
*
* @return If such element exists, return it.
* Otherwise, return null.
*/
private E remove(final int index, final K key) {
if (entries[index] == null) {
return null;
} else if (entries[index].equals(key)) {
//remove the head of the linked list
modification++;
size--;
final LinkedElement e = entries[index];
entries[index] = e.getNext();
e.setNext(null);
return convert(e);
} else {
//head != null and key is not equal to head
//search the element
LinkedElement prev = entries[index];
for(LinkedElement curr = prev.getNext(); curr != null; ) {
if (curr.equals(key)) {
//found the element, remove it
modification++;
size--;
prev.setNext(curr.getNext());
curr.setNext(null);
return convert(curr);
} else {
prev = curr;
curr = curr.getNext();
}
}
//element not found
return null;
}
}
@Override
public E remove(final K key) {
//validate key
if (key == null) {
throw new NullPointerException("key == null");
}
return remove(getIndex(key), key);
}
@Override
public Iterator<E> iterator() {
return new SetIterator();
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder(getClass().getSimpleName());
b.append("(size=").append(size)
.append(String.format(", %08x", hash_mask))
.append(", modification=").append(modification)
.append(", entries.length=").append(entries.length)
.append(")");
return b.toString();
}
/** Print detailed information of this object. */
public void printDetails(final PrintStream out) {
out.print(this + ", entries = [");
for(int i = 0; i < entries.length; i++) {
if (entries[i] != null) {
LinkedElement e = entries[i];
out.print("\n " + i + ": " + e);
for(e = e.getNext(); e != null; e = e.getNext()) {
out.print(" -> " + e);
}
}
}
out.println("\n]");
}
public class SetIterator implements Iterator<E> {
/** The starting modification for fail-fast. */
private int iterModification = modification;
/** The current index of the entry array. */
private int index = -1;
private LinkedElement cur = null;
private LinkedElement next = nextNonemptyEntry();
private boolean trackModification = true;
/** Find the next nonempty entry starting at (index + 1). */
private LinkedElement nextNonemptyEntry() {
for(index++; index < entries.length && entries[index] == null; index++);
return index < entries.length? entries[index]: null;
}
private void ensureNext() {
if (trackModification && modification != iterModification) {
throw new ConcurrentModificationException("modification=" + modification
+ " != iterModification = " + iterModification);
}
if (next != null) {
return;
}
if (cur == null) {
return;
}
next = cur.getNext();
if (next == null) {
next = nextNonemptyEntry();
}
}
@Override
public boolean hasNext() {
ensureNext();
return next != null;
}
@Override
public E next() {
ensureNext();
if (next == null) {
throw new IllegalStateException("There are no more elements");
}
cur = next;
next = null;
return convert(cur);
}
@SuppressWarnings("unchecked")
@Override
public void remove() {
ensureNext();
if (cur == null) {
throw new IllegalStateException("There is no current element " +
"to remove");
}
LightWeightGSet.this.remove((K)cur);
iterModification++;
cur = null;
}
public void setTrackModification(boolean trackModification) {
this.trackModification = trackModification;
}
}
/**
* Let t = percentage of max memory.
* Let e = round(log_2 t).
* Then, we choose capacity = 2^e/(size of reference),
   * unless it is outside the closed interval [1, 2^30].
*/
public static int computeCapacity(double percentage, String mapName) {
return computeCapacity(Runtime.getRuntime().maxMemory(), percentage,
mapName);
}
@VisibleForTesting
static int computeCapacity(long maxMemory, double percentage,
String mapName) {
if (percentage > 100.0 || percentage < 0.0) {
throw new HadoopIllegalArgumentException("Percentage " + percentage
+ " must be greater than or equal to 0 "
+ " and less than or equal to 100");
}
if (maxMemory < 0) {
throw new HadoopIllegalArgumentException("Memory " + maxMemory
+ " must be greater than or equal to 0");
}
if (percentage == 0.0 || maxMemory == 0) {
return 0;
}
//VM detection
//See http://java.sun.com/docs/hotspot/HotSpotFAQ.html#64bit_detection
final String vmBit = System.getProperty("sun.arch.data.model");
//Percentage of max memory
final double percentDivisor = 100.0/percentage;
final double percentMemory = maxMemory/percentDivisor;
//compute capacity
final int e1 = (int)(Math.log(percentMemory)/Math.log(2.0) + 0.5);
final int e2 = e1 - ("32".equals(vmBit)? 2: 3);
final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
final int c = 1 << exponent;
LOG.info("Computing capacity for map " + mapName);
LOG.info("VM type = " + vmBit + "-bit");
LOG.info(percentage + "% max memory "
+ StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1)
+ " = "
+ StringUtils.TraditionalBinaryPrefix.long2String((long) percentMemory,
"B", 1));
LOG.info("capacity = 2^" + exponent + " = " + c + " entries");
return c;
}
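  // Worked example (not part of the original class), assuming a 64-bit VM:
  // computeCapacity(1L << 30, 2.0, "map") takes 2% of 1 GB (about 21.5 MB),
  // so e1 = round(log2(21.5 MB)) = 24, e2 = 24 - 3 = 21, and the returned
  // capacity is 2^21 = 2,097,152 entries.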
public void clear() {
for (int i = 0; i < entries.length; i++) {
entries[i] = null;
}
size = 0;
}
}
| 10,927 | 28.376344 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* The <code>ShutdownHookManager</code> enables running shutdownHook
* in a deterministic order, higher priority first.
* <p/>
* The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
 * This class registers a single JVM shutdownHook and runs all the
 * shutdownHooks registered to it in order based on their
* priority.
*/
public class ShutdownHookManager {
private static final ShutdownHookManager MGR = new ShutdownHookManager();
private static final Log LOG = LogFactory.getLog(ShutdownHookManager.class);
static {
Runtime.getRuntime().addShutdownHook(
new Thread() {
@Override
public void run() {
MGR.shutdownInProgress.set(true);
for (Runnable hook: MGR.getShutdownHooksInOrder()) {
try {
hook.run();
} catch (Throwable ex) {
LOG.warn("ShutdownHook '" + hook.getClass().getSimpleName() +
"' failed, " + ex.toString(), ex);
}
}
}
}
);
}
/**
* Return <code>ShutdownHookManager</code> singleton.
*
* @return <code>ShutdownHookManager</code> singleton.
*/
public static ShutdownHookManager get() {
return MGR;
}
/**
* Private structure to store ShutdownHook and its priority.
*/
private static class HookEntry {
Runnable hook;
int priority;
public HookEntry(Runnable hook, int priority) {
this.hook = hook;
this.priority = priority;
}
@Override
public int hashCode() {
return hook.hashCode();
}
@Override
public boolean equals(Object obj) {
boolean eq = false;
if (obj != null) {
if (obj instanceof HookEntry) {
eq = (hook == ((HookEntry)obj).hook);
}
}
return eq;
}
}
private Set<HookEntry> hooks =
Collections.synchronizedSet(new HashSet<HookEntry>());
private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
//private to constructor to ensure singularity
private ShutdownHookManager() {
}
/**
* Returns the list of shutdownHooks in order of execution,
   * highest priority first.
*
* @return the list of shutdownHooks in order of execution.
*/
List<Runnable> getShutdownHooksInOrder() {
List<HookEntry> list;
synchronized (MGR.hooks) {
list = new ArrayList<HookEntry>(MGR.hooks);
}
Collections.sort(list, new Comparator<HookEntry>() {
//reversing comparison so highest priority hooks are first
@Override
public int compare(HookEntry o1, HookEntry o2) {
return o2.priority - o1.priority;
}
});
List<Runnable> ordered = new ArrayList<Runnable>();
for (HookEntry entry: list) {
ordered.add(entry.hook);
}
return ordered;
}
/**
   * Adds a shutdownHook with a priority; the higher the priority,
   * the earlier it will run. ShutdownHooks with the same priority run
   * in a non-deterministic order.
*
* @param shutdownHook shutdownHook <code>Runnable</code>
* @param priority priority of the shutdownHook.
*/
public void addShutdownHook(Runnable shutdownHook, int priority) {
if (shutdownHook == null) {
throw new IllegalArgumentException("shutdownHook cannot be NULL");
}
if (shutdownInProgress.get()) {
throw new IllegalStateException("Shutdown in progress, cannot add a shutdownHook");
}
hooks.add(new HookEntry(shutdownHook, priority));
}
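  // Illustrative sketch (not part of the original class): registering a hook
  // that runs before lower-priority hooks at JVM shutdown. The priority value
  // is hypothetical.
  //
  //   ShutdownHookManager.get().addShutdownHook(new Runnable() {
  //     @Override
  //     public void run() {
  //       System.out.println("flushing state before exit");
  //     }
  //   }, 10);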
/**
* Removes a shutdownHook.
*
* @param shutdownHook shutdownHook to remove.
* @return TRUE if the shutdownHook was registered and removed,
* FALSE otherwise.
*/
public boolean removeShutdownHook(Runnable shutdownHook) {
if (shutdownInProgress.get()) {
throw new IllegalStateException("Shutdown in progress, cannot remove a shutdownHook");
}
return hooks.remove(new HookEntry(shutdownHook, 0));
}
/**
* Indicates if a shutdownHook is registered or not.
*
* @param shutdownHook shutdownHook to check if registered.
   * @return TRUE/FALSE depending on whether the shutdownHook is registered.
*/
public boolean hasShutdownHook(Runnable shutdownHook) {
return hooks.contains(new HookEntry(shutdownHook, 0));
}
/**
* Indicates if shutdown is in progress or not.
*
* @return TRUE if the shutdown is in progress, otherwise FALSE.
*/
public boolean isShutdownInProgress() {
return shutdownInProgress.get();
}
}
| 5,609 | 28.371728 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A utility to help run {@link Tool}s.
*
* <p><code>ToolRunner</code> can be used to run classes implementing
* <code>Tool</code> interface. It works in conjunction with
* {@link GenericOptionsParser} to parse the
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options">
* generic hadoop command line arguments</a> and modifies the
* <code>Configuration</code> of the <code>Tool</code>. The
* application-specific options are passed along without being modified.
* </p>
*
* @see Tool
* @see GenericOptionsParser
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ToolRunner {
/**
* Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
* parsing with the given generic arguments. Uses the given
* <code>Configuration</code>, or builds one if null.
*
* Sets the <code>Tool</code>'s configuration with the possibly modified
* version of the <code>conf</code>.
*
* @param conf <code>Configuration</code> for the <code>Tool</code>.
* @param tool <code>Tool</code> to run.
* @param args command-line arguments to the tool.
* @return exit code of the {@link Tool#run(String[])} method.
*/
public static int run(Configuration conf, Tool tool, String[] args)
throws Exception{
if(conf == null) {
conf = new Configuration();
}
GenericOptionsParser parser = new GenericOptionsParser(conf, args);
//set the configuration back, so that Tool can configure itself
tool.setConf(conf);
//get the args w/o generic hadoop args
String[] toolArgs = parser.getRemainingArgs();
return tool.run(toolArgs);
}
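  // Illustrative sketch (not part of the original class): a minimal Tool wired
  // through ToolRunner. The class name "MyTool" is hypothetical; Configured is
  // org.apache.hadoop.conf.Configured.
  //
  //   public class MyTool extends Configured implements Tool {
  //     @Override
  //     public int run(String[] args) throws Exception {
  //       // generic options (-D, -fs, ...) are already applied to getConf()
  //       System.out.println("application args: " + args.length);
  //       return 0;
  //     }
  //     public static void main(String[] args) throws Exception {
  //       System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));
  //     }
  //   }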
/**
* Runs the <code>Tool</code> with its <code>Configuration</code>.
*
* Equivalent to <code>run(tool.getConf(), tool, args)</code>.
*
* @param tool <code>Tool</code> to run.
* @param args command-line arguments to the tool.
* @return exit code of the {@link Tool#run(String[])} method.
*/
public static int run(Tool tool, String[] args)
throws Exception{
return run(tool.getConf(), tool, args);
}
/**
   * Prints generic command-line arguments and usage information.
*
* @param out stream to write usage information to.
*/
public static void printGenericCommandUsage(PrintStream out) {
GenericOptionsParser.printGenericCommandUsage(out);
}
/**
* Print out a prompt to the user, and return true if the user
* responds with "y" or "yes". (case insensitive)
*/
public static boolean confirmPrompt(String prompt) throws IOException {
while (true) {
System.err.print(prompt + " (Y or N) ");
StringBuilder responseBuilder = new StringBuilder();
while (true) {
int c = System.in.read();
if (c == -1 || c == '\r' || c == '\n') {
break;
}
responseBuilder.append((char)c);
}
String response = responseBuilder.toString();
if (response.equalsIgnoreCase("y") ||
response.equalsIgnoreCase("yes")) {
return true;
} else if (response.equalsIgnoreCase("n") ||
response.equalsIgnoreCase("no")) {
return false;
}
System.err.println("Invalid input: " + response);
// else ask them again
}
}
}
| 4,359 | 33.603175 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Facilitates hooking process termination for tests and debugging.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public final class ExitUtil {
private final static Log LOG = LogFactory.getLog(ExitUtil.class.getName());
private static volatile boolean systemExitDisabled = false;
private static volatile boolean systemHaltDisabled = false;
private static volatile ExitException firstExitException;
private static volatile HaltException firstHaltException;
public static class ExitException extends RuntimeException {
private static final long serialVersionUID = 1L;
public final int status;
public ExitException(int status, String msg) {
super(msg);
this.status = status;
}
}
public static class HaltException extends RuntimeException {
private static final long serialVersionUID = 1L;
public final int status;
public HaltException(int status, String msg) {
super(msg);
this.status = status;
}
}
/**
* Disable the use of System.exit for testing.
*/
public static void disableSystemExit() {
systemExitDisabled = true;
}
/**
* Disable the use of {@code Runtime.getRuntime().halt() } for testing.
*/
public static void disableSystemHalt() {
systemHaltDisabled = true;
}
/**
* @return true if terminate has been called
*/
public static boolean terminateCalled() {
// Either we set this member or we actually called System#exit
return firstExitException != null;
}
/**
* @return true if halt has been called
*/
public static boolean haltCalled() {
return firstHaltException != null;
}
/**
* @return the first ExitException thrown, null if none thrown yet
*/
public static ExitException getFirstExitException() {
return firstExitException;
}
/**
* @return the first {@code HaltException} thrown, null if none thrown yet
*/
public static HaltException getFirstHaltException() {
return firstHaltException;
}
/**
* Reset the tracking of process termination. This is for use in unit tests
* where one test in the suite expects an exit but others do not.
*/
public static void resetFirstExitException() {
firstExitException = null;
}
public static void resetFirstHaltException() {
firstHaltException = null;
}
/**
* Terminate the current process. Note that terminate is the *only* method
* that should be used to terminate the daemon processes.
*
* @param status
* exit code
* @param msg
* message used to create the {@code ExitException}
* @throws ExitException
* if System.exit is disabled for test purposes
*/
public static void terminate(int status, String msg) throws ExitException {
LOG.info("Exiting with status " + status);
if (systemExitDisabled) {
ExitException ee = new ExitException(status, msg);
LOG.fatal("Terminate called", ee);
if (null == firstExitException) {
firstExitException = ee;
}
throw ee;
}
System.exit(status);
}
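  // Illustrative sketch (not part of the original class): how a test might
  // assert on an exit path without killing the JVM. The status value, message,
  // and JUnit-style assertions are hypothetical.
  //
  //   ExitUtil.disableSystemExit();
  //   try {
  //     ExitUtil.terminate(1, "simulated fatal error");
  //   } catch (ExitUtil.ExitException e) {
  //     assertEquals(1, e.status);
  //   }
  //   assertTrue(ExitUtil.terminateCalled());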
/**
* Forcibly terminates the currently running Java virtual machine.
*
* @param status
* exit code
* @param msg
* message used to create the {@code HaltException}
* @throws HaltException
* if Runtime.getRuntime().halt() is disabled for test purposes
*/
public static void halt(int status, String msg) throws HaltException {
LOG.info("Halt with status " + status + " Message: " + msg);
if (systemHaltDisabled) {
HaltException ee = new HaltException(status, msg);
LOG.fatal("Halt called", ee);
if (null == firstHaltException) {
firstHaltException = ee;
}
throw ee;
}
Runtime.getRuntime().halt(status);
}
/**
   * Like {@link #terminate(int, String)} but uses the given throwable to
* initialize the ExitException.
*
* @param status
* @param t
* throwable used to create the ExitException
* @throws ExitException
* if System.exit is disabled for test purposes
*/
public static void terminate(int status, Throwable t) throws ExitException {
terminate(status, StringUtils.stringifyException(t));
}
/**
* Forcibly terminates the currently running Java virtual machine.
*
* @param status
* @param t
   * @throws HaltException
*/
public static void halt(int status, Throwable t) throws HaltException {
halt(status, StringUtils.stringifyException(t));
}
/**
   * Like {@link #terminate(int, String)} without a message.
*
* @param status
* @throws ExitException
* if System.exit is disabled for test purposes
*/
public static void terminate(int status) throws ExitException {
terminate(status, "ExitException");
}
/**
* Forcibly terminates the currently running Java virtual machine.
* @param status
   * @throws HaltException
*/
public static void halt(int status) throws HaltException {
halt(status, "HaltException");
}
}
| 6,119 | 29 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.base.Preconditions;
/**
* Implements an intrusive doubly-linked list.
*
* An intrusive linked list is one in which the elements themselves are
* responsible for storing the pointers to previous and next elements.
* This can save a lot of memory if there are many elements in the list or
* many lists.
*/
@InterfaceAudience.Private
public class IntrusiveCollection<E extends IntrusiveCollection.Element>
implements Collection<E> {
/**
* An element contained in this list.
*
* We pass the list itself as a parameter so that elements can belong to
* multiple lists. (The element will need to store separate prev and next
* pointers for each.)
*/
@InterfaceAudience.Private
public interface Element {
/**
* Insert this element into the list. This is the first thing that will
* be called on the element.
*/
void insertInternal(IntrusiveCollection<? extends Element> list,
Element prev, Element next);
/**
* Set the prev pointer of an element already in the list.
*/
void setPrev(IntrusiveCollection<? extends Element> list, Element prev);
/**
* Set the next pointer of an element already in the list.
*/
void setNext(IntrusiveCollection<? extends Element> list, Element next);
/**
* Remove an element from the list. This is the last thing that will be
* called on an element.
*/
void removeInternal(IntrusiveCollection<? extends Element> list);
/**
* Get the prev pointer of an element.
*/
Element getPrev(IntrusiveCollection<? extends Element> list);
/**
* Get the next pointer of an element.
*/
Element getNext(IntrusiveCollection<? extends Element> list);
/**
* Returns true if this element is in the provided list.
*/
boolean isInList(IntrusiveCollection<? extends Element> list);
}
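  // Illustrative sketch (not part of the original class): an element that
  // tracks membership in a single IntrusiveCollection by storing its own
  // prev/next pointers. The name "Node" is hypothetical, "Element" refers to
  // IntrusiveCollection.Element, and error checking is omitted.
  //
  //   class Node implements IntrusiveCollection.Element {
  //     private IntrusiveCollection<? extends Element> list;
  //     private Element prev, next;
  //     public void insertInternal(IntrusiveCollection<? extends Element> l,
  //         Element p, Element n) { list = l; prev = p; next = n; }
  //     public void setPrev(IntrusiveCollection<? extends Element> l, Element p) { prev = p; }
  //     public void setNext(IntrusiveCollection<? extends Element> l, Element n) { next = n; }
  //     public void removeInternal(IntrusiveCollection<? extends Element> l) {
  //       list = null; prev = null; next = null;
  //     }
  //     public Element getPrev(IntrusiveCollection<? extends Element> l) { return prev; }
  //     public Element getNext(IntrusiveCollection<? extends Element> l) { return next; }
  //     public boolean isInList(IntrusiveCollection<? extends Element> l) { return l == list; }
  //   }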
private Element root = new Element() {
// We keep references to the first and last elements for easy access.
Element first = this;
Element last = this;
@Override
public void insertInternal(IntrusiveCollection<? extends Element> list,
Element prev, Element next) {
throw new RuntimeException("Can't insert root element");
}
@Override
public void setPrev(IntrusiveCollection<? extends Element> list,
Element prev) {
Preconditions.checkState(list == IntrusiveCollection.this);
last = prev;
}
@Override
public void setNext(IntrusiveCollection<? extends Element> list,
Element next) {
Preconditions.checkState(list == IntrusiveCollection.this);
first = next;
}
@Override
public void removeInternal(IntrusiveCollection<? extends Element> list) {
throw new RuntimeException("Can't remove root element");
}
@Override
public Element getNext(
IntrusiveCollection<? extends Element> list) {
Preconditions.checkState(list == IntrusiveCollection.this);
return first;
}
@Override
public Element getPrev(
IntrusiveCollection<? extends Element> list) {
Preconditions.checkState(list == IntrusiveCollection.this);
return last;
}
@Override
public boolean isInList(IntrusiveCollection<? extends Element> list) {
return list == IntrusiveCollection.this;
}
@Override
public String toString() {
return "root"; // + IntrusiveCollection.this + "]";
}
};
private int size = 0;
/**
* An iterator over the intrusive collection.
*
* Currently, you can remove elements from the list using
   * {@link IntrusiveIterator#remove()}, but modifying the collection in other
* ways during the iteration is not supported.
*/
public class IntrusiveIterator implements Iterator<E> {
Element cur;
Element next;
IntrusiveIterator() {
this.cur = root;
this.next = null;
}
@Override
public boolean hasNext() {
if (next == null) {
next = cur.getNext(IntrusiveCollection.this);
}
return next != root;
}
@SuppressWarnings("unchecked")
@Override
public E next() {
if (next == null) {
next = cur.getNext(IntrusiveCollection.this);
}
if (next == root) {
throw new NoSuchElementException();
}
cur = next;
next = null;
return (E)cur;
}
@Override
public void remove() {
if (cur == null) {
throw new IllegalStateException("Already called remove " +
"once on this element.");
}
next = removeElement(cur);
cur = null;
}
}
private Element removeElement(Element elem) {
Element prev = elem.getPrev(IntrusiveCollection.this);
Element next = elem.getNext(IntrusiveCollection.this);
elem.removeInternal(IntrusiveCollection.this);
prev.setNext(IntrusiveCollection.this, next);
next.setPrev(IntrusiveCollection.this, prev);
size--;
return next;
}
/**
* Get an iterator over the list. This can be used to remove elements.
* It is not safe to do concurrent modifications from other threads while
* using this iterator.
*
* @return The iterator.
*/
public Iterator<E> iterator() {
return new IntrusiveIterator();
}
@Override
public int size() {
return size;
}
@Override
public boolean isEmpty() {
return size == 0;
}
@Override
public boolean contains(Object o) {
try {
Element element = (Element)o;
return element.isInList(this);
} catch (ClassCastException e) {
return false;
}
}
@Override
public Object[] toArray() {
    Object[] ret = new Object[size];
int i = 0;
for (Iterator<E> iter = iterator(); iter.hasNext(); ) {
ret[i++] = iter.next();
}
return ret;
}
@SuppressWarnings("unchecked")
@Override
public <T> T[] toArray(T[] array) {
if (array.length < size) {
return (T[])toArray();
} else {
int i = 0;
for (Iterator<E> iter = iterator(); iter.hasNext(); ) {
array[i++] = (T)iter.next();
}
}
return array;
}
/**
* Add an element to the end of the list.
*
* @param elem The new element to add.
*/
@Override
public boolean add(E elem) {
if (elem == null) {
return false;
}
if (elem.isInList(this)) {
return false;
}
Element prev = root.getPrev(IntrusiveCollection.this);
prev.setNext(IntrusiveCollection.this, elem);
root.setPrev(IntrusiveCollection.this, elem);
elem.insertInternal(IntrusiveCollection.this, prev, root);
size++;
return true;
}
/**
* Add an element to the front of the list.
*
* @param elem The new element to add.
*/
public boolean addFirst(Element elem) {
if (elem == null) {
return false;
}
if (elem.isInList(this)) {
return false;
}
Element next = root.getNext(IntrusiveCollection.this);
next.setPrev(IntrusiveCollection.this, elem);
root.setNext(IntrusiveCollection.this, elem);
elem.insertInternal(IntrusiveCollection.this, root, next);
size++;
return true;
}
public static final Log LOG = LogFactory.getLog(IntrusiveCollection.class);
@Override
public boolean remove(Object o) {
try {
Element elem = (Element)o;
if (!elem.isInList(this)) {
return false;
}
removeElement(elem);
return true;
} catch (ClassCastException e) {
return false;
}
}
@Override
public boolean containsAll(Collection<?> collection) {
for (Object o : collection) {
if (!contains(o)) {
return false;
}
}
return true;
}
@Override
public boolean addAll(Collection<? extends E> collection) {
boolean changed = false;
for (E elem : collection) {
if (add(elem)) {
changed = true;
}
}
return changed;
}
@Override
public boolean removeAll(Collection<?> collection) {
boolean changed = false;
for (Object elem : collection) {
if (remove(elem)) {
changed = true;
}
}
return changed;
}
@Override
public boolean retainAll(Collection<?> collection) {
boolean changed = false;
for (Iterator<E> iter = iterator();
iter.hasNext(); ) {
Element elem = iter.next();
if (!collection.contains(elem)) {
iter.remove();
changed = true;
}
}
return changed;
}
/**
* Remove all elements.
*/
@Override
public void clear() {
for (Iterator<E> iter = iterator(); iter.hasNext(); ) {
iter.next();
iter.remove();
}
}
}
| 9,728 | 25.013369 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
/**
* A class that provides a line reader from an input stream.
* Depending on the constructor used, lines will either be terminated by:
* <ul>
 * <li>one of the following: '\n' (LF), '\r' (CR),
* or '\r\n' (CR+LF).</li>
* <li><em>or</em>, a custom byte sequence delimiter</li>
* </ul>
* In both cases, EOF also terminates an otherwise unterminated
* line.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class LineReader implements Closeable {
private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
private int bufferSize = DEFAULT_BUFFER_SIZE;
private InputStream in;
private byte[] buffer;
// the number of bytes of real data in the buffer
private int bufferLength = 0;
// the current position in the buffer
private int bufferPosn = 0;
private static final byte CR = '\r';
private static final byte LF = '\n';
// The line delimiter
private final byte[] recordDelimiterBytes;
/**
* Create a line reader that reads from the given stream using the
* default buffer-size (64k).
* @param in The input stream
* @throws IOException
*/
public LineReader(InputStream in) {
this(in, DEFAULT_BUFFER_SIZE);
}
/**
* Create a line reader that reads from the given stream using the
* given buffer-size.
* @param in The input stream
* @param bufferSize Size of the read buffer
* @throws IOException
*/
public LineReader(InputStream in, int bufferSize) {
this.in = in;
this.bufferSize = bufferSize;
this.buffer = new byte[this.bufferSize];
this.recordDelimiterBytes = null;
}
/**
* Create a line reader that reads from the given stream using the
* <code>io.file.buffer.size</code> specified in the given
* <code>Configuration</code>.
* @param in input stream
* @param conf configuration
* @throws IOException
*/
public LineReader(InputStream in, Configuration conf) throws IOException {
this(in, conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE));
}
/**
* Create a line reader that reads from the given stream using the
   * default buffer-size, and using a custom delimiter given as an
   * array of bytes.
* @param in The input stream
* @param recordDelimiterBytes The delimiter
*/
public LineReader(InputStream in, byte[] recordDelimiterBytes) {
this.in = in;
this.bufferSize = DEFAULT_BUFFER_SIZE;
this.buffer = new byte[this.bufferSize];
this.recordDelimiterBytes = recordDelimiterBytes;
}
/**
* Create a line reader that reads from the given stream using the
   * given buffer-size, and using a custom delimiter given as an
   * array of bytes.
* @param in The input stream
* @param bufferSize Size of the read buffer
* @param recordDelimiterBytes The delimiter
* @throws IOException
*/
public LineReader(InputStream in, int bufferSize,
byte[] recordDelimiterBytes) {
this.in = in;
this.bufferSize = bufferSize;
this.buffer = new byte[this.bufferSize];
this.recordDelimiterBytes = recordDelimiterBytes;
}
/**
* Create a line reader that reads from the given stream using the
* <code>io.file.buffer.size</code> specified in the given
   * <code>Configuration</code>, and using a custom delimiter given as an
   * array of bytes.
* @param in input stream
* @param conf configuration
* @param recordDelimiterBytes The delimiter
* @throws IOException
*/
public LineReader(InputStream in, Configuration conf,
byte[] recordDelimiterBytes) throws IOException {
this.in = in;
this.bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
this.buffer = new byte[this.bufferSize];
this.recordDelimiterBytes = recordDelimiterBytes;
}
/**
* Close the underlying stream.
* @throws IOException
*/
public void close() throws IOException {
in.close();
}
/**
* Read one line from the InputStream into the given Text.
*
* @param str the object to store the given line (without newline)
* @param maxLineLength the maximum number of bytes to store into str;
* the rest of the line is silently discarded.
* @param maxBytesToConsume the maximum number of bytes to consume
   * in this call. This is only a hint, because if the line crosses
   * this threshold, we allow it to happen. It can potentially overshoot
   * by as much as one buffer length.
*
* @return the number of bytes read including the (longest) newline
* found.
*
* @throws IOException if the underlying stream throws
*/
public int readLine(Text str, int maxLineLength,
int maxBytesToConsume) throws IOException {
if (this.recordDelimiterBytes != null) {
return readCustomLine(str, maxLineLength, maxBytesToConsume);
} else {
return readDefaultLine(str, maxLineLength, maxBytesToConsume);
}
}
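  // Illustrative sketch (not part of the original class): reading a stream
  // line by line. The input file name is hypothetical.
  //
  //   LineReader reader = new LineReader(new FileInputStream("input.txt"));
  //   Text line = new Text();
  //   try {
  //     while (reader.readLine(line) > 0) {
  //       System.out.println(line);
  //     }
  //   } finally {
  //     reader.close();
  //   }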
protected int fillBuffer(InputStream in, byte[] buffer, boolean inDelimiter)
throws IOException {
return in.read(buffer);
}
/**
* Read a line terminated by one of CR, LF, or CRLF.
*/
private int readDefaultLine(Text str, int maxLineLength, int maxBytesToConsume)
throws IOException {
/* We're reading data from in, but the head of the stream may be
* already buffered in buffer, so we have several cases:
* 1. No newline characters are in the buffer, so we need to copy
* everything and read another buffer from the stream.
* 2. An unambiguously terminated line is in buffer, so we just
* copy to str.
* 3. Ambiguously terminated line is in buffer, i.e. buffer ends
* in CR. In this case we copy everything up to CR to str, but
* we also need to see what follows CR: if it's LF, then we
* need consume LF as well, so next call to readLine will read
* from after that.
* We use a flag prevCharCR to signal if previous character was CR
* and, if it happens to be at the end of the buffer, delay
* consuming it until we have a chance to look at the char that
* follows.
*/
str.clear();
int txtLength = 0; //tracks str.getLength(), as an optimization
int newlineLength = 0; //length of terminating newline
    boolean prevCharCR = false; //true if prev char was CR
long bytesConsumed = 0;
do {
int startPosn = bufferPosn; //starting from where we left off the last time
if (bufferPosn >= bufferLength) {
startPosn = bufferPosn = 0;
if (prevCharCR) {
++bytesConsumed; //account for CR from previous read
}
bufferLength = fillBuffer(in, buffer, prevCharCR);
if (bufferLength <= 0) {
break; // EOF
}
}
for (; bufferPosn < bufferLength; ++bufferPosn) { //search for newline
if (buffer[bufferPosn] == LF) {
newlineLength = (prevCharCR) ? 2 : 1;
++bufferPosn; // at next invocation proceed from following byte
break;
}
if (prevCharCR) { //CR + notLF, we are at notLF
newlineLength = 1;
break;
}
prevCharCR = (buffer[bufferPosn] == CR);
}
int readLength = bufferPosn - startPosn;
if (prevCharCR && newlineLength == 0) {
--readLength; //CR at the end of the buffer
}
bytesConsumed += readLength;
int appendLength = readLength - newlineLength;
if (appendLength > maxLineLength - txtLength) {
appendLength = maxLineLength - txtLength;
}
if (appendLength > 0) {
str.append(buffer, startPosn, appendLength);
txtLength += appendLength;
}
} while (newlineLength == 0 && bytesConsumed < maxBytesToConsume);
if (bytesConsumed > Integer.MAX_VALUE) {
throw new IOException("Too many bytes before newline: " + bytesConsumed);
}
return (int)bytesConsumed;
}
/**
* Read a line terminated by a custom delimiter.
*/
private int readCustomLine(Text str, int maxLineLength, int maxBytesToConsume)
throws IOException {
/* We're reading data from inputStream, but the head of the stream may be
* already captured in the previous buffer, so we have several cases:
*
* 1. The buffer tail does not contain any character sequence which
     * matches with the head of the delimiter. We count it as an
     * ambiguous byte count = 0
*
* 2. The buffer tail contains a X number of characters,
* that forms a sequence, which matches with the
* head of delimiter. We count ambiguous byte count = X
*
* // *** eg: A segment of input file is as follows
*
* " record 1792: I found this bug very interesting and
* I have completely read about it. record 1793: This bug
* can be solved easily record 1794: This ."
*
* delimiter = "record";
*
     * supposing: String at the end of buffer =
* "I found this bug very interesting and I have completely re"
     * Therefore the next buffer = "ad about it. record 179 ...."
*
* The matching characters in the input
* buffer tail and delimiter head = "re"
* Therefore, ambiguous byte count = 2 **** //
*
* 2.1 If the following bytes are the remaining characters of
* the delimiter, then we have to capture only up to the starting
* position of delimiter. That means, we need not include the
* ambiguous characters in str.
*
* 2.2 If the following bytes are not the remaining characters of
* the delimiter ( as mentioned in the example ),
* then we have to include the ambiguous characters in str.
*/
str.clear();
int txtLength = 0; // tracks str.getLength(), as an optimization
long bytesConsumed = 0;
int delPosn = 0;
int ambiguousByteCount=0; // To capture the ambiguous characters count
do {
int startPosn = bufferPosn; // Start from previous end position
if (bufferPosn >= bufferLength) {
startPosn = bufferPosn = 0;
bufferLength = fillBuffer(in, buffer, ambiguousByteCount > 0);
if (bufferLength <= 0) {
str.append(recordDelimiterBytes, 0, ambiguousByteCount);
break; // EOF
}
}
for (; bufferPosn < bufferLength; ++bufferPosn) {
if (buffer[bufferPosn] == recordDelimiterBytes[delPosn]) {
delPosn++;
if (delPosn >= recordDelimiterBytes.length) {
bufferPosn++;
break;
}
} else if (delPosn != 0) {
bufferPosn--;
delPosn = 0;
}
}
int readLength = bufferPosn - startPosn;
bytesConsumed += readLength;
int appendLength = readLength - delPosn;
if (appendLength > maxLineLength - txtLength) {
appendLength = maxLineLength - txtLength;
}
if (appendLength > 0) {
if (ambiguousByteCount > 0) {
str.append(recordDelimiterBytes, 0, ambiguousByteCount);
//appending the ambiguous characters (refer case 2.2)
bytesConsumed += ambiguousByteCount;
ambiguousByteCount=0;
}
str.append(buffer, startPosn, appendLength);
txtLength += appendLength;
}
if (bufferPosn >= bufferLength) {
if (delPosn > 0 && delPosn < recordDelimiterBytes.length) {
ambiguousByteCount = delPosn;
bytesConsumed -= ambiguousByteCount; //to be consumed in next
}
}
} while (delPosn < recordDelimiterBytes.length
&& bytesConsumed < maxBytesToConsume);
if (bytesConsumed > Integer.MAX_VALUE) {
throw new IOException("Too many bytes before delimiter: " + bytesConsumed);
}
return (int) bytesConsumed;
}
/**
* Read from the InputStream into the given Text.
* @param str the object to store the given line
* @param maxLineLength the maximum number of bytes to store into str.
* @return the number of bytes read including the newline
* @throws IOException if the underlying stream throws
*/
public int readLine(Text str, int maxLineLength) throws IOException {
return readLine(str, maxLineLength, Integer.MAX_VALUE);
}
/**
* Read from the InputStream into the given Text.
* @param str the object to store the given line
* @return the number of bytes read including the newline
* @throws IOException if the underlying stream throws
*/
public int readLine(Text str) throws IOException {
return readLine(str, Integer.MAX_VALUE, Integer.MAX_VALUE);
}
protected int getBufferPosn() {
return bufferPosn;
}
protected int getBufferSize() {
return bufferSize;
}
}
| 13,804 | 35.233596 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HttpExceptionUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.InputStream;
import java.io.Writer;
import java.lang.reflect.Constructor;
import java.net.HttpURLConnection;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * HTTP utility class to help propagate server-side exceptions to the client
* over HTTP as a JSON payload.
* <p/>
 * It creates HTTP Servlet and JAX-RS (Jersey) error responses including details
 * of the exception that allow a client to recreate the remote exception.
* <p/>
* It parses HTTP client connections and recreates the exception.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HttpExceptionUtils {
public static final String ERROR_JSON = "RemoteException";
public static final String ERROR_EXCEPTION_JSON = "exception";
public static final String ERROR_CLASSNAME_JSON = "javaClassName";
public static final String ERROR_MESSAGE_JSON = "message";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String ENTER = System.getProperty("line.separator");
/**
   * Creates an HTTP servlet response serializing the exception in it as JSON.
*
* @param response the servlet response
* @param status the error code to set in the response
* @param ex the exception to serialize in the response
* @throws IOException thrown if there was an error while creating the
* response
*/
public static void createServletExceptionResponse(
HttpServletResponse response, int status, Throwable ex)
throws IOException {
response.setStatus(status);
response.setContentType(APPLICATION_JSON_MIME);
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
Map<String, Object> jsonResponse = new LinkedHashMap<String, Object>();
jsonResponse.put(ERROR_JSON, json);
ObjectMapper jsonMapper = new ObjectMapper();
Writer writer = response.getWriter();
jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, jsonResponse);
writer.flush();
}
/**
   * Creates an HTTP JAX-RS (Jersey) response serializing the exception in it as JSON.
*
* @param status the error code to set in the response
* @param ex the exception to serialize in the response
   * @return the JAX-RS response with the set error and JSON-encoded exception
*/
public static Response createJerseyExceptionResponse(Response.Status status,
Throwable ex) {
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
Map<String, Object> response = new LinkedHashMap<String, Object>();
response.put(ERROR_JSON, json);
return Response.status(status).type(MediaType.APPLICATION_JSON).
entity(response).build();
}
private static String getOneLineMessage(Throwable exception) {
String message = exception.getMessage();
if (message != null) {
int i = message.indexOf(ENTER);
if (i > -1) {
message = message.substring(0, i);
}
}
return message;
}
// trick, riding on generics to throw an undeclared exception
private static void throwEx(Throwable ex) {
HttpExceptionUtils.<RuntimeException>throwException(ex);
}
@SuppressWarnings("unchecked")
private static <E extends Throwable> void throwException(Throwable ex)
throws E {
throw (E) ex;
}
/**
* Validates the status of an <code>HttpURLConnection</code> against an
* expected HTTP status code. If the current status code is not the expected
   * one it throws an exception with a detail message using server-side error
* messages if available.
* <p/>
* <b>NOTE:</b> this method will throw the deserialized exception even if not
* declared in the <code>throws</code> of the method signature.
*
* @param conn the <code>HttpURLConnection</code>.
* @param expectedStatus the expected HTTP status code.
* @throws IOException thrown if the current status code does not match the
* expected one.
*/
@SuppressWarnings("unchecked")
public static void validateResponse(HttpURLConnection conn,
int expectedStatus) throws IOException {
if (conn.getResponseCode() != expectedStatus) {
Exception toThrow;
InputStream es = null;
try {
es = conn.getErrorStream();
ObjectMapper mapper = new ObjectMapper();
Map json = mapper.readValue(es, Map.class);
json = (Map) json.get(ERROR_JSON);
String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
String exMsg = (String) json.get(ERROR_MESSAGE_JSON);
if (exClass != null) {
try {
ClassLoader cl = HttpExceptionUtils.class.getClassLoader();
Class klass = cl.loadClass(exClass);
Constructor constr = klass.getConstructor(String.class);
toThrow = (Exception) constr.newInstance(exMsg);
} catch (Exception ex) {
toThrow = new IOException(String.format(
"HTTP status [%d], exception [%s], message [%s] ",
conn.getResponseCode(), exClass, exMsg));
}
} else {
String msg = (exMsg != null) ? exMsg : conn.getResponseMessage();
toThrow = new IOException(String.format(
"HTTP status [%d], message [%s]", conn.getResponseCode(), msg));
}
} catch (Exception ex) {
toThrow = new IOException(String.format(
"HTTP status [%d], message [%s]", conn.getResponseCode(),
conn.getResponseMessage()));
} finally {
if (es != null) {
try {
es.close();
} catch (IOException ex) {
//ignore
}
}
}
throwEx(toThrow);
}
}
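  // Illustrative sketch (not part of the original class): validating a client
  // connection so that a server-side exception is re-thrown locally. The URL
  // is hypothetical.
  //
  //   HttpURLConnection conn = (HttpURLConnection)
  //       new URL("http://namenode:14000/webhdfs/v1/?op=GETFILESTATUS").openConnection();
  //   conn.connect();
  //   HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);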
}
| 7,133 | 37.354839 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/*
* This class is a container of multiple thread pools, each for a volume,
* so that we can schedule async disk operations easily.
*
* Examples of async disk operations are deletion of files.
 * We can move the files to a "TO_BE_DELETED" folder before asynchronously
 * deleting them, so that the caller can return faster.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class AsyncDiskService {
public static final Log LOG = LogFactory.getLog(AsyncDiskService.class);
// ThreadPool core pool size
private static final int CORE_THREADS_PER_VOLUME = 1;
// ThreadPool maximum pool size
private static final int MAXIMUM_THREADS_PER_VOLUME = 4;
// ThreadPool keep-alive time for threads over core pool size
private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final ThreadGroup threadGroup = new ThreadGroup("async disk service");
private ThreadFactory threadFactory;
private HashMap<String, ThreadPoolExecutor> executors
= new HashMap<String, ThreadPoolExecutor>();
/**
   * Create an AsyncDiskService with a set of volumes (specified by their
* root directories).
*
   * The AsyncDiskService uses one ThreadPool per volume to do the async
* disk operations.
*
* @param volumes The roots of the file system volumes.
*/
public AsyncDiskService(String[] volumes) {
threadFactory = new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
return new Thread(threadGroup, r);
}
};
// Create one ThreadPool per volume
for (int v = 0 ; v < volumes.length; v++) {
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(volumes[v], executor);
}
}
/**
* Execute the task sometime in the future, using ThreadPools.
*/
public synchronized void execute(String root, Runnable task) {
ThreadPoolExecutor executor = executors.get(root);
if (executor == null) {
throw new RuntimeException("Cannot find root " + root
+ " for execution of task " + task);
} else {
executor.execute(task);
}
}
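  // Illustrative sketch (not part of the original class): scheduling an async
  // delete on the thread pool that owns a volume. The volume roots and file
  // path are hypothetical.
  //
  //   AsyncDiskService service =
  //       new AsyncDiskService(new String[] {"/data/1", "/data/2"});
  //   service.execute("/data/1", new Runnable() {
  //     @Override
  //     public void run() {
  //       new File("/data/1/TO_BE_DELETED/blk_123").delete();
  //     }
  //   });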
/**
* Gracefully start the shut down of all ThreadPools.
*/
public synchronized void shutdown() {
LOG.info("Shutting down all AsyncDiskService threads...");
for (Map.Entry<String, ThreadPoolExecutor> e
: executors.entrySet()) {
e.getValue().shutdown();
}
}
/**
* Wait for the termination of the thread pools.
*
* @param milliseconds The number of milliseconds to wait
   * @return true if all thread pools terminated within the given time limit
* @throws InterruptedException
*/
public synchronized boolean awaitTermination(long milliseconds)
throws InterruptedException {
long end = Time.now() + milliseconds;
for (Map.Entry<String, ThreadPoolExecutor> e:
executors.entrySet()) {
ThreadPoolExecutor executor = e.getValue();
if (!executor.awaitTermination(
Math.max(end - Time.now(), 0),
TimeUnit.MILLISECONDS)) {
LOG.warn("AsyncDiskService awaitTermination timeout.");
return false;
}
}
LOG.info("All AsyncDiskService threads are terminated.");
return true;
}
/**
* Shut down all ThreadPools immediately.
*/
public synchronized List<Runnable> shutdownNow() {
LOG.info("Shutting down all AsyncDiskService threads immediately...");
List<Runnable> list = new ArrayList<Runnable>();
for (Map.Entry<String, ThreadPoolExecutor> e
: executors.entrySet()) {
list.addAll(e.getValue().shutdownNow());
}
return list;
}
}
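// Illustrative usage sketch (not part of the original Hadoop file): schedules
// an asynchronous deletion on the pool owning one volume, then shuts down.
// The volume roots and the file path below are assumptions for demonstration.
class AsyncDiskServiceExample {
  public static void main(String[] args) throws InterruptedException {
    String[] volumes = new String[] {"/data/1", "/data/2"};
    AsyncDiskService service = new AsyncDiskService(volumes);
    // Run the task on the thread pool that belongs to the /data/1 volume.
    service.execute("/data/1", new Runnable() {
      @Override
      public void run() {
        // e.g. delete a file that was moved into a TO_BE_DELETED folder
        new java.io.File("/data/1/TO_BE_DELETED/blk_123").delete();
      }
    });
    // Graceful shutdown, falling back to an immediate one after 5 seconds.
    service.shutdown();
    if (!service.awaitTermination(5000)) {
      service.shutdownNow();
    }
  }
}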
| 5,309 | 31.981366 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* FileBasedIPList loads a list of subnets in CIDR format and ip addresses from
* a file.
*
 * Given an IP address, the isIn method returns true if the address belongs
 * to one of the subnets or matches one of the listed addresses.
*
* Thread safe.
*/
public class FileBasedIPList implements IPList {
private static final Log LOG = LogFactory.getLog(FileBasedIPList.class);
private final String fileName;
private final MachineList addressList;
public FileBasedIPList(String fileName) {
this.fileName = fileName;
String[] lines;
try {
lines = readLines(fileName);
} catch (IOException e) {
lines = null;
}
if (lines != null) {
addressList = new MachineList(new HashSet<String>(Arrays.asList(lines)));
} else {
addressList = null;
}
}
public FileBasedIPList reload() {
return new FileBasedIPList(fileName);
}
@Override
public boolean isIn(String ipAddress) {
if (ipAddress == null || addressList == null) {
return false;
}
return addressList.includes(ipAddress);
}
/**
* Reads the lines in a file.
* @param fileName
* @return lines in a String array; null if the file does not exist or if the
* file name is null
* @throws IOException
*/
private static String[] readLines(String fileName) throws IOException {
try {
if (fileName != null) {
File file = new File (fileName);
if (file.exists()) {
try (
Reader fileReader = new InputStreamReader(
new FileInputStream(file), Charsets.UTF_8);
BufferedReader bufferedReader = new BufferedReader(fileReader)) {
List<String> lines = new ArrayList<String>();
String line = null;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Loaded IP list of size = " + lines.size() +
" from file = " + fileName);
}
return (lines.toArray(new String[lines.size()]));
}
} else {
LOG.debug("Missing ip list file : "+ fileName);
}
}
} catch (IOException ioe) {
LOG.error(ioe);
throw ioe;
}
return null;
}
}
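// Hypothetical usage sketch: load a list of CIDR subnets / IP addresses from
// a file and test membership. The file name and address are assumptions; a
// missing file simply makes isIn return false.
class FileBasedIPListExample {
  public static void main(String[] args) {
    IPList ipList = new FileBasedIPList("/etc/hadoop/ip-whitelist.txt");
    System.out.println(ipList.isIn("10.119.103.112"));
    // reload() re-reads the file and returns a fresh instance.
    ipList = ((FileBasedIPList) ipList).reload();
  }
}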
| 3,493 | 29.12069 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Waitable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.concurrent.locks.Condition;
/**
* Represents an object that you can wait for.
*/
public class Waitable<T> {
private T val;
private final Condition cond;
public Waitable(Condition cond) {
this.val = null;
this.cond = cond;
}
public T await() throws InterruptedException {
while (this.val == null) {
this.cond.await();
}
return this.val;
}
public void provide(T val) {
this.val = val;
this.cond.signalAll();
}
public boolean hasVal() {
return this.val != null;
}
public T getVal() {
return this.val;
}
}
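// Minimal usage sketch, assuming the common pattern of pairing a Waitable
// with a ReentrantLock: a producer thread provides a value while the main
// thread awaits it under the same lock.
class WaitableExample {
  public static void main(String[] args) throws InterruptedException {
    final java.util.concurrent.locks.ReentrantLock lock =
        new java.util.concurrent.locks.ReentrantLock();
    final Waitable<String> result = new Waitable<String>(lock.newCondition());
    Thread producer = new Thread(new Runnable() {
      @Override
      public void run() {
        lock.lock();
        try {
          result.provide("ready");   // wakes up every thread blocked in await()
        } finally {
          lock.unlock();
        }
      }
    });
    producer.start();
    lock.lock();
    try {
      System.out.println(result.await());   // blocks until provide() is called
    } finally {
      lock.unlock();
    }
  }
}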
| 1,432 | 26.037736 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Generic ID generator
* used for generating various types of number sequences.
*/
@InterfaceAudience.Private
public interface IdGenerator {
/** Increment and then return the next value. */
public long nextValue();
}
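// Minimal sketch of a possible implementation, assuming an AtomicLong-backed
// counter; this example is illustrative and not part of the Hadoop sources.
class AtomicIdGeneratorExample implements IdGenerator {
  private final java.util.concurrent.atomic.AtomicLong counter =
      new java.util.concurrent.atomic.AtomicLong(0);
  @Override
  public long nextValue() {
    // increment first, then return, matching the interface contract
    return counter.incrementAndGet();
  }
}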
| 1,129 | 34.3125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/UTF8ByteArrayUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class UTF8ByteArrayUtils {
/**
* Find the first occurrence of the given byte b in a UTF-8 encoded string
* @param utf a byte array containing a UTF-8 encoded string
* @param start starting offset
* @param end ending position
* @param b the byte to find
   * @return position where the byte first occurs, otherwise -1
*/
public static int findByte(byte [] utf, int start, int end, byte b) {
for(int i=start; i<end; i++) {
if (utf[i]==b) {
return i;
}
}
return -1;
}
/**
* Find the first occurrence of the given bytes b in a UTF-8 encoded string
* @param utf a byte array containing a UTF-8 encoded string
* @param start starting offset
* @param end ending position
* @param b the bytes to find
   * @return position where the byte sequence first occurs, otherwise -1
*/
public static int findBytes(byte [] utf, int start, int end, byte[] b) {
int matchEnd = end - b.length;
for(int i=start; i<=matchEnd; i++) {
boolean matched = true;
for(int j=0; j<b.length; j++) {
if (utf[i+j] != b[j]) {
matched = false;
break;
}
}
if (matched) {
return i;
}
}
return -1;
}
/**
* Find the nth occurrence of the given byte b in a UTF-8 encoded string
* @param utf a byte array containing a UTF-8 encoded string
* @param start starting offset
* @param length the length of byte array
* @param b the byte to find
* @param n the desired occurrence of the given byte
   * @return position of the nth occurrence of the given byte if it exists; otherwise -1
*/
public static int findNthByte(byte [] utf, int start, int length, byte b, int n) {
int pos = -1;
int nextStart = start;
for (int i = 0; i < n; i++) {
pos = findByte(utf, nextStart, length, b);
if (pos < 0) {
return pos;
}
nextStart = pos + 1;
}
return pos;
}
/**
* Find the nth occurrence of the given byte b in a UTF-8 encoded string
* @param utf a byte array containing a UTF-8 encoded string
* @param b the byte to find
* @param n the desired occurrence of the given byte
   * @return position of the nth occurrence of the given byte if it exists; otherwise -1
*/
public static int findNthByte(byte [] utf, byte b, int n) {
return findNthByte(utf, 0, utf.length, b, n);
}
}
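// Illustrative sketch: locating tab separators in a UTF-8 encoded record,
// e.g. to split key/value fields. The sample record is an assumption.
class UTF8ByteArrayUtilsExample {
  public static void main(String[] args) throws java.io.UnsupportedEncodingException {
    byte[] record = "key\tvalue\trest".getBytes("UTF-8");
    int firstTab = UTF8ByteArrayUtils.findByte(record, 0, record.length, (byte) '\t');
    int secondTab = UTF8ByteArrayUtils.findNthByte(record, (byte) '\t', 2);
    System.out.println(firstTab + " " + secondTab);   // prints "3 9"
  }
}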
| 3,421 | 31.903846 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.*;
import java.util.Set;
import java.util.HashSet;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
// Keeps track of which datanodes/tasktrackers are allowed to connect to the
// namenode/jobtracker.
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class HostsFileReader {
private Set<String> includes;
private Set<String> excludes;
private String includesFile;
private String excludesFile;
private static final Log LOG = LogFactory.getLog(HostsFileReader.class);
public HostsFileReader(String inFile,
String exFile) throws IOException {
includes = new HashSet<String>();
excludes = new HashSet<String>();
includesFile = inFile;
excludesFile = exFile;
refresh();
}
@Private
public HostsFileReader(String includesFile, InputStream inFileInputStream,
String excludesFile, InputStream exFileInputStream) throws IOException {
includes = new HashSet<String>();
excludes = new HashSet<String>();
this.includesFile = includesFile;
this.excludesFile = excludesFile;
refresh(inFileInputStream, exFileInputStream);
}
public static void readFileToSet(String type,
String filename, Set<String> set) throws IOException {
File file = new File(filename);
FileInputStream fis = new FileInputStream(file);
readFileToSetWithFileInputStream(type, filename, fis, set);
}
@Private
public static void readFileToSetWithFileInputStream(String type,
String filename, InputStream fileInputStream, Set<String> set)
throws IOException {
BufferedReader reader = null;
try {
reader = new BufferedReader(
new InputStreamReader(fileInputStream, Charsets.UTF_8));
String line;
while ((line = reader.readLine()) != null) {
String[] nodes = line.split("[ \t\n\f\r]+");
if (nodes != null) {
for (int i = 0; i < nodes.length; i++) {
if (nodes[i].trim().startsWith("#")) {
// Everything from now on is a comment
break;
}
if (!nodes[i].isEmpty()) {
LOG.info("Adding " + nodes[i] + " to the list of " + type +
" hosts from " + filename);
set.add(nodes[i]);
}
}
}
}
} finally {
if (reader != null) {
reader.close();
}
fileInputStream.close();
}
}
public synchronized void refresh() throws IOException {
LOG.info("Refreshing hosts (include/exclude) list");
Set<String> newIncludes = new HashSet<String>();
Set<String> newExcludes = new HashSet<String>();
boolean switchIncludes = false;
boolean switchExcludes = false;
if (!includesFile.isEmpty()) {
readFileToSet("included", includesFile, newIncludes);
switchIncludes = true;
}
if (!excludesFile.isEmpty()) {
readFileToSet("excluded", excludesFile, newExcludes);
switchExcludes = true;
}
if (switchIncludes) {
// switch the new hosts that are to be included
includes = newIncludes;
}
if (switchExcludes) {
// switch the excluded hosts
excludes = newExcludes;
}
}
@Private
public synchronized void refresh(InputStream inFileInputStream,
InputStream exFileInputStream) throws IOException {
LOG.info("Refreshing hosts (include/exclude) list");
Set<String> newIncludes = new HashSet<String>();
Set<String> newExcludes = new HashSet<String>();
boolean switchIncludes = false;
boolean switchExcludes = false;
if (inFileInputStream != null) {
readFileToSetWithFileInputStream("included", includesFile,
inFileInputStream, newIncludes);
switchIncludes = true;
}
if (exFileInputStream != null) {
readFileToSetWithFileInputStream("excluded", excludesFile,
exFileInputStream, newExcludes);
switchExcludes = true;
}
if (switchIncludes) {
// switch the new hosts that are to be included
includes = newIncludes;
}
if (switchExcludes) {
// switch the excluded hosts
excludes = newExcludes;
}
}
public synchronized Set<String> getHosts() {
return includes;
}
public synchronized Set<String> getExcludedHosts() {
return excludes;
}
public synchronized void setIncludesFile(String includesFile) {
LOG.info("Setting the includes file to " + includesFile);
this.includesFile = includesFile;
}
public synchronized void setExcludesFile(String excludesFile) {
LOG.info("Setting the excludes file to " + excludesFile);
this.excludesFile = excludesFile;
}
public synchronized void updateFileNames(String includesFile,
String excludesFile) {
setIncludesFile(includesFile);
setExcludesFile(excludesFile);
}
}
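// Illustrative sketch using the stream-based constructor so that no files are
// required; the host names and the "includes"/"excludes" labels are assumptions.
class HostsFileReaderExample {
  public static void main(String[] args) throws java.io.IOException {
    java.io.InputStream in = new java.io.ByteArrayInputStream(
        "host1 host2\n# a comment line\nhost3\n".getBytes("UTF-8"));
    java.io.InputStream ex = new java.io.ByteArrayInputStream(
        "badhost\n".getBytes("UTF-8"));
    HostsFileReader reader = new HostsFileReader("includes", in, "excludes", ex);
    System.out.println(reader.getHosts());          // host1, host2, host3 (unordered)
    System.out.println(reader.getExcludedHosts());  // badhost
  }
}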
| 5,940 | 32.005556 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.math.BigInteger;
/**
* Utility for sampling and computing CPU usage.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class CpuTimeTracker {
public static final int UNAVAILABLE = -1;
private final long minimumTimeInterval;
// CPU used time since system is on (ms)
private BigInteger cumulativeCpuTime = BigInteger.ZERO;
// CPU used time read last time (ms)
private BigInteger lastCumulativeCpuTime = BigInteger.ZERO;
// Unix timestamp while reading the CPU time (ms)
private long sampleTime;
private long lastSampleTime;
private float cpuUsage;
private BigInteger jiffyLengthInMillis;
public CpuTimeTracker(long jiffyLengthInMillis) {
this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
this.cpuUsage = UNAVAILABLE;
this.sampleTime = UNAVAILABLE;
this.lastSampleTime = UNAVAILABLE;
minimumTimeInterval = 10 * jiffyLengthInMillis;
}
/**
* Return percentage of cpu time spent over the time since last update.
* CPU time spent is based on elapsed jiffies multiplied by amount of
* time for 1 core. Thus, if you use 2 cores completely you would have spent
* twice the actual time between updates and this will return 200%.
*
* @return Return percentage of cpu usage since last update, {@link
* CpuTimeTracker#UNAVAILABLE} if there haven't been 2 updates more than
* {@link CpuTimeTracker#minimumTimeInterval} apart
*/
public float getCpuTrackerUsagePercent() {
if (lastSampleTime == UNAVAILABLE ||
lastSampleTime > sampleTime) {
// lastSampleTime > sampleTime may happen when the system time is changed
lastSampleTime = sampleTime;
lastCumulativeCpuTime = cumulativeCpuTime;
return cpuUsage;
}
// When lastSampleTime is sufficiently old, update cpuUsage.
// Also take a sample of the current time and cumulative CPU time for the
// use of the next calculation.
if (sampleTime > lastSampleTime + minimumTimeInterval) {
cpuUsage =
((cumulativeCpuTime.subtract(lastCumulativeCpuTime)).floatValue())
* 100F / ((float) (sampleTime - lastSampleTime));
lastSampleTime = sampleTime;
lastCumulativeCpuTime = cumulativeCpuTime;
}
return cpuUsage;
}
/**
* Obtain the cumulative CPU time since the system is on.
* @return cumulative CPU time in milliseconds
*/
public long getCumulativeCpuTime() {
return cumulativeCpuTime.longValue();
}
/**
* Apply delta to accumulators.
* @param elapsedJiffies updated jiffies
* @param newTime new sample time
*/
public void updateElapsedJiffies(BigInteger elapsedJiffies, long newTime) {
cumulativeCpuTime = elapsedJiffies.multiply(jiffyLengthInMillis);
sampleTime = newTime;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("SampleTime " + this.sampleTime);
sb.append(" CummulativeCpuTime " + this.cumulativeCpuTime);
sb.append(" LastSampleTime " + this.lastSampleTime);
sb.append(" LastCummulativeCpuTime " + this.lastCumulativeCpuTime);
sb.append(" CpuUsage " + this.cpuUsage);
sb.append(" JiffyLengthMillisec " + this.jiffyLengthInMillis);
return sb.toString();
}
}
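// Illustrative sketch: two samples taken one second apart. The 10ms jiffy
// length and the jiffy counts are assumptions standing in for /proc readings.
class CpuTimeTrackerExample {
  public static void main(String[] args) {
    CpuTimeTracker tracker = new CpuTimeTracker(10);   // 10 ms per jiffy
    tracker.updateElapsedJiffies(java.math.BigInteger.valueOf(1000), 10000L);
    tracker.getCpuTrackerUsagePercent();               // first call only records the sample
    tracker.updateElapsedJiffies(java.math.BigInteger.valueOf(1050), 11000L);
    // 50 jiffies * 10 ms = 500 ms of CPU over 1000 ms of wall time => 50.0
    System.out.println(tracker.getCpuTrackerUsagePercent());
  }
}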
| 4,213 | 35.327586 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import com.google.common.base.Preconditions;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.SystemUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
import com.google.common.net.InetAddresses;
/**
* General string utils
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class StringUtils {
/**
* Priority of the StringUtils shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 0;
/**
* Shell environment variables: $ followed by one letter or _ followed by
* multiple letters, numbers, or underscores. The group captures the
* environment variable name without the leading $.
*/
public static final Pattern SHELL_ENV_VAR_PATTERN =
Pattern.compile("\\$([A-Za-z_]{1}[A-Za-z0-9_]*)");
/**
* Windows environment variables: surrounded by %. The group captures the
* environment variable name without the leading and trailing %.
*/
public static final Pattern WIN_ENV_VAR_PATTERN = Pattern.compile("%(.*?)%");
/**
* Regular expression that matches and captures environment variable names
* according to platform-specific rules.
*/
public static final Pattern ENV_VAR_PATTERN = Shell.WINDOWS ?
WIN_ENV_VAR_PATTERN : SHELL_ENV_VAR_PATTERN;
/**
* Make a string representation of the exception.
* @param e The exception to stringify
* @return A string with exception name and call stack.
*/
public static String stringifyException(Throwable e) {
StringWriter stm = new StringWriter();
PrintWriter wrt = new PrintWriter(stm);
e.printStackTrace(wrt);
wrt.close();
return stm.toString();
}
/**
   * Given a full hostname, return the portion up to the first dot.
* @param fullHostname the full hostname
* @return the hostname to the first dot
*/
public static String simpleHostname(String fullHostname) {
if (InetAddresses.isInetAddress(fullHostname)) {
return fullHostname;
}
int offset = fullHostname.indexOf('.');
if (offset != -1) {
return fullHostname.substring(0, offset);
}
return fullHostname;
}
/**
* Given an integer, return a string that is in an approximate, but human
* readable format.
* @param number the number to format
* @return a human readable form of the integer
*
* @deprecated use {@link TraditionalBinaryPrefix#long2String(long, String, int)}.
*/
@Deprecated
public static String humanReadableInt(long number) {
return TraditionalBinaryPrefix.long2String(number, "", 1);
}
/** The same as String.format(Locale.ENGLISH, format, objects). */
public static String format(final String format, final Object... objects) {
return String.format(Locale.ENGLISH, format, objects);
}
/**
* Format a percentage for presentation to the user.
* @param fraction the percentage as a fraction, e.g. 0.1 = 10%
* @param decimalPlaces the number of decimal places
* @return a string representation of the percentage
*/
public static String formatPercent(double fraction, int decimalPlaces) {
return format("%." + decimalPlaces + "f%%", fraction*100);
}
/**
* Given an array of strings, return a comma-separated list of its elements.
* @param strs Array of strings
* @return Empty string if strs.length is 0, comma separated list of strings
* otherwise
*/
public static String arrayToString(String[] strs) {
if (strs.length == 0) { return ""; }
StringBuilder sbuf = new StringBuilder();
sbuf.append(strs[0]);
for (int idx = 1; idx < strs.length; idx++) {
sbuf.append(",");
sbuf.append(strs[idx]);
}
return sbuf.toString();
}
/**
* Given an array of bytes it will convert the bytes to a hex string
* representation of the bytes
* @param bytes
* @param start start index, inclusively
* @param end end index, exclusively
* @return hex string representation of the byte array
*/
public static String byteToHexString(byte[] bytes, int start, int end) {
if (bytes == null) {
throw new IllegalArgumentException("bytes == null");
}
StringBuilder s = new StringBuilder();
for(int i = start; i < end; i++) {
s.append(format("%02x", bytes[i]));
}
return s.toString();
}
/** Same as byteToHexString(bytes, 0, bytes.length). */
public static String byteToHexString(byte bytes[]) {
return byteToHexString(bytes, 0, bytes.length);
}
/**
* Given a hexstring this will return the byte array corresponding to the
* string
* @param hex the hex String array
* @return a byte array that is a hex string representation of the given
* string. The size of the byte array is therefore hex.length/2
*/
public static byte[] hexStringToByte(String hex) {
byte[] bts = new byte[hex.length() / 2];
for (int i = 0; i < bts.length; i++) {
bts[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
}
return bts;
}
/**
   * Given an array of URIs, return a comma-separated list of their string
   * representations; returns null if uris is null.
   * @param uris the URIs to convert
*/
public static String uriToString(URI[] uris){
if (uris == null) {
return null;
}
StringBuilder ret = new StringBuilder(uris[0].toString());
for(int i = 1; i < uris.length;i++){
ret.append(",");
ret.append(uris[i].toString());
}
return ret.toString();
}
/**
* @param str
* The string array to be parsed into an URI array.
* @return <tt>null</tt> if str is <tt>null</tt>, else the URI array
* equivalent to str.
* @throws IllegalArgumentException
* If any string in str violates RFC 2396.
*/
public static URI[] stringToURI(String[] str){
if (str == null)
return null;
URI[] uris = new URI[str.length];
for (int i = 0; i < str.length;i++){
try{
uris[i] = new URI(str[i]);
}catch(URISyntaxException ur){
throw new IllegalArgumentException(
"Failed to create uri for " + str[i], ur);
}
}
return uris;
}
/**
   * Convert an array of strings to an array of Paths; returns null if str is
   * null.
   * @param str the strings to convert to Paths
*/
public static Path[] stringToPath(String[] str){
if (str == null) {
return null;
}
Path[] p = new Path[str.length];
for (int i = 0; i < str.length;i++){
p[i] = new Path(str[i]);
}
return p;
}
/**
*
   * Given a finish and start time in long milliseconds, returns a
   * String in the format Xhrs, Ymins, Zsec for the time difference between the two times.
   * If the finish time comes before the start time, negative values of X, Y and Z are returned.
*
* @param finishTime finish time
* @param startTime start time
*/
public static String formatTimeDiff(long finishTime, long startTime){
long timeDiff = finishTime - startTime;
return formatTime(timeDiff);
}
/**
*
* Given the time in long milliseconds, returns a
* String in the format Xhrs, Ymins, Z sec.
*
* @param timeDiff The time difference to format
*/
public static String formatTime(long timeDiff){
StringBuilder buf = new StringBuilder();
long hours = timeDiff / (60*60*1000);
long rem = (timeDiff % (60*60*1000));
long minutes = rem / (60*1000);
rem = rem % (60*1000);
long seconds = rem / 1000;
if (hours != 0){
buf.append(hours);
buf.append("hrs, ");
}
if (minutes != 0){
buf.append(minutes);
buf.append("mins, ");
}
// return "0sec if no difference
buf.append(seconds);
buf.append("sec");
return buf.toString();
}
/**
* Formats time in ms and appends difference (finishTime - startTime)
* as returned by formatTimeDiff().
   * If the finish time is 0, an empty string is returned. If the start time is 0,
   * the difference is not appended to the return value.
   * @param dateFormat date format to use
   * @param finishTime finish time
* @param startTime start time
* @return formatted value.
*/
public static String getFormattedTimeWithDiff(DateFormat dateFormat,
long finishTime, long startTime){
StringBuilder buf = new StringBuilder();
if (0 != finishTime) {
buf.append(dateFormat.format(new Date(finishTime)));
if (0 != startTime){
buf.append(" (" + formatTimeDiff(finishTime , startTime) + ")");
}
}
return buf.toString();
}
/**
   * Returns an array of strings split from a comma separated list.
   * @param str the comma separated string values
   * @return an array of the comma separated string values; null if there are none
*/
public static String[] getStrings(String str){
Collection<String> values = getStringCollection(str);
if(values.size() == 0) {
return null;
}
return values.toArray(new String[values.size()]);
}
/**
* Returns a collection of strings.
   * @param str comma separated string values
* @return an <code>ArrayList</code> of string values
*/
public static Collection<String> getStringCollection(String str){
String delim = ",";
return getStringCollection(str, delim);
}
/**
* Returns a collection of strings.
*
* @param str
* String to parse
* @param delim
* delimiter to separate the values
* @return Collection of parsed elements.
*/
public static Collection<String> getStringCollection(String str, String delim) {
List<String> values = new ArrayList<String>();
if (str == null)
return values;
StringTokenizer tokenizer = new StringTokenizer(str, delim);
while (tokenizer.hasMoreTokens()) {
values.add(tokenizer.nextToken());
}
return values;
}
/**
* Splits a comma separated value <code>String</code>, trimming leading and trailing whitespace on each value.
* Duplicate and empty values are removed.
   * @param str a comma separated <code>String</code> with values
* @return a <code>Collection</code> of <code>String</code> values
*/
public static Collection<String> getTrimmedStringCollection(String str){
Set<String> set = new LinkedHashSet<String>(
Arrays.asList(getTrimmedStrings(str)));
set.remove("");
return set;
}
/**
* Splits a comma separated value <code>String</code>, trimming leading and trailing whitespace on each value.
   * @param str a comma separated <code>String</code> with values
* @return an array of <code>String</code> values
*/
public static String[] getTrimmedStrings(String str){
if (null == str || str.trim().isEmpty()) {
return emptyStringArray;
}
return str.trim().split("\\s*,\\s*");
}
/**
   * Trims all the strings in a Collection<String> and returns a Set<String>.
   * @param strings the strings to trim
   * @return a Set containing the trimmed strings
*/
public static Set<String> getTrimmedStrings(Collection<String> strings) {
Set<String> trimmedStrings = new HashSet<String>();
for (String string: strings) {
trimmedStrings.add(string.trim());
}
return trimmedStrings;
}
final public static String[] emptyStringArray = {};
final public static char COMMA = ',';
final public static String COMMA_STR = ",";
final public static char ESCAPE_CHAR = '\\';
/**
* Split a string using the default separator
* @param str a string that may have escaped separator
* @return an array of strings
*/
public static String[] split(String str) {
return split(str, ESCAPE_CHAR, COMMA);
}
/**
* Split a string using the given separator
* @param str a string that may have escaped separator
   * @param escapeChar a char that can be used to escape the separator
* @param separator a separator char
* @return an array of strings
*/
public static String[] split(
String str, char escapeChar, char separator) {
if (str==null) {
return null;
}
ArrayList<String> strList = new ArrayList<String>();
StringBuilder split = new StringBuilder();
int index = 0;
while ((index = findNext(str, separator, escapeChar, index, split)) >= 0) {
++index; // move over the separator for next search
strList.add(split.toString());
split.setLength(0); // reset the buffer
}
strList.add(split.toString());
// remove trailing empty split(s)
int last = strList.size(); // last split
while (--last>=0 && "".equals(strList.get(last))) {
strList.remove(last);
}
return strList.toArray(new String[strList.size()]);
}
/**
* Split a string using the given separator, with no escaping performed.
* @param str a string to be split. Note that this may not be null.
* @param separator a separator char
* @return an array of strings
*/
public static String[] split(
String str, char separator) {
// String.split returns a single empty result for splitting the empty
// string.
if (str.isEmpty()) {
return new String[]{""};
}
ArrayList<String> strList = new ArrayList<String>();
int startIndex = 0;
int nextIndex = 0;
while ((nextIndex = str.indexOf(separator, startIndex)) != -1) {
strList.add(str.substring(startIndex, nextIndex));
startIndex = nextIndex + 1;
}
strList.add(str.substring(startIndex));
// remove trailing empty split(s)
int last = strList.size(); // last split
while (--last>=0 && "".equals(strList.get(last))) {
strList.remove(last);
}
return strList.toArray(new String[strList.size()]);
}
/**
* Finds the first occurrence of the separator character ignoring the escaped
   * separators starting from the index. Note the substring between the index
   * and the position of the separator is appended to split.
* @param str the source string
* @param separator the character to find
* @param escapeChar character used to escape
* @param start from where to search
* @param split used to pass back the extracted string
*/
public static int findNext(String str, char separator, char escapeChar,
int start, StringBuilder split) {
int numPreEscapes = 0;
for (int i = start; i < str.length(); i++) {
char curChar = str.charAt(i);
if (numPreEscapes == 0 && curChar == separator) { // separator
return i;
} else {
split.append(curChar);
numPreEscapes = (curChar == escapeChar)
? (++numPreEscapes) % 2
: 0;
}
}
return -1;
}
/**
* Escape commas in the string using the default escape char
* @param str a string
* @return an escaped string
*/
public static String escapeString(String str) {
return escapeString(str, ESCAPE_CHAR, COMMA);
}
/**
* Escape <code>charToEscape</code> in the string
* with the escape char <code>escapeChar</code>
*
* @param str string
* @param escapeChar escape char
* @param charToEscape the char to be escaped
* @return an escaped string
*/
public static String escapeString(
String str, char escapeChar, char charToEscape) {
return escapeString(str, escapeChar, new char[] {charToEscape});
}
// check if the character array has the character
private static boolean hasChar(char[] chars, char character) {
for (char target : chars) {
if (character == target) {
return true;
}
}
return false;
}
/**
* @param charsToEscape array of characters to be escaped
*/
public static String escapeString(String str, char escapeChar,
char[] charsToEscape) {
if (str == null) {
return null;
}
StringBuilder result = new StringBuilder();
for (int i=0; i<str.length(); i++) {
char curChar = str.charAt(i);
if (curChar == escapeChar || hasChar(charsToEscape, curChar)) {
// special char
result.append(escapeChar);
}
result.append(curChar);
}
return result.toString();
}
/**
* Unescape commas in the string using the default escape char
* @param str a string
* @return an unescaped string
*/
public static String unEscapeString(String str) {
return unEscapeString(str, ESCAPE_CHAR, COMMA);
}
/**
* Unescape <code>charToEscape</code> in the string
* with the escape char <code>escapeChar</code>
*
* @param str string
* @param escapeChar escape char
* @param charToEscape the escaped char
* @return an unescaped string
*/
public static String unEscapeString(
String str, char escapeChar, char charToEscape) {
return unEscapeString(str, escapeChar, new char[] {charToEscape});
}
/**
* @param charsToEscape array of characters to unescape
*/
public static String unEscapeString(String str, char escapeChar,
char[] charsToEscape) {
if (str == null) {
return null;
}
StringBuilder result = new StringBuilder(str.length());
boolean hasPreEscape = false;
for (int i=0; i<str.length(); i++) {
char curChar = str.charAt(i);
if (hasPreEscape) {
if (curChar != escapeChar && !hasChar(charsToEscape, curChar)) {
// no special char
throw new IllegalArgumentException("Illegal escaped string " + str +
" unescaped " + escapeChar + " at " + (i-1));
}
// otherwise discard the escape char
result.append(curChar);
hasPreEscape = false;
} else {
if (hasChar(charsToEscape, curChar)) {
throw new IllegalArgumentException("Illegal escaped string " + str +
" unescaped " + curChar + " at " + i);
} else if (curChar == escapeChar) {
hasPreEscape = true;
} else {
result.append(curChar);
}
}
}
if (hasPreEscape ) {
throw new IllegalArgumentException("Illegal escaped string " + str +
", not expecting " + escapeChar + " in the end." );
}
return result.toString();
}
/**
* Return a message for logging.
* @param prefix prefix keyword for the message
* @param msg content of the message
* @return a message for logging
*/
private static String toStartupShutdownString(String prefix, String [] msg) {
StringBuilder b = new StringBuilder(prefix);
b.append("\n/************************************************************");
for(String s : msg)
b.append("\n" + prefix + s);
b.append("\n************************************************************/");
return b.toString();
}
/**
* Print a log message for starting up and shutting down
* @param clazz the class of the server
* @param args arguments
* @param LOG the target log object
*/
public static void startupShutdownMessage(Class<?> clazz, String[] args,
final org.apache.commons.logging.Log LOG) {
startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
}
/**
* Print a log message for starting up and shutting down
* @param clazz the class of the server
* @param args arguments
* @param LOG the target log object
*/
public static void startupShutdownMessage(Class<?> clazz, String[] args,
final org.slf4j.Logger LOG) {
startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
}
static void startupShutdownMessage(Class<?> clazz, String[] args,
final LogAdapter LOG) {
final String hostname = NetUtils.getHostname();
final String classname = clazz.getSimpleName();
LOG.info(
toStartupShutdownString("STARTUP_MSG: ", new String[] {
"Starting " + classname,
" host = " + hostname,
" args = " + Arrays.asList(args),
" version = " + VersionInfo.getVersion(),
" classpath = " + System.getProperty("java.class.path"),
" build = " + VersionInfo.getUrl() + " -r "
+ VersionInfo.getRevision()
+ "; compiled by '" + VersionInfo.getUser()
+ "' on " + VersionInfo.getDate(),
" java = " + System.getProperty("java.version") }
)
);
if (SystemUtils.IS_OS_UNIX) {
try {
SignalLogger.INSTANCE.register(LOG);
} catch (Throwable t) {
LOG.warn("failed to register any UNIX signal loggers: ", t);
}
}
ShutdownHookManager.get().addShutdownHook(
new Runnable() {
@Override
public void run() {
LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
"Shutting down " + classname + " at " + hostname}));
}
}, SHUTDOWN_HOOK_PRIORITY);
}
/**
* The traditional binary prefixes, kilo, mega, ..., exa,
* which can be represented by a 64-bit integer.
   * TraditionalBinaryPrefix symbols are case insensitive.
*/
public static enum TraditionalBinaryPrefix {
KILO(10),
MEGA(KILO.bitShift + 10),
GIGA(MEGA.bitShift + 10),
TERA(GIGA.bitShift + 10),
PETA(TERA.bitShift + 10),
EXA (PETA.bitShift + 10);
public final long value;
public final char symbol;
public final int bitShift;
public final long bitMask;
private TraditionalBinaryPrefix(int bitShift) {
this.bitShift = bitShift;
this.value = 1L << bitShift;
this.bitMask = this.value - 1L;
this.symbol = toString().charAt(0);
}
/**
* @return The TraditionalBinaryPrefix object corresponding to the symbol.
*/
public static TraditionalBinaryPrefix valueOf(char symbol) {
symbol = Character.toUpperCase(symbol);
for(TraditionalBinaryPrefix prefix : TraditionalBinaryPrefix.values()) {
if (symbol == prefix.symbol) {
return prefix;
}
}
throw new IllegalArgumentException("Unknown symbol '" + symbol + "'");
}
/**
* Convert a string to long.
     * The input string is first trimmed
* and then it is parsed with traditional binary prefix.
*
* For example,
* "-1230k" will be converted to -1230 * 1024 = -1259520;
* "891g" will be converted to 891 * 1024^3 = 956703965184;
*
* @param s input string
* @return a long value represented by the input string.
*/
public static long string2long(String s) {
s = s.trim();
final int lastpos = s.length() - 1;
final char lastchar = s.charAt(lastpos);
if (Character.isDigit(lastchar))
return Long.parseLong(s);
else {
long prefix;
try {
prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Invalid size prefix '" + lastchar
+ "' in '" + s
+ "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)");
}
long num = Long.parseLong(s.substring(0, lastpos));
if (num > (Long.MAX_VALUE/prefix) || num < (Long.MIN_VALUE/prefix)) {
throw new IllegalArgumentException(s + " does not fit in a Long");
}
return num * prefix;
}
}
/**
* Convert a long integer to a string with traditional binary prefix.
*
* @param n the value to be converted
* @param unit The unit, e.g. "B" for bytes.
* @param decimalPlaces The number of decimal places.
* @return a string with traditional binary prefix.
*/
public static String long2String(long n, String unit, int decimalPlaces) {
if (unit == null) {
unit = "";
}
//take care a special case
if (n == Long.MIN_VALUE) {
return "-8 " + EXA.symbol + unit;
}
final StringBuilder b = new StringBuilder();
//take care negative numbers
if (n < 0) {
b.append('-');
n = -n;
}
if (n < KILO.value) {
//no prefix
b.append(n);
return (unit.isEmpty()? b: b.append(" ").append(unit)).toString();
} else {
//find traditional binary prefix
int i = 0;
for(; i < values().length && n >= values()[i].value; i++);
TraditionalBinaryPrefix prefix = values()[i - 1];
if ((n & prefix.bitMask) == 0) {
//exact division
b.append(n >> prefix.bitShift);
} else {
final String format = "%." + decimalPlaces + "f";
String s = format(format, n/(double)prefix.value);
//check a special rounding up case
if (s.startsWith("1024")) {
prefix = values()[i];
s = format(format, n/(double)prefix.value);
}
b.append(s);
}
return b.append(' ').append(prefix.symbol).append(unit).toString();
}
}
}
/**
* Escapes HTML Special characters present in the string.
* @param string
* @return HTML Escaped String representation
*/
public static String escapeHTML(String string) {
if(string == null) {
return null;
}
StringBuilder sb = new StringBuilder();
boolean lastCharacterWasSpace = false;
char[] chars = string.toCharArray();
for(char c : chars) {
if(c == ' ') {
if(lastCharacterWasSpace){
lastCharacterWasSpace = false;
sb.append(" ");
}else {
lastCharacterWasSpace=true;
sb.append(" ");
}
}else {
lastCharacterWasSpace = false;
switch(c) {
case '<': sb.append("<"); break;
case '>': sb.append(">"); break;
case '&': sb.append("&"); break;
case '"': sb.append("""); break;
default : sb.append(c);break;
}
}
}
return sb.toString();
}
/**
   * @return a byte description of the given long integer value.
*/
public static String byteDesc(long len) {
return TraditionalBinaryPrefix.long2String(len, "B", 2);
}
/** @deprecated use StringUtils.format("%.2f", d). */
@Deprecated
public static String limitDecimalTo2(double d) {
return format("%.2f", d);
}
/**
* Concatenates strings, using a separator.
*
* @param separator Separator to join with.
* @param strings Strings to join.
*/
public static String join(CharSequence separator, Iterable<?> strings) {
Iterator<?> i = strings.iterator();
if (!i.hasNext()) {
return "";
}
StringBuilder sb = new StringBuilder(i.next().toString());
while (i.hasNext()) {
sb.append(separator);
sb.append(i.next().toString());
}
return sb.toString();
}
public static String join(char separator, Iterable<?> strings) {
return join(separator + "", strings);
}
/**
* Concatenates strings, using a separator.
*
* @param separator to join with
* @param strings to join
* @return the joined string
*/
public static String join(CharSequence separator, String[] strings) {
// Ideally we don't have to duplicate the code here if array is iterable.
StringBuilder sb = new StringBuilder();
boolean first = true;
for (String s : strings) {
if (first) {
first = false;
} else {
sb.append(separator);
}
sb.append(s);
}
return sb.toString();
}
public static String join(char separator, String[] strings) {
return join(separator + "", strings);
}
/**
* Convert SOME_STUFF to SomeStuff
*
* @param s input string
* @return camelized string
*/
public static String camelize(String s) {
StringBuilder sb = new StringBuilder();
String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR, '_');
for (String word : words)
sb.append(org.apache.commons.lang.StringUtils.capitalize(word));
return sb.toString();
}
/**
* Matches a template string against a pattern, replaces matched tokens with
* the supplied replacements, and returns the result. The regular expression
* must use a capturing group. The value of the first capturing group is used
* to look up the replacement. If no replacement is found for the token, then
* it is replaced with the empty string.
*
* For example, assume template is "%foo%_%bar%_%baz%", pattern is "%(.*?)%",
* and replacements contains 2 entries, mapping "foo" to "zoo" and "baz" to
* "zaz". The result returned would be "zoo__zaz".
*
* @param template String template to receive replacements
* @param pattern Pattern to match for identifying tokens, must use a capturing
* group
* @param replacements Map<String, String> mapping tokens identified by the
* capturing group to their replacement values
* @return String template with replacements
*/
public static String replaceTokens(String template, Pattern pattern,
Map<String, String> replacements) {
StringBuffer sb = new StringBuffer();
Matcher matcher = pattern.matcher(template);
while (matcher.find()) {
String replacement = replacements.get(matcher.group(1));
if (replacement == null) {
replacement = "";
}
matcher.appendReplacement(sb, Matcher.quoteReplacement(replacement));
}
matcher.appendTail(sb);
return sb.toString();
}
/**
* Get stack trace for a given thread.
*/
public static String getStackTrace(Thread t) {
final StackTraceElement[] stackTrace = t.getStackTrace();
StringBuilder str = new StringBuilder();
for (StackTraceElement e : stackTrace) {
str.append(e.toString() + "\n");
}
return str.toString();
}
/**
* From a list of command-line arguments, remove both an option and the
* next argument.
*
* @param name Name of the option to remove. Example: -foo.
* @param args List of arguments.
* @return null if the option was not found; the value of the
* option otherwise.
* @throws IllegalArgumentException if the option's argument is not present
*/
public static String popOptionWithArgument(String name, List<String> args)
throws IllegalArgumentException {
String val = null;
for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
String cur = iter.next();
if (cur.equals("--")) {
// stop parsing arguments when you see --
break;
} else if (cur.equals(name)) {
iter.remove();
if (!iter.hasNext()) {
throw new IllegalArgumentException("option " + name + " requires 1 " +
"argument.");
}
val = iter.next();
iter.remove();
break;
}
}
return val;
}
/**
* From a list of command-line arguments, remove an option.
*
* @param name Name of the option to remove. Example: -foo.
* @param args List of arguments.
* @return true if the option was found and removed; false otherwise.
*/
public static boolean popOption(String name, List<String> args) {
for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
String cur = iter.next();
if (cur.equals("--")) {
// stop parsing arguments when you see --
break;
} else if (cur.equals(name)) {
iter.remove();
return true;
}
}
return false;
}
/**
* From a list of command-line arguments, return the first non-option
* argument. Non-option arguments are those which either come after
* a double dash (--) or do not start with a dash.
*
* @param args List of arguments.
* @return The first non-option argument, or null if there were none.
*/
public static String popFirstNonOption(List<String> args) {
for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
String cur = iter.next();
if (cur.equals("--")) {
if (!iter.hasNext()) {
return null;
}
cur = iter.next();
iter.remove();
return cur;
} else if (!cur.startsWith("-")) {
iter.remove();
return cur;
}
}
return null;
}
/**
* Converts all of the characters in this String to lower case with
* Locale.ENGLISH.
*
* @param str string to be converted
* @return the str, converted to lowercase.
*/
public static String toLowerCase(String str) {
return str.toLowerCase(Locale.ENGLISH);
}
/**
* Converts all of the characters in this String to upper case with
* Locale.ENGLISH.
*
* @param str string to be converted
* @return the str, converted to uppercase.
*/
public static String toUpperCase(String str) {
return str.toUpperCase(Locale.ENGLISH);
}
/**
* Compare strings locale-freely by using String#equalsIgnoreCase.
*
   * @param s1 Non-null string to be compared
   * @param s2 string to be compared
   * @return true if the two strings are equal, ignoring case
*/
public static boolean equalsIgnoreCase(String s1, String s2) {
Preconditions.checkNotNull(s1);
// don't check non-null against s2 to make the semantics same as
// s1.equals(s2)
return s1.equalsIgnoreCase(s2);
}
}
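// Illustrative sketch exercising a few of the utilities above; all inputs are
// arbitrary values chosen for demonstration.
class StringUtilsExample {
  public static void main(String[] args) {
    // "1k" uses the traditional binary prefix, so it parses to 1 * 2^10.
    System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("1k")); // 1024
    System.out.println(StringUtils.byteDesc(1536));                            // 1.50 KB
    // Commas escaped with '\' are not treated as separators by split().
    String escaped = StringUtils.escapeString("a,b");                          // a\,b
    System.out.println(StringUtils.split(escaped + ",c").length);              // 2
    System.out.println(StringUtils.formatTime(3661000));                       // 1hrs, 1mins, 1sec
    java.util.Map<String, String> repl = new java.util.HashMap<String, String>();
    repl.put("user", "alice");
    System.out.println(StringUtils.replaceTokens(
        "/home/%user%/logs", java.util.regex.Pattern.compile("%(.*?)%"), repl)); // /home/alice/logs
  }
}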
| 34,397 | 30.791128 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PerformanceAdvisory {
public static final Logger LOG =
LoggerFactory.getLogger(PerformanceAdvisory.class);
}
| 1,018 | 38.192308 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.slf4j.Logger;
class LogAdapter {
private Log LOG;
private Logger LOGGER;
private LogAdapter(Log LOG) {
this.LOG = LOG;
}
private LogAdapter(Logger LOGGER) {
this.LOGGER = LOGGER;
}
public static LogAdapter create(Log LOG) {
return new LogAdapter(LOG);
}
public static LogAdapter create(Logger LOGGER) {
return new LogAdapter(LOGGER);
}
public void info(String msg) {
if (LOG != null) {
LOG.info(msg);
} else if (LOGGER != null) {
LOGGER.info(msg);
}
}
public void warn(String msg, Throwable t) {
if (LOG != null) {
LOG.warn(msg, t);
} else if (LOGGER != null) {
LOGGER.warn(msg, t);
}
}
public void debug(Throwable t) {
if (LOG != null) {
LOG.debug(t);
} else if (LOGGER != null) {
LOGGER.debug("", t);
}
}
public void error(String msg) {
if (LOG != null) {
LOG.error(msg);
} else if (LOGGER != null) {
LOGGER.error(msg);
}
}
}
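// Minimal sketch: wrapping a commons-logging Log so the same call sites can
// also accept an slf4j Logger; the logger name is an arbitrary assumption.
class LogAdapterExample {
  public static void main(String[] args) {
    LogAdapter adapter = LogAdapter.create(
        org.apache.commons.logging.LogFactory.getLog("LogAdapterExample"));
    adapter.info("hello from LogAdapter");
    adapter.warn("something odd happened", new RuntimeException("example"));
  }
}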
| 1,875 | 24.013333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
 * Some portions of this file Copyright (c) 2004-2006 Intel Corporation
* and licensed under the BSD license.
*/
package org.apache.hadoop.util;
import java.util.zip.Checksum;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A pure-java implementation of the CRC32 checksum that uses
* the CRC32-C polynomial, the same polynomial used by iSCSI
* and implemented on many Intel chipsets supporting SSE4.2.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class PureJavaCrc32C implements Checksum {
/** the current CRC value, bit-flipped */
private int crc;
  /** Create a new PureJavaCrc32C object. */
public PureJavaCrc32C() {
reset();
}
@Override
public long getValue() {
long ret = crc;
return (~ret) & 0xffffffffL;
}
@Override
public void reset() {
crc = 0xffffffff;
}
@Override
public void update(byte[] b, int off, int len) {
int localCrc = crc;
while(len > 7) {
final int c0 =(b[off+0] ^ localCrc) & 0xff;
final int c1 =(b[off+1] ^ (localCrc >>>= 8)) & 0xff;
final int c2 =(b[off+2] ^ (localCrc >>>= 8)) & 0xff;
final int c3 =(b[off+3] ^ (localCrc >>>= 8)) & 0xff;
localCrc = (T[T8_7_start + c0] ^ T[T8_6_start + c1])
^ (T[T8_5_start + c2] ^ T[T8_4_start + c3]);
final int c4 = b[off+4] & 0xff;
final int c5 = b[off+5] & 0xff;
final int c6 = b[off+6] & 0xff;
final int c7 = b[off+7] & 0xff;
localCrc ^= (T[T8_3_start + c4] ^ T[T8_2_start + c5])
^ (T[T8_1_start + c6] ^ T[T8_0_start + c7]);
off += 8;
len -= 8;
}
/* loop unroll - duff's device style */
switch(len) {
case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)];
default:
/* nothing */
}
// Publish crc out to object
crc = localCrc;
}
@Override
final public void update(int b) {
crc = (crc >>> 8) ^ T[T8_0_start + ((crc ^ b) & 0xff)];
}
// CRC polynomial tables generated by:
// java -cp build/test/classes/:build/classes/ \
// org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78
private static final int T8_0_start = 0*256;
private static final int T8_1_start = 1*256;
private static final int T8_2_start = 2*256;
private static final int T8_3_start = 3*256;
private static final int T8_4_start = 4*256;
private static final int T8_5_start = 5*256;
private static final int T8_6_start = 6*256;
private static final int T8_7_start = 7*256;
private static final int[] T = new int[] {
/* T8_0 */
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
/* T8_1 */
0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899,
0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21,
0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918,
0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0,
0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B,
0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823,
0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A,
0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2,
0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D,
0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25,
0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C,
0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4,
0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F,
0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27,
0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E,
0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6,
0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260,
0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8,
0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1,
0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059,
0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162,
0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA,
0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3,
0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B,
0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464,
0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC,
0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5,
0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D,
0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766,
0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE,
0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7,
0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F,
0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483,
/* T8_2 */
0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073,
0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6,
0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9,
0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C,
0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67,
0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2,
0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED,
0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828,
0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA,
0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F,
0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20,
0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5,
0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE,
0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B,
0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634,
0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1,
0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730,
0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5,
0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA,
0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F,
0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24,
0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1,
0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE,
0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B,
0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9,
0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C,
0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63,
0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6,
0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD,
0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238,
0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177,
0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2,
0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8,
/* T8_3 */
0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939,
0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF,
0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804,
0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2,
0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2,
0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54,
0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F,
0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69,
0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE,
0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538,
0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3,
0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405,
0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255,
0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3,
0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368,
0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E,
0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006,
0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0,
0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B,
0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD,
0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D,
0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B,
0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0,
0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656,
0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1,
0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07,
0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC,
0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A,
0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A,
0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C,
0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57,
0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1,
0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842,
/* T8_4 */
0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4,
0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65,
0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127,
0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6,
0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3,
0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32,
0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470,
0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1,
0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A,
0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB,
0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89,
0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018,
0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D,
0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C,
0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE,
0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F,
0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8,
0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39,
0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B,
0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA,
0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF,
0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E,
0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C,
0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD,
0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06,
0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497,
0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5,
0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544,
0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51,
0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0,
0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82,
0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013,
0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3,
/* T8_5 */
0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA,
0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5,
0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4,
0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB,
0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57,
0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548,
0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69,
0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576,
0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031,
0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E,
0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F,
0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810,
0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC,
0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3,
0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682,
0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D,
0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C,
0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413,
0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32,
0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D,
0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81,
0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E,
0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF,
0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0,
0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7,
0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8,
0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9,
0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6,
0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A,
0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975,
0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154,
0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B,
0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C,
/* T8_6 */
0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558,
0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B,
0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE,
0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD,
0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5,
0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6,
0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43,
0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110,
0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222,
0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71,
0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884,
0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7,
0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F,
0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC,
0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39,
0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A,
0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC,
0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF,
0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A,
0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59,
0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811,
0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542,
0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7,
0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4,
0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6,
0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185,
0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670,
0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23,
0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B,
0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238,
0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD,
0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E,
0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F,
/* T8_7 */
0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769,
0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3,
0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD,
0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07,
0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0,
0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A,
0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44,
0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E,
0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B,
0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881,
0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF,
0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135,
0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2,
0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18,
0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076,
0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC,
0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D,
0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7,
0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9,
0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63,
0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494,
0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E,
0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20,
0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA,
0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F,
0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5,
0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B,
0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751,
0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6,
0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C,
0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612,
0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8,
0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
};
}
| 31,248 | 47.979624 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IPList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Unstable
@InterfaceAudience.Public
public interface IPList {
/**
   * Returns true if the given IP address is in the IPList.
   * @param ipAddress the IP address to check
* @return boolean value indicating whether the ipAddress is in the IPList
*/
public abstract boolean isIn(String ipAddress);
}
| 1,257 | 37.121212 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* A helper to load the native hadoop code i.e. libhadoop.so.
* This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
* or the default java implementations where appropriate.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NativeCodeLoader {
private static final Log LOG =
LogFactory.getLog(NativeCodeLoader.class);
private static boolean nativeCodeLoaded = false;
static {
// Try to load native hadoop library and set fallback flag appropriately
if(LOG.isDebugEnabled()) {
LOG.debug("Trying to load the custom-built native-hadoop library...");
}
try {
System.loadLibrary("hadoop");
LOG.debug("Loaded the native-hadoop library");
nativeCodeLoaded = true;
} catch (Throwable t) {
// Ignore failure to load
if(LOG.isDebugEnabled()) {
LOG.debug("Failed to load native-hadoop with error: " + t);
LOG.debug("java.library.path=" +
System.getProperty("java.library.path"));
}
}
if (!nativeCodeLoaded) {
LOG.warn("Unable to load native-hadoop library for your platform... " +
"using builtin-java classes where applicable");
}
}
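  // Illustrative usage (a sketch, not part of the original source): callers
  // typically guard native-only code paths with checks such as
  //   if (NativeCodeLoader.isNativeCodeLoaded()
  //       && NativeCodeLoader.buildSupportsSnappy()) {
  //     // safe to use the native Snappy support
  //   }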
/**
* Check if native-hadoop code is loaded for this platform.
*
* @return <code>true</code> if native-hadoop is loaded,
* else <code>false</code>
*/
public static boolean isNativeCodeLoaded() {
return nativeCodeLoaded;
}
/**
* Returns true only if this build was compiled with support for snappy.
*/
public static native boolean buildSupportsSnappy();
/**
* Returns true only if this build was compiled with support for openssl.
*/
public static native boolean buildSupportsOpenssl();
public static native String getLibraryName();
/**
   * Return whether native hadoop libraries, if present, can be used for this job.
* @param conf configuration
*
* @return <code>true</code> if native hadoop libraries, if present, can be
* used for this job; <code>false</code> otherwise.
*/
public boolean getLoadNativeLibraries(Configuration conf) {
return conf.getBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,
CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT);
}
/**
* Set if native hadoop libraries, if present, can be used for this job.
*
* @param conf configuration
* @param loadNativeLibraries can native hadoop libraries be loaded
*/
public void setLoadNativeLibraries(Configuration conf,
boolean loadNativeLibraries) {
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,
loadNativeLibraries);
}
}
| 3,909 | 33.298246 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.nio.channels.AsynchronousCloseException;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Preconditions;
/**
* A closeable object that maintains a reference count.
*
* Once the object is closed, attempting to take a new reference will throw
* ClosedChannelException.
*/
public class CloseableReferenceCount {
/**
* Bit mask representing a closed domain socket.
*/
private static final int STATUS_CLOSED_MASK = 1 << 30;
/**
* The status bits.
*
* Bit 30: 0 = open, 1 = closed.
* Bits 29 to 0: the reference count.
*/
private final AtomicInteger status = new AtomicInteger(0);
public CloseableReferenceCount() { }
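  // Illustrative usage (a sketch, not part of the original source): take a
  // reference around each use of the guarded resource and release it when done:
  //   refCount.reference();
  //   try {
  //     // ... use the resource ...
  //   } finally {
  //     refCount.unreference();
  //   }
  // setClosed() later marks the object closed so no new references are issued.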
/**
* Increment the reference count.
*
* @throws ClosedChannelException If the status is closed.
*/
public void reference() throws ClosedChannelException {
int curBits = status.incrementAndGet();
if ((curBits & STATUS_CLOSED_MASK) != 0) {
status.decrementAndGet();
throw new ClosedChannelException();
}
}
/**
* Decrement the reference count.
*
* @return True if the object is closed and has no outstanding
* references.
*/
public boolean unreference() {
int newVal = status.decrementAndGet();
Preconditions.checkState(newVal != 0xffffffff,
"called unreference when the reference count was already at 0.");
return newVal == STATUS_CLOSED_MASK;
}
/**
* Decrement the reference count, checking to make sure that the
* CloseableReferenceCount is not closed.
*
* @throws AsynchronousCloseException If the status is closed.
*/
public void unreferenceCheckClosed() throws ClosedChannelException {
int newVal = status.decrementAndGet();
if ((newVal & STATUS_CLOSED_MASK) != 0) {
throw new AsynchronousCloseException();
}
}
/**
* Return true if the status is currently open.
*
* @return True if the status is currently open.
*/
public boolean isOpen() {
return ((status.get() & STATUS_CLOSED_MASK) == 0);
}
/**
* Mark the status as closed.
*
* Once the status is closed, it cannot be reopened.
*
* @return The current reference count.
* @throws ClosedChannelException If someone else closes the object
* before we do.
*/
public int setClosed() throws ClosedChannelException {
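    // Loop on compareAndSet so that exactly one caller succeeds in setting the
    // closed bit; the returned value is the reference count at the moment the
    // object was closed.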
while (true) {
int curBits = status.get();
if ((curBits & STATUS_CLOSED_MASK) != 0) {
throw new ClosedChannelException();
}
if (status.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) {
return curBits & (~STATUS_CLOSED_MASK);
}
}
}
/**
* Get the current reference count.
*
* @return The current reference count.
*/
public int getReferenceCount() {
return status.get() & (~STATUS_CLOSED_MASK);
}
}
| 3,830 | 29.404762 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.Comparator;
import java.util.Iterator;
import java.util.PriorityQueue;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* A low memory footprint Cache which extends {@link LightWeightGSet}.
 * An entry in the cache expires if
 * (1) it has been in the cache for longer than the creation-expiration period, and
 * (2) it has not been accessed for the access-expiration period.
* When an entry is expired, it may be evicted from the cache.
* When the size limit of the cache is set, the cache will evict the entries
* with earliest expiration time, even if they are not expired.
*
 * It is guaranteed that the number of entries in the cache is less than or
 * equal to the size limit. However, it is not guaranteed that expired entries
 * are evicted from the cache. An expired entry may possibly be accessed after
 * its expiration time. In such a case, the expiration time may be updated.
*
* This class does not support null entry.
*
* This class is not thread safe.
*
* @param <K> Key type for looking up the entries
 * @param <E> Entry type, which must be
 * (1) a subclass of K, and
 * (2) an implementation of the {@link Entry} interface.
*/
@InterfaceAudience.Private
public class LightWeightCache<K, E extends K> extends LightWeightGSet<K, E> {
/** Limit the number of entries in each eviction. */
private static final int EVICTION_LIMIT = 1 << 16;
/**
* Entries of {@link LightWeightCache}.
*/
public static interface Entry extends LinkedElement {
/** Set the expiration time. */
public void setExpirationTime(long timeNano);
/** Get the expiration time. */
public long getExpirationTime();
}
/** Comparator for sorting entries by expiration time in ascending order. */
private static final Comparator<Entry> expirationTimeComparator
= new Comparator<Entry>() {
@Override
public int compare(Entry left, Entry right) {
final long l = left.getExpirationTime();
final long r = right.getExpirationTime();
return l > r? 1: l < r? -1: 0;
}
};
/** A clock for measuring time so that it can be mocked in unit tests. */
static class Clock {
/** @return the current time. */
long currentTime() {
return System.nanoTime();
}
}
private static int updateRecommendedLength(int recommendedLength,
int sizeLimit) {
return sizeLimit > 0 && sizeLimit < recommendedLength?
(sizeLimit/4*3) // 0.75 load factor
: recommendedLength;
}
/*
* The memory footprint for java.util.PriorityQueue is low but the
* remove(Object) method runs in linear time. We may improve it by using a
* balanced tree. However, we do not yet have a low memory footprint balanced
* tree implementation.
*/
private final PriorityQueue<Entry> queue;
private final long creationExpirationPeriod;
private final long accessExpirationPeriod;
private final int sizeLimit;
private final Clock clock;
/**
* @param recommendedLength Recommended size of the internal array.
* @param sizeLimit the limit of the size of the cache.
* The limit is disabled if it is <= 0.
* @param creationExpirationPeriod the time period C > 0 in nanoseconds that
* the creation of an entry is expired if it is added to the cache
* longer than C.
* @param accessExpirationPeriod the time period A >= 0 in nanoseconds that
* the access of an entry is expired if it is not accessed
* longer than A.
*/
public LightWeightCache(final int recommendedLength,
final int sizeLimit,
final long creationExpirationPeriod,
final long accessExpirationPeriod) {
this(recommendedLength, sizeLimit,
creationExpirationPeriod, accessExpirationPeriod, new Clock());
}
@VisibleForTesting
LightWeightCache(final int recommendedLength,
final int sizeLimit,
final long creationExpirationPeriod,
final long accessExpirationPeriod,
final Clock clock) {
super(updateRecommendedLength(recommendedLength, sizeLimit));
this.sizeLimit = sizeLimit;
if (creationExpirationPeriod <= 0) {
throw new IllegalArgumentException("creationExpirationPeriod = "
+ creationExpirationPeriod + " <= 0");
}
this.creationExpirationPeriod = creationExpirationPeriod;
if (accessExpirationPeriod < 0) {
throw new IllegalArgumentException("accessExpirationPeriod = "
+ accessExpirationPeriod + " < 0");
}
this.accessExpirationPeriod = accessExpirationPeriod;
this.queue = new PriorityQueue<Entry>(
sizeLimit > 0? sizeLimit + 1: 1 << 10, expirationTimeComparator);
this.clock = clock;
}
void setExpirationTime(final Entry e, final long expirationPeriod) {
e.setExpirationTime(clock.currentTime() + expirationPeriod);
}
boolean isExpired(final Entry e, final long now) {
return now > e.getExpirationTime();
}
private E evict() {
@SuppressWarnings("unchecked")
final E polled = (E)queue.poll();
final E removed = super.remove(polled);
Preconditions.checkState(removed == polled);
return polled;
}
/** Evict expired entries. */
private void evictExpiredEntries() {
final long now = clock.currentTime();
for(int i = 0; i < EVICTION_LIMIT; i++) {
final Entry peeked = queue.peek();
if (peeked == null || !isExpired(peeked, now)) {
return;
}
final E evicted = evict();
Preconditions.checkState(evicted == peeked);
}
}
/** Evict entries in order to enforce the size limit of the cache. */
private void evictEntries() {
if (sizeLimit > 0) {
for(int i = size(); i > sizeLimit; i--) {
evict();
}
}
}
@Override
public E get(K key) {
final E entry = super.get(key);
if (entry != null) {
if (accessExpirationPeriod > 0) {
// update expiration time
final Entry existing = (Entry)entry;
Preconditions.checkState(queue.remove(existing));
setExpirationTime(existing, accessExpirationPeriod);
queue.offer(existing);
}
}
return entry;
}
@Override
public E put(final E entry) {
if (!(entry instanceof Entry)) {
throw new HadoopIllegalArgumentException(
"!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
}
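    // Ordering: drop already-expired entries first, insert (replacing any
    // existing entry with the same key), stamp the new entry with its
    // creation-expiration time, add it to the expiration queue, and finally
    // enforce the size limit.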
evictExpiredEntries();
final E existing = super.put(entry);
if (existing != null) {
queue.remove(existing);
}
final Entry e = (Entry)entry;
setExpirationTime(e, creationExpirationPeriod);
queue.offer(e);
evictEntries();
return existing;
}
@Override
public E remove(K key) {
evictExpiredEntries();
final E removed = super.remove(key);
if (removed != null) {
Preconditions.checkState(queue.remove(removed));
}
return removed;
}
@Override
public Iterator<E> iterator() {
final Iterator<E> iter = super.iterator();
return new Iterator<E>() {
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public E next() {
return iter.next();
}
@Override
public void remove() {
// It would be tricky to support this because LightWeightCache#remove
// may evict multiple elements via evictExpiredEntries.
throw new UnsupportedOperationException("Remove via iterator is " +
"not supported for LightWeightCache");
}
};
}
}
| 8,499 | 31.19697 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progressable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A facility for reporting progress.
*
* <p>Clients and/or applications can use the provided <code>Progressable</code>
* to explicitly report progress to the Hadoop framework. This is especially
 * important for operations which take a significant amount of time since,
 * in lieu of the reported progress, the framework has to assume that an error
 * has occurred and time out the operation.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Progressable {
/**
* Report progress to the Hadoop framework.
*/
public void progress();
}
| 1,539 | 36.560976 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
/**
* Utilities for working with ZooKeeper.
*/
@InterfaceAudience.Private
public class ZKUtil {
/**
* Parse ACL permission string, partially borrowed from
* ZooKeeperMain private method
*/
private static int getPermFromString(String permString) {
int perm = 0;
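    // Each character maps to one ZooKeeper permission bit:
    // r=READ, w=WRITE, c=CREATE, d=DELETE, a=ADMIN.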
for (int i = 0; i < permString.length(); i++) {
char c = permString.charAt(i);
switch (c) {
case 'r':
perm |= ZooDefs.Perms.READ;
break;
case 'w':
perm |= ZooDefs.Perms.WRITE;
break;
case 'c':
perm |= ZooDefs.Perms.CREATE;
break;
case 'd':
perm |= ZooDefs.Perms.DELETE;
break;
case 'a':
perm |= ZooDefs.Perms.ADMIN;
break;
default:
throw new BadAclFormatException(
"Invalid permission '" + c + "' in permission string '" +
permString + "'");
}
}
return perm;
}
/**
* Helper method to remove a subset of permissions (remove) from a
* given set (perms).
   * @param perms The permissions flag to remove from. Should be an OR of
   * some combination of {@link ZooDefs.Perms}
   * @param remove The permissions to be removed. Should be an OR of
   * some combination of {@link ZooDefs.Perms}
* @return A permissions flag that is an OR of {@link ZooDefs.Perms}
* present in perms and not present in remove
*/
public static int removeSpecificPerms(int perms, int remove) {
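    // Note: XOR only clears bits that are actually set in perms; this assumes
    // remove is a subset of perms, as the method name and javadoc imply.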
return perms ^ remove;
}
/**
   * Parse a comma-separated list of ACL entries to secure generated nodes, e.g.
* <code>sasl:hdfs/[email protected]:cdrwa,sasl:hdfs/[email protected]:cdrwa</code>
*
* @return ACL list
* @throws {@link BadAclFormatException} if an ACL is invalid
*/
public static List<ACL> parseACLs(String aclString) throws
BadAclFormatException {
List<ACL> acl = Lists.newArrayList();
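    // Each component of the comma-separated string has the form scheme:id:perm
    // (e.g. "sasl:<principal>:cdrwa"); the id may itself contain ':' characters,
    // so the scheme is taken up to the first ':' and the perm after the last ':'.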
if (aclString == null) {
return acl;
}
List<String> aclComps = Lists.newArrayList(
Splitter.on(',').omitEmptyStrings().trimResults()
.split(aclString));
for (String a : aclComps) {
// from ZooKeeperMain private method
int firstColon = a.indexOf(':');
int lastColon = a.lastIndexOf(':');
if (firstColon == -1 || lastColon == -1 || firstColon == lastColon) {
throw new BadAclFormatException(
"ACL '" + a + "' not of expected form scheme:id:perm");
}
ACL newAcl = new ACL();
newAcl.setId(new Id(a.substring(0, firstColon), a.substring(
firstColon + 1, lastColon)));
newAcl.setPerms(getPermFromString(a.substring(lastColon + 1)));
acl.add(newAcl);
}
return acl;
}
/**
* Parse a comma-separated list of authentication mechanisms. Each
* such mechanism should be of the form 'scheme:auth' -- the same
* syntax used for the 'addAuth' command in the ZK CLI.
*
* @param authString the comma-separated auth mechanisms
* @return a list of parsed authentications
* @throws {@link BadAuthFormatException} if the auth format is invalid
*/
public static List<ZKAuthInfo> parseAuth(String authString) throws
      BadAuthFormatException {
List<ZKAuthInfo> ret = Lists.newArrayList();
if (authString == null) {
return ret;
}
List<String> authComps = Lists.newArrayList(
Splitter.on(',').omitEmptyStrings().trimResults()
.split(authString));
for (String comp : authComps) {
String parts[] = comp.split(":", 2);
if (parts.length != 2) {
throw new BadAuthFormatException(
"Auth '" + comp + "' not of expected form scheme:auth");
}
ret.add(new ZKAuthInfo(parts[0],
parts[1].getBytes(Charsets.UTF_8)));
}
return ret;
}
/**
* Because ZK ACLs and authentication information may be secret,
* allow the configuration values to be indirected through a file
* by specifying the configuration as "@/path/to/file". If this
* syntax is used, this function will return the contents of the file
* as a String.
*
* @param valInConf the value from the Configuration
* @return either the same value, or the contents of the referenced
* file if the configured value starts with "@"
* @throws IOException if the file cannot be read
*/
public static String resolveConfIndirection(String valInConf)
throws IOException {
if (valInConf == null) return null;
if (!valInConf.startsWith("@")) {
return valInConf;
}
String path = valInConf.substring(1).trim();
return Files.toString(new File(path), Charsets.UTF_8).trim();
}
/**
* An authentication token passed to ZooKeeper.addAuthInfo
*/
@InterfaceAudience.Private
public static class ZKAuthInfo {
private final String scheme;
private final byte[] auth;
public ZKAuthInfo(String scheme, byte[] auth) {
super();
this.scheme = scheme;
this.auth = auth;
}
public String getScheme() {
return scheme;
}
public byte[] getAuth() {
return auth;
}
}
@InterfaceAudience.Private
public static class BadAclFormatException extends
HadoopIllegalArgumentException {
private static final long serialVersionUID = 1L;
public BadAclFormatException(String message) {
super(message);
}
}
@InterfaceAudience.Private
public static class BadAuthFormatException extends
HadoopIllegalArgumentException {
private static final long serialVersionUID = 1L;
public BadAuthFormatException(String message) {
super(message);
}
}
}
| 6,926 | 30.343891 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A base class for running a Unix command.
*
* <code>Shell</code> can be used to run unix commands like <code>du</code> or
* <code>df</code>. It also offers facilities to gate commands by
* time-intervals.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
abstract public class Shell {
public static final Log LOG = LogFactory.getLog(Shell.class);
private static boolean IS_JAVA7_OR_ABOVE =
System.getProperty("java.version").substring(0, 3).compareTo("1.7") >= 0;
public static boolean isJava7OrAbove() {
return IS_JAVA7_OR_ABOVE;
}
/**
* Maximum command line length in Windows
* KB830473 documents this as 8191
*/
public static final int WINDOWS_MAX_SHELL_LENGHT = 8191;
/**
   * Checks if a given command (String[]) fits in the Windows maximum command
   * line length. Note that the input is expected to already include space
   * delimiters; no extra count will be added for delimiters.
*
* @param commands command parts, including any space delimiters
*/
public static void checkWindowsCommandLineLength(String...commands)
throws IOException {
int len = 0;
for (String s: commands) {
len += s.length();
}
if (len > WINDOWS_MAX_SHELL_LENGHT) {
throw new IOException(String.format(
"The command line has a length of %d exceeds maximum allowed length of %d. " +
"Command starts with: %s",
len, WINDOWS_MAX_SHELL_LENGHT,
StringUtils.join("", commands).substring(0, 100)));
}
}
/** a Unix command to get the current user's name */
public final static String USER_NAME_COMMAND = "whoami";
/** Windows CreateProcess synchronization object */
public static final Object WindowsProcessLaunchLock = new Object();
// OSType detection
public enum OSType {
OS_TYPE_LINUX,
OS_TYPE_WIN,
OS_TYPE_SOLARIS,
OS_TYPE_MAC,
OS_TYPE_FREEBSD,
OS_TYPE_OTHER
}
public static final OSType osType = getOSType();
static private OSType getOSType() {
String osName = System.getProperty("os.name");
if (osName.startsWith("Windows")) {
return OSType.OS_TYPE_WIN;
} else if (osName.contains("SunOS") || osName.contains("Solaris")) {
return OSType.OS_TYPE_SOLARIS;
} else if (osName.contains("Mac")) {
return OSType.OS_TYPE_MAC;
} else if (osName.contains("FreeBSD")) {
return OSType.OS_TYPE_FREEBSD;
} else if (osName.startsWith("Linux")) {
return OSType.OS_TYPE_LINUX;
} else {
// Some other form of Unix
return OSType.OS_TYPE_OTHER;
}
}
// Helper static vars for each platform
public static final boolean WINDOWS = (osType == OSType.OS_TYPE_WIN);
public static final boolean SOLARIS = (osType == OSType.OS_TYPE_SOLARIS);
public static final boolean MAC = (osType == OSType.OS_TYPE_MAC);
public static final boolean FREEBSD = (osType == OSType.OS_TYPE_FREEBSD);
public static final boolean LINUX = (osType == OSType.OS_TYPE_LINUX);
public static final boolean OTHER = (osType == OSType.OS_TYPE_OTHER);
public static final boolean PPC_64
= System.getProperties().getProperty("os.arch").contains("ppc64");
/** a Unix command to get the current user's groups list */
public static String[] getGroupsCommand() {
return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
: new String[]{"bash", "-c", "groups"};
}
/**
* a Unix command to get a given user's groups list.
* If the OS is not WINDOWS, the command will get the user's primary group
* first and finally get the groups list which includes the primary group.
   * That is, the user's primary group will be included twice.
*/
public static String[] getGroupsForUserCommand(final String user) {
    // the 'groups username' command output is inconsistent across different Unixes
return (WINDOWS)? new String[] { WINUTILS, "groups", "-F", "\"" + user + "\""}
: new String [] {"bash", "-c", "id -gn " + user
+ "&& id -Gn " + user};
}
/** a Unix command to get a given netgroup's user list */
public static String[] getUsersForNetgroupCommand(final String netgroup) {
    // the 'groups username' command output is inconsistent across different Unixes
return (WINDOWS)? new String [] {"cmd", "/c", "getent netgroup " + netgroup}
: new String [] {"bash", "-c", "getent netgroup " + netgroup};
}
/** Return a command to get permission information. */
public static String[] getGetPermissionCommand() {
return (WINDOWS) ? new String[] { WINUTILS, "ls", "-F" }
: new String[] { "/bin/ls", "-ld" };
}
/** Return a command to set permission */
public static String[] getSetPermissionCommand(String perm, boolean recursive) {
if (recursive) {
return (WINDOWS) ? new String[] { WINUTILS, "chmod", "-R", perm }
: new String[] { "chmod", "-R", perm };
} else {
return (WINDOWS) ? new String[] { WINUTILS, "chmod", perm }
: new String[] { "chmod", perm };
}
}
/**
* Return a command to set permission for specific file.
*
* @param perm String permission to set
* @param recursive boolean true to apply to all sub-directories recursively
* @param file String file to set
* @return String[] containing command and arguments
*/
public static String[] getSetPermissionCommand(String perm, boolean recursive,
String file) {
String[] baseCmd = getSetPermissionCommand(perm, recursive);
String[] cmdWithFile = Arrays.copyOf(baseCmd, baseCmd.length + 1);
cmdWithFile[cmdWithFile.length - 1] = file;
return cmdWithFile;
}
/** Return a command to set owner */
public static String[] getSetOwnerCommand(String owner) {
return (WINDOWS) ? new String[] { WINUTILS, "chown", "\"" + owner + "\"" }
: new String[] { "chown", owner };
}
/** Return a command to create symbolic links */
public static String[] getSymlinkCommand(String target, String link) {
return WINDOWS ? new String[] { WINUTILS, "symlink", link, target }
: new String[] { "ln", "-s", target, link };
}
  /** Return a command to read the target of a symbolic link. */
public static String[] getReadlinkCommand(String link) {
return WINDOWS ? new String[] { WINUTILS, "readlink", link }
: new String[] { "readlink", link };
}
/** Return a command for determining if process with specified pid is alive. */
public static String[] getCheckProcessIsAliveCommand(String pid) {
return Shell.WINDOWS ?
new String[] { Shell.WINUTILS, "task", "isAlive", pid } :
new String[] { "kill", "-0", isSetsidAvailable ? "-" + pid : pid };
}
/** Return a command to send a signal to a given pid */
public static String[] getSignalKillCommand(int code, String pid) {
return Shell.WINDOWS ? new String[] { Shell.WINUTILS, "task", "kill", pid } :
new String[] { "kill", "-" + code, isSetsidAvailable ? "-" + pid : pid };
}
public static final String ENV_NAME_REGEX = "[A-Za-z_][A-Za-z0-9_]*";
  /** Return a regular expression string that matches environment variables */
public static String getEnvironmentVariableRegex() {
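    // Matches %NAME% on Windows and $NAME on Unix-like shells.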
return (WINDOWS)
? "%(" + ENV_NAME_REGEX + "?)%"
: "\\$(" + ENV_NAME_REGEX + ")";
}
/**
* Returns a File referencing a script with the given basename, inside the
* given parent directory. The file extension is inferred by platform: ".cmd"
* on Windows, or ".sh" otherwise.
*
* @param parent File parent directory
* @param basename String script file basename
* @return File referencing the script in the directory
*/
public static File appendScriptExtension(File parent, String basename) {
return new File(parent, appendScriptExtension(basename));
}
/**
* Returns a script file name with the given basename. The file extension is
* inferred by platform: ".cmd" on Windows, or ".sh" otherwise.
*
* @param basename String script file basename
* @return String script file name
*/
public static String appendScriptExtension(String basename) {
return basename + (WINDOWS ? ".cmd" : ".sh");
}
/**
* Returns a command to run the given script. The script interpreter is
* inferred by platform: cmd on Windows or bash otherwise.
*
* @param script File script to run
* @return String[] command to run the script
*/
public static String[] getRunScriptCommand(File script) {
String absolutePath = script.getAbsolutePath();
return WINDOWS ? new String[] { "cmd", "/c", absolutePath } :
new String[] { "/bin/bash", absolutePath };
}
/** a Unix command to set permission */
public static final String SET_PERMISSION_COMMAND = "chmod";
/** a Unix command to set owner */
public static final String SET_OWNER_COMMAND = "chown";
  /** a Unix command to change the group ownership of a file */
public static final String SET_GROUP_COMMAND = "chgrp";
/** a Unix command to create a link */
public static final String LINK_COMMAND = "ln";
/** a Unix command to get a link target */
public static final String READ_LINK_COMMAND = "readlink";
  /** Time after which the executing script is considered to have timed out. */
protected long timeOutInterval = 0L;
  /** Whether or not the script timed out. */
private AtomicBoolean timedOut;
/** Centralized logic to discover and validate the sanity of the Hadoop
* home directory. Returns either NULL or a directory that exists and
* was specified via either -Dhadoop.home.dir or the HADOOP_HOME ENV
* variable. This does a lot of work so it should only be called
* privately for initialization once per process.
**/
private static String checkHadoopHome() {
// first check the Dflag hadoop.home.dir with JVM scope
String home = System.getProperty("hadoop.home.dir");
// fall back to the system/user-global env variable
if (home == null) {
home = System.getenv("HADOOP_HOME");
}
try {
// couldn't find either setting for hadoop's home directory
if (home == null) {
throw new IOException("HADOOP_HOME or hadoop.home.dir are not set.");
}
if (home.startsWith("\"") && home.endsWith("\"")) {
home = home.substring(1, home.length()-1);
}
// check that the home setting is actually a directory that exists
File homedir = new File(home);
if (!homedir.isAbsolute() || !homedir.exists() || !homedir.isDirectory()) {
throw new IOException("Hadoop home directory " + homedir
+ " does not exist, is not a directory, or is not an absolute path.");
}
home = homedir.getCanonicalPath();
} catch (IOException ioe) {
if (LOG.isDebugEnabled()) {
LOG.debug("Failed to detect a valid hadoop home directory", ioe);
}
home = null;
}
return home;
}
private static String HADOOP_HOME_DIR = checkHadoopHome();
// Public getter, throws an exception if HADOOP_HOME failed validation
// checks and is being referenced downstream.
public static final String getHadoopHome() throws IOException {
if (HADOOP_HOME_DIR == null) {
throw new IOException("Misconfigured HADOOP_HOME cannot be referenced.");
}
return HADOOP_HOME_DIR;
}
/** fully qualify the path to a binary that should be in a known hadoop
* bin location. This is primarily useful for disambiguating call-outs
* to executable sub-components of Hadoop to avoid clashes with other
* executables that may be in the path. Caveat: this call doesn't
* just format the path to the bin directory. It also checks for file
* existence of the composed path. The output of this call should be
* cached by callers.
* */
public static final String getQualifiedBinPath(String executable)
throws IOException {
// construct hadoop bin path to the specified executable
String fullExeName = HADOOP_HOME_DIR + File.separator + "bin"
+ File.separator + executable;
File exeFile = new File(fullExeName);
if (!exeFile.exists()) {
throw new IOException("Could not locate executable " + fullExeName
+ " in the Hadoop binaries.");
}
return exeFile.getCanonicalPath();
}
/** a Windows utility to emulate Unix commands */
public static final String WINUTILS = getWinUtilsPath();
public static final String getWinUtilsPath() {
String winUtilsPath = null;
try {
if (WINDOWS) {
winUtilsPath = getQualifiedBinPath("winutils.exe");
}
} catch (IOException ioe) {
LOG.error("Failed to locate the winutils binary in the hadoop binary path",
ioe);
}
return winUtilsPath;
}
public static final boolean isSetsidAvailable = isSetsidSupported();
private static boolean isSetsidSupported() {
if (Shell.WINDOWS) {
return false;
}
ShellCommandExecutor shexec = null;
boolean setsidSupported = true;
try {
String[] args = {"setsid", "bash", "-c", "echo $$"};
shexec = new ShellCommandExecutor(args);
shexec.execute();
} catch (IOException ioe) {
LOG.debug("setsid is not available on this machine. So not using it.");
setsidSupported = false;
} catch (Error err) {
if (err.getMessage().contains("posix_spawn is not " +
"a supported process launch mechanism")
&& (Shell.FREEBSD || Shell.MAC)) {
// HADOOP-11924: This is a workaround to avoid failure of class init
// by JDK issue on TR locale(JDK-8047340).
LOG.info("Avoiding JDK-8047340 on BSD-based systems.", err);
setsidSupported = false;
}
} finally { // handle the exit code
if (LOG.isDebugEnabled()) {
LOG.debug("setsid exited with exit code "
+ (shexec != null ? shexec.getExitCode() : "(null executor)"));
}
}
return setsidSupported;
}
/** Token separator regex used to parse Shell tool outputs */
public static final String TOKEN_SEPARATOR_REGEX
= WINDOWS ? "[|\n\r]" : "[ \t\n\r\f]";
private long interval; // refresh interval in msec
private long lastTime; // last time the command was performed
final private boolean redirectErrorStream; // merge stdout and stderr
private Map<String, String> environment; // env for the command execution
private File dir;
private Process process; // sub process used to execute the command
private int exitCode;
  /** Whether or not the script finished executing. */
private volatile AtomicBoolean completed;
public Shell() {
this(0L);
}
public Shell(long interval) {
this(interval, false);
}
/**
   * @param interval the minimum duration in milliseconds to wait before
   * re-executing the command.
   * @param redirectErrorStream whether to merge the process's stderr into
   * its stdout.
*/
public Shell(long interval, boolean redirectErrorStream) {
this.interval = interval;
this.lastTime = (interval<0) ? 0 : -interval;
this.redirectErrorStream = redirectErrorStream;
}
/** set the environment for the command
* @param env Mapping of environment variables
*/
protected void setEnvironment(Map<String, String> env) {
this.environment = env;
}
/** set the working directory
* @param dir The directory where the command would be executed
*/
protected void setWorkingDirectory(File dir) {
this.dir = dir;
}
/** check to see if a command needs to be executed and execute if needed */
protected void run() throws IOException {
if (lastTime + interval > Time.monotonicNow())
return;
exitCode = 0; // reset for next run
runCommand();
}
/** Run a command */
private void runCommand() throws IOException {
ProcessBuilder builder = new ProcessBuilder(getExecString());
Timer timeOutTimer = null;
ShellTimeoutTimerTask timeoutTimerTask = null;
timedOut = new AtomicBoolean(false);
completed = new AtomicBoolean(false);
if (environment != null) {
builder.environment().putAll(this.environment);
}
if (dir != null) {
builder.directory(this.dir);
}
builder.redirectErrorStream(redirectErrorStream);
if (Shell.WINDOWS) {
synchronized (WindowsProcessLaunchLock) {
// To workaround the race condition issue with child processes
// inheriting unintended handles during process launch that can
// lead to hangs on reading output and error streams, we
// serialize process creation. More info available at:
// http://support.microsoft.com/kb/315939
process = builder.start();
}
} else {
process = builder.start();
}
if (timeOutInterval > 0) {
timeOutTimer = new Timer("Shell command timeout");
timeoutTimerTask = new ShellTimeoutTimerTask(
this);
//One time scheduling.
timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
}
final BufferedReader errReader =
new BufferedReader(new InputStreamReader(
process.getErrorStream(), Charset.defaultCharset()));
BufferedReader inReader =
new BufferedReader(new InputStreamReader(
process.getInputStream(), Charset.defaultCharset()));
final StringBuffer errMsg = new StringBuffer();
// read error and input streams as this would free up the buffers
// free the error stream buffer
Thread errThread = new Thread() {
@Override
public void run() {
try {
String line = errReader.readLine();
while((line != null) && !isInterrupted()) {
errMsg.append(line);
errMsg.append(System.getProperty("line.separator"));
line = errReader.readLine();
}
} catch(IOException ioe) {
LOG.warn("Error reading the error stream", ioe);
}
}
};
try {
errThread.start();
    } catch (IllegalStateException ise) {
      // Thread.start() throws this only if the thread was already started;
      // nothing more to do here.
} catch (OutOfMemoryError oe) {
LOG.error("Caught " + oe + ". One possible reason is that ulimit"
+ " setting of 'max user processes' is too low. If so, do"
+ " 'ulimit -u <largerNum>' and try again.");
throw oe;
}
try {
parseExecResult(inReader); // parse the output
// clear the input stream buffer
String line = inReader.readLine();
while(line != null) {
line = inReader.readLine();
}
// wait for the process to finish and check the exit code
exitCode = process.waitFor();
// make sure that the error thread exits
joinThread(errThread);
completed.set(true);
      // the timeout thread handling is taken care of in the finally block
if (exitCode != 0) {
throw new ExitCodeException(exitCode, errMsg.toString());
}
} catch (InterruptedException ie) {
InterruptedIOException iie = new InterruptedIOException(ie.toString());
iie.initCause(ie);
throw iie;
} finally {
if (timeOutTimer != null) {
timeOutTimer.cancel();
}
// close the input stream
try {
// JDK 7 tries to automatically drain the input streams for us
// when the process exits, but since close is not synchronized,
// it creates a race if we close the stream first and the same
// fd is recycled. the stream draining thread will attempt to
// drain that fd!! it may block, OOM, or cause bizarre behavior
// see: https://bugs.openjdk.java.net/browse/JDK-8024521
// issue is fixed in build 7u60
InputStream stdout = process.getInputStream();
synchronized (stdout) {
inReader.close();
}
} catch (IOException ioe) {
LOG.warn("Error while closing the input stream", ioe);
}
if (!completed.get()) {
errThread.interrupt();
joinThread(errThread);
}
try {
InputStream stderr = process.getErrorStream();
synchronized (stderr) {
errReader.close();
}
} catch (IOException ioe) {
LOG.warn("Error while closing the error stream", ioe);
}
process.destroy();
lastTime = Time.monotonicNow();
}
}
private static void joinThread(Thread t) {
while (t.isAlive()) {
try {
t.join();
} catch (InterruptedException ie) {
if (LOG.isWarnEnabled()) {
LOG.warn("Interrupted while joining on: " + t, ie);
}
t.interrupt(); // propagate interrupt
}
}
}
/** return an array containing the command name & its parameters */
protected abstract String[] getExecString();
/** Parse the execution result */
protected abstract void parseExecResult(BufferedReader lines)
throws IOException;
/**
* Get the environment variable
*/
public String getEnvironment(String env) {
return environment.get(env);
}
/** get the current sub-process executing the given command
* @return process executing the command
*/
public Process getProcess() {
return process;
}
/** get the exit code
* @return the exit code of the process
*/
public int getExitCode() {
return exitCode;
}
/**
* This is an IOException with exit code added.
*/
public static class ExitCodeException extends IOException {
private final int exitCode;
public ExitCodeException(int exitCode, String message) {
super(message);
this.exitCode = exitCode;
}
public int getExitCode() {
return exitCode;
}
@Override
public String toString() {
final StringBuilder sb =
new StringBuilder("ExitCodeException ");
sb.append("exitCode=").append(exitCode)
.append(": ");
sb.append(super.getMessage());
return sb.toString();
}
}
public interface CommandExecutor {
void execute() throws IOException;
int getExitCode() throws IOException;
String getOutput() throws IOException;
void close();
}
/**
* A simple shell command executor.
*
   * <code>ShellCommandExecutor</code> should be used in cases where the output
   * of the command needs no explicit parsing and where the command, working
   * directory and the environment remain unchanged. The output of the command
* is stored as-is and is expected to be small.
*/
public static class ShellCommandExecutor extends Shell
implements CommandExecutor {
private String[] command;
private StringBuffer output;
public ShellCommandExecutor(String[] execString) {
this(execString, null);
}
public ShellCommandExecutor(String[] execString, File dir) {
this(execString, dir, null);
}
public ShellCommandExecutor(String[] execString, File dir,
Map<String, String> env) {
this(execString, dir, env , 0L);
}
/**
* Create a new instance of the ShellCommandExecutor to execute a command.
*
* @param execString The command to execute with arguments
* @param dir If not-null, specifies the directory which should be set
* as the current working directory for the command.
* If null, the current working directory is not modified.
* @param env If not-null, environment of the command will include the
* key-value pairs specified in the map. If null, the current
* environment is not modified.
* @param timeout Specifies the time in milliseconds, after which the
* command will be killed and the status marked as timedout.
* If 0, the command will not be timed out.
*/
public ShellCommandExecutor(String[] execString, File dir,
Map<String, String> env, long timeout) {
command = execString.clone();
if (dir != null) {
setWorkingDirectory(dir);
}
if (env != null) {
setEnvironment(env);
}
timeOutInterval = timeout;
}
/** Execute the shell command. */
public void execute() throws IOException {
this.run();
}
@Override
public String[] getExecString() {
return command;
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
output = new StringBuffer();
char[] buf = new char[512];
int nRead;
while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) {
output.append(buf, 0, nRead);
}
}
/** Get the output of the shell command.*/
public String getOutput() {
return (output == null) ? "" : output.toString();
}
/**
* Returns the commands of this instance.
   * Arguments containing spaces are presented wrapped in quotes; other
   * arguments are presented raw.
*
* @return a string representation of the object.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
String[] args = getExecString();
for (String s : args) {
if (s.indexOf(' ') >= 0) {
builder.append('"').append(s).append('"');
} else {
builder.append(s);
}
builder.append(' ');
}
return builder.toString();
}
@Override
public void close() {
}
}
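  /*
   * A minimal usage sketch for ShellCommandExecutor; the command and working
   * directory are illustrative assumptions (a POSIX "ls" on the PATH).
   *
   *   ShellCommandExecutor exec = new ShellCommandExecutor(
   *       new String[] {"ls", "-l"}, new File("/tmp"));
   *   exec.execute();                    // runs synchronously; throws
   *                                      // ExitCodeException on non-zero exit
   *   int rc = exec.getExitCode();       // 0 on success
   *   String listing = exec.getOutput(); // captured stdout
   */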
/**
   * Check whether the script passed to the shell command executor has timed
   * out or not.
   *
   * @return true if the script timed out.
*/
public boolean isTimedOut() {
return timedOut.get();
}
/**
* Set if the command has timed out.
*
*/
private void setTimedOut() {
this.timedOut.set(true);
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>Shell</code> interface.
* @param cmd shell command to execute.
* @return the output of the executed command.
*/
public static String execCommand(String ... cmd) throws IOException {
return execCommand(null, cmd, 0L);
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>Shell</code> interface.
* @param env the map of environment key=value
* @param cmd shell command to execute.
   * @param timeout time in milliseconds after which the script should be
   * marked as timed out
   * @return the output of the executed command.
*/
public static String execCommand(Map<String, String> env, String[] cmd,
long timeout) throws IOException {
ShellCommandExecutor exec = new ShellCommandExecutor(cmd, null, env,
timeout);
exec.execute();
return exec.getOutput();
}
/**
* Static method to execute a shell command.
* Covers most of the simple cases without requiring the user to implement
* the <code>Shell</code> interface.
* @param env the map of environment key=value
* @param cmd shell command to execute.
* @return the output of the executed command.
*/
public static String execCommand(Map<String,String> env, String ... cmd)
throws IOException {
return execCommand(env, cmd, 0L);
}
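  /*
   * A one-call sketch of the static helpers; the commands and the environment
   * entry are illustrative assumptions.
   *
   *   String host = Shell.execCommand("hostname").trim();
   *   Map<String, String> env = Collections.singletonMap("LC_ALL", "C");
   *   String uname = Shell.execCommand(env, new String[] {"uname", "-a"}, 5000L);
   */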
/**
   * Timer task used to time out scripts spawned off by the shell.
*/
private static class ShellTimeoutTimerTask extends TimerTask {
private Shell shell;
public ShellTimeoutTimerTask(Shell shell) {
this.shell = shell;
}
@Override
public void run() {
Process p = shell.getProcess();
try {
p.exitValue();
} catch (Exception e) {
//Process has not terminated.
//So check if it has completed
//if not just destroy it.
if (p != null && !shell.completed.get()) {
shell.setTimedOut();
p.destroy();
}
}
}
}
}
| 29,409 | 32.649886 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Progress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Utility to assist with generation of progress reports. Applications build
* a hierarchy of {@link Progress} instances, each modelling a phase of
* execution. The root is constructed with {@link #Progress()}. Nodes for
* sub-phases are created by calling {@link #addPhase()}.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class Progress {
private static final Log LOG = LogFactory.getLog(Progress.class);
private String status = "";
private float progress;
private int currentPhase;
private ArrayList<Progress> phases = new ArrayList<Progress>();
private Progress parent;
  // Each phase can have a different progress weightage. For example, in
  // Map Task, the map phase accounts for 66.7% and the sort phase for 33.3%.
  // A caller that wants weighted phases must pass weightages for all phases
  // (when adding them) to a Progress object. When nodes are added without
  // specifying a weightage, every phase gets the same fixed weightage.
private boolean fixedWeightageForAllPhases = false;
private float progressPerPhase = 0.0f;
private ArrayList<Float> progressWeightagesForPhases = new ArrayList<Float>();
/** Creates a new root node. */
public Progress() {}
/** Adds a named node to the tree. */
public Progress addPhase(String status) {
Progress phase = addPhase();
phase.setStatus(status);
return phase;
}
/** Adds a node to the tree. Gives equal weightage to all phases */
public synchronized Progress addPhase() {
Progress phase = addNewPhase();
// set equal weightage for all phases
progressPerPhase = 1.0f / phases.size();
fixedWeightageForAllPhases = true;
return phase;
}
/** Adds a new phase. Caller needs to set progress weightage */
private synchronized Progress addNewPhase() {
Progress phase = new Progress();
phases.add(phase);
phase.setParent(this);
return phase;
}
/** Adds a named node with a specified progress weightage to the tree. */
public Progress addPhase(String status, float weightage) {
Progress phase = addPhase(weightage);
phase.setStatus(status);
return phase;
}
/** Adds a node with a specified progress weightage to the tree. */
public synchronized Progress addPhase(float weightage) {
Progress phase = new Progress();
progressWeightagesForPhases.add(weightage);
phases.add(phase);
phase.setParent(this);
    // Warn if the sum of weightages crosses 1.0
float sum = 0;
for (int i = 0; i < phases.size(); i++) {
sum += progressWeightagesForPhases.get(i);
}
if (sum > 1.0) {
LOG.warn("Sum of weightages can not be more than 1.0; But sum = " + sum);
}
return phase;
}
/** Adds n nodes to the tree. Gives equal weightage to all phases */
public synchronized void addPhases(int n) {
for (int i = 0; i < n; i++) {
addNewPhase();
}
// set equal weightage for all phases
progressPerPhase = 1.0f / phases.size();
fixedWeightageForAllPhases = true;
}
/**
   * Returns the progress weightage of the given phase.
   * @param phaseNum the phase number of the phase (child node) for which we
   * need the progress weightage
   * @return the progress weightage of the specified phase
*/
float getProgressWeightage(int phaseNum) {
if (fixedWeightageForAllPhases) {
return progressPerPhase; // all phases are of equal weightage
}
return progressWeightagesForPhases.get(phaseNum);
}
synchronized Progress getParent() { return parent; }
synchronized void setParent(Progress parent) { this.parent = parent; }
/** Called during execution to move to the next phase at this level in the
* tree. */
public synchronized void startNextPhase() {
currentPhase++;
}
/** Returns the current sub-node executing. */
public synchronized Progress phase() {
return phases.get(currentPhase);
}
/** Completes this node, moving the parent node to its next child. */
public void complete() {
// we have to traverse up to our parent, so be careful about locking.
Progress myParent;
synchronized(this) {
progress = 1.0f;
myParent = parent;
}
if (myParent != null) {
// this will synchronize on the parent, so we make sure we release
// our lock before getting the parent's, since we're traversing
// against the normal traversal direction used by get() or toString().
// We don't need transactional semantics, so we're OK doing this.
myParent.startNextPhase();
}
}
/** Called during execution on a leaf node to set its progress. */
public synchronized void set(float progress) {
if (Float.isNaN(progress)) {
progress = 0;
LOG.debug("Illegal progress value found, progress is Float.NaN. " +
"Progress will be changed to 0");
}
else if (progress == Float.NEGATIVE_INFINITY) {
progress = 0;
LOG.debug("Illegal progress value found, progress is " +
"Float.NEGATIVE_INFINITY. Progress will be changed to 0");
}
else if (progress < 0) {
progress = 0;
LOG.debug("Illegal progress value found, progress is less than 0." +
" Progress will be changed to 0");
}
else if (progress > 1) {
progress = 1;
LOG.debug("Illegal progress value found, progress is larger than 1." +
" Progress will be changed to 1");
}
else if (progress == Float.POSITIVE_INFINITY) {
progress = 1;
LOG.debug("Illegal progress value found, progress is " +
"Float.POSITIVE_INFINITY. Progress will be changed to 1");
}
this.progress = progress;
}
/** Returns the overall progress of the root. */
// this method probably does not need to be synchronized as getInternal() is
// synchronized and the node's parent never changes. Still, it doesn't hurt.
public synchronized float get() {
Progress node = this;
while (node.getParent() != null) { // find the root
      node = node.getParent();
}
return node.getInternal();
}
/**
* Returns progress in this node. get() would give overall progress of the
   * root node (not just the given current node).
*/
public synchronized float getProgress() {
return getInternal();
}
/** Computes progress in this node. */
private synchronized float getInternal() {
int phaseCount = phases.size();
if (phaseCount != 0) {
float subProgress = 0.0f;
float progressFromCurrentPhase = 0.0f;
if (currentPhase < phaseCount) {
subProgress = phase().getInternal();
progressFromCurrentPhase =
getProgressWeightage(currentPhase) * subProgress;
}
float progressFromCompletedPhases = 0.0f;
if (fixedWeightageForAllPhases) { // same progress weightage for each phase
progressFromCompletedPhases = progressPerPhase * currentPhase;
}
else {
for (int i = 0; i < currentPhase; i++) {
// progress weightages of phases could be different. Add them
progressFromCompletedPhases += getProgressWeightage(i);
}
}
return progressFromCompletedPhases + progressFromCurrentPhase;
} else {
return progress;
}
}
public synchronized void setStatus(String status) {
this.status = status;
}
@Override
public String toString() {
StringBuilder result = new StringBuilder();
toString(result);
return result.toString();
}
private synchronized void toString(StringBuilder buffer) {
buffer.append(status);
if (phases.size() != 0 && currentPhase < phases.size()) {
buffer.append(" > ");
phase().toString(buffer);
}
}
}
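/*
 * A minimal sketch of how a caller might build and drive a two-phase progress
 * tree; the phase names and weightages are illustrative assumptions.
 *
 *   Progress root = new Progress();
 *   Progress copy = root.addPhase("copy", 0.7f);  // 70% of the overall work
 *   Progress sort = root.addPhase("sort", 0.3f);  // remaining 30%
 *
 *   copy.set(0.5f);              // halfway through the copy phase
 *   float overall = root.get();  // 0.35f
 *   copy.complete();             // root moves on to the sort phase
 *   sort.set(1.0f);
 *   overall = root.get();        // 1.0f
 */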
| 8,830 | 33.228682 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A {@link GSet} is a set
 * which supports the {@link #get(Object)} operation.
* The {@link #get(Object)} operation uses a key to lookup an element.
*
* Null element is not supported.
*
* @param <K> The type of the keys.
* @param <E> The type of the elements, which must be a subclass of the keys.
*/
@InterfaceAudience.Private
public interface GSet<K, E extends K> extends Iterable<E> {
static final Log LOG = LogFactory.getLog(GSet.class);
/**
* @return The size of this set.
*/
int size();
/**
* Does this set contain an element corresponding to the given key?
* @param key The given key.
   * @return true if the given key is equal to a stored element.
* Otherwise, return false.
* @throws NullPointerException if key == null.
*/
boolean contains(K key);
/**
* Return the stored element which is equal to the given key.
* This operation is similar to {@link java.util.Map#get(Object)}.
* @param key The given key.
* @return The stored element if it exists.
* Otherwise, return null.
* @throws NullPointerException if key == null.
*/
E get(K key);
/**
* Add/replace an element.
* If the element does not exist, add it to the set.
* Otherwise, replace the existing element.
*
* Note that this operation
* is similar to {@link java.util.Map#put(Object, Object)}
* but is different from {@link java.util.Set#add(Object)}
* which does not replace the existing element if there is any.
*
* @param element The element being put.
* @return the previous stored element if there is any.
* Otherwise, return null.
* @throws NullPointerException if element == null.
*/
E put(E element);
/**
* Remove the element corresponding to the given key.
* This operation is similar to {@link java.util.Map#remove(Object)}.
* @param key The key of the element being removed.
* @return If such element exists, return it.
* Otherwise, return null.
* @throws NullPointerException if key == null.
*/
E remove(K key);
void clear();
}
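/*
 * A minimal HashMap-backed sketch of the contract above; the class name is an
 * illustrative assumption (Hadoop's real implementation is LightWeightGSet),
 * and the null checks required by the contract are omitted for brevity.
 *
 *   class MapBackedGSet<K, E extends K> implements GSet<K, E> {
 *     private final java.util.Map<K, E> map = new java.util.HashMap<K, E>();
 *     public int size() { return map.size(); }
 *     public boolean contains(K key) { return map.containsKey(key); }
 *     public E get(K key) { return map.get(key); }
 *     public E put(E element) { return map.put(element, element); } // replaces
 *     public E remove(K key) { return map.remove(key); }
 *     public void clear() { map.clear(); }
 *     public java.util.Iterator<E> iterator() { return map.values().iterator(); }
 *   }
 */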
| 3,076 | 32.813187 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SequentialNumber.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Sequential number generator.
*
* This class is thread safe.
*/
@InterfaceAudience.Private
public abstract class SequentialNumber {
private final AtomicLong currentValue;
/** Create a new instance with the given initial value. */
protected SequentialNumber(final long initialValue) {
currentValue = new AtomicLong(initialValue);
}
/** @return the current value. */
public long getCurrentValue() {
return currentValue.get();
}
/** Set current value. */
public void setCurrentValue(long value) {
currentValue.set(value);
}
/** Increment and then return the next value. */
public long nextValue() {
return currentValue.incrementAndGet();
}
/** Skip to the new value. */
public void skipTo(long newValue) throws IllegalStateException {
for(;;) {
final long c = getCurrentValue();
if (newValue < c) {
throw new IllegalStateException(
"Cannot skip to less than the current value (="
+ c + "), where newValue=" + newValue);
}
if (currentValue.compareAndSet(c, newValue)) {
return;
}
}
}
@Override
public boolean equals(final Object that) {
if (that == null || this.getClass() != that.getClass()) {
return false;
}
final AtomicLong thatValue = ((SequentialNumber)that).currentValue;
return currentValue.equals(thatValue);
}
@Override
public int hashCode() {
final long v = currentValue.get();
return (int)v ^ (int)(v >>> 32);
}
}
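/*
 * A minimal sketch of a concrete generator; the class name and initial value
 * are illustrative assumptions.
 *
 *   class TransactionIdGenerator extends SequentialNumber {
 *     TransactionIdGenerator() { super(0); }
 *   }
 *
 *   TransactionIdGenerator ids = new TransactionIdGenerator();
 *   long first = ids.nextValue();  // 1
 *   ids.skipTo(100);               // jump forward; throws if moving backwards
 *   long next = ids.nextValue();   // 101
 */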
| 2,456 | 28.25 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.DirectoryIteratorException;
import java.nio.file.Files;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
/**
 * Class that provides utility functions for checking disk problems.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DiskChecker {
public static class DiskErrorException extends IOException {
public DiskErrorException(String msg) {
super(msg);
}
public DiskErrorException(String msg, Throwable cause) {
super(msg, cause);
}
}
public static class DiskOutOfSpaceException extends IOException {
public DiskOutOfSpaceException(String msg) {
super(msg);
}
}
/**
   * The semantics of the mkdirsWithExistsCheck method differ from the mkdirs
   * method provided in Sun's java.io.File class in the following way:
* While creating the non-existent parent directories, this method checks for
* the existence of those directories if the mkdir fails at any point (since
* that directory might have just been created by some other process).
   * If both mkdir() and the exists() check fail for any seemingly
* non-existent directory, then we signal an error; Sun's mkdir would signal
* an error (return false) if a directory it is attempting to create already
* exists or the mkdir fails.
* @param dir
* @return true on success, false on failure
*/
public static boolean mkdirsWithExistsCheck(File dir) {
if (dir.mkdir() || dir.exists()) {
return true;
}
File canonDir = null;
try {
canonDir = dir.getCanonicalFile();
} catch (IOException e) {
return false;
}
String parent = canonDir.getParent();
return (parent != null) &&
(mkdirsWithExistsCheck(new File(parent)) &&
(canonDir.mkdir() || canonDir.exists()));
}
/**
* Recurse down a directory tree, checking all child directories.
* @param dir
* @throws DiskErrorException
*/
public static void checkDirs(File dir) throws DiskErrorException {
checkDir(dir);
IOException ex = null;
try (DirectoryStream<java.nio.file.Path> stream =
Files.newDirectoryStream(dir.toPath())) {
for (java.nio.file.Path entry: stream) {
File child = entry.toFile();
if (child.isDirectory()) {
checkDirs(child);
}
}
} catch (DirectoryIteratorException de) {
ex = de.getCause();
} catch (IOException ie) {
ex = ie;
}
if (ex != null) {
throw new DiskErrorException("I/O error when open a directory: "
+ dir.toString(), ex);
}
}
/**
* Create the directory if it doesn't exist and check that dir is readable,
* writable and executable
*
* @param dir
* @throws DiskErrorException
*/
public static void checkDir(File dir) throws DiskErrorException {
if (!mkdirsWithExistsCheck(dir)) {
throw new DiskErrorException("Cannot create directory: "
+ dir.toString());
}
checkDirAccess(dir);
}
/**
* Create the directory or check permissions if it already exists.
*
   * The semantics of the mkdirsWithExistsAndPermissionCheck method differ
   * from the mkdirs method provided in Sun's java.io.File class in the
   * following way:
* While creating the non-existent parent directories, this method checks for
* the existence of those directories if the mkdir fails at any point (since
* that directory might have just been created by some other process).
   * If both mkdir() and the exists() check fail for any seemingly
* non-existent directory, then we signal an error; Sun's mkdir would signal
* an error (return false) if a directory it is attempting to create already
* exists or the mkdir fails.
*
* @param localFS local filesystem
* @param dir directory to be created or checked
* @param expected expected permission
* @throws IOException
*/
public static void mkdirsWithExistsAndPermissionCheck(
LocalFileSystem localFS, Path dir, FsPermission expected)
throws IOException {
File directory = localFS.pathToFile(dir);
boolean created = false;
if (!directory.exists())
created = mkdirsWithExistsCheck(directory);
if (created || !localFS.getFileStatus(dir).getPermission().equals(expected))
localFS.setPermission(dir, expected);
}
/**
* Create the local directory if necessary, check permissions and also ensure
* it can be read from and written into.
*
* @param localFS local filesystem
* @param dir directory
* @param expected permission
* @throws DiskErrorException
* @throws IOException
*/
public static void checkDir(LocalFileSystem localFS, Path dir,
FsPermission expected)
throws DiskErrorException, IOException {
mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
checkDirAccess(localFS.pathToFile(dir));
}
/**
* Checks that the given file is a directory and that the current running
* process can read, write, and execute it.
*
* @param dir File to check
* @throws DiskErrorException if dir is not a directory, not readable, not
* writable, or not executable
*/
private static void checkDirAccess(File dir) throws DiskErrorException {
if (!dir.isDirectory()) {
throw new DiskErrorException("Not a directory: "
+ dir.toString());
}
checkAccessByFileMethods(dir);
}
/**
* Checks that the current running process can read, write, and execute the
* given directory by using methods of the File object.
*
* @param dir File to check
* @throws DiskErrorException if dir is not readable, not writable, or not
* executable
*/
private static void checkAccessByFileMethods(File dir)
throws DiskErrorException {
if (!FileUtil.canRead(dir)) {
throw new DiskErrorException("Directory is not readable: "
+ dir.toString());
}
if (!FileUtil.canWrite(dir)) {
throw new DiskErrorException("Directory is not writable: "
+ dir.toString());
}
if (!FileUtil.canExecute(dir)) {
throw new DiskErrorException("Directory is not executable: "
+ dir.toString());
}
}
}
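/*
 * A minimal usage sketch; the directory path is an illustrative assumption.
 *
 *   File localDir = new File("/data/hadoop/tmp");
 *   try {
 *     DiskChecker.checkDir(localDir);  // creates if missing, verifies rwx access
 *   } catch (DiskChecker.DiskErrorException e) {
 *     // the directory is missing or inaccessible -- take it out of service
 *   }
 */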
| 7,552 | 33.488584 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PrintJarMainClass.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.jar.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A micro-application that prints the main class name out of a jar file.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class PrintJarMainClass {
/**
* @param args
*/
public static void main(String[] args) {
try (JarFile jar_file = new JarFile(args[0])) {
Manifest manifest = jar_file.getManifest();
if (manifest != null) {
String value = manifest.getMainAttributes().getValue("Main-Class");
if (value != null) {
System.out.println(value.replaceAll("/", "."));
return;
}
}
} catch (Throwable e) {
// ignore it
}
System.out.println("UNKNOWN");
System.exit(1);
}
}
| 1,680 | 30.12963 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProgramDriver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A driver that is used to run programs added to it
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class ProgramDriver {
/**
* A description of a program based on its class and a
* human-readable description.
*/
Map<String, ProgramDescription> programs;
public ProgramDriver(){
programs = new TreeMap<String, ProgramDescription>();
}
static private class ProgramDescription {
static final Class<?>[] paramTypes = new Class<?>[] {String[].class};
/**
* Create a description of an example program.
* @param mainClass the class with the main for the example program
* @param description a string to display to the user in help messages
* @throws SecurityException if we can't use reflection
* @throws NoSuchMethodException if the class doesn't have a main method
*/
public ProgramDescription(Class<?> mainClass,
String description)
throws SecurityException, NoSuchMethodException {
this.main = mainClass.getMethod("main", paramTypes);
this.description = description;
}
/**
* Invoke the example application with the given arguments
* @param args the arguments for the application
* @throws Throwable The exception thrown by the invoked method
*/
public void invoke(String[] args)
throws Throwable {
try {
main.invoke(null, new Object[]{args});
} catch (InvocationTargetException except) {
throw except.getCause();
}
}
public String getDescription() {
return description;
}
private Method main;
private String description;
}
private static void printUsage(Map<String, ProgramDescription> programs) {
System.out.println("Valid program names are:");
for(Map.Entry<String, ProgramDescription> item : programs.entrySet()) {
System.out.println(" " + item.getKey() + ": " +
item.getValue().getDescription());
}
}
/**
   * This is the method that adds the class to the repository.
   * @param name The name you want the program to be invoked with
* @param mainClass The class that you want to add to the repository
* @param description The description of the class
* @throws NoSuchMethodException
* @throws SecurityException
*/
public void addClass(String name, Class<?> mainClass, String description)
throws Throwable {
programs.put(name , new ProgramDescription(mainClass, description));
}
/**
* This is a driver for the example programs.
* It looks at the first command line argument and tries to find an
* example program with that name.
* If it is found, it calls the main method in that class with the rest
* of the command line arguments.
* @param args The argument from the user. args[0] is the command to run.
* @return -1 on error, 0 on success
* @throws NoSuchMethodException
* @throws SecurityException
* @throws IllegalAccessException
* @throws IllegalArgumentException
* @throws Throwable Anything thrown by the example program's main
*/
public int run(String[] args)
throws Throwable
{
// Make sure they gave us a program name.
if (args.length == 0) {
System.out.println("An example program must be given as the" +
" first argument.");
printUsage(programs);
return -1;
}
// And that it is good.
ProgramDescription pgm = programs.get(args[0]);
if (pgm == null) {
System.out.println("Unknown program '" + args[0] + "' chosen.");
printUsage(programs);
return -1;
}
// Remove the leading argument and call main
String[] new_args = new String[args.length - 1];
for(int i=1; i < args.length; ++i) {
new_args[i-1] = args[i];
}
pgm.invoke(new_args);
return 0;
}
/**
* API compatible with Hadoop 1.x
*/
public void driver(String[] argv) throws Throwable {
if (run(argv) == -1) {
System.exit(-1);
}
}
}
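/*
 * A minimal sketch of a driver entry point; ExampleDriver and WordCount are
 * illustrative assumptions, not classes shipped with this file.
 *
 *   public class ExampleDriver {
 *     public static void main(String[] argv) throws Throwable {
 *       ProgramDriver pgd = new ProgramDriver();
 *       pgd.addClass("wordcount", WordCount.class,
 *           "A map/reduce program that counts the words in the input files.");
 *       pgd.driver(argv);  // dispatches to WordCount.main(...) or prints usage
 *     }
 *   }
 */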
| 5,221 | 32.050633 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.ChecksumException;
import com.google.common.annotations.VisibleForTesting;
/**
* Wrapper around JNI support code to do checksum computation
* natively.
*/
class NativeCrc32 {
/**
* Return true if the JNI-based native CRC extensions are available.
*/
public static boolean isAvailable() {
return NativeCodeLoader.isNativeCodeLoaded();
}
/**
* Verify the given buffers of data and checksums, and throw an exception
* if any checksum is invalid. The buffers given to this function should
* have their position initially at the start of the data, and their limit
* set at the end of the data. The position, limit, and mark are not
* modified.
*
* @param bytesPerSum the chunk size (eg 512 bytes)
* @param checksumType the DataChecksum type constant (NULL is not supported)
* @param sums the DirectByteBuffer pointing at the beginning of the
* stored checksums
* @param data the DirectByteBuffer pointing at the beginning of the
* data to check
* @param basePos the position in the file where the data buffer starts
* @param fileName the name of the file being verified
* @throws ChecksumException if there is an invalid checksum
*/
public static void verifyChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
throws ChecksumException {
nativeComputeChunkedSums(bytesPerSum, checksumType,
sums, sums.position(),
data, data.position(), data.remaining(),
fileName, basePos, true);
}
public static void verifyChunkedSumsByteArray(int bytesPerSum,
int checksumType, byte[] sums, int sumsOffset, byte[] data,
int dataOffset, int dataLength, String fileName, long basePos)
throws ChecksumException {
nativeComputeChunkedSumsByteArray(bytesPerSum, checksumType,
sums, sumsOffset,
data, dataOffset, dataLength,
fileName, basePos, true);
}
public static void calculateChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data) {
nativeComputeChunkedSums(bytesPerSum, checksumType,
sums, sums.position(),
data, data.position(), data.remaining(),
"", 0, false);
}
public static void calculateChunkedSumsByteArray(int bytesPerSum,
int checksumType, byte[] sums, int sumsOffset, byte[] data,
int dataOffset, int dataLength) {
nativeComputeChunkedSumsByteArray(bytesPerSum, checksumType,
sums, sumsOffset,
data, dataOffset, dataLength,
"", 0, false);
}
/**
* Verify the given buffers of data and checksums, and throw an exception
* if any checksum is invalid. The buffers given to this function should
* have their position initially at the start of the data, and their limit
* set at the end of the data. The position, limit, and mark are not
* modified. This method is retained only for backwards-compatibility with
* prior jar versions that need the corresponding JNI function.
*
* @param bytesPerSum the chunk size (eg 512 bytes)
* @param checksumType the DataChecksum type constant
* @param sums the DirectByteBuffer pointing at the beginning of the
* stored checksums
* @param sumsOffset start offset in sums buffer
* @param data the DirectByteBuffer pointing at the beginning of the
* data to check
* @param dataOffset start offset in data buffer
* @param dataLength length of data buffer
* @param fileName the name of the file being verified
* @param basePos the position in the file where the data buffer starts
* @throws ChecksumException if there is an invalid checksum
* @deprecated use {@link #nativeComputeChunkedSums(int, int, ByteBuffer, int,
* ByteBuffer, int, int, String, long, boolean)} instead
*/
@Deprecated
@VisibleForTesting
static native void nativeVerifyChunkedSums(
int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength,
String fileName, long basePos) throws ChecksumException;
private static native void nativeComputeChunkedSums(
int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength,
String fileName, long basePos, boolean verify);
private static native void nativeComputeChunkedSumsByteArray(
int bytesPerSum, int checksumType,
byte[] sums, int sumsOffset,
byte[] data, int dataOffset, int dataLength,
String fileName, long basePos, boolean verify);
// Copy the constants over from DataChecksum so that javah will pick them up
// and make them available in the native code header.
public static final int CHECKSUM_CRC32 = DataChecksum.CHECKSUM_CRC32;
public static final int CHECKSUM_CRC32C = DataChecksum.CHECKSUM_CRC32C;
}
| 5,815 | 40.542857 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IndexedSorter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface for sort algorithms accepting {@link IndexedSortable} items.
*
* A sort algorithm implementing this interface may only
* {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
* for a range of indices to effect a sort across that range.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public interface IndexedSorter {
/**
* Sort the items accessed through the given IndexedSortable over the given
* range of logical indices. From the perspective of the sort algorithm,
* each index between l (inclusive) and r (exclusive) is an addressable
* entry.
* @see IndexedSortable#compare
* @see IndexedSortable#swap
*/
void sort(IndexedSortable s, int l, int r);
/**
* Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
* periodically.
* @see #sort(IndexedSortable,int,int)
*/
void sort(IndexedSortable s, int l, int r, Progressable rep);
}
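/*
 * A minimal sketch of an IndexedSortable over a plain int[]; pairing it with
 * an IndexedSorter implementation such as Hadoop's QuickSort is assumed here
 * purely for illustration.
 *
 *   class IntArraySortable implements IndexedSortable {
 *     private final int[] a;
 *     IntArraySortable(int[] a) { this.a = a; }
 *     public int compare(int i, int j) { return Integer.compare(a[i], a[j]); }
 *     public void swap(int i, int j) { int t = a[i]; a[i] = a[j]; a[j] = t; }
 *   }
 *
 *   int[] data = {3, 1, 2};
 *   new QuickSort().sort(new IntArraySortable(data), 0, data.length);  // {1, 2, 3}
 */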
| 1,926 | 36.057692 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.collect.Interner;
import com.google.common.collect.Interners;
/**
* Provides equivalent behavior to String.intern() to optimize performance,
 * whereby it does not consume memory in the permanent generation.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StringInterner {
/**
* Retains a strong reference to each string instance it has interned.
*/
private final static Interner<String> strongInterner;
/**
* Retains a weak reference to each string instance it has interned.
*/
private final static Interner<String> weakInterner;
static {
strongInterner = Interners.newStrongInterner();
weakInterner = Interners.newWeakInterner();
}
/**
* Interns and returns a reference to the representative instance
* for any of a collection of string instances that are equal to each other.
* Retains strong reference to the instance,
* thus preventing it from being garbage-collected.
*
* @param sample string instance to be interned
* @return strong reference to interned string instance
*/
public static String strongIntern(String sample) {
if (sample == null) {
return null;
}
return strongInterner.intern(sample);
}
/**
* Interns and returns a reference to the representative instance
* for any of a collection of string instances that are equal to each other.
* Retains weak reference to the instance,
* and so does not prevent it from being garbage-collected.
*
* @param sample string instance to be interned
* @return weak reference to interned string instance
*/
public static String weakIntern(String sample) {
if (sample == null) {
return null;
}
return weakInterner.intern(sample);
}
}
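/*
 * A short usage sketch; the sample strings are illustrative assumptions.
 *
 *   String a = StringInterner.weakIntern(new String("counter.name"));
 *   String b = StringInterner.weakIntern(new String("counter.name"));
 *   assert a == b;  // both calls return the same canonical instance
 */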
| 2,752 | 31.388235 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
* A {@link URLClassLoader} for application isolation. Classes from the
* application JARs are loaded in preference to the parent loader.
*/
@Public
@Unstable
public class ApplicationClassLoader extends URLClassLoader {
/**
* Default value of the system classes if the user did not override them.
* JDK classes, hadoop classes and resources, and some select third-party
* classes are considered system classes, and are not loaded by the
* application classloader.
*/
public static final String SYSTEM_CLASSES_DEFAULT;
private static final String PROPERTIES_FILE =
"org.apache.hadoop.application-classloader.properties";
private static final String SYSTEM_CLASSES_DEFAULT_KEY =
"system.classes.default";
private static final Log LOG =
LogFactory.getLog(ApplicationClassLoader.class.getName());
private static final FilenameFilter JAR_FILENAME_FILTER =
new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.endsWith(".jar") || name.endsWith(".JAR");
}
};
static {
try (InputStream is = ApplicationClassLoader.class.getClassLoader()
.getResourceAsStream(PROPERTIES_FILE);) {
if (is == null) {
throw new ExceptionInInitializerError("properties file " +
PROPERTIES_FILE + " is not found");
}
Properties props = new Properties();
props.load(is);
// get the system classes default
String systemClassesDefault =
props.getProperty(SYSTEM_CLASSES_DEFAULT_KEY);
if (systemClassesDefault == null) {
throw new ExceptionInInitializerError("property " +
SYSTEM_CLASSES_DEFAULT_KEY + " is not found");
}
SYSTEM_CLASSES_DEFAULT = systemClassesDefault;
} catch (IOException e) {
throw new ExceptionInInitializerError(e);
}
}
private final ClassLoader parent;
private final List<String> systemClasses;
public ApplicationClassLoader(URL[] urls, ClassLoader parent,
List<String> systemClasses) {
super(urls, parent);
if (LOG.isDebugEnabled()) {
LOG.debug("urls: " + Arrays.toString(urls));
LOG.debug("system classes: " + systemClasses);
}
this.parent = parent;
if (parent == null) {
throw new IllegalArgumentException("No parent classloader!");
}
// if the caller-specified system classes are null or empty, use the default
this.systemClasses = (systemClasses == null || systemClasses.isEmpty()) ?
Arrays.asList(StringUtils.getTrimmedStrings(SYSTEM_CLASSES_DEFAULT)) :
systemClasses;
LOG.info("system classes: " + this.systemClasses);
}
public ApplicationClassLoader(String classpath, ClassLoader parent,
List<String> systemClasses) throws MalformedURLException {
this(constructUrlsFromClasspath(classpath), parent, systemClasses);
}
static URL[] constructUrlsFromClasspath(String classpath)
throws MalformedURLException {
List<URL> urls = new ArrayList<URL>();
for (String element : classpath.split(File.pathSeparator)) {
if (element.endsWith("/*")) {
String dir = element.substring(0, element.length() - 1);
File[] files = new File(dir).listFiles(JAR_FILENAME_FILTER);
if (files != null) {
for (File file : files) {
urls.add(file.toURI().toURL());
}
}
} else {
File file = new File(element);
if (file.exists()) {
urls.add(new File(element).toURI().toURL());
}
}
}
return urls.toArray(new URL[urls.size()]);
}
@Override
public URL getResource(String name) {
    URL url = null;
    if (!isSystemClass(name, systemClasses)) {
      url = findResource(name);
      if (url == null && name.startsWith("/")) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Remove leading / off " + name);
        }
        url = findResource(name.substring(1));
      }
    }
    if (url == null) {
      url = parent.getResource(name);
    }
    if (url != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("getResource(" + name + ")=" + url);
      }
    }
    return url;
}
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
return this.loadClass(name, false);
}
@Override
protected synchronized Class<?> loadClass(String name, boolean resolve)
throws ClassNotFoundException {
if (LOG.isDebugEnabled()) {
LOG.debug("Loading class: " + name);
}
Class<?> c = findLoadedClass(name);
ClassNotFoundException ex = null;
if (c == null && !isSystemClass(name, systemClasses)) {
// Try to load class from this classloader's URLs. Note that this is like
// the servlet spec, not the usual Java 2 behaviour where we ask the
// parent to attempt to load first.
try {
c = findClass(name);
if (LOG.isDebugEnabled() && c != null) {
LOG.debug("Loaded class: " + name + " ");
}
} catch (ClassNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug(e);
}
ex = e;
}
}
if (c == null) { // try parent
c = parent.loadClass(name);
if (LOG.isDebugEnabled() && c != null) {
LOG.debug("Loaded class from parent: " + name + " ");
}
}
if (c == null) {
throw ex != null ? ex : new ClassNotFoundException(name);
}
if (resolve) {
resolveClass(c);
}
return c;
}
/**
* Checks if a class should be included as a system class.
*
* A class is a system class if and only if it matches one of the positive
* patterns and none of the negative ones.
*
* @param name the class name to check
* @param systemClasses a list of system class configurations.
* @return true if the class is a system class
*/
public static boolean isSystemClass(String name, List<String> systemClasses) {
boolean result = false;
if (systemClasses != null) {
String canonicalName = name.replace('/', '.');
while (canonicalName.startsWith(".")) {
        canonicalName = canonicalName.substring(1);
}
for (String c : systemClasses) {
boolean shouldInclude = true;
if (c.startsWith("-")) {
c = c.substring(1);
shouldInclude = false;
}
if (canonicalName.startsWith(c)) {
if ( c.endsWith(".") // package
|| canonicalName.length() == c.length() // class
|| canonicalName.length() > c.length() // nested
&& canonicalName.charAt(c.length()) == '$' ) {
if (shouldInclude) {
result = true;
} else {
return false;
}
}
}
}
}
return result;
}
}
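// Usage sketch (illustrative only, not part of the upstream file). Shows the
// classloader resolving application classes before the parent; the jar
// directory and the application class name below are hypothetical.
class ApplicationClassLoaderExample {
  public static void main(String[] args) throws Exception {
    ClassLoader parent = ApplicationClassLoaderExample.class.getClassLoader();
    // "/tmp/app/lib/*" expands to every jar in that directory.
    ApplicationClassLoader loader =
        new ApplicationClassLoader("/tmp/app/lib/*", parent, null);
    // System classes are always delegated to the parent, never isolated.
    System.out.println(ApplicationClassLoader.isSystemClass("java.lang.String",
        Arrays.asList(StringUtils.getTrimmedStrings(
            ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT))));
    // Application classes are searched in the supplied URLs first.
    Class<?> c = loader.loadClass("com.example.MyJob"); // hypothetical class
    System.out.println(c.getClassLoader());
  }
}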
| 8,231 | 31.031128 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A simple class for pooling direct ByteBuffers. This is necessary
* because Direct Byte Buffers do not take up much space on the heap,
* and hence will not trigger GCs on their own. However, they do take
* native memory, and thus can cause high memory usage if not pooled.
* The pooled instances are referred to only via weak references, allowing
* them to be collected when a GC does run.
*
* This class only does effective pooling when many buffers will be
* allocated at the same size. There is no attempt to reuse larger
* buffers to satisfy smaller allocations.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class DirectBufferPool {
// Essentially implement a multimap with weak values.
final ConcurrentMap<Integer, Queue<WeakReference<ByteBuffer>>> buffersBySize =
new ConcurrentHashMap<Integer, Queue<WeakReference<ByteBuffer>>>();
/**
* Allocate a direct buffer of the specified size, in bytes.
* If a pooled buffer is available, returns that. Otherwise
* allocates a new one.
*/
public ByteBuffer getBuffer(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
// no available buffers for this size
return ByteBuffer.allocateDirect(size);
}
WeakReference<ByteBuffer> ref;
while ((ref = list.poll()) != null) {
ByteBuffer b = ref.get();
if (b != null) {
return b;
}
}
return ByteBuffer.allocateDirect(size);
}
/**
* Return a buffer into the pool. After being returned,
* the buffer may be recycled, so the user must not
* continue to use it in any way.
* @param buf the buffer to return
*/
public void returnBuffer(ByteBuffer buf) {
buf.clear(); // reset mark, limit, etc
int size = buf.capacity();
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
list = new ConcurrentLinkedQueue<WeakReference<ByteBuffer>>();
Queue<WeakReference<ByteBuffer>> prev = buffersBySize.putIfAbsent(size, list);
// someone else put a queue in the map before we did
if (prev != null) {
list = prev;
}
}
list.add(new WeakReference<ByteBuffer>(buf));
}
/**
* Return the number of available buffers of a given size.
* This is used only for tests.
*/
@VisibleForTesting
int countBuffersOfSize(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
return 0;
}
return list.size();
}
}
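// Usage sketch (illustrative only, not part of the upstream file): borrow a
// direct buffer, return it, and observe that an equally sized request can be
// served from the pool instead of a fresh native allocation.
class DirectBufferPoolExample {
  public static void main(String[] args) {
    DirectBufferPool pool = new DirectBufferPool();
    ByteBuffer buf = pool.getBuffer(4096);    // pool empty: allocates
    buf.put((byte) 42);
    pool.returnBuffer(buf);                   // cleared and made available again
    ByteBuffer again = pool.getBuffer(4096);  // typically the pooled instance
    System.out.println(again.isDirect() + " " + (again == buf));
  }
}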
| 3,832 | 34.165138 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java
|
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
 * Copied from Guava source code v15 (LimitedInputStream).
 * Guava deprecated LimitInputStream in v14 and removed it in v15. Copying the
 * class here keeps Hadoop compatible with Guava versions 11 through 15+.
*
* Originally: org.apache.hadoop.hbase.io.LimitInputStream
*/
@Unstable
public final class LimitInputStream extends FilterInputStream {
private long left;
private long mark = -1;
public LimitInputStream(InputStream in, long limit) {
super(in);
checkNotNull(in);
checkArgument(limit >= 0, "limit must be non-negative");
left = limit;
}
@Override
public int available() throws IOException {
return (int) Math.min(in.available(), left);
}
// it's okay to mark even if mark isn't supported, as reset won't work
@Override
public synchronized void mark(int readLimit) {
in.mark(readLimit);
mark = left;
}
@Override
public int read() throws IOException {
if (left == 0) {
return -1;
}
int result = in.read();
if (result != -1) {
--left;
}
return result;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (left == 0) {
return -1;
}
len = (int) Math.min(len, left);
int result = in.read(b, off, len);
if (result != -1) {
left -= result;
}
return result;
}
@Override
public synchronized void reset() throws IOException {
if (!in.markSupported()) {
throw new IOException("Mark not supported");
}
if (mark == -1) {
throw new IOException("Mark not set");
}
in.reset();
left = mark;
}
@Override
public long skip(long n) throws IOException {
n = Math.min(n, left);
long skipped = in.skip(n);
left -= skipped;
return skipped;
}
}
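// Usage sketch (illustrative only, not part of the upstream file): cap reads
// from an underlying stream at five bytes, e.g. to consume one fixed-length
// field without over-reading.
class LimitInputStreamExample {
  public static void main(String[] args) throws IOException {
    byte[] data = "hello world".getBytes("UTF-8");
    InputStream limited =
        new LimitInputStream(new java.io.ByteArrayInputStream(data), 5);
    int b;
    StringBuilder read = new StringBuilder();
    while ((b = limited.read()) != -1) {
      read.append((char) b);
    }
    System.out.println(read); // "hello"
  }
}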
| 2,899 | 25.363636 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ClassUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Enumeration;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class ClassUtil {
/**
* Find a jar that contains a class of the same name, if any.
* It will return a jar file, even if that is not the first thing
* on the class path that has a class with the same name.
*
* @param clazz the class to find.
* @return a jar file that contains the class, or null.
*/
public static String findContainingJar(Class<?> clazz) {
ClassLoader loader = clazz.getClassLoader();
String classFile = clazz.getName().replaceAll("\\.", "/") + ".class";
try {
for(final Enumeration<URL> itr = loader.getResources(classFile);
itr.hasMoreElements();) {
final URL url = itr.nextElement();
if ("jar".equals(url.getProtocol())) {
String toReturn = url.getPath();
if (toReturn.startsWith("file:")) {
toReturn = toReturn.substring("file:".length());
}
toReturn = URLDecoder.decode(toReturn, "UTF-8");
return toReturn.replaceAll("!.*$", "");
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
return null;
}
}
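// Usage sketch (illustrative only, not part of the upstream file): find the jar
// a class was loaded from; the result is null for classes loaded from a
// directory rather than a jar.
class ClassUtilExample {
  public static void main(String[] args) {
    String jar =
        ClassUtil.findContainingJar(org.apache.commons.logging.Log.class);
    System.out.println(jar); // e.g. /path/to/commons-logging-x.y.jar, or null
  }
}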
| 2,165 | 34.508197 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Plugin to calculate resource information on the system.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class SysInfo {
/**
* Return default OS instance.
   * @throws UnsupportedOperationException if the OS cannot be determined.
* @return Default instance for the detected OS.
*/
public static SysInfo newInstance() {
if (Shell.LINUX) {
return new SysInfoLinux();
}
if (Shell.WINDOWS) {
return new SysInfoWindows();
}
throw new UnsupportedOperationException("Could not determine OS");
}
/**
* Obtain the total size of the virtual memory present in the system.
*
* @return virtual memory size in bytes.
*/
public abstract long getVirtualMemorySize();
/**
* Obtain the total size of the physical memory present in the system.
*
   * @return physical memory size in bytes.
*/
public abstract long getPhysicalMemorySize();
/**
* Obtain the total size of the available virtual memory present
* in the system.
*
* @return available virtual memory size in bytes.
*/
public abstract long getAvailableVirtualMemorySize();
/**
* Obtain the total size of the available physical memory present
* in the system.
*
   * @return available physical memory size in bytes.
*/
public abstract long getAvailablePhysicalMemorySize();
/**
* Obtain the total number of logical processors present on the system.
*
* @return number of logical processors
*/
public abstract int getNumProcessors();
/**
* Obtain total number of physical cores present on the system.
*
* @return number of physical cores
*/
public abstract int getNumCores();
/**
   * Obtain the CPU frequency of the system.
*
* @return CPU frequency in kHz
*/
public abstract long getCpuFrequency();
/**
* Obtain the cumulative CPU time since the system is on.
*
* @return cumulative CPU time in milliseconds
*/
public abstract long getCumulativeCpuTime();
/**
   * Obtain the CPU usage % of the machine. Return -1 if it is unavailable.
*
* @return CPU usage as a percentage of available cycles.
*/
public abstract float getCpuUsage();
/**
* Obtain the aggregated number of bytes read over the network.
* @return total number of bytes read.
*/
public abstract long getNetworkBytesRead();
/**
* Obtain the aggregated number of bytes written to the network.
* @return total number of bytes written.
*/
public abstract long getNetworkBytesWritten();
/**
* Obtain the aggregated number of bytes read from disks.
*
* @return total number of bytes read.
*/
public abstract long getStorageBytesRead();
/**
* Obtain the aggregated number of bytes written to disks.
*
* @return total number of bytes written.
*/
public abstract long getStorageBytesWritten();
}
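// Usage sketch (illustrative only, not part of the upstream file): obtain the
// platform-specific implementation and query a few resource figures.
class SysInfoExample {
  public static void main(String[] args) {
    SysInfo info = SysInfo.newInstance(); // SysInfoLinux or SysInfoWindows
    System.out.println("logical processors: " + info.getNumProcessors());
    System.out.println("physical memory (bytes): " + info.getPhysicalMemorySize());
    System.out.println("cpu usage (%): " + info.getCpuUsage());
  }
}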
| 3,832 | 26.775362 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.lang.reflect.Array;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Contains utility methods for dealing with Java Generics.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class GenericsUtil {
/**
* Returns the Class object (of type <code>Class<T></code>) of the
* argument of type <code>T</code>.
* @param <T> The type of the argument
   * @param t the object to get the class of
* @return <code>Class<T></code>
*/
public static <T> Class<T> getClass(T t) {
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>)t.getClass();
return clazz;
}
/**
   * Converts the given <code>List<T></code> to an array of
* <code>T[]</code>.
* @param c the Class object of the items in the list
* @param list the list to convert
*/
  public static <T> T[] toArray(Class<T> c, List<T> list) {
    @SuppressWarnings("unchecked")
    T[] ta = (T[]) Array.newInstance(c, list.size());
    for (int i = 0; i < list.size(); i++) {
      ta[i] = list.get(i);
    }
    return ta;
  }
/**
   * Converts the given <code>List<T></code> to an array of
* <code>T[]</code>.
* @param list the list to convert
* @throws ArrayIndexOutOfBoundsException if the list is empty.
* Use {@link #toArray(Class, List)} if the list may be empty.
*/
public static <T> T[] toArray(List<T> list) {
return toArray(getClass(list.get(0)), list);
}
}
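// Usage sketch (illustrative only, not part of the upstream file): convert a
// List<String> into a String[] without an unchecked cast at the call site.
class GenericsUtilExample {
  public static void main(String[] args) {
    List<String> names = java.util.Arrays.asList("a", "b", "c");
    String[] array = GenericsUtil.toArray(String.class, names);
    System.out.println(array.length + " " + array[0]); // 3 a
  }
}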
| 2,381 | 30.342105 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import sun.misc.Signal;
import sun.misc.SignalHandler;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class logs a message whenever we're about to exit on a UNIX signal.
* This is helpful for determining the root cause of a process' exit.
* For example, if the process exited because the system administrator
 * ran a standard "kill," you would see 'RECEIVED SIGNAL 15: SIGTERM' in the log.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public enum SignalLogger {
INSTANCE;
private boolean registered = false;
/**
* Our signal handler.
*/
private static class Handler implements SignalHandler {
final private LogAdapter LOG;
final private SignalHandler prevHandler;
Handler(String name, LogAdapter LOG) {
this.LOG = LOG;
prevHandler = Signal.handle(new Signal(name), this);
}
/**
* Handle an incoming signal.
*
* @param signal The incoming signal
*/
@Override
public void handle(Signal signal) {
LOG.error("RECEIVED SIGNAL " + signal.getNumber() +
": SIG" + signal.getName());
prevHandler.handle(signal);
}
}
/**
* Register some signal handlers.
*
   * @param LOG The log to use in the signal handlers.
*/
public void register(final Log LOG) {
register(LogAdapter.create(LOG));
}
void register(final LogAdapter LOG) {
if (registered) {
throw new IllegalStateException("Can't re-install the signal handlers.");
}
registered = true;
StringBuilder bld = new StringBuilder();
bld.append("registered UNIX signal handlers for [");
final String SIGNALS[] = { "TERM", "HUP", "INT" };
String separator = "";
for (String signalName : SIGNALS) {
try {
new Handler(signalName, LOG);
bld.append(separator);
bld.append(signalName);
separator = ", ";
} catch (Exception e) {
LOG.debug(e);
}
}
bld.append("]");
LOG.info(bld.toString());
}
}
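// Usage sketch (illustrative only, not part of the upstream file): daemons call
// register() once at startup so that TERM/HUP/INT are logged before the process
// exits; registering a second time throws IllegalStateException.
class SignalLoggerExample {
  public static void main(String[] args) throws Exception {
    Log log =
        org.apache.commons.logging.LogFactory.getLog(SignalLoggerExample.class);
    SignalLogger.INSTANCE.register(log);
    Thread.sleep(60000); // send e.g. SIGTERM to this process to see the log line
  }
}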
| 2,965 | 29.265306 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Daemon.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.util.concurrent.ThreadFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A thread that has called {@link Thread#setDaemon(boolean)} with true. */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class Daemon extends Thread {
{
setDaemon(true); // always a daemon
}
/**
* Provide a factory for named daemon threads,
* for use in ExecutorServices constructors
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public static class DaemonFactory extends Daemon implements ThreadFactory {
@Override
public Thread newThread(Runnable runnable) {
return new Daemon(runnable);
}
}
Runnable runnable = null;
/** Construct a daemon thread. */
public Daemon() {
super();
}
/** Construct a daemon thread. */
public Daemon(Runnable runnable) {
super(runnable);
this.runnable = runnable;
this.setName(((Object)runnable).toString());
}
/** Construct a daemon thread to be part of a specified thread group. */
public Daemon(ThreadGroup group, Runnable runnable) {
super(group, runnable);
this.runnable = runnable;
this.setName(((Object)runnable).toString());
}
public Runnable getRunnable() {
return runnable;
}
}
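// Usage sketch (illustrative only, not part of the upstream file): run a task
// on a daemon thread so it cannot keep the JVM alive, and build an executor
// whose worker threads are daemons via DaemonFactory.
class DaemonExample {
  public static void main(String[] args) throws Exception {
    Daemon d = new Daemon(new Runnable() {
      @Override
      public void run() {
        System.out.println("work on " + Thread.currentThread().getName());
      }
    });
    d.start();
    d.join();
    java.util.concurrent.ExecutorService pool =
        java.util.concurrent.Executors.newSingleThreadExecutor(
            new Daemon.DaemonFactory());
    pool.submit(d.getRunnable());
    pool.shutdown();
  }
}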
| 2,223 | 29.465753 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Evolving
public class ThreadUtil {
private static final Log LOG = LogFactory.getLog(ThreadUtil.class);
/**
   * Cause the current thread to sleep for at least the provided
   * number of milliseconds. This method will log and ignore any
* {@link InterruptedException} encountered.
*
* @param millis the number of milliseconds for the current thread to sleep
*/
public static void sleepAtLeastIgnoreInterrupts(long millis) {
long start = Time.now();
while (Time.now() - start < millis) {
long timeToSleep = millis -
(Time.now() - start);
try {
Thread.sleep(timeToSleep);
} catch (InterruptedException ie) {
LOG.warn("interrupted while sleeping", ie);
}
}
}
}
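// Usage sketch (illustrative only, not part of the upstream file): sleep for at
// least the requested duration even if the thread is interrupted part-way.
class ThreadUtilExample {
  public static void main(String[] args) {
    long start = Time.now();
    ThreadUtil.sleepAtLeastIgnoreInterrupts(200);
    System.out.println("slept for ~" + (Time.now() - start) + " ms");
  }
}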
| 1,762 | 34.26 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CacheableIPList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
/**
* CacheableIPList loads a list of subnets from a file.
 * The list is cached, and the cache is refreshed automatically after the
 * specified cache timeout. A negative cache timeout disables automatic
 * refresh; the list is then cached until refresh() is explicitly called.
*
* Thread safe.
*/
public class CacheableIPList implements IPList {
private final long cacheTimeout;
private volatile long cacheExpiryTimeStamp;
private volatile FileBasedIPList ipList;
public CacheableIPList(FileBasedIPList ipList, long cacheTimeout) {
this.cacheTimeout = cacheTimeout;
this.ipList = ipList;
updateCacheExpiryTime();
}
/**
* Reloads the ip list
*/
private void reset() {
ipList = ipList.reload();
updateCacheExpiryTime();
}
private void updateCacheExpiryTime() {
if (cacheTimeout < 0) {
cacheExpiryTimeStamp = -1; // no automatic cache expiry.
    } else {
cacheExpiryTimeStamp = System.currentTimeMillis() + cacheTimeout;
}
}
/**
* Refreshes the ip list
*/
  public void refresh() {
cacheExpiryTimeStamp = 0;
}
@Override
public boolean isIn(String ipAddress) {
//is cache expired
//Uses Double Checked Locking using volatile
if (cacheExpiryTimeStamp >= 0 && cacheExpiryTimeStamp < System.currentTimeMillis()) {
synchronized(this) {
//check if cache expired again
if (cacheExpiryTimeStamp < System.currentTimeMillis()) {
reset();
}
}
}
return ipList.isIn(ipAddress);
}
}
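// Usage sketch (illustrative only, not part of the upstream file; assumes the
// FileBasedIPList(String fileName) constructor, which reads one IP address or
// CIDR subnet per line). The file path below is hypothetical.
class CacheableIPListExample {
  public static void main(String[] args) {
    IPList allowed = new CacheableIPList(
        new FileBasedIPList("/etc/hadoop/allowed-hosts.txt"),
        60 * 1000); // re-read the file at most once per minute
    System.out.println(allowed.isIn("10.0.0.5"));
  }
}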
| 2,295 | 29.210526 | 89 |
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.