repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Closeable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** @deprecated use java.io.Closeable */
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Closeable extends java.io.Closeable{
}
| 1,121 | 36.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A byte array backed output stream with a limit. The limit should be smaller
* than the buffer capacity. The object can be reused through the
* <code>reset</code> API, choosing a different limit for each round.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class BoundedByteArrayOutputStream extends OutputStream {
private byte[] buffer;
private int startOffset;
private int limit;
private int currentPointer;
/**
* Create a BoundedByteArrayOutputStream with the specified
* capacity
* @param capacity The capacity of the underlying byte array
*/
public BoundedByteArrayOutputStream(int capacity) {
this(capacity, capacity);
}
/**
* Create a BoundedByteArrayOutputStream with the specified
* capacity and limit.
* @param capacity The capacity of the underlying byte array
* @param limit The maximum limit up to which data can be written
*/
public BoundedByteArrayOutputStream(int capacity, int limit) {
this(new byte[capacity], 0, limit);
}
protected BoundedByteArrayOutputStream(byte[] buf, int offset, int limit) {
resetBuffer(buf, offset, limit);
}
protected void resetBuffer(byte[] buf, int offset, int limit) {
int capacity = buf.length - offset;
if ((capacity < limit) || (capacity | limit) < 0) {
throw new IllegalArgumentException("Invalid capacity/limit");
}
this.buffer = buf;
this.startOffset = offset;
this.currentPointer = offset;
this.limit = offset + limit;
}
@Override
public void write(int b) throws IOException {
if (currentPointer >= limit) {
throw new EOFException("Reaching the limit of the buffer.");
}
buffer[currentPointer++] = (byte) b;
}
@Override
public void write(byte b[], int off, int len) throws IOException {
if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
|| ((off + len) < 0)) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
if (currentPointer + len > limit) {
throw new EOFException("Reach the limit of the buffer");
}
System.arraycopy(b, off, buffer, currentPointer, len);
currentPointer += len;
}
/**
* Reset the limit
* @param newlim New Limit
*/
public void reset(int newlim) {
if (newlim > (buffer.length - startOffset)) {
throw new IndexOutOfBoundsException("Limit exceeds buffer size");
}
this.limit = newlim;
this.currentPointer = startOffset;
}
/** Reset the buffer */
public void reset() {
this.limit = buffer.length - startOffset;
this.currentPointer = startOffset;
}
/** Return the current limit */
public int getLimit() {
return limit;
}
/** Returns the underlying buffer.
* Data is only valid to {@link #size()}.
*/
public byte[] getBuffer() {
return buffer;
}
/** Returns the length of the valid data
* currently in the buffer.
*/
public int size() {
return currentPointer - startOffset;
}
public int available() {
return limit - currentPointer;
}
}
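/*
 * A minimal usage sketch of BoundedByteArrayOutputStream (an illustration added for
 * this edit, not part of the Hadoop source); the capacity, limit and sample bytes
 * are arbitrary choices for the example.
 */
class BoundedByteArrayOutputStreamExample {
  static void demo() throws IOException {
    // Capacity of 16 bytes, but refuse writes beyond the first 8 bytes.
    BoundedByteArrayOutputStream bounded = new BoundedByteArrayOutputStream(16, 8);
    bounded.write("abcdef".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    int written = bounded.size();      // 6: length of the valid data
    int room = bounded.available();    // 2: space left before the limit
    // Reuse the same backing array in a new round with a smaller limit.
    bounded.reset(4);
    bounded.write('x');                // a fifth byte in this round would throw EOFException
  }
}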
| 4,153 | 28.671429 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.MalformedInputException;
import java.text.CharacterIterator;
import java.text.StringCharacterIterator;
import java.util.Arrays;
import org.apache.avro.reflect.Stringable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** This class stores text using standard UTF8 encoding. It provides methods
* to serialize, deserialize, and compare texts at byte level. The type of
* length is integer and is serialized using zero-compressed format. <p>In
* addition, it provides methods for string traversal without converting the
* byte array to a string. <p>Also includes utilities for
* serializing/deserializing a string, coding/decoding a string, checking if a
* byte array contains valid UTF8 code, calculating the length of an encoded
* string.
*/
@Stringable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Text extends BinaryComparable
implements WritableComparable<BinaryComparable> {
private static final ThreadLocal<CharsetEncoder> ENCODER_FACTORY =
new ThreadLocal<CharsetEncoder>() {
@Override
protected CharsetEncoder initialValue() {
return Charset.forName("UTF-8").newEncoder().
onMalformedInput(CodingErrorAction.REPORT).
onUnmappableCharacter(CodingErrorAction.REPORT);
}
};
private static final ThreadLocal<CharsetDecoder> DECODER_FACTORY =
new ThreadLocal<CharsetDecoder>() {
@Override
protected CharsetDecoder initialValue() {
return Charset.forName("UTF-8").newDecoder().
onMalformedInput(CodingErrorAction.REPORT).
onUnmappableCharacter(CodingErrorAction.REPORT);
}
};
private static final byte [] EMPTY_BYTES = new byte[0];
private byte[] bytes;
private int length;
public Text() {
bytes = EMPTY_BYTES;
}
/** Construct from a string.
*/
public Text(String string) {
set(string);
}
/** Construct from another text. */
public Text(Text utf8) {
set(utf8);
}
/** Construct from a byte array.
*/
public Text(byte[] utf8) {
set(utf8);
}
/**
* Get a copy of the bytes that is exactly the length of the data.
* See {@link #getBytes()} for faster access to the underlying array.
*/
public byte[] copyBytes() {
byte[] result = new byte[length];
System.arraycopy(bytes, 0, result, 0, length);
return result;
}
/**
* Returns the raw bytes; however, only data up to {@link #getLength()} is
* valid. Please use {@link #copyBytes()} if you
* need the returned array to be precisely the length of the data.
*/
@Override
public byte[] getBytes() {
return bytes;
}
/** Returns the number of bytes in the byte array */
@Override
public int getLength() {
return length;
}
/**
* Returns the Unicode Scalar Value (32-bit integer value)
* for the character at <code>position</code>. Note that this
* method avoids using the converter or doing String instantiation
* @return the Unicode scalar value at position or -1
* if the position is invalid or points to a
* trailing byte
*/
public int charAt(int position) {
if (position > this.length) return -1; // too long
if (position < 0) return -1; // duh.
ByteBuffer bb = (ByteBuffer)ByteBuffer.wrap(bytes).position(position);
return bytesToCodePoint(bb.slice());
}
public int find(String what) {
return find(what, 0);
}
/**
* Finds any occurrence of <code>what</code> in the backing
* buffer, starting at position <code>start</code>. The starting
* position is measured in bytes and the return value is in
* terms of byte position in the buffer. The backing buffer is
* not converted to a string for this operation.
* @return byte position of the first occurrence of the search
* string in the UTF-8 buffer or -1 if not found
*/
public int find(String what, int start) {
try {
ByteBuffer src = ByteBuffer.wrap(this.bytes,0,this.length);
ByteBuffer tgt = encode(what);
byte b = tgt.get();
src.position(start);
while (src.hasRemaining()) {
if (b == src.get()) { // matching first byte
src.mark(); // save position in loop
tgt.mark(); // save position in target
boolean found = true;
int pos = src.position()-1;
while (tgt.hasRemaining()) {
if (!src.hasRemaining()) { // src expired first
tgt.reset();
src.reset();
found = false;
break;
}
if (!(tgt.get() == src.get())) {
tgt.reset();
src.reset();
found = false;
break; // no match
}
}
if (found) return pos;
}
}
return -1; // not found
} catch (CharacterCodingException e) {
// can't get here
e.printStackTrace();
return -1;
}
}
/** Set to contain the contents of a string.
*/
public void set(String string) {
try {
ByteBuffer bb = encode(string, true);
bytes = bb.array();
length = bb.limit();
}catch(CharacterCodingException e) {
throw new RuntimeException("Should not have happened ", e);
}
}
/** Set to a utf8 byte array
*/
public void set(byte[] utf8) {
set(utf8, 0, utf8.length);
}
/** copy a text. */
public void set(Text other) {
set(other.getBytes(), 0, other.getLength());
}
/**
* Set the Text to range of bytes
* @param utf8 the data to copy from
* @param start the first position of the new string
* @param len the number of bytes of the new string
*/
public void set(byte[] utf8, int start, int len) {
setCapacity(len, false);
System.arraycopy(utf8, start, bytes, 0, len);
this.length = len;
}
/**
* Append a range of bytes to the end of the given text
* @param utf8 the data to copy from
* @param start the first position to append from utf8
* @param len the number of bytes to append
*/
public void append(byte[] utf8, int start, int len) {
setCapacity(length + len, true);
System.arraycopy(utf8, start, bytes, length, len);
length += len;
}
/**
* Clear the string to empty.
*
* <em>Note</em>: For performance reasons, this call does not clear the
* underlying byte array that is retrievable via {@link #getBytes()}.
* In order to free the byte-array memory, call {@link #set(byte[])}
* with an empty byte array (For example, <code>new byte[0]</code>).
*/
public void clear() {
length = 0;
}
/*
* Sets the capacity of this Text object to <em>at least</em>
* <code>len</code> bytes. If the current buffer is longer,
* then the capacity and existing content of the buffer are
* unchanged. If <code>len</code> is larger
* than the current capacity, the Text object's capacity is
* increased to match.
* @param len the number of bytes we need
* @param keepData should the old data be kept
*/
private void setCapacity(int len, boolean keepData) {
if (bytes == null || bytes.length < len) {
if (bytes != null && keepData) {
bytes = Arrays.copyOf(bytes, Math.max(len,length << 1));
} else {
bytes = new byte[len];
}
}
}
/**
* Convert text back to string
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
try {
return decode(bytes, 0, length);
} catch (CharacterCodingException e) {
throw new RuntimeException("Should not have happened " , e);
}
}
/** deserialize
*/
@Override
public void readFields(DataInput in) throws IOException {
int newLength = WritableUtils.readVInt(in);
readWithKnownLength(in, newLength);
}
public void readFields(DataInput in, int maxLength) throws IOException {
int newLength = WritableUtils.readVInt(in);
if (newLength < 0) {
throw new IOException("tried to deserialize " + newLength +
" bytes of data! newLength must be non-negative.");
} else if (newLength >= maxLength) {
throw new IOException("tried to deserialize " + newLength +
" bytes of data, but maxLength = " + maxLength);
}
readWithKnownLength(in, newLength);
}
/** Skips over one Text in the input. */
public static void skip(DataInput in) throws IOException {
int length = WritableUtils.readVInt(in);
WritableUtils.skipFully(in, length);
}
/**
* Read a Text object whose length is already known.
* This allows creating Text from a stream which uses a different serialization
* format.
*/
public void readWithKnownLength(DataInput in, int len) throws IOException {
setCapacity(len, false);
in.readFully(bytes, 0, len);
length = len;
}
/** serialize
* write this object to out
* length uses zero-compressed encoding
* @see Writable#write(DataOutput)
*/
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, length);
out.write(bytes, 0, length);
}
public void write(DataOutput out, int maxLength) throws IOException {
if (length > maxLength) {
throw new IOException("data was too long to write! Expected " +
"less than or equal to " + maxLength + " bytes, but got " +
length + " bytes.");
}
WritableUtils.writeVInt(out, length);
out.write(bytes, 0, length);
}
/** Returns true iff <code>o</code> is a Text with the same contents. */
@Override
public boolean equals(Object o) {
if (o instanceof Text)
return super.equals(o);
return false;
}
@Override
public int hashCode() {
return super.hashCode();
}
/** A WritableComparator optimized for Text keys. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(Text.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = WritableUtils.decodeVIntSize(b1[s1]);
int n2 = WritableUtils.decodeVIntSize(b2[s2]);
return compareBytes(b1, s1+n1, l1-n1, b2, s2+n2, l2-n2);
}
}
static {
// register this comparator
WritableComparator.define(Text.class, new Comparator());
}
/// STATIC UTILITIES FROM HERE DOWN
/**
* Converts the provided byte array to a String using the
* UTF-8 encoding. If the input is malformed, it is
* replaced with the substitution character, which is U+FFFD.
*/
public static String decode(byte[] utf8) throws CharacterCodingException {
return decode(ByteBuffer.wrap(utf8), true);
}
public static String decode(byte[] utf8, int start, int length)
throws CharacterCodingException {
return decode(ByteBuffer.wrap(utf8, start, length), true);
}
/**
* Converts the provided byte array to a String using the
* UTF-8 encoding. If <code>replace</code> is true, then
* malformed input is replaced with the
* substitution character, which is U+FFFD. Otherwise the
* method throws a MalformedInputException.
*/
public static String decode(byte[] utf8, int start, int length, boolean replace)
throws CharacterCodingException {
return decode(ByteBuffer.wrap(utf8, start, length), replace);
}
private static String decode(ByteBuffer utf8, boolean replace)
throws CharacterCodingException {
CharsetDecoder decoder = DECODER_FACTORY.get();
if (replace) {
decoder.onMalformedInput(
java.nio.charset.CodingErrorAction.REPLACE);
decoder.onUnmappableCharacter(CodingErrorAction.REPLACE);
}
String str = decoder.decode(utf8).toString();
// set decoder back to its default value: REPORT
if (replace) {
decoder.onMalformedInput(CodingErrorAction.REPORT);
decoder.onUnmappableCharacter(CodingErrorAction.REPORT);
}
return str;
}
/**
* Converts the provided String to bytes using the
* UTF-8 encoding. If the input is malformed,
* invalid characters are replaced with the substitution character.
* @return ByteBuffer: bytes are stored at ByteBuffer.array()
* and length is ByteBuffer.limit()
*/
public static ByteBuffer encode(String string)
throws CharacterCodingException {
return encode(string, true);
}
/**
* Converts the provided String to bytes using the
* UTF-8 encoding. If <code>replace</code> is true, then
* malformed input is replaced with the
* substitution character, which is U+FFFD. Otherwise the
* method throws a MalformedInputException.
* @return ByteBuffer: bytes are stored at ByteBuffer.array()
* and length is ByteBuffer.limit()
*/
public static ByteBuffer encode(String string, boolean replace)
throws CharacterCodingException {
CharsetEncoder encoder = ENCODER_FACTORY.get();
if (replace) {
encoder.onMalformedInput(CodingErrorAction.REPLACE);
encoder.onUnmappableCharacter(CodingErrorAction.REPLACE);
}
ByteBuffer bytes =
encoder.encode(CharBuffer.wrap(string.toCharArray()));
if (replace) {
encoder.onMalformedInput(CodingErrorAction.REPORT);
encoder.onUnmappableCharacter(CodingErrorAction.REPORT);
}
return bytes;
}
static final public int DEFAULT_MAX_LEN = 1024 * 1024;
/** Read a UTF8 encoded string from in
*/
public static String readString(DataInput in) throws IOException {
return readString(in, Integer.MAX_VALUE);
}
/** Read a UTF8 encoded string with a maximum size
*/
public static String readString(DataInput in, int maxLength)
throws IOException {
int length = WritableUtils.readVIntInRange(in, 0, maxLength);
byte [] bytes = new byte[length];
in.readFully(bytes, 0, length);
return decode(bytes);
}
/** Write a UTF8 encoded string to out
*/
public static int writeString(DataOutput out, String s) throws IOException {
ByteBuffer bytes = encode(s);
int length = bytes.limit();
WritableUtils.writeVInt(out, length);
out.write(bytes.array(), 0, length);
return length;
}
/** Write a UTF8 encoded string with a maximum size to out
*/
public static int writeString(DataOutput out, String s, int maxLength)
throws IOException {
ByteBuffer bytes = encode(s);
int length = bytes.limit();
if (length > maxLength) {
throw new IOException("string was too long to write! Expected " +
"less than or equal to " + maxLength + " bytes, but got " +
length + " bytes.");
}
WritableUtils.writeVInt(out, length);
out.write(bytes.array(), 0, length);
return length;
}
////// states for validateUTF8
private static final int LEAD_BYTE = 0;
private static final int TRAIL_BYTE_1 = 1;
private static final int TRAIL_BYTE = 2;
/**
* Check if a byte array contains valid utf-8
* @param utf8 byte array
* @throws MalformedInputException if the byte array contains invalid utf-8
*/
public static void validateUTF8(byte[] utf8) throws MalformedInputException {
validateUTF8(utf8, 0, utf8.length);
}
/**
* Check to see if a byte array is valid utf-8
* @param utf8 the array of bytes
* @param start the offset of the first byte in the array
* @param len the length of the byte sequence
* @throws MalformedInputException if the byte array contains invalid bytes
*/
public static void validateUTF8(byte[] utf8, int start, int len)
throws MalformedInputException {
int count = start;
int leadByte = 0;
int length = 0;
int state = LEAD_BYTE;
while (count < start+len) {
int aByte = utf8[count] & 0xFF;
switch (state) {
case LEAD_BYTE:
leadByte = aByte;
length = bytesFromUTF8[aByte];
switch (length) {
case 0: // check for ASCII
if (leadByte > 0x7F)
throw new MalformedInputException(count);
break;
case 1:
if (leadByte < 0xC2 || leadByte > 0xDF)
throw new MalformedInputException(count);
state = TRAIL_BYTE_1;
break;
case 2:
if (leadByte < 0xE0 || leadByte > 0xEF)
throw new MalformedInputException(count);
state = TRAIL_BYTE_1;
break;
case 3:
if (leadByte < 0xF0 || leadByte > 0xF4)
throw new MalformedInputException(count);
state = TRAIL_BYTE_1;
break;
default:
// too long! Longest valid UTF-8 is 4 bytes (lead + three)
// or if < 0 we got a trail byte in the lead byte position
throw new MalformedInputException(count);
} // switch (length)
break;
case TRAIL_BYTE_1:
if (leadByte == 0xF0 && aByte < 0x90)
throw new MalformedInputException(count);
if (leadByte == 0xF4 && aByte > 0x8F)
throw new MalformedInputException(count);
if (leadByte == 0xE0 && aByte < 0xA0)
throw new MalformedInputException(count);
if (leadByte == 0xED && aByte > 0x9F)
throw new MalformedInputException(count);
// falls through to regular trail-byte test!!
case TRAIL_BYTE:
if (aByte < 0x80 || aByte > 0xBF)
throw new MalformedInputException(count);
if (--length == 0) {
state = LEAD_BYTE;
} else {
state = TRAIL_BYTE;
}
break;
default:
break;
} // switch (state)
count++;
}
}
/**
* Magic numbers for UTF-8. These are the number of bytes
* that <em>follow</em> a given lead byte. Trailing bytes
* have the value -1. The values 4 and 5 are presented in
* this table, even though valid UTF-8 cannot include the
* five and six byte sequences.
*/
static final int[] bytesFromUTF8 =
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
// trail bytes
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5 };
/**
* Returns the next code point at the current position in
* the buffer. The buffer's position will be incremented.
* Any mark set on this buffer will be changed by this method!
*/
public static int bytesToCodePoint(ByteBuffer bytes) {
bytes.mark();
byte b = bytes.get();
bytes.reset();
int extraBytesToRead = bytesFromUTF8[(b & 0xFF)];
if (extraBytesToRead < 0) return -1; // trailing byte!
int ch = 0;
switch (extraBytesToRead) {
case 5: ch += (bytes.get() & 0xFF); ch <<= 6; /* remember, illegal UTF-8 */
case 4: ch += (bytes.get() & 0xFF); ch <<= 6; /* remember, illegal UTF-8 */
case 3: ch += (bytes.get() & 0xFF); ch <<= 6;
case 2: ch += (bytes.get() & 0xFF); ch <<= 6;
case 1: ch += (bytes.get() & 0xFF); ch <<= 6;
case 0: ch += (bytes.get() & 0xFF);
}
ch -= offsetsFromUTF8[extraBytesToRead];
return ch;
}
static final int offsetsFromUTF8[] =
{ 0x00000000, 0x00003080,
0x000E2080, 0x03C82080, 0xFA082080, 0x82082080 };
/**
* For the given string, returns the number of UTF-8 bytes
* required to encode the string.
* @param string text to encode
* @return number of UTF-8 bytes required to encode
*/
public static int utf8Length(String string) {
CharacterIterator iter = new StringCharacterIterator(string);
char ch = iter.first();
int size = 0;
while (ch != CharacterIterator.DONE) {
if ((ch >= 0xD800) && (ch < 0xDC00)) {
// surrogate pair?
char trail = iter.next();
if ((trail > 0xDBFF) && (trail < 0xE000)) {
// valid pair
size += 4;
} else {
// invalid pair
size += 3;
iter.previous(); // rewind one
}
} else if (ch < 0x80) {
size++;
} else if (ch < 0x800) {
size += 2;
} else {
// ch < 0x10000, that is, the largest char value
size += 3;
}
ch = iter.next();
}
return size;
}
}
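/*
 * A minimal usage sketch of Text (an illustration added for this edit, not part of
 * the Hadoop source); the sample strings are arbitrary.
 */
class TextExample {
  static void demo() throws IOException {
    Text t = new Text("hadoop");
    // find() searches the UTF-8 bytes directly: "do" starts at byte offset 2.
    int pos = t.find("do");                       // 2
    // charAt() returns the Unicode scalar value at a byte position.
    int cp = t.charAt(0);                         // 0x68, i.e. 'h'
    // utf8Length() counts encoded bytes without materializing a byte array.
    int nBytes = Text.utf8Length("hadoop");       // 6
    // Round-trip through the Writable contract: a vint length followed by the bytes.
    java.io.ByteArrayOutputStream sink = new java.io.ByteArrayOutputStream();
    t.write(new java.io.DataOutputStream(sink));
    Text copy = new Text();
    copy.readFields(new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(sink.toByteArray())));
    boolean same = t.equals(copy);                // true
  }
}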
| 21,993 | 31.154971 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* OutputStream implementation that wraps a DataOutput.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DataOutputOutputStream extends OutputStream {
private final DataOutput out;
/**
* Construct an OutputStream from the given DataOutput. If 'out'
* is already an OutputStream, simply returns it. Otherwise, wraps
* it in an OutputStream.
* @param out the DataOutput to wrap
* @return an OutputStream instance that outputs to 'out'
*/
public static OutputStream constructOutputStream(DataOutput out) {
if (out instanceof OutputStream) {
return (OutputStream)out;
} else {
return new DataOutputOutputStream(out);
}
}
private DataOutputOutputStream(DataOutput out) {
this.out = out;
}
@Override
public void write(int b) throws IOException {
out.writeByte(b);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
out.write(b, off, len);
}
@Override
public void write(byte[] b) throws IOException {
out.write(b);
}
}
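/*
 * A minimal usage sketch of constructOutputStream() (an illustration added for this
 * edit, not part of the Hadoop source); the temporary file path is an arbitrary
 * choice for the example.
 */
class DataOutputOutputStreamExample {
  static void demo() throws IOException {
    // RandomAccessFile implements DataOutput but is not an OutputStream, so it gets wrapped.
    java.io.RandomAccessFile raf = new java.io.RandomAccessFile("/tmp/demo.bin", "rw");
    OutputStream wrapped = DataOutputOutputStream.constructOutputStream(raf);
    wrapped.write(new byte[] { 1, 2, 3 });
    raf.close();
    // A DataOutputStream already is an OutputStream, so it is returned unchanged.
    java.io.DataOutputStream dos =
        new java.io.DataOutputStream(new java.io.ByteArrayOutputStream());
    OutputStream same = DataOutputOutputStream.constructOutputStream(dos);
  }
}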
| 2,112 | 28.347222 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MD5Hash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.InputStream;
import java.util.Arrays;
import java.security.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A Writable for MD5 hash values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MD5Hash implements WritableComparable<MD5Hash> {
public static final int MD5_LEN = 16;
private static final ThreadLocal<MessageDigest> DIGESTER_FACTORY =
new ThreadLocal<MessageDigest>() {
@Override
protected MessageDigest initialValue() {
try {
return MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
};
private byte[] digest;
/** Constructs an MD5Hash. */
public MD5Hash() {
this.digest = new byte[MD5_LEN];
}
/** Constructs an MD5Hash from a hex string. */
public MD5Hash(String hex) {
setDigest(hex);
}
/** Constructs an MD5Hash with a specified value. */
public MD5Hash(byte[] digest) {
if (digest.length != MD5_LEN)
throw new IllegalArgumentException("Wrong length: " + digest.length);
this.digest = digest;
}
// javadoc from Writable
@Override
public void readFields(DataInput in) throws IOException {
in.readFully(digest);
}
/** Constructs, reads and returns an instance. */
public static MD5Hash read(DataInput in) throws IOException {
MD5Hash result = new MD5Hash();
result.readFields(in);
return result;
}
// javadoc from Writable
@Override
public void write(DataOutput out) throws IOException {
out.write(digest);
}
/** Copy the contents of another instance into this instance. */
public void set(MD5Hash that) {
System.arraycopy(that.digest, 0, this.digest, 0, MD5_LEN);
}
/** Returns the digest bytes. */
public byte[] getDigest() { return digest; }
/** Construct a hash value for a byte array. */
public static MD5Hash digest(byte[] data) {
return digest(data, 0, data.length);
}
/**
* Create a thread local MD5 digester
*/
public static MessageDigest getDigester() {
MessageDigest digester = DIGESTER_FACTORY.get();
digester.reset();
return digester;
}
/** Construct a hash value for the content from the InputStream. */
public static MD5Hash digest(InputStream in) throws IOException {
final byte[] buffer = new byte[4*1024];
final MessageDigest digester = getDigester();
for(int n; (n = in.read(buffer)) != -1; ) {
digester.update(buffer, 0, n);
}
return new MD5Hash(digester.digest());
}
/** Construct a hash value for a byte array. */
public static MD5Hash digest(byte[] data, int start, int len) {
byte[] digest;
MessageDigest digester = getDigester();
digester.update(data, start, len);
digest = digester.digest();
return new MD5Hash(digest);
}
/** Construct a hash value for a String. */
public static MD5Hash digest(String string) {
return digest(UTF8.getBytes(string));
}
/** Construct a hash value for a String. */
public static MD5Hash digest(UTF8 utf8) {
return digest(utf8.getBytes(), 0, utf8.getLength());
}
/** Construct a half-sized version of this MD5. Fits in a long **/
public long halfDigest() {
long value = 0;
for (int i = 0; i < 8; i++)
value |= ((digest[i] & 0xffL) << (8*(7-i)));
return value;
}
/**
* Return a 32-bit digest of the MD5.
* @return the first 4 bytes of the md5
*/
public int quarterDigest() {
int value = 0;
for (int i = 0; i < 4; i++)
value |= ((digest[i] & 0xff) << (8*(3-i)));
return value;
}
/** Returns true iff <code>o</code> is an MD5Hash whose digest contains the
* same values. */
@Override
public boolean equals(Object o) {
if (!(o instanceof MD5Hash))
return false;
MD5Hash other = (MD5Hash)o;
return Arrays.equals(this.digest, other.digest);
}
/** Returns a hash code value for this object.
* Only uses the first 4 bytes, since md5s are evenly distributed.
*/
@Override
public int hashCode() {
return quarterDigest();
}
/** Compares this object with the specified object for order.*/
@Override
public int compareTo(MD5Hash that) {
return WritableComparator.compareBytes(this.digest, 0, MD5_LEN,
that.digest, 0, MD5_LEN);
}
/** A WritableComparator optimized for MD5Hash keys. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(MD5Hash.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1, MD5_LEN, b2, s2, MD5_LEN);
}
}
static { // register this comparator
WritableComparator.define(MD5Hash.class, new Comparator());
}
private static final char[] HEX_DIGITS =
{'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
/** Returns a string representation of this object. */
@Override
public String toString() {
StringBuilder buf = new StringBuilder(MD5_LEN*2);
for (int i = 0; i < MD5_LEN; i++) {
int b = digest[i];
buf.append(HEX_DIGITS[(b >> 4) & 0xf]);
buf.append(HEX_DIGITS[b & 0xf]);
}
return buf.toString();
}
/** Sets the digest value from a hex string. */
public void setDigest(String hex) {
if (hex.length() != MD5_LEN*2)
throw new IllegalArgumentException("Wrong length: " + hex.length());
byte[] digest = new byte[MD5_LEN];
for (int i = 0; i < MD5_LEN; i++) {
int j = i << 1;
digest[i] = (byte)(charToNibble(hex.charAt(j)) << 4 |
charToNibble(hex.charAt(j+1)));
}
this.digest = digest;
}
private static final int charToNibble(char c) {
if (c >= '0' && c <= '9') {
return c - '0';
} else if (c >= 'a' && c <= 'f') {
return 0xa + (c - 'a');
} else if (c >= 'A' && c <= 'F') {
return 0xA + (c - 'A');
} else {
throw new RuntimeException("Not a hex character: " + c);
}
}
}
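/*
 * A minimal usage sketch of MD5Hash (an illustration added for this edit, not part
 * of the Hadoop source); the sample input is arbitrary.
 */
class MD5HashExample {
  static void demo() {
    byte[] data = "hello".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    MD5Hash hash = MD5Hash.digest(data);
    String hex = hash.toString();          // 32 lowercase hex characters
    // The hex form round-trips through the String constructor.
    MD5Hash same = new MD5Hash(hex);
    boolean equal = hash.equals(same);     // true
    // Truncated views of the digest, e.g. for use as hash codes or sampling keys.
    int quarter = hash.quarterDigest();    // first 4 bytes as an int
    long half = hash.halfDigest();         // first 8 bytes as a long
  }
}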
| 7,091 | 27.946939 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A {@link Writable} which is also {@link Comparable}.
*
* <p><code>WritableComparable</code>s can be compared to each other, typically
* via <code>Comparator</code>s. Any type which is to be used as a
* <code>key</code> in the Hadoop Map-Reduce framework should implement this
* interface.</p>
*
* <p>Note that <code>hashCode()</code> is frequently used in Hadoop to partition
* keys. It's important that your implementation of hashCode() returns the same
* result across different instances of the JVM. Note also that the default
* <code>hashCode()</code> implementation in <code>Object</code> does <b>not</b>
* satisfy this property.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class MyWritableComparable implements WritableComparable<MyWritableComparable> {
* // Some data
* private int counter;
* private long timestamp;
*
* public void write(DataOutput out) throws IOException {
* out.writeInt(counter);
* out.writeLong(timestamp);
* }
*
* public void readFields(DataInput in) throws IOException {
* counter = in.readInt();
* timestamp = in.readLong();
* }
*
* public int compareTo(MyWritableComparable o) {
* int thisValue = this.counter;
* int thatValue = o.counter;
* return (thisValue < thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
* }
*
* public int hashCode() {
* final int prime = 31;
* int result = 1;
* result = prime * result + counter;
* result = prime * result + (int) (timestamp ^ (timestamp >>> 32));
* return result;
* }
* }
* </pre></blockquote></p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface WritableComparable<T> extends Writable, Comparable<T> {
}
| 2,834 | 36.8 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IntWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for ints. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class IntWritable implements WritableComparable<IntWritable> {
private int value;
public IntWritable() {}
public IntWritable(int value) { set(value); }
/** Set the value of this IntWritable. */
public void set(int value) { this.value = value; }
/** Return the value of this IntWritable. */
public int get() { return value; }
@Override
public void readFields(DataInput in) throws IOException {
value = in.readInt();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(value);
}
/** Returns true iff <code>o</code> is a IntWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof IntWritable))
return false;
IntWritable other = (IntWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return value;
}
/** Compares two IntWritables. */
@Override
public int compareTo(IntWritable o) {
int thisValue = this.value;
int thatValue = o.value;
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
@Override
public String toString() {
return Integer.toString(value);
}
/** A Comparator optimized for IntWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(IntWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int thisValue = readInt(b1, s1);
int thatValue = readInt(b2, s2);
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
}
static { // register this comparator
WritableComparator.define(IntWritable.class, new Comparator());
}
}
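/*
 * A minimal usage sketch of IntWritable and its raw-byte Comparator (an illustration
 * added for this edit, not part of the Hadoop source).
 */
class IntWritableExample {
  static void demo() throws IOException {
    IntWritable a = new IntWritable(7);
    IntWritable b = new IntWritable(42);
    int order = a.compareTo(b);            // negative, since 7 < 42
    // The registered comparator sorts serialized keys without deserializing them.
    java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
    java.io.DataOutputStream out = new java.io.DataOutputStream(buf);
    a.write(out);
    b.write(out);
    byte[] raw = buf.toByteArray();        // two 4-byte big-endian ints
    int rawOrder = new IntWritable.Comparator().compare(raw, 0, 4, raw, 4, 4);  // negative
  }
}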
| 2,935 | 27.784314 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.bloom.DynamicBloomFilter;
import org.apache.hadoop.util.bloom.Filter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;
/**
* This class extends {@link MapFile} and provides very much the same
* functionality. However, it uses dynamic Bloom filters to provide
* quick membership tests for keys, and it offers a fast version of the
* {@link Reader#get(WritableComparable, Writable)} operation, especially in
* the case of sparsely populated MapFiles.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BloomMapFile {
private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
public static final String BLOOM_FILE_NAME = "bloom";
public static final int HASH_COUNT = 5;
public static void delete(FileSystem fs, String name) throws IOException {
Path dir = new Path(name);
Path data = new Path(dir, MapFile.DATA_FILE_NAME);
Path index = new Path(dir, MapFile.INDEX_FILE_NAME);
Path bloom = new Path(dir, BLOOM_FILE_NAME);
fs.delete(data, true);
fs.delete(index, true);
fs.delete(bloom, true);
fs.delete(dir, true);
}
private static byte[] byteArrayForBloomKey(DataOutputBuffer buf) {
int cleanLength = buf.getLength();
byte [] ba = buf.getData();
if (cleanLength != ba.length) {
ba = new byte[cleanLength];
System.arraycopy(buf.getData(), 0, ba, 0, cleanLength);
}
return ba;
}
public static class Writer extends MapFile.Writer {
private DynamicBloomFilter bloomFilter;
private int numKeys;
private int vectorSize;
private Key bloomKey = new Key();
private DataOutputBuffer buf = new DataOutputBuffer();
private FileSystem fs;
private Path dir;
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass,
Class<? extends Writable> valClass, CompressionType compress,
CompressionCodec codec, Progressable progress) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
compression(compress, codec), progressable(progress));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass,
Class valClass, CompressionType compress,
Progressable progress) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
compression(compress), progressable(progress));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass,
Class valClass, CompressionType compress)
throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
compression(compress));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass,
CompressionType compress, CompressionCodec codec, Progressable progress)
throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress, codec),
progressable(progress));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass,
CompressionType compress, Progressable progress) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress),
progressable(progress));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass, CompressionType compress)
throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass));
}
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass,
Class valClass) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass));
}
public Writer(Configuration conf, Path dir,
SequenceFile.Writer.Option... options) throws IOException {
super(conf, dir, options);
this.fs = dir.getFileSystem(conf);
this.dir = dir;
initBloomFilter(conf);
}
private synchronized void initBloomFilter(Configuration conf) {
numKeys = conf.getInt("io.mapfile.bloom.size", 1024 * 1024);
// vector size should be <code>-kn / (ln(1 - c^(1/k)))</code> bits for
// single key, where <code>k</code> is the number of hash functions,
// <code>n</code> is the number of keys and <code>c</code> is the desired
// max. error rate.
// Our desired error rate is by default 0.005, i.e. 0.5%
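// Worked example (approximate figures added for illustration): with the defaults
// numKeys = 1,048,576, errorRate = 0.005 and HASH_COUNT = 5, c^(1/k) is about
// 0.347, so vectorSize comes out to roughly 5 * 1,048,576 / -ln(1 - 0.347),
// i.e. about 12.3 million bits (~1.5 MB) of Bloom filter state.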
float errorRate = conf.getFloat("io.mapfile.bloom.error.rate", 0.005f);
vectorSize = (int)Math.ceil((double)(-HASH_COUNT * numKeys) /
Math.log(1.0 - Math.pow(errorRate, 1.0/HASH_COUNT)));
bloomFilter = new DynamicBloomFilter(vectorSize, HASH_COUNT,
Hash.getHashType(conf), numKeys);
}
@Override
public synchronized void append(WritableComparable key, Writable val)
throws IOException {
super.append(key, val);
buf.reset();
key.write(buf);
bloomKey.set(byteArrayForBloomKey(buf), 1.0);
bloomFilter.add(bloomKey);
}
@Override
public synchronized void close() throws IOException {
super.close();
DataOutputStream out = fs.create(new Path(dir, BLOOM_FILE_NAME), true);
try {
bloomFilter.write(out);
out.flush();
out.close();
out = null;
} finally {
IOUtils.closeStream(out);
}
}
}
public static class Reader extends MapFile.Reader {
private DynamicBloomFilter bloomFilter;
private DataOutputBuffer buf = new DataOutputBuffer();
private Key bloomKey = new Key();
public Reader(Path dir, Configuration conf,
SequenceFile.Reader.Option... options) throws IOException {
super(dir, conf, options);
initBloomFilter(dir, conf);
}
@Deprecated
public Reader(FileSystem fs, String dirName, Configuration conf)
throws IOException {
this(new Path(dirName), conf);
}
@Deprecated
public Reader(FileSystem fs, String dirName, WritableComparator comparator,
Configuration conf, boolean open) throws IOException {
this(new Path(dirName), conf, comparator(comparator));
}
@Deprecated
public Reader(FileSystem fs, String dirName, WritableComparator comparator,
Configuration conf) throws IOException {
this(new Path(dirName), conf, comparator(comparator));
}
private void initBloomFilter(Path dirName,
Configuration conf) {
DataInputStream in = null;
try {
FileSystem fs = dirName.getFileSystem(conf);
in = fs.open(new Path(dirName, BLOOM_FILE_NAME));
bloomFilter = new DynamicBloomFilter();
bloomFilter.readFields(in);
in.close();
in = null;
} catch (IOException ioe) {
LOG.warn("Can't open BloomFilter: " + ioe + " - fallback to MapFile.");
bloomFilter = null;
} finally {
IOUtils.closeStream(in);
}
}
/**
* Checks if this MapFile has the indicated key. The membership test is
* performed using a Bloom filter, so the result always has a non-zero
* probability of false positives.
* @param key key to check
* @return false iff key doesn't exist, true if key probably exists.
* @throws IOException
*/
public boolean probablyHasKey(WritableComparable key) throws IOException {
if (bloomFilter == null) {
return true;
}
buf.reset();
key.write(buf);
bloomKey.set(byteArrayForBloomKey(buf), 1.0);
return bloomFilter.membershipTest(bloomKey);
}
/**
* Fast version of the
* {@link MapFile.Reader#get(WritableComparable, Writable)} method. First
* it checks the Bloom filter for the existence of the key, and only if
* present does it perform the real get operation. This yields significant
* performance improvements for get operations on sparsely populated files.
*/
@Override
public synchronized Writable get(WritableComparable key, Writable val)
throws IOException {
if (!probablyHasKey(key)) {
return null;
}
return super.get(key, val);
}
/**
* Retrieve the Bloom filter used by this instance of the Reader.
* @return a Bloom filter (see {@link Filter})
*/
public Filter getBloomFilter() {
return bloomFilter;
}
}
}
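/*
 * A minimal usage sketch of BloomMapFile (an illustration added for this edit, not
 * part of the Hadoop source); the directory path and the key/value choices are
 * arbitrary, keys must be appended in sorted order as with any MapFile, and the
 * MapFile.Writer.keyClass()/valueClass() option helpers are assumed available from
 * the MapFile superclass.
 */
class BloomMapFileExample {
  static void demo(Configuration conf) throws IOException {
    Path dir = new Path("/tmp/bloom-demo");
    BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, dir,
        MapFile.Writer.keyClass(Text.class),
        MapFile.Writer.valueClass(IntWritable.class));
    writer.append(new Text("alpha"), new IntWritable(1));
    writer.append(new Text("beta"), new IntWritable(2));
    writer.close();                                   // also writes the "bloom" side file
    BloomMapFile.Reader reader = new BloomMapFile.Reader(dir, conf);
    IntWritable value = new IntWritable();
    Writable hit = reader.get(new Text("alpha"), value);       // non-null; value is filled in
    boolean maybe = reader.probablyHasKey(new Text("gamma"));  // a false return means definitely absent
    reader.close();
  }
}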
| 10,686 | 35.599315 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.util.*;
import java.rmi.server.UID;
import java.security.MessageDigest;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.MergeSort;
import org.apache.hadoop.util.PriorityQueue;
import org.apache.hadoop.util.Time;
/**
* <code>SequenceFile</code>s are flat files consisting of binary key/value
* pairs.
*
* <p><code>SequenceFile</code> provides {@link SequenceFile.Writer},
* {@link SequenceFile.Reader} and {@link Sorter} classes for writing,
* reading and sorting respectively.</p>
*
* There are three <code>SequenceFile</code> <code>Writer</code>s based on the
* {@link CompressionType} used to compress key/value pairs:
* <ol>
* <li>
* <code>Writer</code> : Uncompressed records.
* </li>
* <li>
* <code>RecordCompressWriter</code> : Record-compressed files, only compress
* values.
* </li>
* <li>
* <code>BlockCompressWriter</code> : Block-compressed files, both keys &
* values are collected in 'blocks'
* separately and compressed. The size of
* the 'block' is configurable.
* </ol>
*
* <p>The actual compression algorithm used to compress keys and/or values can be
* specified by using the appropriate {@link CompressionCodec}.</p>
*
* <p>The recommended way is to use the static <tt>createWriter</tt> methods
* provided by the <code>SequenceFile</code> to choose the preferred format.</p>
*
* <p>The {@link SequenceFile.Reader} acts as the bridge and can read any of the
* above <code>SequenceFile</code> formats.</p>
*
* <h4 id="Formats">SequenceFile Formats</h4>
*
* <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
* depending on the <code>CompressionType</code> specified. All of them share a
* <a href="#Header">common header</a> described below.
*
* <h5 id="Header">SequenceFile Header</h5>
* <ul>
* <li>
* version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
* version number (e.g. SEQ4 or SEQ6)
* </li>
* <li>
* keyClassName -key class
* </li>
* <li>
* valueClassName - value class
* </li>
* <li>
* compression - A boolean which specifies if compression is turned on for
* keys/values in this file.
* </li>
* <li>
* blockCompression - A boolean which specifies if block-compression is
* turned on for keys/values in this file.
* </li>
* <li>
* compression codec - <code>CompressionCodec</code> class which is used for
* compression of keys and/or values (if compression is
* enabled).
* </li>
* <li>
* metadata - {@link Metadata} for this file.
* </li>
* <li>
* sync - A sync marker to denote end of the header.
* </li>
* </ul>
*
* <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
* <ul>
* <li>
* <a href="#Header">Header</a>
* </li>
* <li>
* Record
* <ul>
* <li>Record length</li>
* <li>Key length</li>
* <li>Key</li>
* <li>Value</li>
* </ul>
* </li>
* <li>
* A sync-marker every few <code>100</code> bytes or so.
* </li>
* </ul>
*
* <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
* <ul>
* <li>
* <a href="#Header">Header</a>
* </li>
* <li>
* Record
* <ul>
* <li>Record length</li>
* <li>Key length</li>
* <li>Key</li>
* <li><i>Compressed</i> Value</li>
* </ul>
* </li>
* <li>
* A sync-marker every few <code>100</code> bytes or so.
* </li>
* </ul>
*
* <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
* <ul>
* <li>
* <a href="#Header">Header</a>
* </li>
* <li>
* Record <i>Block</i>
* <ul>
* <li>Uncompressed number of records in the block</li>
* <li>Compressed key-lengths block-size</li>
* <li>Compressed key-lengths block</li>
* <li>Compressed keys block-size</li>
* <li>Compressed keys block</li>
* <li>Compressed value-lengths block-size</li>
* <li>Compressed value-lengths block</li>
* <li>Compressed values block-size</li>
* <li>Compressed values block</li>
* </ul>
* </li>
* <li>
* A sync-marker every block.
* </li>
* </ul>
*
* <p>The compressed blocks of key lengths and value lengths consist of the
* actual lengths of individual keys/values encoded in ZeroCompressedInteger
* format.</p>
*
* @see CompressionCodec
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFile {
private static final Log LOG = LogFactory.getLog(SequenceFile.class);
private SequenceFile() {} // no public ctor
private static final byte BLOCK_COMPRESS_VERSION = (byte)4;
private static final byte CUSTOM_COMPRESS_VERSION = (byte)5;
private static final byte VERSION_WITH_METADATA = (byte)6;
private static byte[] VERSION = new byte[] {
(byte)'S', (byte)'E', (byte)'Q', VERSION_WITH_METADATA
};
private static final int SYNC_ESCAPE = -1; // "length" of sync entries
private static final int SYNC_HASH_SIZE = 16; // number of bytes in hash
private static final int SYNC_SIZE = 4+SYNC_HASH_SIZE; // escape + hash
/** The number of bytes between sync points.*/
public static final int SYNC_INTERVAL = 100*SYNC_SIZE;
/**
* The compression type used to compress key/value pairs in the
* {@link SequenceFile}.
*
* @see SequenceFile.Writer
*/
public static enum CompressionType {
/** Do not compress records. */
NONE,
/** Compress values only, each separately. */
RECORD,
/** Compress sequences of records together in blocks. */
BLOCK
}
/**
* Get the compression type for the reduce outputs
* @param job the job config to look in
* @return the kind of compression to use
*/
static public CompressionType getDefaultCompressionType(Configuration job) {
String name = job.get("io.seqfile.compression.type");
return name == null ? CompressionType.RECORD :
CompressionType.valueOf(name);
}
/**
* Set the default compression type for sequence files.
* @param job the configuration to modify
* @param val the new compression type (none, block, record)
*/
static public void setDefaultCompressionType(Configuration job,
CompressionType val) {
job.set("io.seqfile.compression.type", val.toString());
}
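/**
 * Illustrative sketch of the two helpers above (added for this edit, not part of
 * the Hadoop source): the stored value is the enum name, so it reads back with
 * <code>CompressionType.valueOf</code>.
 */
private static CompressionType exampleDefaultCompressionUsage(Configuration conf) {
  setDefaultCompressionType(conf, CompressionType.BLOCK);  // stores "BLOCK"
  return getDefaultCompressionType(conf);                  // returns CompressionType.BLOCK
}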
/**
* Create a new Writer with the given options.
* @param conf the configuration to use
* @param opts the options to create the file with
* @return a new Writer
* @throws IOException
*/
public static Writer createWriter(Configuration conf, Writer.Option... opts
) throws IOException {
Writer.CompressionOption compressionOption =
Options.getOption(Writer.CompressionOption.class, opts);
CompressionType kind;
if (compressionOption != null) {
kind = compressionOption.getValue();
} else {
kind = getDefaultCompressionType(conf);
opts = Options.prependOptions(opts, Writer.compression(kind));
}
switch (kind) {
default:
case NONE:
return new Writer(conf, opts);
case RECORD:
return new RecordCompressWriter(conf, opts);
case BLOCK:
return new BlockCompressWriter(conf, opts);
}
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass) throws IOException {
return createWriter(conf, Writer.filesystem(fs),
Writer.file(name), Writer.keyClass(keyClass),
Writer.valueClass(valClass));
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
CompressionType compressionType) throws IOException {
return createWriter(conf, Writer.filesystem(fs),
Writer.file(name), Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType));
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param progress The Progressable object to track progress.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass, CompressionType compressionType,
Progressable progress) throws IOException {
return createWriter(conf, Writer.file(name),
Writer.filesystem(fs),
Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType),
Writer.progressable(progress));
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param codec The compression codec.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass, CompressionType compressionType,
CompressionCodec codec) throws IOException {
return createWriter(conf, Writer.file(name),
Writer.filesystem(fs),
Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType, codec));
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param codec The compression codec.
* @param progress The Progressable object to track progress.
* @param metadata The metadata of the file.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
CompressionType compressionType, CompressionCodec codec,
Progressable progress, Metadata metadata) throws IOException {
return createWriter(conf, Writer.file(name),
Writer.filesystem(fs),
Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType, codec),
Writer.progressable(progress),
Writer.metadata(metadata));
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
   * @param bufferSize buffer size for the underlying output stream.
* @param replication replication factor for the file.
* @param blockSize block size for the file.
* @param compressionType The compression type.
* @param codec The compression codec.
* @param progress The Progressable object to track progress.
* @param metadata The metadata of the file.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass, int bufferSize,
short replication, long blockSize,
CompressionType compressionType, CompressionCodec codec,
Progressable progress, Metadata metadata) throws IOException {
return createWriter(conf, Writer.file(name),
Writer.filesystem(fs),
Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.bufferSize(bufferSize),
Writer.replication(replication),
Writer.blockSize(blockSize),
Writer.compression(compressionType, codec),
Writer.progressable(progress),
Writer.metadata(metadata));
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
   * @param bufferSize buffer size for the underlying output stream.
* @param replication replication factor for the file.
* @param blockSize block size for the file.
* @param createParent create parent directory if non-existent
* @param compressionType The compression type.
* @param codec The compression codec.
* @param metadata The metadata of the file.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass, int bufferSize,
short replication, long blockSize, boolean createParent,
CompressionType compressionType, CompressionCodec codec,
Metadata metadata) throws IOException {
return createWriter(FileContext.getFileContext(fs.getUri(), conf),
conf, name, keyClass, valClass, compressionType, codec,
metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
CreateOpts.bufferSize(bufferSize),
createParent ? CreateOpts.createParent()
: CreateOpts.donotCreateParent(),
CreateOpts.repFac(replication),
CreateOpts.blockSize(blockSize)
);
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fc The context for the specified file.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param codec The compression codec.
* @param metadata The metadata of the file.
* @param createFlag gives the semantics of create: overwrite, append etc.
* @param opts file creation options; see {@link CreateOpts}.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
*/
public static Writer
createWriter(FileContext fc, Configuration conf, Path name,
Class keyClass, Class valClass,
CompressionType compressionType, CompressionCodec codec,
Metadata metadata,
final EnumSet<CreateFlag> createFlag, CreateOpts... opts)
throws IOException {
return createWriter(conf, fc.create(name, createFlag, opts),
keyClass, valClass, compressionType, codec, metadata).ownStream();
}
/**
* Construct the preferred type of SequenceFile Writer.
* @param fs The configured filesystem.
* @param conf The configuration.
* @param name The name of the file.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param codec The compression codec.
* @param progress The Progressable object to track progress.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
CompressionType compressionType, CompressionCodec codec,
Progressable progress) throws IOException {
return createWriter(conf, Writer.file(name),
Writer.filesystem(fs),
Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType, codec),
Writer.progressable(progress));
}
/**
* Construct the preferred type of 'raw' SequenceFile Writer.
* @param conf The configuration.
   * @param out The stream on top of which the writer is to be constructed.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param codec The compression codec.
* @param metadata The metadata of the file.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(Configuration conf, FSDataOutputStream out,
Class keyClass, Class valClass,
CompressionType compressionType,
CompressionCodec codec, Metadata metadata) throws IOException {
return createWriter(conf, Writer.stream(out), Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType, codec),
Writer.metadata(metadata));
}
/**
* Construct the preferred type of 'raw' SequenceFile Writer.
* @param conf The configuration.
   * @param out The stream on top of which the writer is to be constructed.
* @param keyClass The 'key' type.
* @param valClass The 'value' type.
* @param compressionType The compression type.
* @param codec The compression codec.
* @return Returns the handle to the constructed SequenceFile Writer.
* @throws IOException
* @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public static Writer
createWriter(Configuration conf, FSDataOutputStream out,
Class keyClass, Class valClass, CompressionType compressionType,
CompressionCodec codec) throws IOException {
return createWriter(conf, Writer.stream(out), Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compressionType, codec));
}
/** The interface to 'raw' values of SequenceFiles. */
public static interface ValueBytes {
/** Writes the uncompressed bytes to the outStream.
* @param outStream : Stream to write uncompressed bytes into.
* @throws IOException
*/
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException;
/** Write compressed bytes to outStream.
     * Note that it will NOT compress the bytes if they are not already compressed.
* @param outStream : Stream to write compressed bytes into.
*/
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException;
/**
* Size of stored data.
*/
public int getSize();
}
private static class UncompressedBytes implements ValueBytes {
private int dataSize;
private byte[] data;
private UncompressedBytes() {
data = null;
dataSize = 0;
}
private void reset(DataInputStream in, int length) throws IOException {
if (data == null) {
data = new byte[length];
} else if (length > data.length) {
data = new byte[Math.max(length, data.length * 2)];
}
dataSize = -1;
in.readFully(data, 0, length);
dataSize = length;
}
@Override
public int getSize() {
return dataSize;
}
@Override
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
outStream.write(data, 0, dataSize);
}
@Override
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
throw
new IllegalArgumentException("UncompressedBytes cannot be compressed!");
}
} // UncompressedBytes
private static class CompressedBytes implements ValueBytes {
private int dataSize;
private byte[] data;
DataInputBuffer rawData = null;
CompressionCodec codec = null;
CompressionInputStream decompressedStream = null;
private CompressedBytes(CompressionCodec codec) {
data = null;
dataSize = 0;
this.codec = codec;
}
private void reset(DataInputStream in, int length) throws IOException {
if (data == null) {
data = new byte[length];
} else if (length > data.length) {
data = new byte[Math.max(length, data.length * 2)];
}
dataSize = -1;
in.readFully(data, 0, length);
dataSize = length;
}
@Override
public int getSize() {
return dataSize;
}
@Override
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
if (decompressedStream == null) {
rawData = new DataInputBuffer();
decompressedStream = codec.createInputStream(rawData);
} else {
decompressedStream.resetState();
}
rawData.reset(data, 0, dataSize);
byte[] buffer = new byte[8192];
int bytesRead = 0;
while ((bytesRead = decompressedStream.read(buffer, 0, 8192)) != -1) {
outStream.write(buffer, 0, bytesRead);
}
}
@Override
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
outStream.write(data, 0, dataSize);
}
} // CompressedBytes
/**
   * The class encapsulating the metadata of a file.
* The metadata of a file is a list of attribute name/value
* pairs of Text type.
*
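   * <p>A minimal sketch (the attribute name and value below are hypothetical):</p>
   * <pre>{@code
   * SequenceFile.Metadata meta = new SequenceFile.Metadata();
   * meta.set(new Text("created.by"), new Text("example-job"));
   * // attach it to a new file via SequenceFile.Writer.metadata(meta)
   * }</pre>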
*/
public static class Metadata implements Writable {
private TreeMap<Text, Text> theMetadata;
public Metadata() {
this(new TreeMap<Text, Text>());
}
public Metadata(TreeMap<Text, Text> arg) {
if (arg == null) {
this.theMetadata = new TreeMap<Text, Text>();
} else {
this.theMetadata = arg;
}
}
public Text get(Text name) {
return this.theMetadata.get(name);
}
public void set(Text name, Text value) {
this.theMetadata.put(name, value);
}
public TreeMap<Text, Text> getMetadata() {
return new TreeMap<Text, Text>(this.theMetadata);
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(this.theMetadata.size());
Iterator<Map.Entry<Text, Text>> iter =
this.theMetadata.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Text, Text> en = iter.next();
en.getKey().write(out);
en.getValue().write(out);
}
}
@Override
public void readFields(DataInput in) throws IOException {
int sz = in.readInt();
if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object");
this.theMetadata = new TreeMap<Text, Text>();
for (int i = 0; i < sz; i++) {
Text key = new Text();
Text val = new Text();
key.readFields(in);
val.readFields(in);
this.theMetadata.put(key, val);
}
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other.getClass() != this.getClass()) {
return false;
} else {
return equals((Metadata)other);
}
}
public boolean equals(Metadata other) {
if (other == null) return false;
if (this.theMetadata.size() != other.theMetadata.size()) {
return false;
}
Iterator<Map.Entry<Text, Text>> iter1 =
this.theMetadata.entrySet().iterator();
Iterator<Map.Entry<Text, Text>> iter2 =
other.theMetadata.entrySet().iterator();
while (iter1.hasNext() && iter2.hasNext()) {
Map.Entry<Text, Text> en1 = iter1.next();
Map.Entry<Text, Text> en2 = iter2.next();
if (!en1.getKey().equals(en2.getKey())) {
return false;
}
if (!en1.getValue().equals(en2.getValue())) {
return false;
}
}
if (iter1.hasNext() || iter2.hasNext()) {
return false;
}
return true;
}
@Override
public int hashCode() {
assert false : "hashCode not designed";
return 42; // any arbitrary constant will do
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("size: ").append(this.theMetadata.size()).append("\n");
Iterator<Map.Entry<Text, Text>> iter =
this.theMetadata.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Text, Text> en = iter.next();
sb.append("\t").append(en.getKey().toString()).append("\t").append(en.getValue().toString());
sb.append("\n");
}
return sb.toString();
}
}
/** Write key/value pairs to a sequence-format file. */
public static class Writer implements java.io.Closeable, Syncable {
private Configuration conf;
FSDataOutputStream out;
boolean ownOutputStream = true;
DataOutputBuffer buffer = new DataOutputBuffer();
Class keyClass;
Class valClass;
private final CompressionType compress;
CompressionCodec codec = null;
CompressionOutputStream deflateFilter = null;
DataOutputStream deflateOut = null;
Metadata metadata = null;
Compressor compressor = null;
private boolean appendMode = false;
protected Serializer keySerializer;
protected Serializer uncompressedValSerializer;
protected Serializer compressedValSerializer;
// Insert a globally unique 16-byte value every few entries, so that one
// can seek into the middle of a file and then synchronize with record
// starts and ends by scanning for this value.
long lastSyncPos; // position of last sync
byte[] sync; // 16 random bytes
{
try {
MessageDigest digester = MessageDigest.getInstance("MD5");
long time = Time.now();
digester.update((new UID()+"@"+time).getBytes(Charsets.UTF_8));
sync = digester.digest();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public static interface Option {}
static class FileOption extends Options.PathOption
implements Option {
FileOption(Path path) {
super(path);
}
}
/**
* @deprecated only used for backwards-compatibility in the createWriter methods
* that take FileSystem.
*/
@Deprecated
private static class FileSystemOption implements Option {
private final FileSystem value;
protected FileSystemOption(FileSystem value) {
this.value = value;
}
public FileSystem getValue() {
return value;
}
}
static class StreamOption extends Options.FSDataOutputStreamOption
implements Option {
StreamOption(FSDataOutputStream stream) {
super(stream);
}
}
static class BufferSizeOption extends Options.IntegerOption
implements Option {
BufferSizeOption(int value) {
super(value);
}
}
static class BlockSizeOption extends Options.LongOption implements Option {
BlockSizeOption(long value) {
super(value);
}
}
static class ReplicationOption extends Options.IntegerOption
implements Option {
ReplicationOption(int value) {
super(value);
}
}
static class AppendIfExistsOption extends Options.BooleanOption implements
Option {
AppendIfExistsOption(boolean value) {
super(value);
}
}
static class KeyClassOption extends Options.ClassOption implements Option {
KeyClassOption(Class<?> value) {
super(value);
}
}
static class ValueClassOption extends Options.ClassOption
implements Option {
ValueClassOption(Class<?> value) {
super(value);
}
}
static class MetadataOption implements Option {
private final Metadata value;
MetadataOption(Metadata value) {
this.value = value;
}
Metadata getValue() {
return value;
}
}
static class ProgressableOption extends Options.ProgressableOption
implements Option {
ProgressableOption(Progressable value) {
super(value);
}
}
private static class CompressionOption implements Option {
private final CompressionType value;
private final CompressionCodec codec;
CompressionOption(CompressionType value) {
this(value, null);
}
CompressionOption(CompressionType value, CompressionCodec codec) {
this.value = value;
this.codec = (CompressionType.NONE != value && null == codec)
? new DefaultCodec()
: codec;
}
CompressionType getValue() {
return value;
}
CompressionCodec getCodec() {
return codec;
}
}
public static Option file(Path value) {
return new FileOption(value);
}
/**
* @deprecated only used for backwards-compatibility in the createWriter methods
* that take FileSystem.
*/
@Deprecated
private static Option filesystem(FileSystem fs) {
return new SequenceFile.Writer.FileSystemOption(fs);
}
public static Option bufferSize(int value) {
return new BufferSizeOption(value);
}
public static Option stream(FSDataOutputStream value) {
return new StreamOption(value);
}
public static Option replication(short value) {
return new ReplicationOption(value);
}
public static Option appendIfExists(boolean value) {
return new AppendIfExistsOption(value);
}
public static Option blockSize(long value) {
return new BlockSizeOption(value);
}
public static Option progressable(Progressable value) {
return new ProgressableOption(value);
}
public static Option keyClass(Class<?> value) {
return new KeyClassOption(value);
}
public static Option valueClass(Class<?> value) {
return new ValueClassOption(value);
}
public static Option metadata(Metadata value) {
return new MetadataOption(value);
}
public static Option compression(CompressionType value) {
return new CompressionOption(value);
}
public static Option compression(CompressionType value,
CompressionCodec codec) {
return new CompressionOption(value, codec);
}
/**
     * Construct an uncompressed writer from a set of options.
     * @param conf the configuration to use
     * @param opts the options used when creating the writer
* @throws IOException if it fails
*/
Writer(Configuration conf,
Option... opts) throws IOException {
BlockSizeOption blockSizeOption =
Options.getOption(BlockSizeOption.class, opts);
BufferSizeOption bufferSizeOption =
Options.getOption(BufferSizeOption.class, opts);
ReplicationOption replicationOption =
Options.getOption(ReplicationOption.class, opts);
ProgressableOption progressOption =
Options.getOption(ProgressableOption.class, opts);
FileOption fileOption = Options.getOption(FileOption.class, opts);
AppendIfExistsOption appendIfExistsOption = Options.getOption(
AppendIfExistsOption.class, opts);
FileSystemOption fsOption = Options.getOption(FileSystemOption.class, opts);
StreamOption streamOption = Options.getOption(StreamOption.class, opts);
KeyClassOption keyClassOption =
Options.getOption(KeyClassOption.class, opts);
ValueClassOption valueClassOption =
Options.getOption(ValueClassOption.class, opts);
MetadataOption metadataOption =
Options.getOption(MetadataOption.class, opts);
CompressionOption compressionTypeOption =
Options.getOption(CompressionOption.class, opts);
// check consistency of options
if ((fileOption == null) == (streamOption == null)) {
throw new IllegalArgumentException("file or stream must be specified");
}
if (fileOption == null && (blockSizeOption != null ||
bufferSizeOption != null ||
replicationOption != null ||
progressOption != null)) {
throw new IllegalArgumentException("file modifier options not " +
"compatible with stream");
}
FSDataOutputStream out;
boolean ownStream = fileOption != null;
if (ownStream) {
Path p = fileOption.getValue();
FileSystem fs;
if (fsOption != null) {
fs = fsOption.getValue();
} else {
fs = p.getFileSystem(conf);
}
int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
bufferSizeOption.getValue();
short replication = replicationOption == null ?
fs.getDefaultReplication(p) :
(short) replicationOption.getValue();
long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
blockSizeOption.getValue();
Progressable progress = progressOption == null ? null :
progressOption.getValue();
if (appendIfExistsOption != null && appendIfExistsOption.getValue()
&& fs.exists(p)) {
// Read the file and verify header details
SequenceFile.Reader reader = new SequenceFile.Reader(conf,
SequenceFile.Reader.file(p), new Reader.OnlyHeaderOption());
try {
if (keyClassOption.getValue() != reader.getKeyClass()
|| valueClassOption.getValue() != reader.getValueClass()) {
throw new IllegalArgumentException(
"Key/value class provided does not match the file");
}
if (reader.getVersion() != VERSION[3]) {
throw new VersionMismatchException(VERSION[3],
reader.getVersion());
}
if (metadataOption != null) {
LOG.info("MetaData Option is ignored during append");
}
metadataOption = (MetadataOption) SequenceFile.Writer
.metadata(reader.getMetadata());
CompressionOption readerCompressionOption = new CompressionOption(
reader.getCompressionType(), reader.getCompressionCodec());
if (readerCompressionOption.value != compressionTypeOption.value
|| !readerCompressionOption.codec.getClass().getName()
.equals(compressionTypeOption.codec.getClass().getName())) {
throw new IllegalArgumentException(
"Compression option provided does not match the file");
}
sync = reader.getSync();
} finally {
reader.close();
}
out = fs.append(p, bufferSize, progress);
this.appendMode = true;
} else {
out = fs
.create(p, true, bufferSize, replication, blockSize, progress);
}
} else {
out = streamOption.getValue();
}
Class<?> keyClass = keyClassOption == null ?
Object.class : keyClassOption.getValue();
Class<?> valueClass = valueClassOption == null ?
Object.class : valueClassOption.getValue();
Metadata metadata = metadataOption == null ?
new Metadata() : metadataOption.getValue();
this.compress = compressionTypeOption.getValue();
final CompressionCodec codec = compressionTypeOption.getCodec();
if (codec != null &&
(codec instanceof GzipCodec) &&
!NativeCodeLoader.isNativeCodeLoaded() &&
!ZlibFactory.isNativeZlibLoaded(conf)) {
throw new IllegalArgumentException("SequenceFile doesn't work with " +
"GzipCodec without native-hadoop " +
"code!");
}
init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
}
/** Create the named file.
* @deprecated Use
* {@link SequenceFile#createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass) throws IOException {
this.compress = CompressionType.NONE;
init(conf, fs.create(name), true, keyClass, valClass, null,
new Metadata());
}
/** Create the named file with write-progress reporter.
* @deprecated Use
* {@link SequenceFile#createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
Progressable progress, Metadata metadata) throws IOException {
this.compress = CompressionType.NONE;
init(conf, fs.create(name, progress), true, keyClass, valClass,
null, metadata);
}
/** Create the named file with write-progress reporter.
* @deprecated Use
* {@link SequenceFile#createWriter(Configuration, Writer.Option...)}
* instead.
*/
@Deprecated
public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass,
int bufferSize, short replication, long blockSize,
Progressable progress, Metadata metadata) throws IOException {
this.compress = CompressionType.NONE;
init(conf,
fs.create(name, true, bufferSize, replication, blockSize, progress),
true, keyClass, valClass, null, metadata);
}
boolean isCompressed() { return compress != CompressionType.NONE; }
boolean isBlockCompressed() { return compress == CompressionType.BLOCK; }
Writer ownStream() { this.ownOutputStream = true; return this; }
/** Write and flush the file header. */
private void writeFileHeader()
throws IOException {
out.write(VERSION);
Text.writeString(out, keyClass.getName());
Text.writeString(out, valClass.getName());
out.writeBoolean(this.isCompressed());
out.writeBoolean(this.isBlockCompressed());
if (this.isCompressed()) {
Text.writeString(out, (codec.getClass()).getName());
}
this.metadata.write(out);
out.write(sync); // write the sync bytes
out.flush(); // flush header
}
/** Initialize. */
@SuppressWarnings("unchecked")
void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
Class keyClass, Class valClass,
CompressionCodec codec, Metadata metadata)
throws IOException {
this.conf = conf;
this.out = out;
this.ownOutputStream = ownStream;
this.keyClass = keyClass;
this.valClass = valClass;
this.codec = codec;
this.metadata = metadata;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keySerializer = serializationFactory.getSerializer(keyClass);
if (this.keySerializer == null) {
throw new IOException(
"Could not find a serializer for the Key class: '"
+ keyClass.getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using"
+ "custom serialization.");
}
this.keySerializer.open(buffer);
this.uncompressedValSerializer = serializationFactory.getSerializer(valClass);
if (this.uncompressedValSerializer == null) {
throw new IOException(
"Could not find a serializer for the Value class: '"
+ valClass.getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using"
+ "custom serialization.");
}
this.uncompressedValSerializer.open(buffer);
if (this.codec != null) {
ReflectionUtils.setConf(this.codec, this.conf);
this.compressor = CodecPool.getCompressor(this.codec);
this.deflateFilter = this.codec.createOutputStream(buffer, compressor);
this.deflateOut =
new DataOutputStream(new BufferedOutputStream(deflateFilter));
this.compressedValSerializer = serializationFactory.getSerializer(valClass);
if (this.compressedValSerializer == null) {
throw new IOException(
"Could not find a serializer for the Value class: '"
+ valClass.getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using"
+ "custom serialization.");
}
this.compressedValSerializer.open(deflateOut);
}
if (appendMode) {
sync();
} else {
writeFileHeader();
}
}
/** Returns the class of keys in this file. */
public Class getKeyClass() { return keyClass; }
/** Returns the class of values in this file. */
public Class getValueClass() { return valClass; }
/** Returns the compression codec of data in this file. */
public CompressionCodec getCompressionCodec() { return codec; }
/** create a sync point */
public void sync() throws IOException {
if (sync != null && lastSyncPos != out.getPos()) {
out.writeInt(SYNC_ESCAPE); // mark the start of the sync
out.write(sync); // write sync
lastSyncPos = out.getPos(); // update lastSyncPos
}
}
/**
* flush all currently written data to the file system
* @deprecated Use {@link #hsync()} or {@link #hflush()} instead
*/
@Deprecated
public void syncFs() throws IOException {
if (out != null) {
out.sync(); // flush contents to file system
}
}
@Override
public void hsync() throws IOException {
if (out != null) {
out.hsync();
}
}
@Override
public void hflush() throws IOException {
if (out != null) {
out.hflush();
}
}
/** Returns the configuration of this file. */
Configuration getConf() { return conf; }
/** Close the file. */
@Override
public synchronized void close() throws IOException {
keySerializer.close();
uncompressedValSerializer.close();
if (compressedValSerializer != null) {
compressedValSerializer.close();
}
CodecPool.returnCompressor(compressor);
compressor = null;
if (out != null) {
// Close the underlying stream iff we own it...
if (ownOutputStream) {
out.close();
} else {
out.flush();
}
out = null;
}
}
synchronized void checkAndWriteSync() throws IOException {
if (sync != null &&
out.getPos() >= lastSyncPos+SYNC_INTERVAL) { // time to emit sync
sync();
}
}
/** Append a key/value pair. */
public void append(Writable key, Writable val)
throws IOException {
append((Object) key, (Object) val);
}
/** Append a key/value pair. */
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
if (key.getClass() != keyClass)
throw new IOException("wrong key class: "+key.getClass().getName()
+" is not "+keyClass);
if (val.getClass() != valClass)
throw new IOException("wrong value class: "+val.getClass().getName()
+" is not "+valClass);
buffer.reset();
// Append the 'key'
keySerializer.serialize(key);
int keyLength = buffer.getLength();
if (keyLength < 0)
throw new IOException("negative length keys not allowed: " + key);
// Append the 'value'
if (compress == CompressionType.RECORD) {
deflateFilter.resetState();
compressedValSerializer.serialize(val);
deflateOut.flush();
deflateFilter.finish();
} else {
uncompressedValSerializer.serialize(val);
}
// Write the record out
checkAndWriteSync(); // sync
out.writeInt(buffer.getLength()); // total record length
out.writeInt(keyLength); // key portion length
out.write(buffer.getData(), 0, buffer.getLength()); // data
}
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
if (keyLength < 0)
throw new IOException("negative length keys not allowed: " + keyLength);
int valLength = val.getSize();
checkAndWriteSync();
out.writeInt(keyLength+valLength); // total record length
out.writeInt(keyLength); // key portion length
out.write(keyData, keyOffset, keyLength); // key
val.writeUncompressedBytes(out); // value
}
/** Returns the current length of the output file.
*
* <p>This always returns a synchronized position. In other words,
* immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
     * returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called.  However,
     * the key may be earlier in the file than the key last written when this
* method was called (e.g., with block-compression, it may be the first key
* in the block that was being written when this method was called).
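     *
     * <p>A minimal sketch (the writer, reader, key and value are assumed to
     * be set up elsewhere over the same file):</p>
     * <pre>{@code
     * long pos = writer.getLength();   // a sync'd position
     * // ... later, in a reader ...
     * reader.seek(pos);
     * reader.next(key, value);
     * }</pre>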
*/
public synchronized long getLength() throws IOException {
return out.getPos();
}
} // class Writer
/** Write key/compressed-value pairs to a sequence-format file. */
static class RecordCompressWriter extends Writer {
RecordCompressWriter(Configuration conf,
Option... options) throws IOException {
super(conf, options);
}
/** Append a key/value pair. */
@Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
if (key.getClass() != keyClass)
throw new IOException("wrong key class: "+key.getClass().getName()
+" is not "+keyClass);
if (val.getClass() != valClass)
throw new IOException("wrong value class: "+val.getClass().getName()
+" is not "+valClass);
buffer.reset();
// Append the 'key'
keySerializer.serialize(key);
int keyLength = buffer.getLength();
if (keyLength < 0)
throw new IOException("negative length keys not allowed: " + key);
// Compress 'value' and append it
deflateFilter.resetState();
compressedValSerializer.serialize(val);
deflateOut.flush();
deflateFilter.finish();
// Write the record out
checkAndWriteSync(); // sync
out.writeInt(buffer.getLength()); // total record length
out.writeInt(keyLength); // key portion length
out.write(buffer.getData(), 0, buffer.getLength()); // data
}
/** Append a key/value pair. */
@Override
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
if (keyLength < 0)
throw new IOException("negative length keys not allowed: " + keyLength);
int valLength = val.getSize();
checkAndWriteSync(); // sync
out.writeInt(keyLength+valLength); // total record length
out.writeInt(keyLength); // key portion length
out.write(keyData, keyOffset, keyLength); // 'key' data
val.writeCompressedBytes(out); // 'value' data
}
} // RecordCompressionWriter
/** Write compressed key/value blocks to a sequence-format file. */
static class BlockCompressWriter extends Writer {
private int noBufferedRecords = 0;
private DataOutputBuffer keyLenBuffer = new DataOutputBuffer();
private DataOutputBuffer keyBuffer = new DataOutputBuffer();
private DataOutputBuffer valLenBuffer = new DataOutputBuffer();
private DataOutputBuffer valBuffer = new DataOutputBuffer();
private final int compressionBlockSize;
BlockCompressWriter(Configuration conf,
Option... options) throws IOException {
super(conf, options);
compressionBlockSize =
conf.getInt("io.seqfile.compress.blocksize", 1000000);
keySerializer.close();
keySerializer.open(keyBuffer);
uncompressedValSerializer.close();
uncompressedValSerializer.open(valBuffer);
}
/** Workhorse to check and write out compressed data/lengths */
private synchronized
void writeBuffer(DataOutputBuffer uncompressedDataBuffer)
throws IOException {
deflateFilter.resetState();
buffer.reset();
deflateOut.write(uncompressedDataBuffer.getData(), 0,
uncompressedDataBuffer.getLength());
deflateOut.flush();
deflateFilter.finish();
WritableUtils.writeVInt(out, buffer.getLength());
out.write(buffer.getData(), 0, buffer.getLength());
}
/** Compress and flush contents to dfs */
@Override
public synchronized void sync() throws IOException {
if (noBufferedRecords > 0) {
super.sync();
// No. of records
WritableUtils.writeVInt(out, noBufferedRecords);
// Write 'keys' and lengths
writeBuffer(keyLenBuffer);
writeBuffer(keyBuffer);
// Write 'values' and lengths
writeBuffer(valLenBuffer);
writeBuffer(valBuffer);
// Flush the file-stream
out.flush();
// Reset internal states
keyLenBuffer.reset();
keyBuffer.reset();
valLenBuffer.reset();
valBuffer.reset();
noBufferedRecords = 0;
}
}
/** Close the file. */
@Override
public synchronized void close() throws IOException {
if (out != null) {
sync();
}
super.close();
}
/** Append a key/value pair. */
@Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
throws IOException {
if (key.getClass() != keyClass)
throw new IOException("wrong key class: "+key+" is not "+keyClass);
if (val.getClass() != valClass)
throw new IOException("wrong value class: "+val+" is not "+valClass);
// Save key/value into respective buffers
int oldKeyLength = keyBuffer.getLength();
keySerializer.serialize(key);
int keyLength = keyBuffer.getLength() - oldKeyLength;
if (keyLength < 0)
throw new IOException("negative length keys not allowed: " + key);
WritableUtils.writeVInt(keyLenBuffer, keyLength);
int oldValLength = valBuffer.getLength();
uncompressedValSerializer.serialize(val);
int valLength = valBuffer.getLength() - oldValLength;
WritableUtils.writeVInt(valLenBuffer, valLength);
// Added another key/value pair
++noBufferedRecords;
// Compress and flush?
int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength();
if (currentBlockSize >= compressionBlockSize) {
sync();
}
}
/** Append a key/value pair. */
@Override
public synchronized void appendRaw(byte[] keyData, int keyOffset,
int keyLength, ValueBytes val) throws IOException {
if (keyLength < 0)
throw new IOException("negative length keys not allowed");
int valLength = val.getSize();
// Save key/value data in relevant buffers
WritableUtils.writeVInt(keyLenBuffer, keyLength);
keyBuffer.write(keyData, keyOffset, keyLength);
WritableUtils.writeVInt(valLenBuffer, valLength);
val.writeUncompressedBytes(valBuffer);
// Added another key/value pair
++noBufferedRecords;
// Compress and flush?
int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength();
if (currentBlockSize >= compressionBlockSize) {
sync();
}
}
} // BlockCompressionWriter
/** Get the configured buffer size */
private static int getBufferSize(Configuration conf) {
return conf.getInt("io.file.buffer.size", 4096);
}
/** Reads key/value pairs from a sequence-format file. */
public static class Reader implements java.io.Closeable {
private String filename;
private FSDataInputStream in;
private DataOutputBuffer outBuf = new DataOutputBuffer();
private byte version;
private String keyClassName;
private String valClassName;
private Class keyClass;
private Class valClass;
private CompressionCodec codec = null;
private Metadata metadata = null;
private byte[] sync = new byte[SYNC_HASH_SIZE];
private byte[] syncCheck = new byte[SYNC_HASH_SIZE];
private boolean syncSeen;
private long headerEnd;
private long end;
private int keyLength;
private int recordLength;
private boolean decompress;
private boolean blockCompressed;
private Configuration conf;
private int noBufferedRecords = 0;
private boolean lazyDecompress = true;
private boolean valuesDecompressed = true;
private int noBufferedKeys = 0;
private int noBufferedValues = 0;
private DataInputBuffer keyLenBuffer = null;
private CompressionInputStream keyLenInFilter = null;
private DataInputStream keyLenIn = null;
private Decompressor keyLenDecompressor = null;
private DataInputBuffer keyBuffer = null;
private CompressionInputStream keyInFilter = null;
private DataInputStream keyIn = null;
private Decompressor keyDecompressor = null;
private DataInputBuffer valLenBuffer = null;
private CompressionInputStream valLenInFilter = null;
private DataInputStream valLenIn = null;
private Decompressor valLenDecompressor = null;
private DataInputBuffer valBuffer = null;
private CompressionInputStream valInFilter = null;
private DataInputStream valIn = null;
private Decompressor valDecompressor = null;
private Deserializer keyDeserializer;
private Deserializer valDeserializer;
/**
* A tag interface for all of the Reader options
*/
public static interface Option {}
/**
* Create an option to specify the path name of the sequence file.
* @param value the path to read
* @return a new option
*/
public static Option file(Path value) {
return new FileOption(value);
}
/**
* Create an option to specify the stream with the sequence file.
* @param value the stream to read.
* @return a new option
*/
public static Option stream(FSDataInputStream value) {
return new InputStreamOption(value);
}
/**
* Create an option to specify the starting byte to read.
* @param value the number of bytes to skip over
* @return a new option
*/
public static Option start(long value) {
return new StartOption(value);
}
/**
* Create an option to specify the number of bytes to read.
* @param value the number of bytes to read
* @return a new option
*/
public static Option length(long value) {
return new LengthOption(value);
}
/**
* Create an option with the buffer size for reading the given pathname.
* @param value the number of bytes to buffer
* @return a new option
*/
public static Option bufferSize(int value) {
return new BufferSizeOption(value);
}
private static class FileOption extends Options.PathOption
implements Option {
private FileOption(Path value) {
super(value);
}
}
private static class InputStreamOption
extends Options.FSDataInputStreamOption
implements Option {
private InputStreamOption(FSDataInputStream value) {
super(value);
}
}
private static class StartOption extends Options.LongOption
implements Option {
private StartOption(long value) {
super(value);
}
}
private static class LengthOption extends Options.LongOption
implements Option {
private LengthOption(long value) {
super(value);
}
}
private static class BufferSizeOption extends Options.IntegerOption
implements Option {
private BufferSizeOption(int value) {
super(value);
}
}
    // only used internally, when just the file header needs to be read
private static class OnlyHeaderOption extends Options.BooleanOption
implements Option {
private OnlyHeaderOption() {
super(true);
}
}
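    /**
     * Construct a reader from a set of {@link Option}s, such as
     * {@link #file(Path)}, {@link #stream(FSDataInputStream)},
     * {@link #start(long)} and {@link #length(long)}.
     *
     * <p>Illustrative sketch only (the path and the key/value classes are
     * arbitrary examples, and imports are omitted):</p>
     * <pre>{@code
     * SequenceFile.Reader reader = new SequenceFile.Reader(conf,
     *     SequenceFile.Reader.file(new Path("/tmp/example.seq")));
     * try {
     *   LongWritable key = new LongWritable();
     *   Text value = new Text();
     *   while (reader.next(key, value)) {
     *     // process key/value
     *   }
     * } finally {
     *   reader.close();
     * }
     * }</pre>
     *
     * @param conf the configuration to use
     * @param opts the options describing the file or stream to read
     * @throws IOException if the file cannot be opened or is not a valid
     *                     sequence file
     */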
public Reader(Configuration conf, Option... opts) throws IOException {
// Look up the options, these are null if not set
FileOption fileOpt = Options.getOption(FileOption.class, opts);
InputStreamOption streamOpt =
Options.getOption(InputStreamOption.class, opts);
StartOption startOpt = Options.getOption(StartOption.class, opts);
LengthOption lenOpt = Options.getOption(LengthOption.class, opts);
BufferSizeOption bufOpt = Options.getOption(BufferSizeOption.class,opts);
OnlyHeaderOption headerOnly =
Options.getOption(OnlyHeaderOption.class, opts);
// check for consistency
if ((fileOpt == null) == (streamOpt == null)) {
throw new
IllegalArgumentException("File or stream option must be specified");
}
if (fileOpt == null && bufOpt != null) {
throw new IllegalArgumentException("buffer size can only be set when" +
" a file is specified.");
}
// figure out the real values
Path filename = null;
FSDataInputStream file;
final long len;
if (fileOpt != null) {
filename = fileOpt.getValue();
FileSystem fs = filename.getFileSystem(conf);
int bufSize = bufOpt == null ? getBufferSize(conf): bufOpt.getValue();
len = null == lenOpt
? fs.getFileStatus(filename).getLen()
: lenOpt.getValue();
file = openFile(fs, filename, bufSize, len);
} else {
len = null == lenOpt ? Long.MAX_VALUE : lenOpt.getValue();
file = streamOpt.getValue();
}
long start = startOpt == null ? 0 : startOpt.getValue();
// really set up
initialize(filename, file, start, len, conf, headerOnly != null);
}
/**
* Construct a reader by opening a file from the given file system.
* @param fs The file system used to open the file.
* @param file The file being read.
* @param conf Configuration
* @throws IOException
* @deprecated Use Reader(Configuration, Option...) instead.
*/
@Deprecated
public Reader(FileSystem fs, Path file,
Configuration conf) throws IOException {
this(conf, file(file.makeQualified(fs)));
}
/**
* Construct a reader by the given input stream.
* @param in An input stream.
* @param buffersize unused
* @param start The starting position.
* @param length The length being read.
* @param conf Configuration
* @throws IOException
* @deprecated Use Reader(Configuration, Reader.Option...) instead.
*/
@Deprecated
public Reader(FSDataInputStream in, int buffersize,
long start, long length, Configuration conf) throws IOException {
this(conf, stream(in), start(start), length(length));
}
/** Common work of the constructors. */
private void initialize(Path filename, FSDataInputStream in,
long start, long length, Configuration conf,
boolean tempReader) throws IOException {
if (in == null) {
throw new IllegalArgumentException("in == null");
}
this.filename = filename == null ? "<unknown>" : filename.toString();
this.in = in;
this.conf = conf;
boolean succeeded = false;
try {
seek(start);
this.end = this.in.getPos() + length;
// if it wrapped around, use the max
if (end < length) {
end = Long.MAX_VALUE;
}
init(tempReader);
succeeded = true;
} finally {
if (!succeeded) {
IOUtils.cleanup(LOG, this.in);
}
}
}
/**
* Override this method to specialize the type of
* {@link FSDataInputStream} returned.
* @param fs The file system used to open the file.
* @param file The file being read.
* @param bufferSize The buffer size used to read the file.
* @param length The length being read if it is >= 0. Otherwise,
* the length is not available.
* @return The opened stream.
* @throws IOException
*/
protected FSDataInputStream openFile(FileSystem fs, Path file,
int bufferSize, long length) throws IOException {
return fs.open(file, bufferSize);
}
/**
* Initialize the {@link Reader}
     * @param tempReader <code>true</code> if we are constructing a temporary
     *                  reader (see {@link SequenceFile.Sorter#cloneFileAttributes}),
* and hence do not initialize every component;
* <code>false</code> otherwise.
* @throws IOException
*/
private void init(boolean tempReader) throws IOException {
byte[] versionBlock = new byte[VERSION.length];
in.readFully(versionBlock);
if ((versionBlock[0] != VERSION[0]) ||
(versionBlock[1] != VERSION[1]) ||
(versionBlock[2] != VERSION[2]))
throw new IOException(this + " not a SequenceFile");
// Set 'version'
version = versionBlock[3];
if (version > VERSION[3])
throw new VersionMismatchException(VERSION[3], version);
if (version < BLOCK_COMPRESS_VERSION) {
UTF8 className = new UTF8();
className.readFields(in);
keyClassName = className.toStringChecked(); // key class name
className.readFields(in);
valClassName = className.toStringChecked(); // val class name
} else {
keyClassName = Text.readString(in);
valClassName = Text.readString(in);
}
if (version > 2) { // if version > 2
this.decompress = in.readBoolean(); // is compressed?
} else {
decompress = false;
}
if (version >= BLOCK_COMPRESS_VERSION) { // if version >= 4
this.blockCompressed = in.readBoolean(); // is block-compressed?
} else {
blockCompressed = false;
}
// if version >= 5
// setup the compression codec
if (decompress) {
if (version >= CUSTOM_COMPRESS_VERSION) {
String codecClassname = Text.readString(in);
try {
Class<? extends CompressionCodec> codecClass
= conf.getClassByName(codecClassname).asSubclass(CompressionCodec.class);
this.codec = ReflectionUtils.newInstance(codecClass, conf);
} catch (ClassNotFoundException cnfe) {
throw new IllegalArgumentException("Unknown codec: " +
codecClassname, cnfe);
}
} else {
codec = new DefaultCodec();
((Configurable)codec).setConf(conf);
}
}
this.metadata = new Metadata();
if (version >= VERSION_WITH_METADATA) { // if version >= 6
this.metadata.readFields(in);
}
if (version > 1) { // if version > 1
in.readFully(sync); // read sync bytes
headerEnd = in.getPos(); // record end of header
}
      // Initialize... *not* if we are constructing a temporary Reader
if (!tempReader) {
valBuffer = new DataInputBuffer();
if (decompress) {
valDecompressor = CodecPool.getDecompressor(codec);
valInFilter = codec.createInputStream(valBuffer, valDecompressor);
valIn = new DataInputStream(valInFilter);
} else {
valIn = valBuffer;
}
if (blockCompressed) {
keyLenBuffer = new DataInputBuffer();
keyBuffer = new DataInputBuffer();
valLenBuffer = new DataInputBuffer();
keyLenDecompressor = CodecPool.getDecompressor(codec);
keyLenInFilter = codec.createInputStream(keyLenBuffer,
keyLenDecompressor);
keyLenIn = new DataInputStream(keyLenInFilter);
keyDecompressor = CodecPool.getDecompressor(codec);
keyInFilter = codec.createInputStream(keyBuffer, keyDecompressor);
keyIn = new DataInputStream(keyInFilter);
valLenDecompressor = CodecPool.getDecompressor(codec);
valLenInFilter = codec.createInputStream(valLenBuffer,
valLenDecompressor);
valLenIn = new DataInputStream(valLenInFilter);
}
SerializationFactory serializationFactory =
new SerializationFactory(conf);
this.keyDeserializer =
getDeserializer(serializationFactory, getKeyClass());
if (this.keyDeserializer == null) {
throw new IOException(
"Could not find a deserializer for the Key class: '"
+ getKeyClass().getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using "
+ "custom serialization.");
}
if (!blockCompressed) {
this.keyDeserializer.open(valBuffer);
} else {
this.keyDeserializer.open(keyIn);
}
this.valDeserializer =
getDeserializer(serializationFactory, getValueClass());
if (this.valDeserializer == null) {
throw new IOException(
"Could not find a deserializer for the Value class: '"
+ getValueClass().getCanonicalName() + "'. "
+ "Please ensure that the configuration '" +
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
+ "properly configured, if you're using "
+ "custom serialization.");
}
this.valDeserializer.open(valIn);
}
}
@SuppressWarnings("unchecked")
private Deserializer getDeserializer(SerializationFactory sf, Class c) {
return sf.getDeserializer(c);
}
/** Close the file. */
@Override
public synchronized void close() throws IOException {
// Return the decompressors to the pool
CodecPool.returnDecompressor(keyLenDecompressor);
CodecPool.returnDecompressor(keyDecompressor);
CodecPool.returnDecompressor(valLenDecompressor);
CodecPool.returnDecompressor(valDecompressor);
keyLenDecompressor = keyDecompressor = null;
valLenDecompressor = valDecompressor = null;
if (keyDeserializer != null) {
keyDeserializer.close();
}
if (valDeserializer != null) {
valDeserializer.close();
}
// Close the input-stream
in.close();
}
/** Returns the name of the key class. */
public String getKeyClassName() {
return keyClassName;
}
/** Returns the class of keys in this file. */
public synchronized Class<?> getKeyClass() {
if (null == keyClass) {
try {
keyClass = WritableName.getClass(getKeyClassName(), conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return keyClass;
}
/** Returns the name of the value class. */
public String getValueClassName() {
return valClassName;
}
/** Returns the class of values in this file. */
public synchronized Class<?> getValueClass() {
if (null == valClass) {
try {
valClass = WritableName.getClass(getValueClassName(), conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return valClass;
}
/** Returns true if values are compressed. */
public boolean isCompressed() { return decompress; }
/** Returns true if records are block-compressed. */
public boolean isBlockCompressed() { return blockCompressed; }
/** Returns the compression codec of data in this file. */
public CompressionCodec getCompressionCodec() { return codec; }
private byte[] getSync() {
return sync;
}
private byte getVersion() {
return version;
}
/**
* Get the compression type for this file.
* @return the compression type
*/
public CompressionType getCompressionType() {
if (decompress) {
return blockCompressed ? CompressionType.BLOCK : CompressionType.RECORD;
} else {
return CompressionType.NONE;
}
}
/** Returns the metadata object of the file */
public Metadata getMetadata() {
return this.metadata;
}
/** Returns the configuration used for this file. */
Configuration getConf() { return conf; }
/** Read a compressed buffer */
private synchronized void readBuffer(DataInputBuffer buffer,
CompressionInputStream filter) throws IOException {
// Read data into a temporary buffer
DataOutputBuffer dataBuffer = new DataOutputBuffer();
try {
int dataBufferLength = WritableUtils.readVInt(in);
dataBuffer.write(in, dataBufferLength);
// Set up 'buffer' connected to the input-stream
buffer.reset(dataBuffer.getData(), 0, dataBuffer.getLength());
} finally {
dataBuffer.close();
}
// Reset the codec
filter.resetState();
}
/** Read the next 'compressed' block */
private synchronized void readBlock() throws IOException {
// Check if we need to throw away a whole block of
// 'values' due to 'lazy decompression'
if (lazyDecompress && !valuesDecompressed) {
in.seek(WritableUtils.readVInt(in)+in.getPos());
in.seek(WritableUtils.readVInt(in)+in.getPos());
}
// Reset internal states
noBufferedKeys = 0; noBufferedValues = 0; noBufferedRecords = 0;
valuesDecompressed = false;
//Process sync
if (sync != null) {
in.readInt();
in.readFully(syncCheck); // read syncCheck
if (!Arrays.equals(sync, syncCheck)) // check it
throw new IOException("File is corrupt!");
}
syncSeen = true;
// Read number of records in this block
noBufferedRecords = WritableUtils.readVInt(in);
// Read key lengths and keys
readBuffer(keyLenBuffer, keyLenInFilter);
readBuffer(keyBuffer, keyInFilter);
noBufferedKeys = noBufferedRecords;
// Read value lengths and values
if (!lazyDecompress) {
readBuffer(valLenBuffer, valLenInFilter);
readBuffer(valBuffer, valInFilter);
noBufferedValues = noBufferedRecords;
valuesDecompressed = true;
}
}
/**
* Position valLenIn/valIn to the 'value'
* corresponding to the 'current' key
*/
private synchronized void seekToCurrentValue() throws IOException {
if (!blockCompressed) {
if (decompress) {
valInFilter.resetState();
}
valBuffer.reset();
} else {
// Check if this is the first value in the 'block' to be read
if (lazyDecompress && !valuesDecompressed) {
// Read the value lengths and values
readBuffer(valLenBuffer, valLenInFilter);
readBuffer(valBuffer, valInFilter);
noBufferedValues = noBufferedRecords;
valuesDecompressed = true;
}
// Calculate the no. of bytes to skip
// Note: 'current' key has already been read!
int skipValBytes = 0;
int currentKey = noBufferedKeys + 1;
for (int i=noBufferedValues; i > currentKey; --i) {
skipValBytes += WritableUtils.readVInt(valLenIn);
--noBufferedValues;
}
// Skip to the 'val' corresponding to 'current' key
if (skipValBytes > 0) {
if (valIn.skipBytes(skipValBytes) != skipValBytes) {
throw new IOException("Failed to seek to " + currentKey +
"(th) value!");
}
}
}
}
/**
* Get the 'value' corresponding to the last read 'key'.
* @param val : The 'value' to be read.
* @throws IOException
*/
public synchronized void getCurrentValue(Writable val)
throws IOException {
if (val instanceof Configurable) {
((Configurable) val).setConf(this.conf);
}
// Position stream to 'current' value
seekToCurrentValue();
if (!blockCompressed) {
val.readFields(valIn);
if (valIn.read() > 0) {
LOG.info("available bytes: " + valIn.available());
throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
+ " bytes, should read " +
(valBuffer.getLength()-keyLength));
}
} else {
// Get the value
int valLength = WritableUtils.readVInt(valLenIn);
val.readFields(valIn);
// Read another compressed 'value'
--noBufferedValues;
// Sanity check
if ((valLength < 0) && LOG.isDebugEnabled()) {
LOG.debug(val + " is a zero-length value");
}
}
}
/**
* Get the 'value' corresponding to the last read 'key'.
* @param val : The 'value' to be read.
* @throws IOException
*/
public synchronized Object getCurrentValue(Object val)
throws IOException {
if (val instanceof Configurable) {
((Configurable) val).setConf(this.conf);
}
// Position stream to 'current' value
seekToCurrentValue();
if (!blockCompressed) {
val = deserializeValue(val);
if (valIn.read() > 0) {
LOG.info("available bytes: " + valIn.available());
throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
+ " bytes, should read " +
(valBuffer.getLength()-keyLength));
}
} else {
// Get the value
int valLength = WritableUtils.readVInt(valLenIn);
val = deserializeValue(val);
// Read another compressed 'value'
--noBufferedValues;
// Sanity check
if ((valLength < 0) && LOG.isDebugEnabled()) {
LOG.debug(val + " is a zero-length value");
}
}
return val;
}
@SuppressWarnings("unchecked")
private Object deserializeValue(Object val) throws IOException {
return valDeserializer.deserialize(val);
}
/** Read the next key in the file into <code>key</code>, skipping its
* value. True if another entry exists, and false at end of file. */
public synchronized boolean next(Writable key) throws IOException {
if (key.getClass() != getKeyClass())
throw new IOException("wrong key class: "+key.getClass().getName()
+" is not "+keyClass);
if (!blockCompressed) {
outBuf.reset();
keyLength = next(outBuf);
if (keyLength < 0)
return false;
valBuffer.reset(outBuf.getData(), outBuf.getLength());
key.readFields(valBuffer);
valBuffer.mark(0);
if (valBuffer.getPosition() != keyLength)
throw new IOException(key + " read " + valBuffer.getPosition()
+ " bytes, should read " + keyLength);
} else {
//Reset syncSeen
syncSeen = false;
if (noBufferedKeys == 0) {
try {
readBlock();
} catch (EOFException eof) {
return false;
}
}
int keyLength = WritableUtils.readVInt(keyLenIn);
// Sanity check
if (keyLength < 0) {
return false;
}
//Read another compressed 'key'
key.readFields(keyIn);
--noBufferedKeys;
}
return true;
}
/** Read the next key/value pair in the file into <code>key</code> and
* <code>val</code>. Returns true if such a pair exists and false when at
* end of file */
public synchronized boolean next(Writable key, Writable val)
throws IOException {
if (val.getClass() != getValueClass())
throw new IOException("wrong value class: "+val+" is not "+valClass);
boolean more = next(key);
if (more) {
getCurrentValue(val);
}
return more;
}
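    /*
     * A minimal usage sketch for this reader (the path and the key/value
     * types below are only examples; they must match what the file was
     * written with):
     *
     *   Configuration conf = new Configuration();
     *   SequenceFile.Reader reader = new SequenceFile.Reader(conf,
     *       SequenceFile.Reader.file(new Path("/tmp/data.seq")));
     *   Text key = new Text();
     *   IntWritable val = new IntWritable();
     *   while (reader.next(key, val)) {
     *     // process (key, val)
     *   }
     *   reader.close();
     */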
/**
* Read and return the next record length, potentially skipping over
* a sync block.
* @return the length of the next record or -1 if there is no next record
* @throws IOException
*/
private synchronized int readRecordLength() throws IOException {
if (in.getPos() >= end) {
return -1;
}
int length = in.readInt();
if (version > 1 && sync != null &&
length == SYNC_ESCAPE) { // process a sync entry
in.readFully(syncCheck); // read syncCheck
if (!Arrays.equals(sync, syncCheck)) // check it
throw new IOException("File is corrupt!");
syncSeen = true;
if (in.getPos() >= end) {
return -1;
}
length = in.readInt(); // re-read length
} else {
syncSeen = false;
}
return length;
}
    /** Read the next key/value pair in the file into <code>buffer</code>.
     * Returns the length of the key read, or -1 if at end of file.  The length
     * of the value may be computed by calling buffer.getLength() before and
     * after calls to this method.
     * @deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}. */
    @Deprecated
synchronized int next(DataOutputBuffer buffer) throws IOException {
// Unsupported for block-compressed sequence files
if (blockCompressed) {
throw new IOException("Unsupported call for block-compressed" +
" SequenceFiles - use SequenceFile.Reader.next(DataOutputStream, ValueBytes)");
}
try {
int length = readRecordLength();
if (length == -1) {
return -1;
}
int keyLength = in.readInt();
buffer.write(in, length);
return keyLength;
} catch (ChecksumException e) { // checksum failure
handleChecksumException(e);
return next(buffer);
}
}
public ValueBytes createValueBytes() {
ValueBytes val = null;
if (!decompress || blockCompressed) {
val = new UncompressedBytes();
} else {
val = new CompressedBytes(codec);
}
return val;
}
/**
* Read 'raw' records.
* @param key - The buffer into which the key is read
* @param val - The 'raw' value
* @return Returns the total record length or -1 for end of file
* @throws IOException
*/
public synchronized int nextRaw(DataOutputBuffer key, ValueBytes val)
throws IOException {
if (!blockCompressed) {
int length = readRecordLength();
if (length == -1) {
return -1;
}
int keyLength = in.readInt();
int valLength = length - keyLength;
key.write(in, keyLength);
if (decompress) {
CompressedBytes value = (CompressedBytes)val;
value.reset(in, valLength);
} else {
UncompressedBytes value = (UncompressedBytes)val;
value.reset(in, valLength);
}
return length;
} else {
//Reset syncSeen
syncSeen = false;
// Read 'key'
if (noBufferedKeys == 0) {
if (in.getPos() >= end)
return -1;
try {
readBlock();
} catch (EOFException eof) {
return -1;
}
}
int keyLength = WritableUtils.readVInt(keyLenIn);
if (keyLength < 0) {
throw new IOException("zero length key found!");
}
key.write(keyIn, keyLength);
--noBufferedKeys;
// Read raw 'value'
seekToCurrentValue();
int valLength = WritableUtils.readVInt(valLenIn);
UncompressedBytes rawValue = (UncompressedBytes)val;
rawValue.reset(valIn, valLength);
--noBufferedValues;
return (keyLength+valLength);
}
}
/**
* Read 'raw' keys.
* @param key - The buffer into which the key is read
* @return Returns the key length or -1 for end of file
* @throws IOException
*/
public synchronized int nextRawKey(DataOutputBuffer key)
throws IOException {
if (!blockCompressed) {
recordLength = readRecordLength();
if (recordLength == -1) {
return -1;
}
keyLength = in.readInt();
key.write(in, keyLength);
return keyLength;
} else {
//Reset syncSeen
syncSeen = false;
// Read 'key'
if (noBufferedKeys == 0) {
if (in.getPos() >= end)
return -1;
try {
readBlock();
} catch (EOFException eof) {
return -1;
}
}
int keyLength = WritableUtils.readVInt(keyLenIn);
if (keyLength < 0) {
throw new IOException("zero length key found!");
}
key.write(keyIn, keyLength);
--noBufferedKeys;
return keyLength;
}
}
/** Read the next key in the file, skipping its
* value. Return null at end of file. */
public synchronized Object next(Object key) throws IOException {
if (key != null && key.getClass() != getKeyClass()) {
throw new IOException("wrong key class: "+key.getClass().getName()
+" is not "+keyClass);
}
if (!blockCompressed) {
outBuf.reset();
keyLength = next(outBuf);
if (keyLength < 0)
return null;
valBuffer.reset(outBuf.getData(), outBuf.getLength());
key = deserializeKey(key);
valBuffer.mark(0);
if (valBuffer.getPosition() != keyLength)
throw new IOException(key + " read " + valBuffer.getPosition()
+ " bytes, should read " + keyLength);
} else {
//Reset syncSeen
syncSeen = false;
if (noBufferedKeys == 0) {
try {
readBlock();
} catch (EOFException eof) {
return null;
}
}
int keyLength = WritableUtils.readVInt(keyLenIn);
// Sanity check
if (keyLength < 0) {
return null;
}
//Read another compressed 'key'
key = deserializeKey(key);
--noBufferedKeys;
}
return key;
}
@SuppressWarnings("unchecked")
private Object deserializeKey(Object key) throws IOException {
return keyDeserializer.deserialize(key);
}
/**
* Read 'raw' values.
* @param val - The 'raw' value
* @return Returns the value length
* @throws IOException
*/
public synchronized int nextRawValue(ValueBytes val)
throws IOException {
// Position stream to current value
seekToCurrentValue();
if (!blockCompressed) {
int valLength = recordLength - keyLength;
if (decompress) {
CompressedBytes value = (CompressedBytes)val;
value.reset(in, valLength);
} else {
UncompressedBytes value = (UncompressedBytes)val;
value.reset(in, valLength);
}
return valLength;
} else {
int valLength = WritableUtils.readVInt(valLenIn);
UncompressedBytes rawValue = (UncompressedBytes)val;
rawValue.reset(valIn, valLength);
--noBufferedValues;
return valLength;
}
}
private void handleChecksumException(ChecksumException e)
throws IOException {
if (this.conf.getBoolean("io.skip.checksum.errors", false)) {
LOG.warn("Bad checksum at "+getPosition()+". Skipping entries.");
sync(getPosition()+this.conf.getInt("io.bytes.per.checksum", 512));
} else {
throw e;
}
}
/** disables sync. often invoked for tmp files */
synchronized void ignoreSync() {
sync = null;
}
/** Set the current byte position in the input file.
*
* <p>The position passed must be a position returned by {@link
* SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
* position, use {@link SequenceFile.Reader#sync(long)}.
*/
public synchronized void seek(long position) throws IOException {
in.seek(position);
if (blockCompressed) { // trigger block read
noBufferedKeys = 0;
valuesDecompressed = true;
}
}
/** Seek to the next sync mark past a given position.*/
public synchronized void sync(long position) throws IOException {
if (position+SYNC_SIZE >= end) {
seek(end);
return;
}
if (position < headerEnd) {
// seek directly to first record
in.seek(headerEnd);
// note the sync marker "seen" in the header
syncSeen = true;
return;
}
try {
seek(position+4); // skip escape
in.readFully(syncCheck);
int syncLen = sync.length;
for (int i = 0; in.getPos() < end; i++) {
int j = 0;
for (; j < syncLen; j++) {
if (sync[j] != syncCheck[(i+j)%syncLen])
break;
}
if (j == syncLen) {
in.seek(in.getPos() - SYNC_SIZE); // position before sync
return;
}
syncCheck[i%syncLen] = in.readByte();
}
} catch (ChecksumException e) { // checksum failure
handleChecksumException(e);
}
}
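    /*
     * A sketch of how sync(long) is commonly used to process a byte-range
     * split of a file ('start', 'splitEnd', 'key' and 'val' are assumed to
     * be supplied by the caller):
     *
     *   reader.sync(start);                  // advance to the next sync mark
     *   long pos = reader.getPosition();
     *   while (pos < splitEnd && reader.next(key, val)) {
     *     // process (key, val)
     *     pos = reader.getPosition();
     *   }
     */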
/** Returns true iff the previous call to next passed a sync mark.*/
public synchronized boolean syncSeen() { return syncSeen; }
/** Return the current byte position in the input file. */
public synchronized long getPosition() throws IOException {
return in.getPos();
}
/** Returns the name of the file. */
@Override
public String toString() {
return filename;
}
}
/** Sorts key/value pairs in a sequence-format file.
*
* <p>For best performance, applications should make sure that the {@link
* Writable#readFields(DataInput)} implementation of their keys is
* very efficient. In particular, it should avoid allocating memory.
*/
public static class Sorter {
private RawComparator comparator;
private MergeSort mergeSort; //the implementation of merge sort
private Path[] inFiles; // when merging or sorting
private Path outFile;
private int memory; // bytes
private int factor; // merged per pass
private FileSystem fs = null;
private Class keyClass;
private Class valClass;
private Configuration conf;
private Metadata metadata;
private Progressable progressable = null;
/** Sort and merge files containing the named classes. */
public Sorter(FileSystem fs, Class<? extends WritableComparable> keyClass,
Class valClass, Configuration conf) {
this(fs, WritableComparator.get(keyClass, conf), keyClass, valClass, conf);
}
/** Sort and merge using an arbitrary {@link RawComparator}. */
public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
Class valClass, Configuration conf) {
this(fs, comparator, keyClass, valClass, conf, new Metadata());
}
/** Sort and merge using an arbitrary {@link RawComparator}. */
public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
Class valClass, Configuration conf, Metadata metadata) {
this.fs = fs;
this.comparator = comparator;
this.keyClass = keyClass;
this.valClass = valClass;
this.memory = conf.getInt("io.sort.mb", 100) * 1024 * 1024;
this.factor = conf.getInt("io.sort.factor", 100);
this.conf = conf;
this.metadata = metadata;
}
/** Set the number of streams to merge at once.*/
public void setFactor(int factor) { this.factor = factor; }
/** Get the number of streams to merge at once.*/
public int getFactor() { return factor; }
/** Set the total amount of buffer memory, in bytes.*/
public void setMemory(int memory) { this.memory = memory; }
/** Get the total amount of buffer memory, in bytes.*/
public int getMemory() { return memory; }
/** Set the progressable object in order to report progress. */
public void setProgressable(Progressable progressable) {
this.progressable = progressable;
}
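    /*
     * A minimal usage sketch (paths, key/value classes and tuning values are
     * only examples):
     *
     *   Configuration conf = new Configuration();
     *   FileSystem fs = FileSystem.get(conf);
     *   SequenceFile.Sorter sorter =
     *       new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
     *   sorter.setFactor(10);                // merge 10 segments per pass
     *   sorter.setMemory(64 * 1024 * 1024);  // 64 MB sort buffer
     *   sorter.sort(new Path[] { new Path("/tmp/in.seq") },
     *               new Path("/tmp/sorted.seq"), false);
     */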
/**
* Perform a file sort from a set of input files into an output file.
* @param inFiles the files to be sorted
* @param outFile the sorted output file
* @param deleteInput should the input files be deleted as they are read?
*/
public void sort(Path[] inFiles, Path outFile,
boolean deleteInput) throws IOException {
if (fs.exists(outFile)) {
throw new IOException("already exists: " + outFile);
}
this.inFiles = inFiles;
this.outFile = outFile;
int segments = sortPass(deleteInput);
if (segments > 1) {
mergePass(outFile.getParent());
}
}
/**
* Perform a file sort from a set of input files and return an iterator.
* @param inFiles the files to be sorted
* @param tempDir the directory where temp files are created during sort
* @param deleteInput should the input files be deleted as they are read?
* @return iterator the RawKeyValueIterator
*/
public RawKeyValueIterator sortAndIterate(Path[] inFiles, Path tempDir,
boolean deleteInput) throws IOException {
Path outFile = new Path(tempDir + Path.SEPARATOR + "all.2");
if (fs.exists(outFile)) {
throw new IOException("already exists: " + outFile);
}
this.inFiles = inFiles;
//outFile will basically be used as prefix for temp files in the cases
//where sort outputs multiple sorted segments. For the single segment
//case, the outputFile itself will contain the sorted data for that
//segment
this.outFile = outFile;
int segments = sortPass(deleteInput);
if (segments > 1)
return merge(outFile.suffix(".0"), outFile.suffix(".0.index"),
tempDir);
else if (segments == 1)
return merge(new Path[]{outFile}, true, tempDir);
else return null;
}
/**
* The backwards compatible interface to sort.
* @param inFile the input file to sort
* @param outFile the sorted output file
*/
public void sort(Path inFile, Path outFile) throws IOException {
sort(new Path[]{inFile}, outFile, false);
}
private int sortPass(boolean deleteInput) throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("running sort pass");
}
SortPass sortPass = new SortPass(); // make the SortPass
sortPass.setProgressable(progressable);
mergeSort = new MergeSort(sortPass.new SeqFileComparator());
try {
return sortPass.run(deleteInput); // run it
} finally {
sortPass.close(); // close it
}
}
private class SortPass {
private int memoryLimit = memory/4;
private int recordLimit = 1000000;
private DataOutputBuffer rawKeys = new DataOutputBuffer();
private byte[] rawBuffer;
private int[] keyOffsets = new int[1024];
private int[] pointers = new int[keyOffsets.length];
private int[] pointersCopy = new int[keyOffsets.length];
private int[] keyLengths = new int[keyOffsets.length];
private ValueBytes[] rawValues = new ValueBytes[keyOffsets.length];
private ArrayList segmentLengths = new ArrayList();
private Reader in = null;
private FSDataOutputStream out = null;
private FSDataOutputStream indexOut = null;
private Path outName;
private Progressable progressable = null;
public int run(boolean deleteInput) throws IOException {
int segments = 0;
int currentFile = 0;
boolean atEof = (currentFile >= inFiles.length);
CompressionType compressionType;
CompressionCodec codec = null;
segmentLengths.clear();
if (atEof) {
return 0;
}
// Initialize
in = new Reader(fs, inFiles[currentFile], conf);
compressionType = in.getCompressionType();
codec = in.getCompressionCodec();
for (int i=0; i < rawValues.length; ++i) {
rawValues[i] = null;
}
while (!atEof) {
int count = 0;
int bytesProcessed = 0;
rawKeys.reset();
while (!atEof &&
bytesProcessed < memoryLimit && count < recordLimit) {
// Read a record into buffer
// Note: Attempt to re-use 'rawValue' as far as possible
int keyOffset = rawKeys.getLength();
ValueBytes rawValue =
(count == keyOffsets.length || rawValues[count] == null) ?
in.createValueBytes() :
rawValues[count];
int recordLength = in.nextRaw(rawKeys, rawValue);
if (recordLength == -1) {
in.close();
if (deleteInput) {
fs.delete(inFiles[currentFile], true);
}
currentFile += 1;
atEof = currentFile >= inFiles.length;
if (!atEof) {
in = new Reader(fs, inFiles[currentFile], conf);
} else {
in = null;
}
continue;
}
int keyLength = rawKeys.getLength() - keyOffset;
if (count == keyOffsets.length)
grow();
keyOffsets[count] = keyOffset; // update pointers
pointers[count] = count;
keyLengths[count] = keyLength;
rawValues[count] = rawValue;
bytesProcessed += recordLength;
count++;
}
// buffer is full -- sort & flush it
if(LOG.isDebugEnabled()) {
LOG.debug("flushing segment " + segments);
}
rawBuffer = rawKeys.getData();
sort(count);
// indicate we're making progress
if (progressable != null) {
progressable.progress();
}
flush(count, bytesProcessed, compressionType, codec,
segments==0 && atEof);
segments++;
}
return segments;
}
public void close() throws IOException {
if (in != null) {
in.close();
}
if (out != null) {
out.close();
}
if (indexOut != null) {
indexOut.close();
}
}
private void grow() {
int newLength = keyOffsets.length * 3 / 2;
keyOffsets = grow(keyOffsets, newLength);
pointers = grow(pointers, newLength);
pointersCopy = new int[newLength];
keyLengths = grow(keyLengths, newLength);
rawValues = grow(rawValues, newLength);
}
private int[] grow(int[] old, int newLength) {
int[] result = new int[newLength];
System.arraycopy(old, 0, result, 0, old.length);
return result;
}
private ValueBytes[] grow(ValueBytes[] old, int newLength) {
ValueBytes[] result = new ValueBytes[newLength];
System.arraycopy(old, 0, result, 0, old.length);
for (int i=old.length; i < newLength; ++i) {
result[i] = null;
}
return result;
}
private void flush(int count, int bytesProcessed,
CompressionType compressionType,
CompressionCodec codec,
boolean done) throws IOException {
if (out == null) {
outName = done ? outFile : outFile.suffix(".0");
out = fs.create(outName);
if (!done) {
indexOut = fs.create(outName.suffix(".index"));
}
}
long segmentStart = out.getPos();
Writer writer = createWriter(conf, Writer.stream(out),
Writer.keyClass(keyClass), Writer.valueClass(valClass),
Writer.compression(compressionType, codec),
Writer.metadata(done ? metadata : new Metadata()));
if (!done) {
writer.sync = null; // disable sync on temp files
}
for (int i = 0; i < count; i++) { // write in sorted order
int p = pointers[i];
writer.appendRaw(rawBuffer, keyOffsets[p], keyLengths[p], rawValues[p]);
}
writer.close();
if (!done) {
// Save the segment length
WritableUtils.writeVLong(indexOut, segmentStart);
WritableUtils.writeVLong(indexOut, (out.getPos()-segmentStart));
indexOut.flush();
}
}
private void sort(int count) {
System.arraycopy(pointers, 0, pointersCopy, 0, count);
mergeSort.mergeSort(pointersCopy, pointers, 0, count);
}
class SeqFileComparator implements Comparator<IntWritable> {
@Override
public int compare(IntWritable I, IntWritable J) {
return comparator.compare(rawBuffer, keyOffsets[I.get()],
keyLengths[I.get()], rawBuffer,
keyOffsets[J.get()], keyLengths[J.get()]);
}
}
/** set the progressable object in order to report progress */
public void setProgressable(Progressable progressable)
{
this.progressable = progressable;
}
} // SequenceFile.Sorter.SortPass
/** The interface to iterate over raw keys/values of SequenceFiles. */
public static interface RawKeyValueIterator {
/** Gets the current raw key
* @return DataOutputBuffer
* @throws IOException
*/
DataOutputBuffer getKey() throws IOException;
/** Gets the current raw value
* @return ValueBytes
* @throws IOException
*/
ValueBytes getValue() throws IOException;
/** Sets up the current key and value (for getKey and getValue)
* @return true if there exists a key/value, false otherwise
* @throws IOException
*/
boolean next() throws IOException;
/** closes the iterator so that the underlying streams can be closed
* @throws IOException
*/
void close() throws IOException;
/** Gets the Progress object; this has a float (0.0 - 1.0)
* indicating the bytes processed by the iterator so far
*/
Progress getProgress();
}
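    /*
     * A sketch of consuming a RawKeyValueIterator returned by merge()
     * ('sorter', 'inputs', 'tmpDir' and 'writer' are assumed to exist):
     *
     *   RawKeyValueIterator iter = sorter.merge(inputs, false, tmpDir);
     *   while (iter.next()) {
     *     DataOutputBuffer rawKey = iter.getKey();
     *     writer.appendRaw(rawKey.getData(), 0, rawKey.getLength(),
     *                      iter.getValue());
     *   }
     *   iter.close();
     *
     * This is essentially what writeFile(RawKeyValueIterator, Writer) does.
     */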
/**
* Merges the list of segments of type <code>SegmentDescriptor</code>
* @param segments the list of SegmentDescriptors
* @param tmpDir the directory to write temporary files into
* @return RawKeyValueIterator
* @throws IOException
*/
public RawKeyValueIterator merge(List <SegmentDescriptor> segments,
Path tmpDir)
throws IOException {
// pass in object to report progress, if present
MergeQueue mQueue = new MergeQueue(segments, tmpDir, progressable);
return mQueue.merge();
}
/**
* Merges the contents of files passed in Path[] using a max factor value
* that is already set
* @param inNames the array of path names
* @param deleteInputs true if the input files should be deleted when
* unnecessary
* @param tmpDir the directory to write temporary files into
     * @return RawKeyValueIterator
* @throws IOException
*/
public RawKeyValueIterator merge(Path [] inNames, boolean deleteInputs,
Path tmpDir)
throws IOException {
return merge(inNames, deleteInputs,
(inNames.length < factor) ? inNames.length : factor,
tmpDir);
}
/**
* Merges the contents of files passed in Path[]
* @param inNames the array of path names
* @param deleteInputs true if the input files should be deleted when
* unnecessary
* @param factor the factor that will be used as the maximum merge fan-in
* @param tmpDir the directory to write temporary files into
     * @return RawKeyValueIterator
* @throws IOException
*/
public RawKeyValueIterator merge(Path [] inNames, boolean deleteInputs,
int factor, Path tmpDir)
throws IOException {
//get the segments from inNames
ArrayList <SegmentDescriptor> a = new ArrayList <SegmentDescriptor>();
for (int i = 0; i < inNames.length; i++) {
SegmentDescriptor s = new SegmentDescriptor(0,
fs.getFileStatus(inNames[i]).getLen(), inNames[i]);
s.preserveInput(!deleteInputs);
s.doSync();
a.add(s);
}
this.factor = factor;
MergeQueue mQueue = new MergeQueue(a, tmpDir, progressable);
return mQueue.merge();
}
/**
* Merges the contents of files passed in Path[]
* @param inNames the array of path names
* @param tempDir the directory for creating temp files during merge
* @param deleteInputs true if the input files should be deleted when
* unnecessary
     * @return RawKeyValueIterator
* @throws IOException
*/
public RawKeyValueIterator merge(Path [] inNames, Path tempDir,
boolean deleteInputs)
throws IOException {
//outFile will basically be used as prefix for temp files for the
//intermediate merge outputs
this.outFile = new Path(tempDir + Path.SEPARATOR + "merged");
//get the segments from inNames
ArrayList <SegmentDescriptor> a = new ArrayList <SegmentDescriptor>();
for (int i = 0; i < inNames.length; i++) {
SegmentDescriptor s = new SegmentDescriptor(0,
fs.getFileStatus(inNames[i]).getLen(), inNames[i]);
s.preserveInput(!deleteInputs);
s.doSync();
a.add(s);
}
factor = (inNames.length < factor) ? inNames.length : factor;
// pass in object to report progress, if present
MergeQueue mQueue = new MergeQueue(a, tempDir, progressable);
return mQueue.merge();
}
/**
     * Clones the attributes (like compression) of the input file and creates
     * a corresponding Writer
* @param inputFile the path of the input file whose attributes should be
* cloned
* @param outputFile the path of the output file
* @param prog the Progressable to report status during the file write
* @return Writer
* @throws IOException
*/
public Writer cloneFileAttributes(Path inputFile, Path outputFile,
Progressable prog) throws IOException {
Reader reader = new Reader(conf,
Reader.file(inputFile),
new Reader.OnlyHeaderOption());
CompressionType compress = reader.getCompressionType();
CompressionCodec codec = reader.getCompressionCodec();
reader.close();
Writer writer = createWriter(conf,
Writer.file(outputFile),
Writer.keyClass(keyClass),
Writer.valueClass(valClass),
Writer.compression(compress, codec),
Writer.progressable(prog));
return writer;
}
/**
* Writes records from RawKeyValueIterator into a file represented by the
* passed writer
* @param records the RawKeyValueIterator
* @param writer the Writer created earlier
* @throws IOException
*/
public void writeFile(RawKeyValueIterator records, Writer writer)
throws IOException {
while(records.next()) {
writer.appendRaw(records.getKey().getData(), 0,
records.getKey().getLength(), records.getValue());
}
writer.sync();
}
/** Merge the provided files.
* @param inFiles the array of input path names
* @param outFile the final output file
* @throws IOException
*/
public void merge(Path[] inFiles, Path outFile) throws IOException {
if (fs.exists(outFile)) {
throw new IOException("already exists: " + outFile);
}
RawKeyValueIterator r = merge(inFiles, false, outFile.getParent());
Writer writer = cloneFileAttributes(inFiles[0], outFile, null);
writeFile(r, writer);
writer.close();
}
/** sort calls this to generate the final merged output */
private int mergePass(Path tmpDir) throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("running merge pass");
}
Writer writer = cloneFileAttributes(
outFile.suffix(".0"), outFile, null);
RawKeyValueIterator r = merge(outFile.suffix(".0"),
outFile.suffix(".0.index"), tmpDir);
writeFile(r, writer);
writer.close();
return 0;
}
/** Used by mergePass to merge the output of the sort
* @param inName the name of the input file containing sorted segments
* @param indexIn the offsets of the sorted segments
* @param tmpDir the relative directory to store intermediate results in
* @return RawKeyValueIterator
* @throws IOException
*/
private RawKeyValueIterator merge(Path inName, Path indexIn, Path tmpDir)
throws IOException {
//get the segments from indexIn
//we create a SegmentContainer so that we can track segments belonging to
//inName and delete inName as soon as we see that we have looked at all
//the contained segments during the merge process & hence don't need
//them anymore
SegmentContainer container = new SegmentContainer(inName, indexIn);
MergeQueue mQueue = new MergeQueue(container.getSegmentList(), tmpDir, progressable);
return mQueue.merge();
}
/** This class implements the core of the merge logic */
private class MergeQueue extends PriorityQueue
implements RawKeyValueIterator {
private boolean compress;
private boolean blockCompress;
private DataOutputBuffer rawKey = new DataOutputBuffer();
private ValueBytes rawValue;
private long totalBytesProcessed;
private float progPerByte;
private Progress mergeProgress = new Progress();
private Path tmpDir;
private Progressable progress = null; //handle to the progress reporting object
private SegmentDescriptor minSegment;
//a TreeMap used to store the segments sorted by size (segment offset and
//segment path name is used to break ties between segments of same sizes)
private Map<SegmentDescriptor, Void> sortedSegmentSizes =
new TreeMap<SegmentDescriptor, Void>();
@SuppressWarnings("unchecked")
public void put(SegmentDescriptor stream) throws IOException {
if (size() == 0) {
compress = stream.in.isCompressed();
blockCompress = stream.in.isBlockCompressed();
} else if (compress != stream.in.isCompressed() ||
blockCompress != stream.in.isBlockCompressed()) {
throw new IOException("All merged files must be compressed or not.");
}
super.put(stream);
}
/**
* A queue of file segments to merge
* @param segments the file segments to merge
* @param tmpDir a relative local directory to save intermediate files in
* @param progress the reference to the Progressable object
*/
public MergeQueue(List <SegmentDescriptor> segments,
Path tmpDir, Progressable progress) {
int size = segments.size();
for (int i = 0; i < size; i++) {
sortedSegmentSizes.put(segments.get(i), null);
}
this.tmpDir = tmpDir;
this.progress = progress;
}
@Override
protected boolean lessThan(Object a, Object b) {
// indicate we're making progress
if (progress != null) {
progress.progress();
}
SegmentDescriptor msa = (SegmentDescriptor)a;
SegmentDescriptor msb = (SegmentDescriptor)b;
return comparator.compare(msa.getKey().getData(), 0,
msa.getKey().getLength(), msb.getKey().getData(), 0,
msb.getKey().getLength()) < 0;
}
@Override
public void close() throws IOException {
SegmentDescriptor ms; // close inputs
while ((ms = (SegmentDescriptor)pop()) != null) {
ms.cleanup();
}
minSegment = null;
}
@Override
public DataOutputBuffer getKey() throws IOException {
return rawKey;
}
@Override
public ValueBytes getValue() throws IOException {
return rawValue;
}
@Override
public boolean next() throws IOException {
if (size() == 0)
return false;
if (minSegment != null) {
//minSegment is non-null for all invocations of next except the first
//one. For the first invocation, the priority queue is ready for use
//but for the subsequent invocations, first adjust the queue
adjustPriorityQueue(minSegment);
if (size() == 0) {
minSegment = null;
return false;
}
}
minSegment = (SegmentDescriptor)top();
long startPos = minSegment.in.getPosition(); // Current position in stream
//save the raw key reference
rawKey = minSegment.getKey();
//load the raw value. Re-use the existing rawValue buffer
if (rawValue == null) {
rawValue = minSegment.in.createValueBytes();
}
minSegment.nextRawValue(rawValue);
long endPos = minSegment.in.getPosition(); // End position after reading value
updateProgress(endPos - startPos);
return true;
}
@Override
public Progress getProgress() {
return mergeProgress;
}
private void adjustPriorityQueue(SegmentDescriptor ms) throws IOException{
long startPos = ms.in.getPosition(); // Current position in stream
boolean hasNext = ms.nextRawKey();
long endPos = ms.in.getPosition(); // End position after reading key
updateProgress(endPos - startPos);
if (hasNext) {
adjustTop();
} else {
pop();
ms.cleanup();
}
}
private void updateProgress(long bytesProcessed) {
totalBytesProcessed += bytesProcessed;
if (progPerByte > 0) {
mergeProgress.set(totalBytesProcessed * progPerByte);
}
}
/** This is the single level merge that is called multiple times
* depending on the factor size and the number of segments
* @return RawKeyValueIterator
* @throws IOException
*/
public RawKeyValueIterator merge() throws IOException {
//create the MergeStreams from the sorted map created in the constructor
//and dump the final output to a file
int numSegments = sortedSegmentSizes.size();
int origFactor = factor;
int passNo = 1;
LocalDirAllocator lDirAlloc = new LocalDirAllocator("io.seqfile.local.dir");
do {
//get the factor for this pass of merge
factor = getPassFactor(passNo, numSegments);
List<SegmentDescriptor> segmentsToMerge =
new ArrayList<SegmentDescriptor>();
int segmentsConsidered = 0;
int numSegmentsToConsider = factor;
while (true) {
//extract the smallest 'factor' number of segment pointers from the
//TreeMap. Call cleanup on the empty segments (no key/value data)
SegmentDescriptor[] mStream =
getSegmentDescriptors(numSegmentsToConsider);
for (int i = 0; i < mStream.length; i++) {
if (mStream[i].nextRawKey()) {
segmentsToMerge.add(mStream[i]);
segmentsConsidered++;
// Count the fact that we read some bytes in calling nextRawKey()
updateProgress(mStream[i].in.getPosition());
}
else {
mStream[i].cleanup();
numSegments--; //we ignore this segment for the merge
}
}
//if we have the desired number of segments
//or looked at all available segments, we break
if (segmentsConsidered == factor ||
sortedSegmentSizes.size() == 0) {
break;
}
numSegmentsToConsider = factor - segmentsConsidered;
}
//feed the streams to the priority queue
initialize(segmentsToMerge.size()); clear();
for (int i = 0; i < segmentsToMerge.size(); i++) {
put(segmentsToMerge.get(i));
}
//if we have lesser number of segments remaining, then just return the
//iterator, else do another single level merge
if (numSegments <= factor) {
//calculate the length of the remaining segments. Required for
//calculating the merge progress
long totalBytes = 0;
for (int i = 0; i < segmentsToMerge.size(); i++) {
totalBytes += segmentsToMerge.get(i).segmentLength;
}
if (totalBytes != 0) //being paranoid
progPerByte = 1.0f / (float)totalBytes;
//reset factor to what it originally was
factor = origFactor;
return this;
} else {
//we want to spread the creation of temp files on multiple disks if
//available under the space constraints
long approxOutputSize = 0;
for (SegmentDescriptor s : segmentsToMerge) {
approxOutputSize += s.segmentLength +
ChecksumFileSystem.getApproxChkSumLength(
s.segmentLength);
}
Path tmpFilename =
new Path(tmpDir, "intermediate").suffix("." + passNo);
Path outputFile = lDirAlloc.getLocalPathForWrite(
tmpFilename.toString(),
approxOutputSize, conf);
if(LOG.isDebugEnabled()) {
LOG.debug("writing intermediate results to " + outputFile);
}
Writer writer = cloneFileAttributes(
fs.makeQualified(segmentsToMerge.get(0).segmentPathName),
fs.makeQualified(outputFile), null);
writer.sync = null; //disable sync for temp files
writeFile(this, writer);
writer.close();
//we finished one single level merge; now clean up the priority
//queue
this.close();
SegmentDescriptor tempSegment =
new SegmentDescriptor(0,
fs.getFileStatus(outputFile).getLen(), outputFile);
//put the segment back in the TreeMap
sortedSegmentSizes.put(tempSegment, null);
numSegments = sortedSegmentSizes.size();
passNo++;
}
//we are worried about only the first pass merge factor. So reset the
//factor to what it originally was
factor = origFactor;
} while(true);
}
      /** Determine how many segments to merge in this pass. On the first pass
       * the factor is reduced so that every subsequent pass can merge a full
       * 'factor' worth of segments (HADOOP-591). */
public int getPassFactor(int passNo, int numSegments) {
if (passNo > 1 || numSegments <= factor || factor == 1)
return factor;
int mod = (numSegments - 1) % (factor - 1);
if (mod == 0)
return factor;
return mod + 1;
}
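      // Example of the rule above: with 14 segments and factor 10,
      // mod = (14 - 1) % (10 - 1) = 4, so the first pass merges 5 segments,
      // leaving exactly 10 segments for a single final pass.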
/** Return (& remove) the requested number of segment descriptors from the
* sorted map.
*/
public SegmentDescriptor[] getSegmentDescriptors(int numDescriptors) {
if (numDescriptors > sortedSegmentSizes.size())
numDescriptors = sortedSegmentSizes.size();
SegmentDescriptor[] SegmentDescriptors =
new SegmentDescriptor[numDescriptors];
Iterator iter = sortedSegmentSizes.keySet().iterator();
int i = 0;
while (i < numDescriptors) {
SegmentDescriptors[i++] = (SegmentDescriptor)iter.next();
iter.remove();
}
return SegmentDescriptors;
}
} // SequenceFile.Sorter.MergeQueue
/** This class defines a merge segment. This class can be subclassed to
* provide a customized cleanup method implementation. In this
* implementation, cleanup closes the file handle and deletes the file
*/
public class SegmentDescriptor implements Comparable {
long segmentOffset; //the start of the segment in the file
long segmentLength; //the length of the segment
Path segmentPathName; //the path name of the file containing the segment
boolean ignoreSync = true; //set to true for temp files
private Reader in = null;
private DataOutputBuffer rawKey = null; //this will hold the current key
private boolean preserveInput = false; //delete input segment files?
/** Constructs a segment
* @param segmentOffset the offset of the segment in the file
* @param segmentLength the length of the segment
* @param segmentPathName the path name of the file containing the segment
*/
public SegmentDescriptor (long segmentOffset, long segmentLength,
Path segmentPathName) {
this.segmentOffset = segmentOffset;
this.segmentLength = segmentLength;
this.segmentPathName = segmentPathName;
}
/** Do the sync checks */
public void doSync() {ignoreSync = false;}
/** Whether to delete the files when no longer needed */
public void preserveInput(boolean preserve) {
preserveInput = preserve;
}
public boolean shouldPreserveInput() {
return preserveInput;
}
@Override
public int compareTo(Object o) {
SegmentDescriptor that = (SegmentDescriptor)o;
if (this.segmentLength != that.segmentLength) {
return (this.segmentLength < that.segmentLength ? -1 : 1);
}
if (this.segmentOffset != that.segmentOffset) {
return (this.segmentOffset < that.segmentOffset ? -1 : 1);
}
return (this.segmentPathName.toString()).
compareTo(that.segmentPathName.toString());
}
@Override
public boolean equals(Object o) {
if (!(o instanceof SegmentDescriptor)) {
return false;
}
SegmentDescriptor that = (SegmentDescriptor)o;
if (this.segmentLength == that.segmentLength &&
this.segmentOffset == that.segmentOffset &&
this.segmentPathName.toString().equals(
that.segmentPathName.toString())) {
return true;
}
return false;
}
@Override
public int hashCode() {
return 37 * 17 + (int) (segmentOffset^(segmentOffset>>>32));
}
/** Fills up the rawKey object with the key returned by the Reader
* @return true if there is a key returned; false, otherwise
* @throws IOException
*/
public boolean nextRawKey() throws IOException {
if (in == null) {
int bufferSize = getBufferSize(conf);
Reader reader = new Reader(conf,
Reader.file(segmentPathName),
Reader.bufferSize(bufferSize),
Reader.start(segmentOffset),
Reader.length(segmentLength));
//sometimes we ignore syncs especially for temp merge files
if (ignoreSync) reader.ignoreSync();
if (reader.getKeyClass() != keyClass)
throw new IOException("wrong key class: " + reader.getKeyClass() +
" is not " + keyClass);
if (reader.getValueClass() != valClass)
throw new IOException("wrong value class: "+reader.getValueClass()+
" is not " + valClass);
this.in = reader;
rawKey = new DataOutputBuffer();
}
rawKey.reset();
int keyLength =
in.nextRawKey(rawKey);
return (keyLength >= 0);
}
/** Fills up the passed rawValue with the value corresponding to the key
* read earlier
* @param rawValue
* @return the length of the value
* @throws IOException
*/
public int nextRawValue(ValueBytes rawValue) throws IOException {
int valLength = in.nextRawValue(rawValue);
return valLength;
}
/** Returns the stored rawKey */
public DataOutputBuffer getKey() {
return rawKey;
}
/** closes the underlying reader */
private void close() throws IOException {
this.in.close();
this.in = null;
}
/** The default cleanup. Subclasses can override this with a custom
* cleanup
*/
public void cleanup() throws IOException {
close();
if (!preserveInput) {
fs.delete(segmentPathName, true);
}
}
} // SequenceFile.Sorter.SegmentDescriptor
    /** This class describes one of multiple segments contained within a
     *  single file; cleanup of the shared file is delegated to the parent
     *  SegmentContainer.
     */
private class LinkedSegmentsDescriptor extends SegmentDescriptor {
SegmentContainer parentContainer = null;
/** Constructs a segment
* @param segmentOffset the offset of the segment in the file
* @param segmentLength the length of the segment
* @param segmentPathName the path name of the file containing the segment
* @param parent the parent SegmentContainer that holds the segment
*/
public LinkedSegmentsDescriptor (long segmentOffset, long segmentLength,
Path segmentPathName, SegmentContainer parent) {
super(segmentOffset, segmentLength, segmentPathName);
this.parentContainer = parent;
}
/** The default cleanup. Subclasses can override this with a custom
* cleanup
*/
@Override
public void cleanup() throws IOException {
super.close();
if (super.shouldPreserveInput()) return;
parentContainer.cleanup();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof LinkedSegmentsDescriptor)) {
return false;
}
return super.equals(o);
}
} //SequenceFile.Sorter.LinkedSegmentsDescriptor
/** The class that defines a container for segments to be merged. Primarily
* required to delete temp files as soon as all the contained segments
* have been looked at */
private class SegmentContainer {
private int numSegmentsCleanedUp = 0; //track the no. of segment cleanups
private int numSegmentsContained; //# of segments contained
private Path inName; //input file from where segments are created
//the list of segments read from the file
private ArrayList <SegmentDescriptor> segments =
new ArrayList <SegmentDescriptor>();
/** This constructor is there primarily to serve the sort routine that
* generates a single output file with an associated index file */
public SegmentContainer(Path inName, Path indexIn) throws IOException {
//get the segments from indexIn
FSDataInputStream fsIndexIn = fs.open(indexIn);
long end = fs.getFileStatus(indexIn).getLen();
while (fsIndexIn.getPos() < end) {
long segmentOffset = WritableUtils.readVLong(fsIndexIn);
long segmentLength = WritableUtils.readVLong(fsIndexIn);
Path segmentName = inName;
segments.add(new LinkedSegmentsDescriptor(segmentOffset,
segmentLength, segmentName, this));
}
fsIndexIn.close();
fs.delete(indexIn, true);
numSegmentsContained = segments.size();
this.inName = inName;
}
public List <SegmentDescriptor> getSegmentList() {
return segments;
}
public void cleanup() throws IOException {
numSegmentsCleanedUp++;
if (numSegmentsCleanedUp == numSegmentsContained) {
fs.delete(inName, true);
}
}
} //SequenceFile.Sorter.SegmentContainer
} // SequenceFile.Sorter
} // SequenceFile
| 132,287 | 34.229827 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableFactories.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.ReflectionUtils;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/** Factories for non-public writables. Defining a factory permits {@link
* ObjectWritable} to be able to construct instances of non-public classes. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class WritableFactories {
private static final Map<Class, WritableFactory> CLASS_TO_FACTORY =
new ConcurrentHashMap<Class, WritableFactory>();
private WritableFactories() {} // singleton
/** Define a factory for a class. */
public static void setFactory(Class c, WritableFactory factory) {
CLASS_TO_FACTORY.put(c, factory);
}
  /** Return the factory previously defined for a class, or null if none. */
public static WritableFactory getFactory(Class c) {
return CLASS_TO_FACTORY.get(c);
}
/** Create a new instance of a class with a defined factory. */
public static Writable newInstance(Class<? extends Writable> c, Configuration conf) {
WritableFactory factory = WritableFactories.getFactory(c);
if (factory != null) {
Writable result = factory.newInstance();
if (result instanceof Configurable) {
((Configurable) result).setConf(conf);
}
return result;
} else {
return ReflectionUtils.newInstance(c, conf);
}
}
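  /*
   * A typical registration sketch: a non-public Writable registers its
   * factory from a static initializer (MyWritable is a made-up example
   * class):
   *
   *   static {
   *     WritableFactories.setFactory(MyWritable.class, new WritableFactory() {
   *       @Override
   *       public Writable newInstance() { return new MyWritable(); }
   *     });
   *   }
   */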
/** Create a new instance of a class with a defined factory. */
public static Writable newInstance(Class<? extends Writable> c) {
return newInstance(c, null);
}
}
| 2,483 | 35 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for floats. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FloatWritable implements WritableComparable<FloatWritable> {
private float value;
public FloatWritable() {}
public FloatWritable(float value) { set(value); }
/** Set the value of this FloatWritable. */
public void set(float value) { this.value = value; }
/** Return the value of this FloatWritable. */
public float get() { return value; }
@Override
public void readFields(DataInput in) throws IOException {
value = in.readFloat();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeFloat(value);
}
/** Returns true iff <code>o</code> is a FloatWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof FloatWritable))
return false;
FloatWritable other = (FloatWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return Float.floatToIntBits(value);
}
/** Compares two FloatWritables. */
@Override
public int compareTo(FloatWritable o) {
float thisValue = this.value;
float thatValue = o.value;
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
@Override
public String toString() {
return Float.toString(value);
}
/** A Comparator optimized for FloatWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(FloatWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
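      // Compare directly on the serialized bytes: each key is a single
      // big-endian 4-byte float, so no Writable objects are instantiated.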
float thisValue = readFloat(b1, s1);
float thatValue = readFloat(b2, s2);
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
}
static { // register this comparator
WritableComparator.define(FloatWritable.class, new Comparator());
}
}
| 2,943 | 28.737374 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VIntWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for integer values stored in variable-length format.
* Such values take between one and five bytes. Smaller values take fewer bytes.
*
* @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class VIntWritable implements WritableComparable<VIntWritable> {
private int value;
public VIntWritable() {}
public VIntWritable(int value) { set(value); }
/** Set the value of this VIntWritable. */
public void set(int value) { this.value = value; }
/** Return the value of this VIntWritable. */
public int get() { return value; }
@Override
public void readFields(DataInput in) throws IOException {
value = WritableUtils.readVInt(in);
}
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, value);
}
/** Returns true iff <code>o</code> is a VIntWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof VIntWritable))
return false;
VIntWritable other = (VIntWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return value;
}
/** Compares two VIntWritables. */
@Override
public int compareTo(VIntWritable o) {
int thisValue = this.value;
int thatValue = o.value;
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
@Override
public String toString() {
return Integer.toString(value);
}
}
| 2,502 | 28.447059 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/** A file-based set of keys. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SetFile extends MapFile {
protected SetFile() {} // no public ctor
/**
* Write a new set file.
*/
public static class Writer extends MapFile.Writer {
/** Create the named set for keys of the named class.
* @deprecated pass a Configuration too
*/
public Writer(FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass) throws IOException {
super(new Configuration(), fs, dirName, keyClass, NullWritable.class);
}
/** Create a set naming the element class and compression type. */
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass,
SequenceFile.CompressionType compress)
throws IOException {
this(conf, fs, dirName, WritableComparator.get(keyClass, conf), compress);
}
/** Create a set naming the element comparator and compression type. */
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator,
SequenceFile.CompressionType compress) throws IOException {
super(conf, new Path(dirName),
comparator(comparator),
valueClass(NullWritable.class),
compression(compress));
}
/** Append a key to a set. The key must be strictly greater than the
* previous key added to the set. */
public void append(WritableComparable key) throws IOException{
append(key, NullWritable.get());
}
}
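  /*
   * A minimal usage sketch (the directory name and keys are only examples;
   * keys must be appended in strictly increasing order):
   *
   *   Configuration conf = new Configuration();
   *   FileSystem fs = FileSystem.get(conf);
   *   SetFile.Writer writer = new SetFile.Writer(conf, fs, "/tmp/myset",
   *       Text.class, SequenceFile.CompressionType.NONE);
   *   writer.append(new Text("alpha"));
   *   writer.append(new Text("beta"));
   *   writer.close();
   *
   *   SetFile.Reader reader = new SetFile.Reader(fs, "/tmp/myset", conf);
   *   boolean present = reader.seek(new Text("beta"));   // true
   *   reader.close();
   */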
/** Provide access to an existing set file. */
public static class Reader extends MapFile.Reader {
/** Construct a set reader for the named set.*/
public Reader(FileSystem fs, String dirName, Configuration conf) throws IOException {
super(fs, dirName, conf);
}
/** Construct a set reader for the named set using the named comparator.*/
public Reader(FileSystem fs, String dirName, WritableComparator comparator, Configuration conf)
throws IOException {
super(new Path(dirName), conf, comparator(comparator));
}
// javadoc inherited
@Override
public boolean seek(WritableComparable key)
throws IOException {
return super.seek(key);
}
/** Read the next key in a set into <code>key</code>. Returns
* true if such a key exists and false when at the end of the set. */
public boolean next(WritableComparable key)
throws IOException {
return next(key, NullWritable.get());
}
/** Read the matching key from a set into <code>key</code>.
* Returns <code>key</code>, or null if no match exists. */
public WritableComparable get(WritableComparable key)
throws IOException {
if (seek(key)) {
next(key);
return key;
} else
return null;
}
}
}
| 4,056 | 33.974138 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
@InterfaceAudience.Public
@InterfaceStability.Stable
public final class WritableUtils {
public static byte[] readCompressedByteArray(DataInput in) throws IOException {
int length = in.readInt();
if (length == -1) return null;
byte[] buffer = new byte[length];
in.readFully(buffer); // could/should use readFully(buffer,0,length)?
GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buffer, 0, buffer.length));
byte[] outbuf = new byte[length];
ByteArrayOutputStream bos = new ByteArrayOutputStream();
int len;
while((len=gzi.read(outbuf, 0, outbuf.length)) != -1){
bos.write(outbuf, 0, len);
}
byte[] decompressed = bos.toByteArray();
bos.close();
gzi.close();
return decompressed;
}
public static void skipCompressedByteArray(DataInput in) throws IOException {
int length = in.readInt();
if (length != -1) {
skipFully(in, length);
}
}
public static int writeCompressedByteArray(DataOutput out,
byte[] bytes) throws IOException {
if (bytes != null) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
GZIPOutputStream gzout = new GZIPOutputStream(bos);
try {
gzout.write(bytes, 0, bytes.length);
gzout.close();
gzout = null;
} finally {
IOUtils.closeStream(gzout);
}
byte[] buffer = bos.toByteArray();
int len = buffer.length;
out.writeInt(len);
out.write(buffer, 0, len);
/* debug only! Once we have confidence, can lose this. */
return ((bytes.length != 0) ? (100*buffer.length)/bytes.length : 0);
} else {
out.writeInt(-1);
return -1;
}
}
/* Ugly utility, maybe someone else can do this better */
public static String readCompressedString(DataInput in) throws IOException {
byte[] bytes = readCompressedByteArray(in);
if (bytes == null) return null;
return new String(bytes, "UTF-8");
}
public static int writeCompressedString(DataOutput out, String s) throws IOException {
return writeCompressedByteArray(out, (s != null) ? s.getBytes("UTF-8") : null);
}
  /*
   * Write a String as a network-order int n, followed by n bytes.
   * An alternative to the 16-bit-length read/writeUTF.
   * The encoding used is UTF-8.
   */
public static void writeString(DataOutput out, String s) throws IOException {
if (s != null) {
byte[] buffer = s.getBytes("UTF-8");
int len = buffer.length;
out.writeInt(len);
out.write(buffer, 0, len);
} else {
out.writeInt(-1);
}
}
  /*
   * Read a String written as a network-order int n, followed by n bytes.
   * An alternative to the 16-bit-length read/writeUTF.
   * The encoding used is UTF-8.
   */
public static String readString(DataInput in) throws IOException{
int length = in.readInt();
if (length == -1) return null;
byte[] buffer = new byte[length];
in.readFully(buffer); // could/should use readFully(buffer,0,length)?
return new String(buffer,"UTF-8");
}
/*
   * Write a String array as a Network Int N, followed by N Byte Array Strings.
* Could be generalised using introspection.
*
*/
public static void writeStringArray(DataOutput out, String[] s) throws IOException{
out.writeInt(s.length);
for(int i = 0; i < s.length; i++) {
writeString(out, s[i]);
}
}
/*
   * Write a String array as a Network Int N, followed by N Byte Arrays of
* compressed Strings. Handles also null arrays and null values.
* Could be generalised using introspection.
*
*/
public static void writeCompressedStringArray(DataOutput out, String[] s) throws IOException{
if (s == null) {
out.writeInt(-1);
return;
}
out.writeInt(s.length);
for(int i = 0; i < s.length; i++) {
writeCompressedString(out, s[i]);
}
}
/*
   * Read a String array written as a Network Int N, followed by N Byte Array Strings.
* Could be generalised using introspection. Actually this bit couldn't...
*
*/
public static String[] readStringArray(DataInput in) throws IOException {
int len = in.readInt();
if (len == -1) return null;
String[] s = new String[len];
for(int i = 0; i < len; i++) {
s[i] = readString(in);
}
return s;
}
/*
   * Read a compressed String array written as a Network Int N, followed by N compressed Strings.
* Could be generalised using introspection. Handles null arrays and null values.
*
*/
public static String[] readCompressedStringArray(DataInput in) throws IOException {
int len = in.readInt();
if (len == -1) return null;
String[] s = new String[len];
for(int i = 0; i < len; i++) {
s[i] = readCompressedString(in);
}
return s;
}
  /*
   * Test utility method: display a byte array as hex.
   */
public static void displayByteArray(byte[] record){
int i;
for(i=0;i < record.length -1; i++){
if (i % 16 == 0) { System.out.println(); }
System.out.print(Integer.toHexString(record[i] >> 4 & 0x0F));
System.out.print(Integer.toHexString(record[i] & 0x0F));
System.out.print(",");
}
System.out.print(Integer.toHexString(record[i] >> 4 & 0x0F));
System.out.print(Integer.toHexString(record[i] & 0x0F));
System.out.println();
}
/**
* Make a copy of a writable object using serialization to a buffer.
* @param orig The object to copy
* @return The copied object
*/
public static <T extends Writable> T clone(T orig, Configuration conf) {
try {
@SuppressWarnings("unchecked") // Unchecked cast from Class to Class<T>
T newInst = ReflectionUtils.newInstance((Class<T>) orig.getClass(), conf);
ReflectionUtils.copy(conf, orig, newInst);
return newInst;
} catch (IOException e) {
throw new RuntimeException("Error writing/reading clone buffer", e);
}
}
/**
   * Make a copy of the writable object using serialization to a buffer.
   * @param dst the object to copy into, whose previous contents are overwritten
   * @param src the object to copy from
* @throws IOException
* @deprecated use ReflectionUtils.cloneInto instead.
*/
@Deprecated
public static void cloneInto(Writable dst, Writable src) throws IOException {
ReflectionUtils.cloneWritableInto(dst, src);
}
/**
* Serializes an integer to a binary stream with zero-compressed encoding.
   * For -112 <= i <= 127, only one byte is used with the actual value.
   * For other values of i, the first byte value indicates whether the
   * integer is positive or negative, and the number of bytes that follow.
   * If the first byte value v is between -113 and -120, the following integer
   * is positive, and the number of bytes that follow is -(v+112).
   * If the first byte value v is between -121 and -128, the following integer
   * is negative, and the number of bytes that follow is -(v+120). Bytes are
   * stored in the high-non-zero-byte-first order.
*
* @param stream Binary output stream
* @param i Integer to be serialized
* @throws java.io.IOException
*/
public static void writeVInt(DataOutput stream, int i) throws IOException {
writeVLong(stream, i);
}
/**
* Serializes a long to a binary stream with zero-compressed encoding.
* For -112 <= i <= 127, only one byte is used with the actual value.
* For other values of i, the first byte value indicates whether the
* long is positive or negative, and the number of bytes that follow.
   * If the first byte value v is between -113 and -120, the following long
   * is positive, and the number of bytes that follow is -(v+112).
   * If the first byte value v is between -121 and -128, the following long
   * is negative, and the number of bytes that follow is -(v+120). Bytes are
* stored in the high-non-zero-byte-first order.
*
* @param stream Binary output stream
* @param i Long to be serialized
* @throws java.io.IOException
*/
public static void writeVLong(DataOutput stream, long i) throws IOException {
if (i >= -112 && i <= 127) {
stream.writeByte((byte)i);
return;
}
int len = -112;
if (i < 0) {
      i ^= -1L; // take one's complement
len = -120;
}
long tmp = i;
while (tmp != 0) {
tmp = tmp >> 8;
len--;
}
stream.writeByte((byte)len);
len = (len < -120) ? -(len + 120) : -(len + 112);
for (int idx = len; idx != 0; idx--) {
int shiftbits = (idx - 1) * 8;
long mask = 0xFFL << shiftbits;
stream.writeByte((byte)((i & mask) >> shiftbits));
}
}
/**
* Reads a zero-compressed encoded long from input stream and returns it.
* @param stream Binary input stream
* @throws java.io.IOException
* @return deserialized long from stream.
*/
public static long readVLong(DataInput stream) throws IOException {
byte firstByte = stream.readByte();
int len = decodeVIntSize(firstByte);
if (len == 1) {
return firstByte;
}
long i = 0;
for (int idx = 0; idx < len-1; idx++) {
byte b = stream.readByte();
i = i << 8;
i = i | (b & 0xFF);
}
return (isNegativeVInt(firstByte) ? (i ^ -1L) : i);
}
/**
* Reads a zero-compressed encoded integer from input stream and returns it.
* @param stream Binary input stream
* @throws java.io.IOException
* @return deserialized integer from stream.
*/
public static int readVInt(DataInput stream) throws IOException {
long n = readVLong(stream);
if ((n > Integer.MAX_VALUE) || (n < Integer.MIN_VALUE)) {
throw new IOException("value too long to fit in integer");
}
return (int)n;
}
/**
* Reads an integer from the input stream and returns it.
*
* This function validates that the integer is between [lower, upper],
* inclusive.
*
* @param stream Binary input stream
* @throws java.io.IOException
* @return deserialized integer from stream
*/
public static int readVIntInRange(DataInput stream, int lower, int upper)
throws IOException {
long n = readVLong(stream);
if (n < lower) {
if (lower == 0) {
throw new IOException("expected non-negative integer, got " + n);
} else {
throw new IOException("expected integer greater than or equal to " +
lower + ", got " + n);
}
}
if (n > upper) {
      throw new IOException("expected integer less than or equal to " + upper +
", got " + n);
}
return (int)n;
}
/**
* Given the first byte of a vint/vlong, determine the sign
* @param value the first byte
* @return is the value negative
*/
public static boolean isNegativeVInt(byte value) {
return value < -120 || (value >= -112 && value < 0);
}
/**
* Parse the first byte of a vint/vlong to determine the number of bytes
* @param value the first byte of the vint/vlong
* @return the total number of bytes (1 to 9)
*/
public static int decodeVIntSize(byte value) {
if (value >= -112) {
return 1;
} else if (value < -120) {
return -119 - value;
}
return -111 - value;
}
/**
* Get the encoded length if an integer is stored in a variable-length format
* @return the encoded length
*/
public static int getVIntSize(long i) {
if (i >= -112 && i <= 127) {
return 1;
}
if (i < 0) {
      i ^= -1L; // take one's complement
}
// find the number of bytes with non-leading zeros
int dataBits = Long.SIZE - Long.numberOfLeadingZeros(i);
// find the number of data bytes + length byte
return (dataBits + 7) / 8 + 1;
}
/**
* Read an Enum value from DataInput, Enums are read and written
* using String values.
* @param <T> Enum type
* @param in DataInput to read from
* @param enumType Class type of Enum
* @return Enum represented by String read from DataInput
* @throws IOException
*/
public static <T extends Enum<T>> T readEnum(DataInput in, Class<T> enumType)
throws IOException{
return T.valueOf(enumType, Text.readString(in));
}
/**
* writes String value of enum to DataOutput.
   * @param out DataOutput stream
* @param enumVal enum value
* @throws IOException
*/
public static void writeEnum(DataOutput out, Enum<?> enumVal)
throws IOException{
Text.writeString(out, enumVal.name());
}
/**
   * Skip <i>len</i> number of bytes in input stream <i>in</i>.
   * @param in input stream
   * @param len number of bytes to skip
   * @throws IOException when fewer than <i>len</i> bytes could be skipped
*/
public static void skipFully(DataInput in, int len) throws IOException {
int total = 0;
int cur = 0;
while ((total<len) && ((cur = in.skipBytes(len-total)) > 0)) {
total += cur;
}
if (total<len) {
throw new IOException("Not able to skip " + len + " bytes, possibly " +
"due to end of input.");
}
}
/** Convert writables to a byte array */
public static byte[] toByteArray(Writable... writables) {
final DataOutputBuffer out = new DataOutputBuffer();
try {
for(Writable w : writables) {
w.write(out);
}
out.close();
} catch (IOException e) {
      throw new RuntimeException("Failed to convert writables to a byte array", e);
}
return out.getData();
}
/**
* Read a string, but check it for sanity. The format consists of a vint
* followed by the given number of bytes.
* @param in the stream to read from
* @param maxLength the largest acceptable length of the encoded string
* @return the bytes as a string
* @throws IOException if reading from the DataInput fails
* @throws IllegalArgumentException if the encoded byte size for string
             is negative or larger than maxLength. Only the vint is read.
*/
public static String readStringSafely(DataInput in,
int maxLength
) throws IOException,
IllegalArgumentException {
int length = readVInt(in);
if (length < 0 || length > maxLength) {
throw new IllegalArgumentException("Encoded byte size for String was " + length +
", which is outside of 0.." +
maxLength + " range.");
}
byte [] bytes = new byte[length];
in.readFully(bytes, 0, length);
return Text.decode(bytes);
}
}
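
// Editorial usage sketch, not part of the original Hadoop source: it round-trips
// a long through the zero-compressed VLong encoding and a UTF-8 String through
// the length-prefixed format documented above. The class name is hypothetical.
class WritableUtilsExample {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    WritableUtils.writeVLong(out, 300L);       // 1 length byte + 2 data bytes
    WritableUtils.writeString(out, "hello");   // 4-byte length followed by UTF-8 bytes

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    long v = WritableUtils.readVLong(in);      // 300
    String s = WritableUtils.readString(in);   // "hello"
    System.out.println(v + " " + s + " took " + out.getLength() + " bytes");
  }
}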
| 15,707 | 31.320988 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/RawComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.serializer.DeserializerComparator;
/**
* <p>
* A {@link Comparator} that operates directly on byte representations of
* objects.
* </p>
* @param <T>
* @see DeserializerComparator
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface RawComparator<T> extends Comparator<T> {
/**
* Compare two objects in binary.
* b1[s1:l1] is the first object, and b2[s2:l2] is the second object.
*
* @param b1 The first byte array.
* @param s1 The position index in b1. The object under comparison's starting index.
* @param l1 The length of the object in b1.
* @param b2 The second byte array.
* @param s2 The position index in b2. The object under comparison's starting index.
* @param l2 The length of the object under comparison in b2.
* @return An integer result of the comparison.
*/
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2);
}
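
// Editorial sketch, not part of the original Hadoop source: a minimal
// RawComparator over fixed-width 8-byte big-endian longs, illustrating how a
// comparator can order records by their serialized bytes without deserializing
// them. The class name FixedLongRawComparator is hypothetical.
class FixedLongRawComparator implements RawComparator<Long> {
  @Override
  public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
    // Decode both 8-byte keys directly from the byte arrays.
    long v1 = WritableComparator.readLong(b1, s1);
    long v2 = WritableComparator.readLong(b2, s2);
    return Long.compare(v1, v2);
  }

  @Override
  public int compare(Long a, Long b) {
    return a.compareTo(b);
  }
}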
| 1,933 | 34.814815 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import com.google.common.collect.ComparisonChain;
import org.apache.commons.lang.builder.HashCodeBuilder;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This is a simple ByteBufferPool which just creates ByteBuffers as needed.
* It also caches ByteBuffers after they're released. It will always return
* the smallest cached buffer with at least the capacity you request.
* We don't try to do anything clever here like try to limit the maximum cache
* size.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public final class ElasticByteBufferPool implements ByteBufferPool {
private static final class Key implements Comparable<Key> {
private final int capacity;
private final long insertionTime;
Key(int capacity, long insertionTime) {
this.capacity = capacity;
this.insertionTime = insertionTime;
}
@Override
public int compareTo(Key other) {
return ComparisonChain.start().
compare(capacity, other.capacity).
compare(insertionTime, other.insertionTime).
result();
}
@Override
public boolean equals(Object rhs) {
if (rhs == null) {
return false;
}
try {
Key o = (Key)rhs;
return (compareTo(o) == 0);
} catch (ClassCastException e) {
return false;
}
}
@Override
public int hashCode() {
return new HashCodeBuilder().
append(capacity).
append(insertionTime).
toHashCode();
}
}
private final TreeMap<Key, ByteBuffer> buffers =
new TreeMap<Key, ByteBuffer>();
private final TreeMap<Key, ByteBuffer> directBuffers =
new TreeMap<Key, ByteBuffer>();
private final TreeMap<Key, ByteBuffer> getBufferTree(boolean direct) {
return direct ? directBuffers : buffers;
}
@Override
public synchronized ByteBuffer getBuffer(boolean direct, int length) {
TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
Map.Entry<Key, ByteBuffer> entry =
tree.ceilingEntry(new Key(length, 0));
if (entry == null) {
return direct ? ByteBuffer.allocateDirect(length) :
ByteBuffer.allocate(length);
}
tree.remove(entry.getKey());
return entry.getValue();
}
@Override
public synchronized void putBuffer(ByteBuffer buffer) {
TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
while (true) {
Key key = new Key(buffer.capacity(), System.nanoTime());
if (!tree.containsKey(key)) {
tree.put(key, buffer);
return;
}
// Buffers are indexed by (capacity, time).
// If our key is not unique on the first try, we try again, since the
// time will be different. Since we use nanoseconds, it's pretty
// unlikely that we'll loop even once, unless the system clock has a
// poor granularity.
}
}
}
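
// Editorial usage sketch, not part of the original Hadoop source: borrow a
// buffer, use it, and return it so that a later request can reuse it. The
// class name and sizes are hypothetical.
class ElasticByteBufferPoolExample {
  public static void main(String[] args) {
    ElasticByteBufferPool pool = new ElasticByteBufferPool();
    ByteBuffer buf = pool.getBuffer(false, 4096);   // heap buffer, capacity >= 4096
    buf.put((byte) 42);
    buf.clear();                                    // reset before returning to the pool
    pool.putBuffer(buf);
    ByteBuffer again = pool.getBuffer(false, 1024); // smallest cached buffer that fits
    System.out.println(again.capacity());           // 4096: the cached buffer was reused
  }
}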
| 3,865 | 31.487395 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputByteBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataOutputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.ArrayList;
import java.util.LinkedList;
public class DataOutputByteBuffer extends DataOutputStream {
static class Buffer extends OutputStream {
final byte[] b = new byte[1];
final boolean direct;
final List<ByteBuffer> active = new ArrayList<ByteBuffer>();
final List<ByteBuffer> inactive = new LinkedList<ByteBuffer>();
int size;
int length;
ByteBuffer current;
Buffer(int size, boolean direct) {
this.direct = direct;
this.size = size;
current = direct
? ByteBuffer.allocateDirect(size)
: ByteBuffer.allocate(size);
}
@Override
public void write(int b) {
this.b[0] = (byte)(b & 0xFF);
write(this.b);
}
@Override
public void write(byte[] b) {
write(b, 0, b.length);
}
@Override
public void write(byte[] b, int off, int len) {
int rem = current.remaining();
while (len > rem) {
current.put(b, off, rem);
length += rem;
current.flip();
active.add(current);
off += rem;
len -= rem;
rem = getBuffer(len);
}
current.put(b, off, len);
length += len;
}
int getBuffer(int newsize) {
if (inactive.isEmpty()) {
size = Math.max(size << 1, newsize);
current = direct
? ByteBuffer.allocateDirect(size)
: ByteBuffer.allocate(size);
} else {
current = inactive.remove(0);
}
return current.remaining();
}
ByteBuffer[] getData() {
ByteBuffer[] ret = active.toArray(new ByteBuffer[active.size() + 1]);
ByteBuffer tmp = current.duplicate();
tmp.flip();
ret[ret.length - 1] = tmp.slice();
return ret;
}
int getLength() {
return length;
}
void reset() {
length = 0;
current.rewind();
inactive.add(0, current);
for (int i = active.size() - 1; i >= 0; --i) {
ByteBuffer b = active.remove(i);
b.rewind();
inactive.add(0, b);
}
current = inactive.remove(0);
}
}
private final Buffer buffers;
public DataOutputByteBuffer() {
this(32);
}
public DataOutputByteBuffer(int size) {
this(size, false);
}
public DataOutputByteBuffer(int size, boolean direct) {
this(new Buffer(size, direct));
}
private DataOutputByteBuffer(Buffer buffers) {
super(buffers);
this.buffers = buffers;
}
public ByteBuffer[] getData() {
return buffers.getData();
}
public int getLength() {
return buffers.getLength();
}
public void reset() {
this.written = 0;
buffers.reset();
}
}
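
// Editorial usage sketch, not part of the original Hadoop source: write a few
// values, inspect the backing ByteBuffers, then reset for reuse. The class
// name is hypothetical.
class DataOutputByteBufferExample {
  public static void main(String[] args) throws java.io.IOException {
    DataOutputByteBuffer out = new DataOutputByteBuffer(16);
    out.writeInt(7);
    out.writeUTF("payload");
    int total = 0;
    for (ByteBuffer b : out.getData()) {  // buffers are returned flipped for reading
      total += b.remaining();
    }
    System.out.println(total == out.getLength());  // true
    out.reset();                                   // reuse the same buffers next time
  }
}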
| 3,595 | 25.057971 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/NullWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Singleton Writable with no data. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NullWritable implements WritableComparable<NullWritable> {
private static final NullWritable THIS = new NullWritable();
private NullWritable() {} // no public ctor
/** Returns the single instance of this class. */
public static NullWritable get() { return THIS; }
@Override
public String toString() {
return "(null)";
}
@Override
public int hashCode() { return 0; }
@Override
public int compareTo(NullWritable other) {
return 0;
}
@Override
public boolean equals(Object other) { return other instanceof NullWritable; }
@Override
public void readFields(DataInput in) throws IOException {}
@Override
public void write(DataOutput out) throws IOException {}
/** A Comparator "optimized" for NullWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(NullWritable.class);
}
/**
* Compare the buffers in serialized form.
*/
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
assert 0 == l1;
assert 0 == l2;
return 0;
}
}
static { // register this comparator
WritableComparator.define(NullWritable.class, new Comparator());
}
}
| 2,409 | 29.125 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VLongWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for longs in a variable-length format. Such values take
 * between one and nine bytes. Smaller values take fewer bytes.
*
* @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class VLongWritable implements WritableComparable<VLongWritable> {
private long value;
public VLongWritable() {}
public VLongWritable(long value) { set(value); }
  /** Set the value of this VLongWritable. */
  public void set(long value) { this.value = value; }
  /** Return the value of this VLongWritable. */
public long get() { return value; }
@Override
public void readFields(DataInput in) throws IOException {
value = WritableUtils.readVLong(in);
}
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVLong(out, value);
}
/** Returns true iff <code>o</code> is a VLongWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof VLongWritable))
return false;
VLongWritable other = (VLongWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return (int)value;
}
/** Compares two VLongWritables. */
@Override
public int compareTo(VLongWritable o) {
long thisValue = this.value;
long thatValue = o.value;
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
@Override
public String toString() {
return Long.toString(value);
}
}
| 2,510 | 28.541176 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MultipleIOException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Encapsulate a list of {@link IOException} into an {@link IOException} */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleIOException extends IOException {
  /** Required by {@link java.io.Serializable} */
private static final long serialVersionUID = 1L;
private final List<IOException> exceptions;
/** Constructor is private, use {@link #createIOException(List)}. */
private MultipleIOException(List<IOException> exceptions) {
super(exceptions.size() + " exceptions " + exceptions);
this.exceptions = exceptions;
}
/** @return the underlying exceptions */
public List<IOException> getExceptions() {return exceptions;}
/** A convenient method to create an {@link IOException}. */
public static IOException createIOException(List<IOException> exceptions) {
if (exceptions == null || exceptions.isEmpty()) {
return null;
}
if (exceptions.size() == 1) {
return exceptions.get(0);
}
return new MultipleIOException(exceptions);
}
/**
* Build an {@link IOException} using {@link MultipleIOException}
* if there are more than one.
*/
public static class Builder {
private List<IOException> exceptions;
/** Add the given {@link Throwable} to the exception list. */
public void add(Throwable t) {
if (exceptions == null) {
exceptions = new ArrayList<>();
}
exceptions.add(t instanceof IOException? (IOException)t
: new IOException(t));
}
/**
* @return null if nothing is added to this builder;
* otherwise, return an {@link IOException}
*/
public IOException build() {
return createIOException(exceptions);
}
}
}
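
// Editorial usage sketch, not part of the original Hadoop source: close several
// resources, collect every failure, and surface them as one IOException. The
// class and method names are hypothetical.
class MultipleIOExceptionExample {
  static void closeAll(java.io.Closeable... resources) throws IOException {
    MultipleIOException.Builder builder = new MultipleIOException.Builder();
    for (java.io.Closeable c : resources) {
      try {
        c.close();
      } catch (Throwable t) {
        builder.add(t);                 // non-IOExceptions are wrapped automatically
      }
    }
    IOException e = builder.build();    // null if every close() succeeded
    if (e != null) {
      throw e;
    }
  }
}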
| 2,753 | 33 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Stringifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Stringifier interface offers two methods to convert an object
* to a string representation and restore the object given its
* string representation.
* @param <T> the class of the objects to stringify
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Stringifier<T> extends java.io.Closeable {
/**
* Converts the object to a string representation
* @param obj the object to convert
* @return the string representation of the object
* @throws IOException if the object cannot be converted
*/
public String toString(T obj) throws IOException;
/**
* Restores the object from its string representation.
* @param str the string representation of the object
* @return restored object
* @throws IOException if the object cannot be restored
*/
public T fromString(String str) throws IOException;
/**
* Closes this object.
* @throws IOException if an I/O error occurs
* */
@Override
public void close() throws IOException;
}
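
// Editorial sketch, not part of the original Hadoop source: a trivial
// Stringifier for Integer values, showing the round-trip contract of the
// interface. The class name IntegerStringifier is hypothetical.
class IntegerStringifier implements Stringifier<Integer> {
  @Override
  public String toString(Integer obj) throws IOException {
    return Integer.toString(obj);
  }

  @Override
  public Integer fromString(String str) throws IOException {
    try {
      return Integer.valueOf(str);
    } catch (NumberFormatException e) {
      throw new IOException("Cannot restore an Integer from: " + str, e);
    }
  }

  @Override
  public void close() throws IOException {
    // nothing to release
  }
}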
| 2,009 | 31.95082 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.AbstractCollection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/** A Writable wrapper for EnumSet. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class EnumSetWritable<E extends Enum<E>> extends AbstractCollection<E>
implements Writable, Configurable {
private EnumSet<E> value;
private transient Class<E> elementType;
private transient Configuration conf;
EnumSetWritable() {
}
@Override
public Iterator<E> iterator() { return value.iterator(); }
@Override
public int size() { return value.size(); }
@Override
public boolean add(E e) {
if (value == null) {
value = EnumSet.of(e);
set(value, null);
}
return value.add(e);
}
/**
* Construct a new EnumSetWritable. If the <tt>value</tt> argument is null or
* its size is zero, the <tt>elementType</tt> argument must not be null. If
* the argument <tt>value</tt>'s size is bigger than zero, the argument
   * <tt>elementType</tt> is not used.
*
* @param value
* @param elementType
*/
public EnumSetWritable(EnumSet<E> value, Class<E> elementType) {
set(value, elementType);
}
/**
* Construct a new EnumSetWritable. Argument <tt>value</tt> should not be null
* or empty.
*
* @param value
*/
public EnumSetWritable(EnumSet<E> value) {
this(value, null);
}
/**
   * Reset the EnumSetWritable with the specified
   * <tt>value</tt> and <tt>elementType</tt>. If the <tt>value</tt> argument
   * is null or its size is zero, the <tt>elementType</tt> argument must not be
   * null. If the argument <tt>value</tt>'s size is bigger than zero, the
   * argument <tt>elementType</tt> is not used.
*
* @param value
* @param elementType
*/
public void set(EnumSet<E> value, Class<E> elementType) {
if ((value == null || value.size() == 0)
&& (this.elementType == null && elementType == null)) {
throw new IllegalArgumentException(
"The EnumSet argument is null, or is an empty set but with no elementType provided.");
}
this.value = value;
if (value != null && value.size() > 0) {
Iterator<E> iterator = value.iterator();
this.elementType = iterator.next().getDeclaringClass();
} else if (elementType != null) {
this.elementType = elementType;
}
}
/** Return the value of this EnumSetWritable. */
public EnumSet<E> get() {
return value;
}
@Override
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
if (length == -1)
this.value = null;
else if (length == 0) {
this.elementType = (Class<E>) ObjectWritable.loadClass(conf,
WritableUtils.readString(in));
this.value = EnumSet.noneOf(this.elementType);
} else {
E first = (E) ObjectWritable.readObject(in, conf);
this.value = (EnumSet<E>) EnumSet.of(first);
for (int i = 1; i < length; i++)
this.value.add((E) ObjectWritable.readObject(in, conf));
}
}
@Override
public void write(DataOutput out) throws IOException {
if (this.value == null) {
out.writeInt(-1);
WritableUtils.writeString(out, this.elementType.getName());
} else {
Object[] array = this.value.toArray();
int length = array.length;
out.writeInt(length);
if (length == 0) {
if (this.elementType == null)
throw new UnsupportedOperationException(
"Unable to serialize empty EnumSet with no element type provided.");
WritableUtils.writeString(out, this.elementType.getName());
}
for (int i = 0; i < length; i++) {
ObjectWritable.writeObject(out, array[i], array[i].getClass(), conf);
}
}
}
/**
* Returns true if <code>o</code> is an EnumSetWritable with the same value,
* or both are null.
*/
@Override
public boolean equals(Object o) {
if (o == null) {
      throw new IllegalArgumentException("null argument passed in equals().");
}
if (!(o instanceof EnumSetWritable))
return false;
EnumSetWritable<?> other = (EnumSetWritable<?>) o;
if (this == o || (this.value == other.value))
return true;
if (this.value == null) // other.value must not be null if we reach here
return false;
return this.value.equals(other.value);
}
/**
   * Returns the class of all the elements of the underlying EnumSetWritable. It
* may return null.
*
* @return the element class
*/
public Class<E> getElementType() {
return elementType;
}
@Override
public int hashCode() {
if (value == null)
return 0;
return (int) value.hashCode();
}
@Override
public String toString() {
if (value == null)
return "(null)";
return value.toString();
}
@Override
public Configuration getConf() {
return this.conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
static {
WritableFactories.setFactory(EnumSetWritable.class, new WritableFactory() {
@SuppressWarnings("unchecked")
@Override
public Writable newInstance() {
return new EnumSetWritable();
}
});
}
}
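
// Editorial usage sketch, not part of the original Hadoop source: serialize an
// EnumSet and read it back through EnumSetWritable. The enum and class names
// are hypothetical.
class EnumSetWritableExample {
  enum Permission { READ, WRITE, EXECUTE }

  public static void main(String[] args) throws IOException {
    EnumSetWritable<Permission> out =
        new EnumSetWritable<Permission>(EnumSet.of(Permission.READ, Permission.WRITE));
    DataOutputBuffer buf = new DataOutputBuffer();
    out.write(buf);

    EnumSetWritable<Permission> back =
        new EnumSetWritable<Permission>(EnumSet.noneOf(Permission.class), Permission.class);
    back.setConf(new Configuration());   // wrapped elements are resolved via the conf
    DataInputBuffer in = new DataInputBuffer();
    in.reset(buf.getData(), buf.getLength());
    back.readFields(in);
    System.out.println(back.get());      // [READ, WRITE]
  }
}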
| 6,350 | 27.868182 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.FileDescriptor;
import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.nativeio.NativeIO;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Manages a pool of threads which can issue readahead requests on file descriptors.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReadaheadPool {
static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
private static final int POOL_SIZE = 4;
private static final int MAX_POOL_SIZE = 16;
private static final int CAPACITY = 1024;
private final ThreadPoolExecutor pool;
private static ReadaheadPool instance;
/**
* Return the singleton instance for the current process.
*/
public static ReadaheadPool getInstance() {
synchronized (ReadaheadPool.class) {
if (instance == null && NativeIO.isAvailable()) {
instance = new ReadaheadPool();
}
return instance;
}
}
private ReadaheadPool() {
pool = new ThreadPoolExecutor(POOL_SIZE, MAX_POOL_SIZE, 3L, TimeUnit.SECONDS,
new ArrayBlockingQueue<Runnable>(CAPACITY));
pool.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardOldestPolicy());
pool.setThreadFactory(new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Readahead Thread #%d")
.build());
}
/**
* Issue a request to readahead on the given file descriptor.
*
* @param identifier a textual identifier that will be used in error
* messages (e.g. the file name)
* @param fd the file descriptor to read ahead
* @param curPos the current offset at which reads are being issued
* @param readaheadLength the configured length to read ahead
* @param maxOffsetToRead the maximum offset that will be readahead
* (useful if, for example, only some segment of the file is
   *        requested by the user). Pass {@link Long#MAX_VALUE} to allow
* readahead to the end of the file.
* @param lastReadahead the result returned by the previous invocation
* of this function on this file descriptor, or null if this is
* the first call
* @return an object representing this outstanding request, or null
* if no readahead was performed
*/
public ReadaheadRequest readaheadStream(
String identifier,
FileDescriptor fd,
long curPos,
long readaheadLength,
long maxOffsetToRead,
ReadaheadRequest lastReadahead) {
Preconditions.checkArgument(curPos <= maxOffsetToRead,
"Readahead position %s higher than maxOffsetToRead %s",
curPos, maxOffsetToRead);
if (readaheadLength <= 0) {
return null;
}
long lastOffset = Long.MIN_VALUE;
if (lastReadahead != null) {
lastOffset = lastReadahead.getOffset();
}
// trigger each readahead when we have reached the halfway mark
// in the previous readahead. This gives the system time
// to satisfy the readahead before we start reading the data.
long nextOffset = lastOffset + readaheadLength / 2;
if (curPos >= nextOffset) {
// cancel any currently pending readahead, to avoid
// piling things up in the queue. Each reader should have at most
// one outstanding request in the queue.
if (lastReadahead != null) {
lastReadahead.cancel();
lastReadahead = null;
}
long length = Math.min(readaheadLength,
maxOffsetToRead - curPos);
if (length <= 0) {
// we've reached the end of the stream
return null;
}
return submitReadahead(identifier, fd, curPos, length);
} else {
return lastReadahead;
}
}
/**
* Submit a request to readahead on the given file descriptor.
* @param identifier a textual identifier used in error messages, etc.
* @param fd the file descriptor to readahead
* @param off the offset at which to start the readahead
* @param len the number of bytes to read
* @return an object representing this pending request
*/
public ReadaheadRequest submitReadahead(
String identifier, FileDescriptor fd, long off, long len) {
ReadaheadRequestImpl req = new ReadaheadRequestImpl(
identifier, fd, off, len);
pool.execute(req);
if (LOG.isTraceEnabled()) {
LOG.trace("submit readahead: " + req);
}
return req;
}
/**
* An outstanding readahead request that has been submitted to
* the pool. This request may be pending or may have been
* completed.
*/
public interface ReadaheadRequest {
/**
* Cancels the request for readahead. This should be used
* if the reader no longer needs the requested data, <em>before</em>
* closing the related file descriptor.
*
* It is safe to use even if the readahead request has already
* been fulfilled.
*/
public void cancel();
/**
* @return the requested offset
*/
public long getOffset();
/**
* @return the requested length
*/
public long getLength();
}
private static class ReadaheadRequestImpl implements Runnable, ReadaheadRequest {
private final String identifier;
private final FileDescriptor fd;
private final long off, len;
private volatile boolean canceled = false;
private ReadaheadRequestImpl(String identifier, FileDescriptor fd, long off, long len) {
this.identifier = identifier;
this.fd = fd;
this.off = off;
this.len = len;
}
@Override
public void run() {
if (canceled) return;
// There's a very narrow race here that the file will close right at
// this instant. But if that happens, we'll likely receive an EBADF
// error below, and see that it's canceled, ignoring the error.
// It's also possible that we'll end up requesting readahead on some
// other FD, which may be wasted work, but won't cause a problem.
try {
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
fd, off, len, POSIX_FADV_WILLNEED);
} catch (IOException ioe) {
if (canceled) {
// no big deal - the reader canceled the request and closed
// the file.
return;
}
LOG.warn("Failed readahead on " + identifier,
ioe);
}
}
@Override
public void cancel() {
canceled = true;
// We could attempt to remove it from the work queue, but that would
// add complexity. In practice, the work queues remain very short,
// so removing canceled requests has no gain.
}
@Override
public long getOffset() {
return off;
}
@Override
public long getLength() {
return len;
}
@Override
public String toString() {
return "ReadaheadRequestImpl [identifier='" + identifier + "', fd=" + fd
+ ", off=" + off + ", len=" + len + "]";
}
}
}
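
// Editorial usage sketch, not part of the original Hadoop source: issue
// readahead hints while sequentially reading a local file. The class name and
// the 4 MB readahead window are hypothetical choices.
class ReadaheadPoolExample {
  public static void main(String[] args) throws IOException {
    ReadaheadPool pool = ReadaheadPool.getInstance();  // null when native IO is unavailable
    if (pool == null || args.length == 0) {
      return;
    }
    java.io.FileInputStream in = new java.io.FileInputStream(args[0]);
    try {
      ReadaheadPool.ReadaheadRequest last = null;
      byte[] buf = new byte[64 * 1024];
      long pos = 0;
      int n;
      while ((n = in.read(buf)) > 0) {
        pos += n;
        last = pool.readaheadStream(args[0], in.getFD(), pos,
            4L * 1024 * 1024, Long.MAX_VALUE, last);
      }
      if (last != null) {
        last.cancel();   // drop the outstanding hint before closing the descriptor
      }
    } finally {
      in.close();
    }
  }
}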
| 8,241 | 32.504065 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
/** A Comparator for {@link WritableComparable}s.
*
 * <p>This base implementation uses the natural ordering. To define alternate
* orderings, override {@link #compare(WritableComparable,WritableComparable)}.
*
* <p>One may optimize compare-intensive operations by overriding
* {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
* provided to assist in optimized implementations of this method.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class WritableComparator implements RawComparator, Configurable {
private static final ConcurrentHashMap<Class, WritableComparator> comparators
= new ConcurrentHashMap<Class, WritableComparator>(); // registry
private Configuration conf;
/** For backwards compatibility. **/
public static WritableComparator get(Class<? extends WritableComparable> c) {
return get(c, null);
}
/** Get a comparator for a {@link WritableComparable} implementation. */
public static WritableComparator get(
Class<? extends WritableComparable> c, Configuration conf) {
WritableComparator comparator = comparators.get(c);
if (comparator == null) {
// force the static initializers to run
forceInit(c);
// look to see if it is defined now
comparator = comparators.get(c);
// if not, use the generic one
if (comparator == null) {
comparator = new WritableComparator(c, conf, true);
}
}
// Newly passed Configuration objects should be used.
ReflectionUtils.setConf(comparator, conf);
return comparator;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
/**
* Force initialization of the static members.
* As of Java 5, referencing a class doesn't force it to initialize. Since
* this class requires that the classes be initialized to declare their
* comparators, we force that initialization to happen.
* @param cls the class to initialize
*/
private static void forceInit(Class<?> cls) {
try {
Class.forName(cls.getName(), true, cls.getClassLoader());
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Can't initialize class " + cls, e);
}
}
/** Register an optimized comparator for a {@link WritableComparable}
* implementation. Comparators registered with this method must be
* thread-safe. */
public static void define(Class c, WritableComparator comparator) {
comparators.put(c, comparator);
}
private final Class<? extends WritableComparable> keyClass;
private final WritableComparable key1;
private final WritableComparable key2;
private final DataInputBuffer buffer;
protected WritableComparator() {
this(null);
}
/** Construct for a {@link WritableComparable} implementation. */
protected WritableComparator(Class<? extends WritableComparable> keyClass) {
this(keyClass, null, false);
}
protected WritableComparator(Class<? extends WritableComparable> keyClass,
boolean createInstances) {
this(keyClass, null, createInstances);
}
protected WritableComparator(Class<? extends WritableComparable> keyClass,
Configuration conf,
boolean createInstances) {
this.keyClass = keyClass;
this.conf = (conf != null) ? conf : new Configuration();
if (createInstances) {
key1 = newKey();
key2 = newKey();
buffer = new DataInputBuffer();
} else {
key1 = key2 = null;
buffer = null;
}
}
/** Returns the WritableComparable implementation class. */
public Class<? extends WritableComparable> getKeyClass() { return keyClass; }
/** Construct a new {@link WritableComparable} instance. */
public WritableComparable newKey() {
return ReflectionUtils.newInstance(keyClass, conf);
}
/** Optimization hook. Override this to make SequenceFile.Sorter's scream.
*
* <p>The default implementation reads the data into two {@link
* WritableComparable}s (using {@link
* Writable#readFields(DataInput)}, then calls {@link
* #compare(WritableComparable,WritableComparable)}.
*/
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
buffer.reset(b1, s1, l1); // parse key1
key1.readFields(buffer);
buffer.reset(b2, s2, l2); // parse key2
key2.readFields(buffer);
buffer.reset(null, 0, 0); // clean up reference
} catch (IOException e) {
throw new RuntimeException(e);
}
return compare(key1, key2); // compare them
}
/** Compare two WritableComparables.
*
* <p> The default implementation uses the natural ordering, calling {@link
* Comparable#compareTo(Object)}. */
@SuppressWarnings("unchecked")
public int compare(WritableComparable a, WritableComparable b) {
return a.compareTo(b);
}
@Override
public int compare(Object a, Object b) {
return compare((WritableComparable)a, (WritableComparable)b);
}
/** Lexicographic order of binary data. */
public static int compareBytes(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return FastByteComparisons.compareTo(b1, s1, l1, b2, s2, l2);
}
/** Compute hash for binary data. */
public static int hashBytes(byte[] bytes, int offset, int length) {
int hash = 1;
for (int i = offset; i < offset + length; i++)
hash = (31 * hash) + (int)bytes[i];
return hash;
}
/** Compute hash for binary data. */
public static int hashBytes(byte[] bytes, int length) {
return hashBytes(bytes, 0, length);
}
/** Parse an unsigned short from a byte array. */
public static int readUnsignedShort(byte[] bytes, int start) {
return (((bytes[start] & 0xff) << 8) +
((bytes[start+1] & 0xff)));
}
/** Parse an integer from a byte array. */
public static int readInt(byte[] bytes, int start) {
return (((bytes[start ] & 0xff) << 24) +
((bytes[start+1] & 0xff) << 16) +
((bytes[start+2] & 0xff) << 8) +
((bytes[start+3] & 0xff)));
}
/** Parse a float from a byte array. */
public static float readFloat(byte[] bytes, int start) {
return Float.intBitsToFloat(readInt(bytes, start));
}
/** Parse a long from a byte array. */
public static long readLong(byte[] bytes, int start) {
return ((long)(readInt(bytes, start)) << 32) +
(readInt(bytes, start+4) & 0xFFFFFFFFL);
}
/** Parse a double from a byte array. */
public static double readDouble(byte[] bytes, int start) {
return Double.longBitsToDouble(readLong(bytes, start));
}
/**
* Reads a zero-compressed encoded long from a byte array and returns it.
   * @param bytes byte array containing the encoded long
* @param start starting index
* @throws java.io.IOException
* @return deserialized long
*/
public static long readVLong(byte[] bytes, int start) throws IOException {
int len = bytes[start];
if (len >= -112) {
return len;
}
boolean isNegative = (len < -120);
len = isNegative ? -(len + 120) : -(len + 112);
if (start+1+len>bytes.length)
throw new IOException(
"Not enough number of bytes for a zero-compressed integer");
long i = 0;
for (int idx = 0; idx < len; idx++) {
i = i << 8;
i = i | (bytes[start+1+idx] & 0xFF);
}
return (isNegative ? (i ^ -1L) : i);
}
/**
* Reads a zero-compressed encoded integer from a byte array and returns it.
* @param bytes byte array with the encoded integer
* @param start start index
* @throws java.io.IOException
* @return deserialized integer
*/
public static int readVInt(byte[] bytes, int start) throws IOException {
return (int) readVLong(bytes, start);
}
}
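
// Editorial usage sketch, not part of the original Hadoop source: fetch the
// registered comparator for LongWritable and compare two serialized keys
// without deserializing them. The class name is hypothetical.
class WritableComparatorExample {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer a = new DataOutputBuffer();
    new LongWritable(10L).write(a);
    DataOutputBuffer b = new DataOutputBuffer();
    new LongWritable(42L).write(b);

    WritableComparator cmp = WritableComparator.get(LongWritable.class);
    int result = cmp.compare(a.getData(), 0, a.getLength(),
                             b.getData(), 0, b.getLength());
    System.out.println(result < 0);   // true: 10 sorts before 42
  }
}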
| 9,183 | 32.889299 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/OutputBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A reusable {@link OutputStream} implementation that writes to an in-memory
* buffer.
*
* <p>This saves memory over creating a new OutputStream and
* ByteArrayOutputStream each time data is written.
*
* <p>Typical usage is something like the following:<pre>
*
* OutputBuffer buffer = new OutputBuffer();
* while (... loop condition ...) {
* buffer.reset();
* ... write buffer using OutputStream methods ...
* byte[] data = buffer.getData();
* int dataLength = buffer.getLength();
* ... write data to its ultimate destination ...
* }
* </pre>
* @see DataOutputBuffer
* @see InputBuffer
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class OutputBuffer extends FilterOutputStream {
private static class Buffer extends ByteArrayOutputStream {
public byte[] getData() { return buf; }
public int getLength() { return count; }
@Override
public void reset() { count = 0; }
public void write(InputStream in, int len) throws IOException {
int newcount = count + len;
if (newcount > buf.length) {
byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)];
System.arraycopy(buf, 0, newbuf, 0, count);
buf = newbuf;
}
IOUtils.readFully(in, buf, count, len);
count = newcount;
}
}
private Buffer buffer;
/** Constructs a new empty buffer. */
public OutputBuffer() {
this(new Buffer());
}
private OutputBuffer(Buffer buffer) {
super(buffer);
this.buffer = buffer;
}
/** Returns the current contents of the buffer.
* Data is only valid to {@link #getLength()}.
*/
public byte[] getData() { return buffer.getData(); }
/** Returns the length of the valid data currently in the buffer. */
public int getLength() { return buffer.getLength(); }
/** Resets the buffer to empty. */
public OutputBuffer reset() {
buffer.reset();
return this;
}
/** Writes bytes from a InputStream directly into the buffer. */
public void write(InputStream in, int length) throws IOException {
buffer.write(in, length);
}
}
| 3,104 | 30.363636 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/CompressedWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.DataInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ByteArrayInputStream;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A base-class for Writables which store themselves compressed and lazily
* inflate on field access. This is useful for large objects whose fields are
 * not altered during a map or reduce operation: leaving the field data
* compressed makes copying the instance from one file to another much
* faster. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CompressedWritable implements Writable {
// if non-null, the compressed field data of this instance.
private byte[] compressed;
public CompressedWritable() {}
@Override
public final void readFields(DataInput in) throws IOException {
compressed = new byte[in.readInt()];
in.readFully(compressed, 0, compressed.length);
}
/** Must be called by all methods which access fields to ensure that the data
* has been uncompressed. */
protected void ensureInflated() {
if (compressed != null) {
try {
ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
DataInput inflater =
new DataInputStream(new InflaterInputStream(deflated));
readFieldsCompressed(inflater);
compressed = null;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/** Subclasses implement this instead of {@link #readFields(DataInput)}. */
protected abstract void readFieldsCompressed(DataInput in)
throws IOException;
@Override
public final void write(DataOutput out) throws IOException {
if (compressed == null) {
ByteArrayOutputStream deflated = new ByteArrayOutputStream();
Deflater deflater = new Deflater(Deflater.BEST_SPEED);
DataOutputStream dout =
new DataOutputStream(new DeflaterOutputStream(deflated, deflater));
writeCompressed(dout);
dout.close();
deflater.end();
compressed = deflated.toByteArray();
}
out.writeInt(compressed.length);
out.write(compressed);
}
/** Subclasses implement this instead of {@link #write(DataOutput)}. */
protected abstract void writeCompressed(DataOutput out) throws IOException;
}
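
// Editorial sketch, not part of the original Hadoop source: a minimal
// CompressedWritable subclass. Field accessors call ensureInflated() so the
// compressed bytes are only inflated when a field is actually needed. The
// class name CompressedDocument is hypothetical.
class CompressedDocument extends CompressedWritable {
  private String body = "";

  public String getBody() {
    ensureInflated();
    return body;
  }

  public void setBody(String body) {
    ensureInflated();   // apply any pending compressed state before overwriting it
    this.body = body;
  }

  @Override
  protected void writeCompressed(DataOutput out) throws IOException {
    WritableUtils.writeString(out, body);
  }

  @Override
  protected void readFieldsCompressed(DataInput in) throws IOException {
    body = WritableUtils.readString(in);
  }
}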
| 3,397 | 35.148936 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionMismatchException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Thrown by {@link VersionedWritable#readFields(DataInput)} when the
* version of an object being read does not match the current implementation
* version as returned by {@link VersionedWritable#getVersion()}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class VersionMismatchException extends IOException {
private byte expectedVersion;
private byte foundVersion;
public VersionMismatchException(byte expectedVersionIn, byte foundVersionIn){
expectedVersion = expectedVersionIn;
foundVersion = foundVersionIn;
}
/** Returns a string representation of this object. */
@Override
public String toString(){
return "A record version mismatch occurred. Expecting v"
+ expectedVersion + ", found v" + foundVersion;
}
}
| 1,761 | 35.708333 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/GenericWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A wrapper for Writable instances.
* <p>
 * When two sequence files, which have the same Key type but different Value
 * types, are mapped out to reduce, multiple Value types are not allowed.
* In this case, this class can help you wrap instances with different types.
* </p>
*
* <p>
 * Compared with <code>ObjectWritable</code>, this class is much more efficient,
 * because <code>ObjectWritable</code> appends the class declaration as a String
 * to the output file in every Key-Value pair.
* </p>
*
* <p>
 * GenericWritable implements the {@link Configurable} interface, so that it will be
 * configured by the framework. The configuration is passed to the wrapped objects
 * implementing the {@link Configurable} interface <i>before deserialization</i>.
* </p>
*
 * How to use it: <br>
 * 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
 * 2. Implement the abstract method <code>getTypes()</code>, which defines
 * the classes that will be wrapped in GenericObject in your application.
 * Note: the classes defined in the <code>getTypes()</code> method must
 * implement the <code>Writable</code> interface.
* <br><br>
*
* The code looks like this:
* <blockquote><pre>
* public class GenericObject extends GenericWritable {
*
* private static Class[] CLASSES = {
* ClassType1.class,
* ClassType2.class,
* ClassType3.class,
* };
*
* protected Class[] getTypes() {
* return CLASSES;
* }
*
* }
* </pre></blockquote>
*
* @since Nov 8, 2006
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class GenericWritable implements Writable, Configurable {
private static final byte NOT_SET = -1;
private byte type = NOT_SET;
private Writable instance;
private Configuration conf = null;
/**
* Set the instance that is wrapped.
*
* @param obj
*/
public void set(Writable obj) {
instance = obj;
Class<? extends Writable> instanceClazz = instance.getClass();
Class<? extends Writable>[] clazzes = getTypes();
for (int i = 0; i < clazzes.length; i++) {
Class<? extends Writable> clazz = clazzes[i];
if (clazz.equals(instanceClazz)) {
type = (byte) i;
return;
}
}
throw new RuntimeException("The type of instance is: "
+ instance.getClass() + ", which is NOT registered.");
}
/**
* Return the wrapped instance.
*/
public Writable get() {
return instance;
}
@Override
public String toString() {
return "GW[" + (instance != null ? ("class=" + instance.getClass().getName() +
",value=" + instance.toString()) : "(null)") + "]";
}
@Override
public void readFields(DataInput in) throws IOException {
type = in.readByte();
Class<? extends Writable> clazz = getTypes()[type & 0xff];
try {
instance = ReflectionUtils.newInstance(clazz, conf);
} catch (Exception e) {
e.printStackTrace();
throw new IOException("Cannot initialize the class: " + clazz);
}
instance.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
if (type == NOT_SET || instance == null)
throw new IOException("The GenericWritable has NOT been set correctly. type="
+ type + ", instance=" + instance);
out.writeByte(type);
instance.write(out);
}
/**
* Return all classes that may be wrapped. Subclasses should implement this
* to return a constant array of classes.
*/
abstract protected Class<? extends Writable>[] getTypes();
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
}
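// Illustrative sketch, not part of the original file: a hypothetical subclass
// that registers two concrete Writable types, followed by a typical round trip
// through the in-memory buffers from this package. The class name "IntOrText"
// and the variable names in the comment are assumptions.
class IntOrText extends GenericWritable {
  private static Class[] CLASSES = {
      IntWritable.class,
      Text.class,
  };
  @Override
  protected Class[] getTypes() {
    return CLASSES;
  }
}
// Typical round trip:
//   IntOrText w = new IntOrText();
//   w.set(new Text("hello"));                 // must be one of the registered types
//   DataOutputBuffer out = new DataOutputBuffer();
//   w.write(out);                             // one type byte + the Text payload
//
//   IntOrText r = new IntOrText();
//   DataInputBuffer in = new DataInputBuffer();
//   in.reset(out.getData(), out.getLength());
//   r.readFields(in);
//   Text value = (Text) r.get();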
| 4,985 | 29.777778 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A reusable {@link DataInput} implementation that reads from an in-memory
* buffer.
*
* <p>This saves memory over creating a new DataInputStream and
* ByteArrayInputStream each time data is read.
*
* <p>Typical usage is something like the following:<pre>
*
* DataInputBuffer buffer = new DataInputBuffer();
* while (... loop condition ...) {
* byte[] data = ... get data ...;
* int dataLength = ... get data length ...;
* buffer.reset(data, dataLength);
* ... read buffer using DataInput methods ...
* }
* </pre>
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class DataInputBuffer extends DataInputStream {
private static class Buffer extends ByteArrayInputStream {
public Buffer() {
super(new byte[] {});
}
public void reset(byte[] input, int start, int length) {
this.buf = input;
this.count = start+length;
this.mark = start;
this.pos = start;
}
public byte[] getData() { return buf; }
public int getPosition() { return pos; }
public int getLength() { return count; }
}
private Buffer buffer;
/** Constructs a new empty buffer. */
public DataInputBuffer() {
this(new Buffer());
}
private DataInputBuffer(Buffer buffer) {
super(buffer);
this.buffer = buffer;
}
/** Resets the data that the buffer reads. */
public void reset(byte[] input, int length) {
buffer.reset(input, 0, length);
}
/** Resets the data that the buffer reads. */
public void reset(byte[] input, int start, int length) {
buffer.reset(input, start, length);
}
public byte[] getData() {
return buffer.getData();
}
/** Returns the current position in the input. */
public int getPosition() { return buffer.getPosition(); }
/**
* Returns the index one greater than the last valid character in the input
* stream buffer.
*/
public int getLength() { return buffer.getLength(); }
}
| 2,952 | 28.53 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.lang.reflect.Array;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A Writable for 2D arrays containing a matrix of instances of a class. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TwoDArrayWritable implements Writable {
private Class valueClass;
private Writable[][] values;
public TwoDArrayWritable(Class valueClass) {
this.valueClass = valueClass;
}
public TwoDArrayWritable(Class valueClass, Writable[][] values) {
this(valueClass);
this.values = values;
}
public Object toArray() {
int dimensions[] = {values.length, 0};
Object result = Array.newInstance(valueClass, dimensions);
for (int i = 0; i < values.length; i++) {
Object resultRow = Array.newInstance(valueClass, values[i].length);
Array.set(result, i, resultRow);
for (int j = 0; j < values[i].length; j++) {
Array.set(resultRow, j, values[i][j]);
}
}
return result;
}
public void set(Writable[][] values) { this.values = values; }
public Writable[][] get() { return values; }
@Override
public void readFields(DataInput in) throws IOException {
// construct matrix
values = new Writable[in.readInt()][];
for (int i = 0; i < values.length; i++) {
values[i] = new Writable[in.readInt()];
}
// construct values
for (int i = 0; i < values.length; i++) {
for (int j = 0; j < values[i].length; j++) {
Writable value; // construct value
try {
value = (Writable)valueClass.newInstance();
} catch (InstantiationException e) {
throw new RuntimeException(e.toString());
} catch (IllegalAccessException e) {
throw new RuntimeException(e.toString());
}
value.readFields(in); // read a value
values[i][j] = value; // store it in values
}
}
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(values.length); // write values
for (int i = 0; i < values.length; i++) {
out.writeInt(values[i].length);
}
for (int i = 0; i < values.length; i++) {
for (int j = 0; j < values[i].length; j++) {
values[i][j].write(out);
}
}
}
}
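// Illustrative sketch, not part of the original file: a hypothetical round trip
// of a jagged matrix of IntWritable values through the in-memory buffers from
// this package. The class and variable names are assumptions.
class TwoDArrayWritableExample {
  public static void main(String[] args) throws IOException {
    IntWritable[][] matrix = {
        { new IntWritable(1), new IntWritable(2) },
        { new IntWritable(3) }                    // rows may differ in length
    };
    TwoDArrayWritable written = new TwoDArrayWritable(IntWritable.class, matrix);
    DataOutputBuffer out = new DataOutputBuffer();
    written.write(out);                           // row count, row lengths, values
    TwoDArrayWritable read = new TwoDArrayWritable(IntWritable.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    read.readFields(in);
    Writable[][] values = read.get();             // same shape and contents as matrix
  }
}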
| 3,251 | 31.848485 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/VersionedWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataOutput;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A base class for Writables that provides version checking.
*
* <p>This is useful when a class may evolve, so that instances written by the
* old version of the class may still be processed by the new version. To
* handle this situation, {@link #readFields(DataInput)}
* implementations should catch {@link VersionMismatchException}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class VersionedWritable implements Writable {
/** Return the version number of the current implementation. */
public abstract byte getVersion();
// javadoc from Writable
@Override
public void write(DataOutput out) throws IOException {
out.writeByte(getVersion()); // store version
}
// javadoc from Writable
@Override
public void readFields(DataInput in) throws IOException {
byte version = in.readByte(); // read version
if (version != getVersion())
throw new VersionMismatchException(getVersion(), version);
}
}
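// Illustrative sketch, not part of the original file: a hypothetical subclass
// that delegates to super.write()/super.readFields() so the one-byte version
// header is written and checked automatically. The class and field names are
// assumptions.
class VersionedRecord extends VersionedWritable {
  private static final byte VERSION = 1;
  private long id;
  @Override
  public byte getVersion() {
    return VERSION;
  }
  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);                 // writes the version byte first
    out.writeLong(id);
  }
  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);             // throws VersionMismatchException on a stale record
    id = in.readLong();
  }
}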
| 2,055 | 34.448276 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.DirectoryStream;
import java.nio.file.DirectoryIteratorException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ChunkedArrayList;
/**
 * A utility class for I/O related functionality.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class IOUtils {
/**
* Copies from one stream to another.
*
   * @param in InputStream to read from
* @param out OutputStream to write to
* @param buffSize the size of the buffer
   * @param close whether or not to close the InputStream and
* OutputStream at the end. The streams are closed in the finally clause.
*/
public static void copyBytes(InputStream in, OutputStream out, int buffSize, boolean close)
throws IOException {
try {
copyBytes(in, out, buffSize);
if(close) {
out.close();
out = null;
in.close();
in = null;
}
} finally {
if(close) {
closeStream(out);
closeStream(in);
}
}
}
/**
* Copies from one stream to another.
*
   * @param in InputStream to read from
* @param out OutputStream to write to
* @param buffSize the size of the buffer
*/
public static void copyBytes(InputStream in, OutputStream out, int buffSize)
throws IOException {
PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null;
byte buf[] = new byte[buffSize];
int bytesRead = in.read(buf);
while (bytesRead >= 0) {
out.write(buf, 0, bytesRead);
if ((ps != null) && ps.checkError()) {
throw new IOException("Unable to write to output stream.");
}
bytesRead = in.read(buf);
}
}
/**
   * Copies from one stream to another. <strong>Closes the input and output streams
* at the end</strong>.
*
   * @param in InputStream to read from
* @param out OutputStream to write to
* @param conf the Configuration object
*/
public static void copyBytes(InputStream in, OutputStream out, Configuration conf)
throws IOException {
copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), true);
}
/**
* Copies from one stream to another.
*
* @param in InputStream to read from
* @param out OutputStream to write to
* @param conf the Configuration object
   * @param close whether or not to close the InputStream and
* OutputStream at the end. The streams are closed in the finally clause.
*/
public static void copyBytes(InputStream in, OutputStream out, Configuration conf, boolean close)
throws IOException {
copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), close);
}
/**
* Copies count bytes from one stream to another.
*
* @param in InputStream to read from
* @param out OutputStream to write to
* @param count number of bytes to copy
* @param close whether to close the streams
* @throws IOException if bytes can not be read or written
*/
public static void copyBytes(InputStream in, OutputStream out, long count,
boolean close) throws IOException {
byte buf[] = new byte[4096];
long bytesRemaining = count;
int bytesRead;
try {
while (bytesRemaining > 0) {
int bytesToRead = (int)
(bytesRemaining < buf.length ? bytesRemaining : buf.length);
bytesRead = in.read(buf, 0, bytesToRead);
if (bytesRead == -1)
break;
out.write(buf, 0, bytesRead);
bytesRemaining -= bytesRead;
}
if (close) {
out.close();
out = null;
in.close();
in = null;
}
} finally {
if (close) {
closeStream(out);
closeStream(in);
}
}
}
/**
* Utility wrapper for reading from {@link InputStream}. It catches any errors
* thrown by the underlying stream (either IO or decompression-related), and
* re-throws as an IOException.
*
* @param is - InputStream to be read from
* @param buf - buffer the data is read into
* @param off - offset within buf
* @param len - amount of data to be read
* @return number of bytes read
*/
public static int wrappedReadForCompressedData(InputStream is, byte[] buf,
int off, int len) throws IOException {
try {
return is.read(buf, off, len);
} catch (IOException ie) {
throw ie;
} catch (Throwable t) {
throw new IOException("Error while reading compressed data", t);
}
}
/**
* Reads len bytes in a loop.
*
* @param in InputStream to read from
* @param buf The buffer to fill
* @param off offset from the buffer
* @param len the length of bytes to read
* @throws IOException if it could not read requested number of bytes
* for any reason (including EOF)
*/
public static void readFully(InputStream in, byte buf[],
int off, int len) throws IOException {
int toRead = len;
while (toRead > 0) {
int ret = in.read(buf, off, toRead);
if (ret < 0) {
throw new IOException( "Premature EOF from inputStream");
}
toRead -= ret;
off += ret;
}
}
/**
* Similar to readFully(). Skips bytes in a loop.
* @param in The InputStream to skip bytes from
* @param len number of bytes to skip.
* @throws IOException if it could not skip requested number of bytes
* for any reason (including EOF)
*/
public static void skipFully(InputStream in, long len) throws IOException {
long amt = len;
while (amt > 0) {
long ret = in.skip(amt);
if (ret == 0) {
// skip may return 0 even if we're not at EOF. Luckily, we can
// use the read() method to figure out if we're at the end.
int b = in.read();
if (b == -1) {
throw new EOFException( "Premature EOF from inputStream after " +
"skipping " + (len - amt) + " byte(s).");
}
ret = 1;
}
amt -= ret;
}
}
/**
* Close the Closeable objects and <b>ignore</b> any {@link IOException} or
* null pointers. Must only be used for cleanup in exception handlers.
*
* @param log the log to record problems to at debug level. Can be null.
* @param closeables the objects to close
*/
public static void cleanup(Log log, java.io.Closeable... closeables) {
for (java.io.Closeable c : closeables) {
if (c != null) {
try {
c.close();
} catch(IOException e) {
if (log != null && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
}
/**
* Closes the stream ignoring {@link IOException}.
* Must only be called in cleaning up from exception handlers.
*
* @param stream the Stream to close
*/
public static void closeStream(java.io.Closeable stream) {
cleanup(null, stream);
}
/**
* Closes the socket ignoring {@link IOException}
*
* @param sock the Socket to close
*/
public static void closeSocket(Socket sock) {
if (sock != null) {
try {
sock.close();
} catch (IOException ignored) {
}
}
}
/**
* The /dev/null of OutputStreams.
*/
public static class NullOutputStream extends OutputStream {
@Override
public void write(byte[] b, int off, int len) throws IOException {
}
@Override
public void write(int b) throws IOException {
}
}
/**
* Write a ByteBuffer to a WritableByteChannel, handling short writes.
*
* @param bc The WritableByteChannel to write to
* @param buf The input buffer
* @throws IOException On I/O error
*/
public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
throws IOException {
do {
bc.write(buf);
} while (buf.remaining() > 0);
}
/**
* Write a ByteBuffer to a FileChannel at a given offset,
* handling short writes.
*
* @param fc The FileChannel to write to
* @param buf The input buffer
* @param offset The offset in the file to start writing at
* @throws IOException On I/O error
*/
public static void writeFully(FileChannel fc, ByteBuffer buf,
long offset) throws IOException {
do {
offset += fc.write(buf, offset);
} while (buf.remaining() > 0);
}
/**
* Return the complete list of files in a directory as strings.<p/>
*
   * This is better than File#list because it does not ignore IOExceptions.
*
* @param dir The directory to list.
* @param filter If non-null, the filter to use when listing
* this directory.
* @return The list of files in the directory.
*
* @throws IOException On I/O error
*/
public static List<String> listDirectory(File dir, FilenameFilter filter)
throws IOException {
ArrayList<String> list = new ArrayList<String> ();
try (DirectoryStream<Path> stream =
Files.newDirectoryStream(dir.toPath())) {
for (Path entry: stream) {
String fileName = entry.getFileName().toString();
if ((filter == null) || filter.accept(dir, fileName)) {
list.add(fileName);
}
}
} catch (DirectoryIteratorException e) {
throw e.getCause();
}
return list;
}
}
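// Illustrative sketch, not part of the original file: typical use of the
// helpers above -- copy a stream with the configured buffer size, then read an
// exact number of bytes. The class name and file names are assumptions.
class IOUtilsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    try (InputStream in = new FileInputStream("input.bin");
         OutputStream out = new FileOutputStream("copy.bin")) {
      // Buffer size comes from io.file.buffer.size; close=false leaves the
      // streams to the try-with-resources block.
      IOUtils.copyBytes(in, out, conf, false);
    }
    byte[] header = new byte[16];
    try (InputStream in = new FileInputStream("copy.bin")) {
      IOUtils.readFully(in, header, 0, header.length); // throws on premature EOF
    }
  }
}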
| 10,581 | 29.234286 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/UTF8.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.UTFDataFormatException;
import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for strings that uses the UTF8 encoding.
*
* <p>Also includes utilities for efficiently reading and writing UTF-8.
*
* Note that this decodes UTF-8 but actually encodes CESU-8, a variant of
* UTF-8: see http://en.wikipedia.org/wiki/CESU-8
*
* @deprecated replaced by Text
*/
@Deprecated
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Stable
public class UTF8 implements WritableComparable<UTF8> {
private static final Log LOG= LogFactory.getLog(UTF8.class);
private static final DataInputBuffer IBUF = new DataInputBuffer();
private static final ThreadLocal<DataOutputBuffer> OBUF_FACTORY =
new ThreadLocal<DataOutputBuffer>(){
@Override
protected DataOutputBuffer initialValue() {
return new DataOutputBuffer();
}
};
private static final byte[] EMPTY_BYTES = new byte[0];
private byte[] bytes = EMPTY_BYTES;
private int length;
public UTF8() {
//set("");
}
/** Construct from a given string. */
public UTF8(String string) {
set(string);
}
/** Construct from a given string. */
public UTF8(UTF8 utf8) {
set(utf8);
}
/** The raw bytes. */
public byte[] getBytes() {
return bytes;
}
/** The number of bytes in the encoded string. */
public int getLength() {
return length;
}
/** Set to contain the contents of a string. */
public void set(String string) {
if (string.length() > 0xffff/3) { // maybe too long
LOG.warn("truncating long string: " + string.length()
+ " chars, starting with " + string.substring(0, 20));
string = string.substring(0, 0xffff/3);
}
length = utf8Length(string); // compute length
if (length > 0xffff) // double-check length
throw new RuntimeException("string too long!");
if (bytes == null || length > bytes.length) // grow buffer
bytes = new byte[length];
try { // avoid sync'd allocations
DataOutputBuffer obuf = OBUF_FACTORY.get();
obuf.reset();
writeChars(obuf, string, 0, string.length());
System.arraycopy(obuf.getData(), 0, bytes, 0, length);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/** Set to contain the contents of a string. */
public void set(UTF8 other) {
length = other.length;
if (bytes == null || length > bytes.length) // grow buffer
bytes = new byte[length];
System.arraycopy(other.bytes, 0, bytes, 0, length);
}
@Override
public void readFields(DataInput in) throws IOException {
length = in.readUnsignedShort();
if (bytes == null || bytes.length < length)
bytes = new byte[length];
in.readFully(bytes, 0, length);
}
/** Skips over one UTF8 in the input. */
public static void skip(DataInput in) throws IOException {
int length = in.readUnsignedShort();
WritableUtils.skipFully(in, length);
}
@Override
public void write(DataOutput out) throws IOException {
out.writeShort(length);
out.write(bytes, 0, length);
}
/** Compare two UTF8s. */
@Override
public int compareTo(UTF8 o) {
return WritableComparator.compareBytes(bytes, 0, length,
o.bytes, 0, o.length);
}
/** Convert to a String. */
@Override
public String toString() {
StringBuilder buffer = new StringBuilder(length);
try {
synchronized (IBUF) {
IBUF.reset(bytes, length);
readChars(IBUF, buffer, length);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
return buffer.toString();
}
/**
* Convert to a string, checking for valid UTF8.
* @return the converted string
* @throws UTFDataFormatException if the underlying bytes contain invalid
* UTF8 data.
*/
public String toStringChecked() throws IOException {
StringBuilder buffer = new StringBuilder(length);
synchronized (IBUF) {
IBUF.reset(bytes, length);
readChars(IBUF, buffer, length);
}
return buffer.toString();
}
/** Returns true iff <code>o</code> is a UTF8 with the same contents. */
@Override
public boolean equals(Object o) {
if (!(o instanceof UTF8))
return false;
UTF8 that = (UTF8)o;
if (this.length != that.length)
return false;
else
return WritableComparator.compareBytes(bytes, 0, length,
that.bytes, 0, that.length) == 0;
}
@Override
public int hashCode() {
return WritableComparator.hashBytes(bytes, length);
}
/** A WritableComparator optimized for UTF8 keys. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(UTF8.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = readUnsignedShort(b1, s1);
int n2 = readUnsignedShort(b2, s2);
return compareBytes(b1, s1+2, n1, b2, s2+2, n2);
}
}
static { // register this comparator
WritableComparator.define(UTF8.class, new Comparator());
}
/// STATIC UTILITIES FROM HERE DOWN
/// These are probably not used much anymore, and might be removed...
/** Convert a string to a UTF-8 encoded byte array.
* @see String#getBytes(String)
*/
public static byte[] getBytes(String string) {
byte[] result = new byte[utf8Length(string)];
try { // avoid sync'd allocations
DataOutputBuffer obuf = OBUF_FACTORY.get();
obuf.reset();
writeChars(obuf, string, 0, string.length());
System.arraycopy(obuf.getData(), 0, result, 0, obuf.getLength());
} catch (IOException e) {
throw new RuntimeException(e);
}
return result;
}
/**
* Convert a UTF-8 encoded byte array back into a string.
*
* @throws IOException if the byte array is invalid UTF8
*/
public static String fromBytes(byte[] bytes) throws IOException {
DataInputBuffer dbuf = new DataInputBuffer();
dbuf.reset(bytes, 0, bytes.length);
StringBuilder buf = new StringBuilder(bytes.length);
readChars(dbuf, buf, bytes.length);
return buf.toString();
}
/** Read a UTF-8 encoded string.
*
* @see DataInput#readUTF()
*/
public static String readString(DataInput in) throws IOException {
int bytes = in.readUnsignedShort();
StringBuilder buffer = new StringBuilder(bytes);
readChars(in, buffer, bytes);
return buffer.toString();
}
private static void readChars(DataInput in, StringBuilder buffer, int nBytes)
throws UTFDataFormatException, IOException {
DataOutputBuffer obuf = OBUF_FACTORY.get();
obuf.reset();
obuf.write(in, nBytes);
byte[] bytes = obuf.getData();
int i = 0;
while (i < nBytes) {
byte b = bytes[i++];
if ((b & 0x80) == 0) {
// 0b0xxxxxxx: 1-byte sequence
buffer.append((char)(b & 0x7F));
} else if ((b & 0xE0) == 0xC0) {
if (i >= nBytes) {
throw new UTFDataFormatException("Truncated UTF8 at " +
StringUtils.byteToHexString(bytes, i - 1, 1));
}
// 0b110xxxxx: 2-byte sequence
buffer.append((char)(((b & 0x1F) << 6)
| (bytes[i++] & 0x3F)));
} else if ((b & 0xF0) == 0xE0) {
// 0b1110xxxx: 3-byte sequence
if (i + 1 >= nBytes) {
throw new UTFDataFormatException("Truncated UTF8 at " +
StringUtils.byteToHexString(bytes, i - 1, 2));
}
buffer.append((char)(((b & 0x0F) << 12)
| ((bytes[i++] & 0x3F) << 6)
| (bytes[i++] & 0x3F)));
} else if ((b & 0xF8) == 0xF0) {
if (i + 2 >= nBytes) {
throw new UTFDataFormatException("Truncated UTF8 at " +
StringUtils.byteToHexString(bytes, i - 1, 3));
}
// 0b11110xxx: 4-byte sequence
int codepoint =
((b & 0x07) << 18)
| ((bytes[i++] & 0x3F) << 12)
| ((bytes[i++] & 0x3F) << 6)
| ((bytes[i++] & 0x3F));
buffer.append(highSurrogate(codepoint))
.append(lowSurrogate(codepoint));
} else {
// The UTF8 standard describes 5-byte and 6-byte sequences, but
// these are no longer allowed as of 2003 (see RFC 3629)
// Only show the next 6 bytes max in the error code - in case the
// buffer is large, this will prevent an exceedingly large message.
int endForError = Math.min(i + 5, nBytes);
throw new UTFDataFormatException("Invalid UTF8 at " +
StringUtils.byteToHexString(bytes, i - 1, endForError));
}
}
}
private static char highSurrogate(int codePoint) {
return (char) ((codePoint >>> 10)
+ (Character.MIN_HIGH_SURROGATE - (Character.MIN_SUPPLEMENTARY_CODE_POINT >>> 10)));
}
private static char lowSurrogate(int codePoint) {
return (char) ((codePoint & 0x3ff) + Character.MIN_LOW_SURROGATE);
}
/** Write a UTF-8 encoded string.
*
* @see DataOutput#writeUTF(String)
*/
public static int writeString(DataOutput out, String s) throws IOException {
if (s.length() > 0xffff/3) { // maybe too long
LOG.warn("truncating long string: " + s.length()
+ " chars, starting with " + s.substring(0, 20));
s = s.substring(0, 0xffff/3);
}
int len = utf8Length(s);
if (len > 0xffff) // double-check length
throw new IOException("string too long!");
out.writeShort(len);
writeChars(out, s, 0, s.length());
return len;
}
/** Returns the number of bytes required to write this. */
private static int utf8Length(String string) {
int stringLength = string.length();
int utf8Length = 0;
for (int i = 0; i < stringLength; i++) {
int c = string.charAt(i);
if (c <= 0x007F) {
utf8Length++;
} else if (c > 0x07FF) {
utf8Length += 3;
} else {
utf8Length += 2;
}
}
return utf8Length;
}
private static void writeChars(DataOutput out,
String s, int start, int length)
throws IOException {
final int end = start + length;
for (int i = start; i < end; i++) {
int code = s.charAt(i);
if (code <= 0x7F) {
out.writeByte((byte)code);
} else if (code <= 0x07FF) {
out.writeByte((byte)(0xC0 | ((code >> 6) & 0x1F)));
out.writeByte((byte)(0x80 | code & 0x3F));
} else {
out.writeByte((byte)(0xE0 | ((code >> 12) & 0X0F)));
out.writeByte((byte)(0x80 | ((code >> 6) & 0x3F)));
out.writeByte((byte)(0x80 | (code & 0x3F)));
}
}
}
}
| 11,993 | 30.814324 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A factory for a class of Writable.
* @see WritableFactories
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface WritableFactory {
/** Return a new instance. */
Writable newInstance();
}
| 1,177 | 33.647059 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Writable for Double values.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DoubleWritable implements WritableComparable<DoubleWritable> {
private double value = 0.0;
public DoubleWritable() {
}
public DoubleWritable(double value) {
set(value);
}
@Override
public void readFields(DataInput in) throws IOException {
value = in.readDouble();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeDouble(value);
}
public void set(double value) { this.value = value; }
public double get() { return value; }
/**
* Returns true iff <code>o</code> is a DoubleWritable with the same value.
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof DoubleWritable)) {
return false;
}
DoubleWritable other = (DoubleWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return (int)Double.doubleToLongBits(value);
}
@Override
public int compareTo(DoubleWritable o) {
return (value < o.value ? -1 : (value == o.value ? 0 : 1));
}
@Override
public String toString() {
return Double.toString(value);
}
/** A Comparator optimized for DoubleWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(DoubleWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
double thisValue = readDouble(b1, s1);
double thatValue = readDouble(b2, s2);
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
}
static { // register this comparator
WritableComparator.define(DoubleWritable.class, new Comparator());
}
}
| 2,888 | 26 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for a single byte. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ByteWritable implements WritableComparable<ByteWritable> {
private byte value;
public ByteWritable() {}
public ByteWritable(byte value) { set(value); }
/** Set the value of this ByteWritable. */
public void set(byte value) { this.value = value; }
/** Return the value of this ByteWritable. */
public byte get() { return value; }
@Override
public void readFields(DataInput in) throws IOException {
value = in.readByte();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeByte(value);
}
/** Returns true iff <code>o</code> is a ByteWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof ByteWritable)) {
return false;
}
ByteWritable other = (ByteWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return (int)value;
}
/** Compares two ByteWritables. */
@Override
public int compareTo(ByteWritable o) {
int thisValue = this.value;
int thatValue = o.value;
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
@Override
public String toString() {
return Byte.toString(value);
}
/** A Comparator optimized for ByteWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(ByteWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
byte thisValue = b1[s1];
byte thatValue = b2[s2];
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
}
static { // register this comparator
WritableComparator.define(ByteWritable.class, new Comparator());
}
}
| 2,899 | 28 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayPrimitiveWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This is a wrapper class. It wraps a Writable implementation around
* an array of primitives (e.g., int[], long[], etc.), with optimized
* wire format, and without creating new objects per element.
*
* This is a wrapper class only; it does not make a copy of the
* underlying array.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ArrayPrimitiveWritable implements Writable {
//componentType is determined from the component type of the value array
//during a "set" operation. It must be primitive.
private Class<?> componentType = null;
//declaredComponentType need not be declared, but if you do (by using the
//ArrayPrimitiveWritable(Class<?>) constructor), it will provide typechecking
//for all "set" operations.
private Class<?> declaredComponentType = null;
private int length;
private Object value; //must be an array of <componentType>[length]
private static final Map<String, Class<?>> PRIMITIVE_NAMES =
new HashMap<String, Class<?>>(16);
static {
PRIMITIVE_NAMES.put(boolean.class.getName(), boolean.class);
PRIMITIVE_NAMES.put(byte.class.getName(), byte.class);
PRIMITIVE_NAMES.put(char.class.getName(), char.class);
PRIMITIVE_NAMES.put(short.class.getName(), short.class);
PRIMITIVE_NAMES.put(int.class.getName(), int.class);
PRIMITIVE_NAMES.put(long.class.getName(), long.class);
PRIMITIVE_NAMES.put(float.class.getName(), float.class);
PRIMITIVE_NAMES.put(double.class.getName(), double.class);
}
private static Class<?> getPrimitiveClass(String className) {
return PRIMITIVE_NAMES.get(className);
}
private static void checkPrimitive(Class<?> componentType) {
if (componentType == null) {
throw new HadoopIllegalArgumentException("null component type not allowed");
}
if (! PRIMITIVE_NAMES.containsKey(componentType.getName())) {
throw new HadoopIllegalArgumentException("input array component type "
+ componentType.getName() + " is not a candidate primitive type");
}
}
private void checkDeclaredComponentType(Class<?> componentType) {
if ((declaredComponentType != null)
&& (componentType != declaredComponentType)) {
throw new HadoopIllegalArgumentException("input array component type "
+ componentType.getName() + " does not match declared type "
+ declaredComponentType.getName());
}
}
private static void checkArray(Object value) {
if (value == null) {
throw new HadoopIllegalArgumentException("null value not allowed");
}
if (! value.getClass().isArray()) {
throw new HadoopIllegalArgumentException("non-array value of class "
+ value.getClass() + " not allowed");
}
}
/**
* Construct an empty instance, for use during Writable read
*/
public ArrayPrimitiveWritable() {
//empty constructor
}
/**
* Construct an instance of known type but no value yet
* for use with type-specific wrapper classes
*/
public ArrayPrimitiveWritable(Class<?> componentType) {
checkPrimitive(componentType);
this.declaredComponentType = componentType;
}
/**
* Wrap an existing array of primitives
* @param value - array of primitives
*/
public ArrayPrimitiveWritable(Object value) {
set(value);
}
/**
* Get the original array.
* Client must cast it back to type componentType[]
* (or may use type-specific wrapper classes).
* @return - original array as Object
*/
public Object get() { return value; }
public Class<?> getComponentType() { return componentType; }
public Class<?> getDeclaredComponentType() { return declaredComponentType; }
public boolean isDeclaredComponentType(Class<?> componentType) {
return componentType == declaredComponentType;
}
public void set(Object value) {
checkArray(value);
Class<?> componentType = value.getClass().getComponentType();
checkPrimitive(componentType);
checkDeclaredComponentType(componentType);
this.componentType = componentType;
this.value = value;
this.length = Array.getLength(value);
}
/**
* Do not use this class.
* This is an internal class, purely for ObjectWritable to use as
* a label class for transparent conversions of arrays of primitives
* during wire protocol reads and writes.
*/
static class Internal extends ArrayPrimitiveWritable {
Internal() { //use for reads
super();
}
Internal(Object value) { //use for writes
super(value);
}
} //end Internal subclass declaration
/*
* @see org.apache.hadoop.io.Writable#write(java.io.DataOutput)
*/
@Override
@SuppressWarnings("deprecation")
public void write(DataOutput out) throws IOException {
// write componentType
UTF8.writeString(out, componentType.getName());
// write length
out.writeInt(length);
// do the inner loop. Walk the decision tree only once.
if (componentType == Boolean.TYPE) { // boolean
writeBooleanArray(out);
} else if (componentType == Character.TYPE) { // char
writeCharArray(out);
} else if (componentType == Byte.TYPE) { // byte
writeByteArray(out);
} else if (componentType == Short.TYPE) { // short
writeShortArray(out);
} else if (componentType == Integer.TYPE) { // int
writeIntArray(out);
} else if (componentType == Long.TYPE) { // long
writeLongArray(out);
} else if (componentType == Float.TYPE) { // float
writeFloatArray(out);
} else if (componentType == Double.TYPE) { // double
writeDoubleArray(out);
} else {
throw new IOException("Component type " + componentType.toString()
+ " is set as the output type, but no encoding is implemented for this type.");
}
}
/*
* @see org.apache.hadoop.io.Writable#readFields(java.io.DataInput)
*/
@Override
public void readFields(DataInput in) throws IOException {
// read and set the component type of the array
@SuppressWarnings("deprecation")
String className = UTF8.readString(in);
Class<?> componentType = getPrimitiveClass(className);
if (componentType == null) {
throw new IOException("encoded array component type "
+ className + " is not a candidate primitive type");
}
checkDeclaredComponentType(componentType);
this.componentType = componentType;
// read and set the length of the array
int length = in.readInt();
if (length < 0) {
throw new IOException("encoded array length is negative " + length);
}
this.length = length;
// construct and read in the array
value = Array.newInstance(componentType, length);
// do the inner loop. Walk the decision tree only once.
if (componentType == Boolean.TYPE) { // boolean
readBooleanArray(in);
} else if (componentType == Character.TYPE) { // char
readCharArray(in);
} else if (componentType == Byte.TYPE) { // byte
readByteArray(in);
} else if (componentType == Short.TYPE) { // short
readShortArray(in);
} else if (componentType == Integer.TYPE) { // int
readIntArray(in);
} else if (componentType == Long.TYPE) { // long
readLongArray(in);
} else if (componentType == Float.TYPE) { // float
readFloatArray(in);
} else if (componentType == Double.TYPE) { // double
readDoubleArray(in);
} else {
throw new IOException("Encoded type " + className
+ " converted to valid component type " + componentType.toString()
+ " but no encoding is implemented for this type.");
}
}
//For efficient implementation, there's no way around
//the following massive code duplication.
private void writeBooleanArray(DataOutput out) throws IOException {
boolean[] v = (boolean[]) value;
for (int i = 0; i < length; i++)
out.writeBoolean(v[i]);
}
private void writeCharArray(DataOutput out) throws IOException {
char[] v = (char[]) value;
for (int i = 0; i < length; i++)
out.writeChar(v[i]);
}
private void writeByteArray(DataOutput out) throws IOException {
out.write((byte[]) value, 0, length);
}
private void writeShortArray(DataOutput out) throws IOException {
short[] v = (short[]) value;
for (int i = 0; i < length; i++)
out.writeShort(v[i]);
}
private void writeIntArray(DataOutput out) throws IOException {
int[] v = (int[]) value;
for (int i = 0; i < length; i++)
out.writeInt(v[i]);
}
private void writeLongArray(DataOutput out) throws IOException {
long[] v = (long[]) value;
for (int i = 0; i < length; i++)
out.writeLong(v[i]);
}
private void writeFloatArray(DataOutput out) throws IOException {
float[] v = (float[]) value;
for (int i = 0; i < length; i++)
out.writeFloat(v[i]);
}
private void writeDoubleArray(DataOutput out) throws IOException {
double[] v = (double[]) value;
for (int i = 0; i < length; i++)
out.writeDouble(v[i]);
}
private void readBooleanArray(DataInput in) throws IOException {
boolean[] v = (boolean[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readBoolean();
}
private void readCharArray(DataInput in) throws IOException {
char[] v = (char[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readChar();
}
private void readByteArray(DataInput in) throws IOException {
in.readFully((byte[]) value, 0, length);
}
private void readShortArray(DataInput in) throws IOException {
short[] v = (short[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readShort();
}
private void readIntArray(DataInput in) throws IOException {
int[] v = (int[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readInt();
}
private void readLongArray(DataInput in) throws IOException {
long[] v = (long[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readLong();
}
private void readFloatArray(DataInput in) throws IOException {
float[] v = (float[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readFloat();
}
private void readDoubleArray(DataInput in) throws IOException {
double[] v = (double[]) value;
for (int i = 0; i < length; i++)
v[i] = in.readDouble();
}
}
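// Illustrative sketch, not part of the original file: a hypothetical round trip
// of an int[] without per-element boxing. The class and variable names are
// assumptions.
class ArrayPrimitiveWritableExample {
  public static void main(String[] args) throws IOException {
    int[] data = { 1, 2, 3, 4 };
    ArrayPrimitiveWritable written = new ArrayPrimitiveWritable(data);
    DataOutputBuffer out = new DataOutputBuffer();
    written.write(out);                      // component type name, length, elements
    // Declaring int.class here enables type checking during the read.
    ArrayPrimitiveWritable read = new ArrayPrimitiveWritable(int.class);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    read.readFields(in);
    int[] roundTripped = (int[]) read.get(); // caller casts back to int[]
  }
}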
| 11,697 | 32.711816 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.util.HashMap;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/** Utility to permit renaming of Writable implementation classes without
 * invalidating files that contain their class name.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class WritableName {
private static HashMap<String, Class<?>> NAME_TO_CLASS =
new HashMap<String, Class<?>>();
private static HashMap<Class<?>, String> CLASS_TO_NAME =
new HashMap<Class<?>, String>();
static { // define important types
WritableName.setName(NullWritable.class, "null");
WritableName.setName(LongWritable.class, "long");
WritableName.setName(UTF8.class, "UTF8");
WritableName.setName(MD5Hash.class, "MD5Hash");
}
private WritableName() {} // no public ctor
/** Set the name that a class should be known as to something other than the
* class name. */
public static synchronized void setName(Class<?> writableClass, String name) {
CLASS_TO_NAME.put(writableClass, name);
NAME_TO_CLASS.put(name, writableClass);
}
/** Add an alternate name for a class. */
public static synchronized void addName(Class<?> writableClass, String name) {
NAME_TO_CLASS.put(name, writableClass);
}
/** Return the name for a class. Default is {@link Class#getName()}. */
public static synchronized String getName(Class<?> writableClass) {
String name = CLASS_TO_NAME.get(writableClass);
if (name != null)
return name;
return writableClass.getName();
}
/** Return the class for a name. Default is {@link Class#forName(String)}.*/
public static synchronized Class<?> getClass(String name, Configuration conf
) throws IOException {
Class<?> writableClass = NAME_TO_CLASS.get(name);
if (writableClass != null)
return writableClass.asSubclass(Writable.class);
try {
return conf.getClassByName(name);
} catch (ClassNotFoundException e) {
IOException newE = new IOException("WritableName can't load class: " + name);
newE.initCause(e);
throw newE;
}
}
}
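// Illustrative sketch, not part of the original file: registering a stable wire
// name plus a legacy alias so a renamed class can still be resolved. MyRecord,
// the names, and the conf variable are assumptions.
//
//   WritableName.setName(MyRecord.class, "myrecord");              // canonical name
//   WritableName.addName(MyRecord.class, "com.example.OldRecord"); // legacy alias
//
//   String name = WritableName.getName(MyRecord.class);            // "myrecord"
//   Class<?> clazz = WritableName.getClass("com.example.OldRecord", conf);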
| 3,146 | 36.464286 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/InputBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A reusable {@link InputStream} implementation that reads from an in-memory
* buffer.
*
* <p>This saves memory over creating a new InputStream and
* ByteArrayInputStream each time data is read.
*
* <p>Typical usage is something like the following:<pre>
*
* InputBuffer buffer = new InputBuffer();
* while (... loop condition ...) {
* byte[] data = ... get data ...;
* int dataLength = ... get data length ...;
* buffer.reset(data, dataLength);
* ... read buffer using InputStream methods ...
* }
* </pre>
* @see DataInputBuffer
* @see DataOutput
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class InputBuffer extends FilterInputStream {
private static class Buffer extends ByteArrayInputStream {
public Buffer() {
super(new byte[] {});
}
public void reset(byte[] input, int start, int length) {
this.buf = input;
this.count = start+length;
this.mark = start;
this.pos = start;
}
public int getPosition() { return pos; }
public int getLength() { return count; }
}
private Buffer buffer;
/** Constructs a new empty buffer. */
public InputBuffer() {
this(new Buffer());
}
private InputBuffer(Buffer buffer) {
super(buffer);
this.buffer = buffer;
}
/** Resets the data that the buffer reads. */
public void reset(byte[] input, int length) {
buffer.reset(input, 0, length);
}
/** Resets the data that the buffer reads. */
public void reset(byte[] input, int start, int length) {
buffer.reset(input, start, length);
}
/** Returns the current position in the input. */
public int getPosition() { return buffer.getPosition(); }
/** Returns the length of the input. */
public int getLength() { return buffer.getLength(); }
}
| 2,798 | 28.463158 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A byte sequence that is usable as a key or value.
* It is resizable and distinguishes between the size of the sequence and
* the current capacity. The hash function is the front of the md5 of the
* buffer. The sort order is the same as memcmp.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BytesWritable extends BinaryComparable
implements WritableComparable<BinaryComparable> {
private static final int LENGTH_BYTES = 4;
private static final byte[] EMPTY_BYTES = {};
private int size;
private byte[] bytes;
/**
* Create a zero-size sequence.
*/
public BytesWritable() {this(EMPTY_BYTES);}
/**
* Create a BytesWritable using the byte array as the initial value.
* @param bytes This array becomes the backing storage for the object.
*/
public BytesWritable(byte[] bytes) {
this(bytes, bytes.length);
}
/**
* Create a BytesWritable using the byte array as the initial value
* and length as the length. Use this constructor if the array is larger
* than the value it represents.
* @param bytes This array becomes the backing storage for the object.
* @param length The number of bytes to use from array.
*/
public BytesWritable(byte[] bytes, int length) {
this.bytes = bytes;
this.size = length;
}
/**
* Get a copy of the bytes that is exactly the length of the data.
* See {@link #getBytes()} for faster access to the underlying array.
*/
public byte[] copyBytes() {
byte[] result = new byte[size];
System.arraycopy(bytes, 0, result, 0, size);
return result;
}
/**
* Get the data backing the BytesWritable. Please use {@link #copyBytes()}
* if you need the returned array to be precisely the length of the data.
* @return The data is only valid between 0 and getLength() - 1.
*/
@Override
public byte[] getBytes() {
return bytes;
}
/**
* Get the data from the BytesWritable.
* @deprecated Use {@link #getBytes()} instead.
*/
@Deprecated
public byte[] get() {
return getBytes();
}
/**
* Get the current size of the buffer.
*/
@Override
public int getLength() {
return size;
}
/**
* Get the current size of the buffer.
* @deprecated Use {@link #getLength()} instead.
*/
@Deprecated
public int getSize() {
return getLength();
}
/**
* Change the size of the buffer. The values in the old range are preserved
* and any new values are undefined. The capacity is changed if it is
* necessary.
* @param size The new number of bytes
*/
public void setSize(int size) {
if (size > getCapacity()) {
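      // Grow to 1.5x the requested size so a sequence of small increases does
      // not force a reallocation and copy on every call.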
setCapacity(size * 3 / 2);
}
this.size = size;
}
/**
   * Get the capacity, which is the maximum size that could be handled without
* resizing the backing storage.
* @return The number of bytes
*/
public int getCapacity() {
return bytes.length;
}
/**
* Change the capacity of the backing storage.
* The data is preserved.
* @param new_cap The new capacity in bytes.
*/
public void setCapacity(int new_cap) {
if (new_cap != getCapacity()) {
byte[] new_data = new byte[new_cap];
if (new_cap < size) {
size = new_cap;
}
if (size != 0) {
System.arraycopy(bytes, 0, new_data, 0, size);
}
bytes = new_data;
}
}
/**
* Set the BytesWritable to the contents of the given newData.
* @param newData the value to set this BytesWritable to.
*/
public void set(BytesWritable newData) {
set(newData.bytes, 0, newData.size);
}
/**
* Set the value to a copy of the given byte range
* @param newData the new values to copy in
* @param offset the offset in newData to start at
* @param length the number of bytes to copy
*/
public void set(byte[] newData, int offset, int length) {
setSize(0);
setSize(length);
System.arraycopy(newData, offset, bytes, 0, size);
}
// inherit javadoc
@Override
public void readFields(DataInput in) throws IOException {
setSize(0); // clear the old data
setSize(in.readInt());
in.readFully(bytes, 0, size);
}
// inherit javadoc
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(size);
out.write(bytes, 0, size);
}
@Override
public int hashCode() {
return super.hashCode();
}
/**
* Are the two byte sequences equal?
*/
@Override
public boolean equals(Object right_obj) {
if (right_obj instanceof BytesWritable)
return super.equals(right_obj);
return false;
}
/**
* Generate the stream of bytes as hex pairs separated by ' '.
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder(3*size);
for (int idx = 0; idx < size; idx++) {
// if not the first, put a blank separator in
if (idx != 0) {
sb.append(' ');
}
String num = Integer.toHexString(0xff & bytes[idx]);
// if it is only one digit, add a leading 0.
if (num.length() < 2) {
sb.append('0');
}
sb.append(num);
}
return sb.toString();
}
/** A Comparator optimized for BytesWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(BytesWritable.class);
}
/**
* Compare the buffers in serialized form.
*/
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1+LENGTH_BYTES, l1-LENGTH_BYTES,
b2, s2+LENGTH_BYTES, l2-LENGTH_BYTES);
}
}
static { // register this comparator
WritableComparator.define(BytesWritable.class, new Comparator());
}
}
| 6,854 | 26.641129 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.lang.reflect.Field;
import java.nio.ByteOrder;
import java.security.AccessController;
import java.security.PrivilegedAction;
import sun.misc.Unsafe;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
/**
* Utility code to do optimized byte-array comparison.
* This is borrowed and slightly modified from Guava's {@link UnsignedBytes}
* class to be able to compare arrays that start at non-zero offsets.
*/
abstract class FastByteComparisons {
static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
/**
* Lexicographically compare two byte arrays.
*/
public static int compareTo(byte[] b1, int s1, int l1, byte[] b2, int s2,
int l2) {
return LexicographicalComparerHolder.BEST_COMPARER.compareTo(
b1, s1, l1, b2, s2, l2);
}
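  // A minimal sketch of how callers inside this package use the comparison;
  // the arrays and ranges are illustrative. The offsets and lengths select a
  // sub-range of each array, and the result follows memcmp semantics:
  //
  //   byte[] a = ..., b = ...;
  //   int cmp = FastByteComparisons.compareTo(a, 0, a.length, b, 0, b.length);
  //   boolean aSortsFirst = cmp < 0;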
private interface Comparer<T> {
    int compareTo(T buffer1, int offset1, int length1,
        T buffer2, int offset2, int length2);
}
private static Comparer<byte[]> lexicographicalComparerJavaImpl() {
return LexicographicalComparerHolder.PureJavaComparer.INSTANCE;
}
/**
* Provides a lexicographical comparer implementation; either a Java
* implementation or a faster implementation based on {@link Unsafe}.
*
* <p>Uses reflection to gracefully fall back to the Java implementation if
* {@code Unsafe} isn't available.
*/
private static class LexicographicalComparerHolder {
static final String UNSAFE_COMPARER_NAME =
LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";
static final Comparer<byte[]> BEST_COMPARER = getBestComparer();
/**
* Returns the Unsafe-using Comparer, or falls back to the pure-Java
* implementation if unable to do so.
*/
static Comparer<byte[]> getBestComparer() {
if (System.getProperty("os.arch").equals("sparc")) {
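        // The Unsafe comparer reads 8 bytes at arbitrary offsets; architectures
        // such as SPARC that require aligned memory access cannot do that
        // safely, so fall back to the pure-Java comparer here.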
if (LOG.isTraceEnabled()) {
LOG.trace("Lexicographical comparer selected for "
+ "byte aligned system architecture");
}
return lexicographicalComparerJavaImpl();
}
try {
Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
// yes, UnsafeComparer does implement Comparer<byte[]>
@SuppressWarnings("unchecked")
Comparer<byte[]> comparer =
(Comparer<byte[]>) theClass.getEnumConstants()[0];
if (LOG.isTraceEnabled()) {
LOG.trace("Unsafe comparer selected for "
+ "byte unaligned system architecture");
}
return comparer;
} catch (Throwable t) { // ensure we really catch *everything*
if (LOG.isTraceEnabled()) {
LOG.trace(t.getMessage());
LOG.trace("Lexicographical comparer selected");
}
return lexicographicalComparerJavaImpl();
}
}
private enum PureJavaComparer implements Comparer<byte[]> {
INSTANCE;
@Override
public int compareTo(byte[] buffer1, int offset1, int length1,
byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (buffer1 == buffer2 &&
offset1 == offset2 &&
length1 == length2) {
return 0;
}
// Bring WritableComparator code local
int end1 = offset1 + length1;
int end2 = offset2 + length2;
for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
int a = (buffer1[i] & 0xff);
int b = (buffer2[j] & 0xff);
if (a != b) {
return a - b;
}
}
return length1 - length2;
}
}
@SuppressWarnings("unused") // used via reflection
private enum UnsafeComparer implements Comparer<byte[]> {
INSTANCE;
static final Unsafe theUnsafe;
/** The offset to the first element in a byte array. */
static final int BYTE_ARRAY_BASE_OFFSET;
static {
theUnsafe = (Unsafe) AccessController.doPrivileged(
new PrivilegedAction<Object>() {
@Override
public Object run() {
try {
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return f.get(null);
} catch (NoSuchFieldException e) {
// It doesn't matter what we throw;
// it's swallowed in getBestComparer().
throw new Error();
} catch (IllegalAccessException e) {
throw new Error();
}
}
});
BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
// sanity check - this should never fail
if (theUnsafe.arrayIndexScale(byte[].class) != 1) {
throw new AssertionError();
}
}
static final boolean littleEndian =
ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
/**
* Returns true if x1 is less than x2, when both values are treated as
* unsigned.
*/
static boolean lessThanUnsigned(long x1, long x2) {
return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
}
/**
* Lexicographically compare two arrays.
*
* @param buffer1 left operand
* @param buffer2 right operand
* @param offset1 Where to start comparing in the left buffer
* @param offset2 Where to start comparing in the right buffer
* @param length1 How much to compare from the left buffer
* @param length2 How much to compare from the right buffer
* @return 0 if equal, < 0 if left is less than right, etc.
*/
@Override
public int compareTo(byte[] buffer1, int offset1, int length1,
byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (buffer1 == buffer2 &&
offset1 == offset2 &&
length1 == length2) {
return 0;
}
int minLength = Math.min(length1, length2);
int minWords = minLength / Longs.BYTES;
int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
/*
* Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
* time is no slower than comparing 4 bytes at a time even on 32-bit.
* On the other hand, it is substantially faster on 64-bit.
*/
for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
long diff = lw ^ rw;
if (diff != 0) {
if (!littleEndian) {
return lessThanUnsigned(lw, rw) ? -1 : 1;
}
// Use binary search
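              // On a little-endian host the first (lowest-index) array byte
              // occupies the least-significant bits of the long, so find the
              // lowest-order non-zero byte of diff and compare just that byte.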
int n = 0;
int y;
int x = (int) diff;
if (x == 0) {
x = (int) (diff >>> 32);
n = 32;
}
y = x << 16;
if (y == 0) {
n += 16;
} else {
x = y;
}
y = x << 8;
if (y == 0) {
n += 8;
}
return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
}
}
// The epilogue to cover the last (minLength % 8) elements.
for (int i = minWords * Longs.BYTES; i < minLength; i++) {
int result = UnsignedBytes.compare(
buffer1[offset1 + i],
buffer2[offset2 + i]);
if (result != 0) {
return result;
}
}
return length1 - length2;
}
}
}
}
| 8,681 | 32.782101 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
/** A dense file-based mapping from integers to values. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ArrayFile extends MapFile {
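  // A minimal usage sketch; the file name and value type are illustrative, and
  // conf/fs are assumed to be an existing Configuration and FileSystem. Keys
  // are implicit: the writer assigns consecutive long indices, and the reader
  // can fetch the n-th value directly.
  //
  //   ArrayFile.Writer writer =
  //       new ArrayFile.Writer(conf, fs, "/tmp/values", Text.class);
  //   writer.append(new Text("first"));    // stored at index 0
  //   writer.append(new Text("second"));   // stored at index 1
  //   writer.close();
  //
  //   ArrayFile.Reader reader = new ArrayFile.Reader(fs, "/tmp/values", conf);
  //   Text value = new Text();
  //   reader.get(1, value);                // fills value with "second"
  //   reader.close();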
protected ArrayFile() {} // no public ctor
/** Write a new array file. */
public static class Writer extends MapFile.Writer {
private LongWritable count = new LongWritable(0);
/** Create the named file for values of the named class. */
public Writer(Configuration conf, FileSystem fs,
String file, Class<? extends Writable> valClass)
throws IOException {
super(conf, new Path(file), keyClass(LongWritable.class),
valueClass(valClass));
}
/** Create the named file for values of the named class. */
public Writer(Configuration conf, FileSystem fs,
String file, Class<? extends Writable> valClass,
CompressionType compress, Progressable progress)
throws IOException {
super(conf, new Path(file),
keyClass(LongWritable.class),
valueClass(valClass),
compression(compress),
progressable(progress));
}
/** Append a value to the file. */
public synchronized void append(Writable value) throws IOException {
super.append(count, value); // add to map
count.set(count.get()+1); // increment count
}
}
/** Provide access to an existing array file. */
public static class Reader extends MapFile.Reader {
private LongWritable key = new LongWritable();
/** Construct an array reader for the named file.*/
public Reader(FileSystem fs, String file,
Configuration conf) throws IOException {
super(new Path(file), conf);
}
/** Positions the reader before its <code>n</code>th value. */
public synchronized void seek(long n) throws IOException {
key.set(n);
seek(key);
}
/** Read and return the next value in the file. */
public synchronized Writable next(Writable value) throws IOException {
return next(key, value) ? value : null;
}
/** Returns the key associated with the most recent call to {@link
* #seek(long)}, {@link #next(Writable)}, or {@link
* #get(long,Writable)}. */
public synchronized long key() throws IOException {
return key.get();
}
/** Return the <code>n</code>th value in the file. */
public synchronized Writable get(long n, Writable value)
throws IOException {
key.set(n);
return get(key, value);
}
}
}
| 3,716 | 34.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.nativeio.Errno;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.annotations.VisibleForTesting;
/**
* This class provides secure APIs for opening and creating files on the local
* disk. The main issue this class tries to handle is that of symlink traversal.
* <br/>
* An example of such an attack is:
* <ol>
* <li> Malicious user removes his task's syslog file, and puts a link to the
* jobToken file of a target user.</li>
* <li> Malicious user tries to open the syslog file via the servlet on the
* tasktracker.</li>
* <li> The tasktracker is unaware of the symlink, and simply streams the contents
* of the jobToken file. The malicious user can now access potentially sensitive
* map outputs, etc. of the target user's job.</li>
* </ol>
* A similar attack is possible involving task log truncation, but in that case
* due to an insecure write to a file.
* <br/>
*/
public class SecureIOUtils {
/**
* Ensure that we are set up to run with the appropriate native support code.
* If security is disabled, and the support code is unavailable, this class
* still tries its best to be secure, but is vulnerable to some race condition
* attacks.
*
* If security is enabled but the support code is unavailable, throws a
* RuntimeException since we don't want to run insecurely.
*/
static {
boolean shouldBeSecure = UserGroupInformation.isSecurityEnabled();
boolean canBeSecure = NativeIO.isAvailable();
if (!canBeSecure && shouldBeSecure) {
throw new RuntimeException(
"Secure IO is not possible without native code extensions.");
}
// Pre-cache an instance of the raw FileSystem since we sometimes
// do secure IO in a shutdown hook, where this call could fail.
try {
rawFilesystem = FileSystem.getLocal(new Configuration()).getRaw();
} catch (IOException ie) {
throw new RuntimeException(
"Couldn't obtain an instance of RawLocalFileSystem.");
}
// SecureIO just skips security checks in the case that security is
// disabled
skipSecurity = !canBeSecure;
}
private final static boolean skipSecurity;
private final static FileSystem rawFilesystem;
/**
* Open the given File for random read access, verifying the expected user/
* group constraints if security is enabled.
*
* Note that this function provides no additional security checks if hadoop
* security is disabled, since doing the checks would be too expensive when
* native libraries are not available.
*
* @param f file that we are trying to open
* @param mode mode in which we want to open the random access file
* @param expectedOwner the expected user owner for the file
* @param expectedGroup the expected group owner for the file
* @throws IOException if an IO error occurred or if the user/group does
* not match when security is enabled.
*/
public static RandomAccessFile openForRandomRead(File f,
String mode, String expectedOwner, String expectedGroup)
throws IOException {
if (!UserGroupInformation.isSecurityEnabled()) {
return new RandomAccessFile(f, mode);
}
return forceSecureOpenForRandomRead(f, mode, expectedOwner, expectedGroup);
}
/**
* Same as openForRandomRead except that it will run even if security is off.
* This is used by unit tests.
*/
@VisibleForTesting
protected static RandomAccessFile forceSecureOpenForRandomRead(File f,
String mode, String expectedOwner, String expectedGroup)
throws IOException {
RandomAccessFile raf = new RandomAccessFile(f, mode);
boolean success = false;
try {
Stat stat = NativeIO.POSIX.getFstat(raf.getFD());
checkStat(f, stat.getOwner(), stat.getGroup(), expectedOwner,
expectedGroup);
success = true;
return raf;
} finally {
if (!success) {
raf.close();
}
}
}
/**
* Opens the {@link FSDataInputStream} on the requested file on local file
* system, verifying the expected user/group constraints if security is
* enabled.
* @param file absolute path of the file
* @param expectedOwner the expected user owner for the file
* @param expectedGroup the expected group owner for the file
* @throws IOException if an IO Error occurred or the user/group does not
* match if security is enabled
*/
public static FSDataInputStream openFSDataInputStream(File file,
String expectedOwner, String expectedGroup) throws IOException {
if (!UserGroupInformation.isSecurityEnabled()) {
return rawFilesystem.open(new Path(file.getAbsolutePath()));
}
return forceSecureOpenFSDataInputStream(file, expectedOwner, expectedGroup);
}
/**
* Same as openFSDataInputStream except that it will run even if security is
* off. This is used by unit tests.
*/
@VisibleForTesting
protected static FSDataInputStream forceSecureOpenFSDataInputStream(
File file,
String expectedOwner, String expectedGroup) throws IOException {
final FSDataInputStream in =
rawFilesystem.open(new Path(file.getAbsolutePath()));
boolean success = false;
try {
Stat stat = NativeIO.POSIX.getFstat(in.getFileDescriptor());
checkStat(file, stat.getOwner(), stat.getGroup(), expectedOwner,
expectedGroup);
success = true;
return in;
} finally {
if (!success) {
in.close();
}
}
}
/**
* Open the given File for read access, verifying the expected user/group
* constraints if security is enabled.
*
* Note that this function provides no additional checks if Hadoop
* security is disabled, since doing the checks would be too expensive
* when native libraries are not available.
*
* @param f the file that we are trying to open
* @param expectedOwner the expected user owner for the file
* @param expectedGroup the expected group owner for the file
* @throws IOException if an IO Error occurred, or security is enabled and
* the user/group does not match
*/
public static FileInputStream openForRead(File f, String expectedOwner,
String expectedGroup) throws IOException {
if (!UserGroupInformation.isSecurityEnabled()) {
return new FileInputStream(f);
}
return forceSecureOpenForRead(f, expectedOwner, expectedGroup);
}
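  // A minimal sketch of the intended call pattern; the path and owner name are
  // illustrative. When security is enabled the stream is returned only if the
  // opened file is really owned by the expected user, which defeats symlink
  // substitution; passing null skips the corresponding check.
  //
  //   File log = new File("/var/log/app/task.syslog");
  //   try (FileInputStream in =
  //            SecureIOUtils.openForRead(log, "taskuser", null)) {
  //     ... stream the file contents ...
  //   }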
/**
* Same as openForRead() except that it will run even if security is off.
* This is used by unit tests.
*/
@VisibleForTesting
protected static FileInputStream forceSecureOpenForRead(File f, String expectedOwner,
String expectedGroup) throws IOException {
FileInputStream fis = new FileInputStream(f);
boolean success = false;
try {
Stat stat = NativeIO.POSIX.getFstat(fis.getFD());
checkStat(f, stat.getOwner(), stat.getGroup(), expectedOwner,
expectedGroup);
success = true;
return fis;
} finally {
if (!success) {
fis.close();
}
}
}
private static FileOutputStream insecureCreateForWrite(File f,
int permissions) throws IOException {
// If we can't do real security, do a racy exists check followed by an
// open and chmod
if (f.exists()) {
throw new AlreadyExistsException("File " + f + " already exists");
}
FileOutputStream fos = new FileOutputStream(f);
boolean success = false;
try {
rawFilesystem.setPermission(new Path(f.getAbsolutePath()),
new FsPermission((short)permissions));
success = true;
return fos;
} finally {
if (!success) {
fos.close();
}
}
}
/**
* Open the specified File for write access, ensuring that it does not exist.
* @param f the file that we want to create
* @param permissions we want to have on the file (if security is enabled)
*
* @throws AlreadyExistsException if the file already exists
* @throws IOException if any other error occurred
*/
public static FileOutputStream createForWrite(File f, int permissions)
throws IOException {
if (skipSecurity) {
return insecureCreateForWrite(f, permissions);
} else {
return NativeIO.getCreateForWriteFileOutputStream(f, permissions);
}
}
private static void checkStat(File f, String owner, String group,
String expectedOwner,
String expectedGroup) throws IOException {
boolean success = true;
if (expectedOwner != null &&
!expectedOwner.equals(owner)) {
if (Path.WINDOWS) {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser(expectedOwner);
final String adminsGroupString = "Administrators";
success = owner.equals(adminsGroupString)
&& Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString);
} else {
success = false;
}
}
if (!success) {
throw new IOException(
"Owner '" + owner + "' for path " + f + " did not match " +
"expected owner '" + expectedOwner + "'");
}
}
/**
* Signals that an attempt to create a file at a given pathname has failed
* because another file already existed at that path.
*/
public static class AlreadyExistsException extends IOException {
private static final long serialVersionUID = 1L;
public AlreadyExistsException(String msg) {
super(msg);
}
public AlreadyExistsException(Throwable cause) {
super(cause);
}
}
}
| 10,919 | 34.570033 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A Writable Map.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapWritable extends AbstractMapWritable
implements Map<Writable, Writable> {
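  // A minimal usage sketch; the keys and values are illustrative. Because the
  // class id of each key and value is written next to its bytes, heterogeneous
  // Writable types can share one map and still round-trip through
  // write()/readFields():
  //
  //   MapWritable map = new MapWritable();
  //   map.put(new Text("attempts"), new IntWritable(3));
  //   map.put(new Text("host"), new Text("node-17"));
  //   map.write(out);        // out is a DataOutput
  //   ...
  //   MapWritable copy = new MapWritable();
  //   copy.readFields(in);   // in is a DataInput over the same bytes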
private Map<Writable, Writable> instance;
/** Default constructor. */
public MapWritable() {
super();
this.instance = new HashMap<Writable, Writable>();
}
/**
* Copy constructor.
*
* @param other the map to copy from
*/
public MapWritable(MapWritable other) {
this();
copy(other);
}
@Override
public void clear() {
instance.clear();
}
@Override
public boolean containsKey(Object key) {
return instance.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return instance.containsValue(value);
}
@Override
public Set<Map.Entry<Writable, Writable>> entrySet() {
return instance.entrySet();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof MapWritable) {
MapWritable map = (MapWritable) obj;
if (size() != map.size()) {
return false;
}
return entrySet().equals(map.entrySet());
}
return false;
}
@Override
public Writable get(Object key) {
return instance.get(key);
}
@Override
public int hashCode() {
return 1 + this.instance.hashCode();
}
@Override
public boolean isEmpty() {
return instance.isEmpty();
}
@Override
public Set<Writable> keySet() {
return instance.keySet();
}
@Override
public Writable put(Writable key, Writable value) {
addToMap(key.getClass());
addToMap(value.getClass());
return instance.put(key, value);
}
@Override
public void putAll(Map<? extends Writable, ? extends Writable> t) {
for (Map.Entry<? extends Writable, ? extends Writable> e: t.entrySet()) {
put(e.getKey(), e.getValue());
}
}
@Override
public Writable remove(Object key) {
return instance.remove(key);
}
@Override
public int size() {
return instance.size();
}
@Override
public Collection<Writable> values() {
return instance.values();
}
// Writable
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
// Write out the number of entries in the map
out.writeInt(instance.size());
// Then write out each key/value pair
for (Map.Entry<Writable, Writable> e: instance.entrySet()) {
out.writeByte(getId(e.getKey().getClass()));
e.getKey().write(out);
out.writeByte(getId(e.getValue().getClass()));
e.getValue().write(out);
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
// First clear the map. Otherwise we will just accumulate
// entries every time this method is called.
this.instance.clear();
// Read the number of entries in the map
int entries = in.readInt();
// Then read each key/value pair
for (int i = 0; i < entries; i++) {
Writable key = (Writable) ReflectionUtils.newInstance(getClass(
in.readByte()), getConf());
key.readFields(in);
Writable value = (Writable) ReflectionUtils.newInstance(getClass(
in.readByte()), getConf());
value.readFields(in);
instance.put(key, value);
}
}
@Override
public String toString() {
return instance.toString();
}
}
| 4,663 | 22.555556 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
/** A file-based map from keys to values.
*
* <p>A map is a directory containing two files, the <code>data</code> file,
* containing all keys and values in the map, and a smaller <code>index</code>
* file, containing a fraction of the keys. The fraction is determined by
* {@link Writer#getIndexInterval()}.
*
* <p>The index file is read entirely into memory. Thus key implementations
* should try to keep themselves small.
*
* <p>Map files are created by adding entries in-order. To maintain a large
* database, perform updates by copying the previous version of a database and
* merging in a sorted change list, to create a new version of the database in
* a new file. Sorting large change lists can be done with {@link
* SequenceFile.Sorter}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapFile {
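  // A minimal usage sketch; the directory name and key/value types are
  // illustrative, and conf is assumed to be an existing Configuration. Keys
  // must be appended in sorted order; the reader then serves point lookups
  // through the in-memory index:
  //
  //   MapFile.Writer writer = new MapFile.Writer(conf, new Path("/data/users"),
  //       MapFile.Writer.keyClass(Text.class),
  //       MapFile.Writer.valueClass(IntWritable.class));
  //   writer.append(new Text("alice"), new IntWritable(1));
  //   writer.append(new Text("bob"), new IntWritable(2));   // >= previous key
  //   writer.close();
  //
  //   MapFile.Reader reader = new MapFile.Reader(new Path("/data/users"), conf);
  //   IntWritable value = new IntWritable();
  //   reader.get(new Text("bob"), value);   // null return means "not found"
  //   reader.close();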
private static final Log LOG = LogFactory.getLog(MapFile.class);
/** The name of the index file. */
public static final String INDEX_FILE_NAME = "index";
/** The name of the data file. */
public static final String DATA_FILE_NAME = "data";
protected MapFile() {} // no public ctor
/** Writes a new map. */
public static class Writer implements java.io.Closeable {
private SequenceFile.Writer data;
private SequenceFile.Writer index;
    private static final String INDEX_INTERVAL = "io.map.index.interval";
private int indexInterval = 128;
private long size;
private LongWritable position = new LongWritable();
// the following fields are used only for checking key order
private WritableComparator comparator;
private DataInputBuffer inBuf = new DataInputBuffer();
private DataOutputBuffer outBuf = new DataOutputBuffer();
private WritableComparable lastKey;
/** What's the position (in bytes) we wrote when we got the last index */
private long lastIndexPos = -1;
/**
* What was size when we last wrote an index. Set to MIN_VALUE to ensure that
* we have an index at position zero -- midKey will throw an exception if this
* is not the case
*/
private long lastIndexKeyCount = Long.MIN_VALUE;
/** Create the named map for keys of the named class.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass,
Class valClass) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass));
}
/** Create the named map for keys of the named class.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass, Class valClass,
CompressionType compress,
Progressable progress) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
compression(compress), progressable(progress));
}
/** Create the named map for keys of the named class.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass, Class valClass,
CompressionType compress, CompressionCodec codec,
Progressable progress) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
compression(compress, codec), progressable(progress));
}
/** Create the named map for keys of the named class.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
Class<? extends WritableComparable> keyClass, Class valClass,
CompressionType compress) throws IOException {
this(conf, new Path(dirName), keyClass(keyClass),
valueClass(valClass), compression(compress));
}
/** Create the named map using the named key comparator.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass
) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass));
}
/** Create the named map using the named key comparator.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass,
SequenceFile.CompressionType compress) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress));
}
/** Create the named map using the named key comparator.
     * @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass,
SequenceFile.CompressionType compress,
Progressable progress) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress),
progressable(progress));
}
/** Create the named map using the named key comparator.
* @deprecated Use Writer(Configuration, Path, Option...) instead.
*/
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
WritableComparator comparator, Class valClass,
SequenceFile.CompressionType compress, CompressionCodec codec,
Progressable progress) throws IOException {
this(conf, new Path(dirName), comparator(comparator),
valueClass(valClass), compression(compress, codec),
progressable(progress));
}
// our options are a superset of sequence file writer options
public static interface Option extends SequenceFile.Writer.Option { }
private static class KeyClassOption extends Options.ClassOption
implements Option {
KeyClassOption(Class<?> value) {
super(value);
}
}
private static class ComparatorOption implements Option {
private final WritableComparator value;
ComparatorOption(WritableComparator value) {
this.value = value;
}
WritableComparator getValue() {
return value;
}
}
public static Option keyClass(Class<? extends WritableComparable> value) {
return new KeyClassOption(value);
}
public static Option comparator(WritableComparator value) {
return new ComparatorOption(value);
}
public static SequenceFile.Writer.Option valueClass(Class<?> value) {
return SequenceFile.Writer.valueClass(value);
}
public static
SequenceFile.Writer.Option compression(CompressionType type) {
return SequenceFile.Writer.compression(type);
}
public static
SequenceFile.Writer.Option compression(CompressionType type,
CompressionCodec codec) {
return SequenceFile.Writer.compression(type, codec);
}
public static SequenceFile.Writer.Option progressable(Progressable value) {
return SequenceFile.Writer.progressable(value);
}
@SuppressWarnings("unchecked")
public Writer(Configuration conf,
Path dirName,
SequenceFile.Writer.Option... opts
) throws IOException {
KeyClassOption keyClassOption =
Options.getOption(KeyClassOption.class, opts);
ComparatorOption comparatorOption =
Options.getOption(ComparatorOption.class, opts);
if ((keyClassOption == null) == (comparatorOption == null)) {
throw new IllegalArgumentException("key class or comparator option "
+ "must be set");
}
this.indexInterval = conf.getInt(INDEX_INTERVAL, this.indexInterval);
Class<? extends WritableComparable> keyClass;
if (keyClassOption == null) {
this.comparator = comparatorOption.getValue();
keyClass = comparator.getKeyClass();
} else {
keyClass=
(Class<? extends WritableComparable>) keyClassOption.getValue();
this.comparator = WritableComparator.get(keyClass, conf);
}
this.lastKey = comparator.newKey();
FileSystem fs = dirName.getFileSystem(conf);
if (!fs.mkdirs(dirName)) {
throw new IOException("Mkdirs failed to create directory " + dirName);
}
Path dataFile = new Path(dirName, DATA_FILE_NAME);
Path indexFile = new Path(dirName, INDEX_FILE_NAME);
SequenceFile.Writer.Option[] dataOptions =
Options.prependOptions(opts,
SequenceFile.Writer.file(dataFile),
SequenceFile.Writer.keyClass(keyClass));
this.data = SequenceFile.createWriter(conf, dataOptions);
SequenceFile.Writer.Option[] indexOptions =
Options.prependOptions(opts, SequenceFile.Writer.file(indexFile),
SequenceFile.Writer.keyClass(keyClass),
SequenceFile.Writer.valueClass(LongWritable.class),
SequenceFile.Writer.compression(CompressionType.BLOCK));
this.index = SequenceFile.createWriter(conf, indexOptions);
}
/** The number of entries that are added before an index entry is added.*/
public int getIndexInterval() { return indexInterval; }
/** Sets the index interval.
* @see #getIndexInterval()
*/
public void setIndexInterval(int interval) { indexInterval = interval; }
/** Sets the index interval and stores it in conf
* @see #getIndexInterval()
*/
public static void setIndexInterval(Configuration conf, int interval) {
conf.setInt(INDEX_INTERVAL, interval);
}
/** Close the map. */
@Override
public synchronized void close() throws IOException {
data.close();
index.close();
}
    /** Append a key/value pair to the map. The key must be greater than or
     * equal to the previous key added to the map. */
public synchronized void append(WritableComparable key, Writable val)
throws IOException {
checkKey(key);
long pos = data.getLength();
// Only write an index if we've changed positions. In a block compressed
// file, this means we write an entry at the start of each block
if (size >= lastIndexKeyCount + indexInterval && pos > lastIndexPos) {
position.set(pos); // point to current eof
index.append(key, position);
lastIndexPos = pos;
lastIndexKeyCount = size;
}
data.append(key, val); // append key/value to data
size++;
}
private void checkKey(WritableComparable key) throws IOException {
// check that keys are well-ordered
if (size != 0 && comparator.compare(lastKey, key) > 0)
throw new IOException("key out of order: "+key+" after "+lastKey);
// update lastKey with a copy of key by writing and reading
outBuf.reset();
key.write(outBuf); // write new key
inBuf.reset(outBuf.getData(), outBuf.getLength());
lastKey.readFields(inBuf); // read into lastKey
}
}
/** Provide access to an existing map. */
public static class Reader implements java.io.Closeable {
/** Number of index entries to skip between each entry. Zero by default.
* Setting this to values larger than zero can facilitate opening large map
* files using less memory. */
private int INDEX_SKIP = 0;
private WritableComparator comparator;
private WritableComparable nextKey;
private long seekPosition = -1;
private int seekIndex = -1;
private long firstPosition;
// the data, on disk
private SequenceFile.Reader data;
private SequenceFile.Reader index;
// whether the index Reader was closed
private boolean indexClosed = false;
// the index, in memory
private int count = -1;
private WritableComparable[] keys;
private long[] positions;
/** Returns the class of keys in this file. */
public Class<?> getKeyClass() { return data.getKeyClass(); }
/** Returns the class of values in this file. */
public Class<?> getValueClass() { return data.getValueClass(); }
public static interface Option extends SequenceFile.Reader.Option {}
public static Option comparator(WritableComparator value) {
return new ComparatorOption(value);
}
static class ComparatorOption implements Option {
private final WritableComparator value;
ComparatorOption(WritableComparator value) {
this.value = value;
}
WritableComparator getValue() {
return value;
}
}
public Reader(Path dir, Configuration conf,
SequenceFile.Reader.Option... opts) throws IOException {
ComparatorOption comparatorOption =
Options.getOption(ComparatorOption.class, opts);
WritableComparator comparator =
comparatorOption == null ? null : comparatorOption.getValue();
INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
open(dir, comparator, conf, opts);
}
/** Construct a map reader for the named map.
* @deprecated
*/
@Deprecated
public Reader(FileSystem fs, String dirName,
Configuration conf) throws IOException {
this(new Path(dirName), conf);
}
/** Construct a map reader for the named map using the named comparator.
* @deprecated
*/
@Deprecated
public Reader(FileSystem fs, String dirName, WritableComparator comparator,
Configuration conf) throws IOException {
this(new Path(dirName), conf, comparator(comparator));
}
protected synchronized void open(Path dir,
WritableComparator comparator,
Configuration conf,
SequenceFile.Reader.Option... options
) throws IOException {
Path dataFile = new Path(dir, DATA_FILE_NAME);
Path indexFile = new Path(dir, INDEX_FILE_NAME);
// open the data
this.data = createDataFileReader(dataFile, conf, options);
this.firstPosition = data.getPosition();
if (comparator == null) {
Class<? extends WritableComparable> cls;
cls = data.getKeyClass().asSubclass(WritableComparable.class);
this.comparator = WritableComparator.get(cls, conf);
} else {
this.comparator = comparator;
}
// open the index
SequenceFile.Reader.Option[] indexOptions =
Options.prependOptions(options, SequenceFile.Reader.file(indexFile));
this.index = new SequenceFile.Reader(conf, indexOptions);
}
/**
* Override this method to specialize the type of
* {@link SequenceFile.Reader} returned.
*/
protected SequenceFile.Reader
createDataFileReader(Path dataFile, Configuration conf,
SequenceFile.Reader.Option... options
) throws IOException {
SequenceFile.Reader.Option[] newOptions =
Options.prependOptions(options, SequenceFile.Reader.file(dataFile));
return new SequenceFile.Reader(conf, newOptions);
}
private void readIndex() throws IOException {
// read the index entirely into memory
if (this.keys != null)
return;
this.count = 0;
this.positions = new long[1024];
try {
int skip = INDEX_SKIP;
LongWritable position = new LongWritable();
WritableComparable lastKey = null;
long lastIndex = -1;
ArrayList<WritableComparable> keyBuilder = new ArrayList<WritableComparable>(1024);
while (true) {
WritableComparable k = comparator.newKey();
if (!index.next(k, position))
break;
// check order to make sure comparator is compatible
if (lastKey != null && comparator.compare(lastKey, k) > 0)
throw new IOException("key out of order: "+k+" after "+lastKey);
lastKey = k;
if (skip > 0) {
skip--;
continue; // skip this entry
} else {
skip = INDEX_SKIP; // reset skip
}
// don't read an index that is the same as the previous one. Block
// compressed map files used to do this (multiple entries would point
// at the same block)
if (position.get() == lastIndex)
continue;
if (count == positions.length) {
positions = Arrays.copyOf(positions, positions.length * 2);
}
keyBuilder.add(k);
positions[count] = position.get();
count++;
}
this.keys = keyBuilder.toArray(new WritableComparable[count]);
positions = Arrays.copyOf(positions, count);
} catch (EOFException e) {
LOG.warn("Unexpected EOF reading " + index +
" at entry #" + count + ". Ignoring.");
} finally {
indexClosed = true;
index.close();
}
}
/** Re-positions the reader before its first key. */
public synchronized void reset() throws IOException {
data.seek(firstPosition);
}
    /** Get the key at approximately the middle of the file, or null if the
* file is empty.
*/
public synchronized WritableComparable midKey() throws IOException {
readIndex();
if (count == 0) {
return null;
}
return keys[(count - 1) / 2];
}
/** Reads the final key from the file.
*
* @param key key to read into
*/
public synchronized void finalKey(WritableComparable key)
throws IOException {
long originalPosition = data.getPosition(); // save position
try {
readIndex(); // make sure index is valid
if (count > 0) {
data.seek(positions[count-1]); // skip to last indexed entry
} else {
reset(); // start at the beginning
}
while (data.next(key)) {} // scan to eof
} finally {
data.seek(originalPosition); // restore position
}
}
/** Positions the reader at the named key, or if none such exists, at the
* first entry after the named key. Returns true iff the named key exists
* in this map.
*/
public synchronized boolean seek(WritableComparable key) throws IOException {
return seekInternal(key) == 0;
}
/**
* Positions the reader at the named key, or if none such exists, at the
* first entry after the named key.
*
* @return 0 - exact match found
* < 0 - positioned at next record
* 1 - no more records in file
*/
private synchronized int seekInternal(WritableComparable key)
throws IOException {
return seekInternal(key, false);
}
/**
* Positions the reader at the named key, or if none such exists, at the
* key that falls just before or just after dependent on how the
* <code>before</code> parameter is set.
*
* @param before - IF true, and <code>key</code> does not exist, position
* file at entry that falls just before <code>key</code>. Otherwise,
* position file at record that sorts just after.
* @return 0 - exact match found
* < 0 - positioned at next record
* 1 - no more records in file
*/
private synchronized int seekInternal(WritableComparable key,
final boolean before)
throws IOException {
readIndex(); // make sure index is read
if (seekIndex != -1 // seeked before
&& seekIndex+1 < count
&& comparator.compare(key, keys[seekIndex+1])<0 // before next indexed
&& comparator.compare(key, nextKey)
>= 0) { // but after last seeked
// do nothing
} else {
seekIndex = binarySearch(key);
if (seekIndex < 0) // decode insertion point
seekIndex = -seekIndex-2;
if (seekIndex == -1) // belongs before first entry
seekPosition = firstPosition; // use beginning of file
else
seekPosition = positions[seekIndex]; // else use index
}
data.seek(seekPosition);
if (nextKey == null)
nextKey = comparator.newKey();
// If we're looking for the key before, we need to keep track
// of the position we got the current key as well as the position
// of the key before it.
long prevPosition = -1;
long curPosition = seekPosition;
while (data.next(nextKey)) {
int c = comparator.compare(key, nextKey);
if (c <= 0) { // at or beyond desired
if (before && c != 0) {
if (prevPosition == -1) {
// We're on the first record of this index block
// and we've already passed the search key. Therefore
// we must be at the beginning of the file, so seek
// to the beginning of this block and return c
data.seek(curPosition);
} else {
// We have a previous record to back up to
data.seek(prevPosition);
data.next(nextKey);
// now that we've rewound, the search key must be greater than this key
return 1;
}
}
return c;
}
if (before) {
prevPosition = curPosition;
curPosition = data.getPosition();
}
}
return 1;
}
private int binarySearch(WritableComparable key) {
int low = 0;
int high = count-1;
while (low <= high) {
int mid = (low + high) >>> 1;
WritableComparable midVal = keys[mid];
int cmp = comparator.compare(midVal, key);
if (cmp < 0)
low = mid + 1;
else if (cmp > 0)
high = mid - 1;
else
return mid; // key found
}
return -(low + 1); // key not found.
}
/** Read the next key/value pair in the map into <code>key</code> and
* <code>val</code>. Returns true if such a pair exists and false when at
* the end of the map */
public synchronized boolean next(WritableComparable key, Writable val)
throws IOException {
return data.next(key, val);
}
/** Return the value for the named key, or null if none exists. */
public synchronized Writable get(WritableComparable key, Writable val)
throws IOException {
if (seek(key)) {
data.getCurrentValue(val);
return val;
} else
return null;
}
/**
* Finds the record that is the closest match to the specified key.
     * Returns <code>key</code> or, if it does not exist, the first entry
     * after the named key.
     *
     * @param key - key that we're trying to find
     * @param val - data value if key is found
     * @return - the key that was the closest match or null if eof.
*/
public synchronized WritableComparable getClosest(WritableComparable key,
Writable val)
throws IOException {
return getClosest(key, val, false);
}
/**
* Finds the record that is the closest match to the specified key.
*
* @param key - key that we're trying to find
* @param val - data value if key is found
* @param before - IF true, and <code>key</code> does not exist, return
* the first entry that falls just before the <code>key</code>. Otherwise,
* return the record that sorts just after.
* @return - the key that was the closest match or null if eof.
*/
public synchronized WritableComparable getClosest(WritableComparable key,
Writable val, final boolean before)
throws IOException {
int c = seekInternal(key, before);
// If we didn't get an exact match, and we ended up in the wrong
// direction relative to the query key, return null since we
// must be at the beginning or end of the file.
if ((!before && c > 0) ||
(before && c < 0)) {
return null;
}
data.getCurrentValue(val);
return nextKey;
}
/** Close the map. */
@Override
public synchronized void close() throws IOException {
if (!indexClosed) {
index.close();
}
data.close();
}
}
/** Renames an existing map directory. */
public static void rename(FileSystem fs, String oldName, String newName)
throws IOException {
Path oldDir = new Path(oldName);
Path newDir = new Path(newName);
if (!fs.rename(oldDir, newDir)) {
throw new IOException("Could not rename " + oldDir + " to " + newDir);
}
}
/** Deletes the named map file. */
public static void delete(FileSystem fs, String name) throws IOException {
Path dir = new Path(name);
Path data = new Path(dir, DATA_FILE_NAME);
Path index = new Path(dir, INDEX_FILE_NAME);
fs.delete(data, true);
fs.delete(index, true);
fs.delete(dir, true);
}
/**
* This method attempts to fix a corrupt MapFile by re-creating its index.
* @param fs filesystem
* @param dir directory containing the MapFile data and index
* @param keyClass key class (has to be a subclass of Writable)
* @param valueClass value class (has to be a subclass of Writable)
* @param dryrun do not perform any changes, just report what needs to be done
* @return number of valid entries in this MapFile, or -1 if no fixing was needed
* @throws Exception
*/
public static long fix(FileSystem fs, Path dir,
Class<? extends Writable> keyClass,
Class<? extends Writable> valueClass, boolean dryrun,
Configuration conf) throws Exception {
    String dr = (dryrun ? "[DRY RUN] " : "");
Path data = new Path(dir, DATA_FILE_NAME);
Path index = new Path(dir, INDEX_FILE_NAME);
int indexInterval = conf.getInt(Writer.INDEX_INTERVAL, 128);
if (!fs.exists(data)) {
// there's nothing we can do to fix this!
throw new Exception(dr + "Missing data file in " + dir + ", impossible to fix this.");
}
if (fs.exists(index)) {
// no fixing needed
return -1;
}
SequenceFile.Reader dataReader =
new SequenceFile.Reader(conf, SequenceFile.Reader.file(data));
if (!dataReader.getKeyClass().equals(keyClass)) {
throw new Exception(dr + "Wrong key class in " + dir + ", expected" + keyClass.getName() +
", got " + dataReader.getKeyClass().getName());
}
if (!dataReader.getValueClass().equals(valueClass)) {
throw new Exception(dr + "Wrong value class in " + dir + ", expected" + valueClass.getName() +
", got " + dataReader.getValueClass().getName());
}
long cnt = 0L;
Writable key = ReflectionUtils.newInstance(keyClass, conf);
Writable value = ReflectionUtils.newInstance(valueClass, conf);
SequenceFile.Writer indexWriter = null;
if (!dryrun) {
indexWriter =
SequenceFile.createWriter(conf,
SequenceFile.Writer.file(index),
SequenceFile.Writer.keyClass(keyClass),
SequenceFile.Writer.valueClass
(LongWritable.class));
}
try {
long pos = 0L;
LongWritable position = new LongWritable();
while(dataReader.next(key, value)) {
cnt++;
if (cnt % indexInterval == 0) {
position.set(pos);
if (!dryrun) indexWriter.append(key, position);
}
pos = dataReader.getPosition();
}
} catch(Throwable t) {
// truncated data file. swallow it.
}
dataReader.close();
if (!dryrun) indexWriter.close();
return cnt;
}
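  // A minimal sketch; the directory and classes are illustrative. With dryrun
  // set, the data file is scanned and counted but no index is written:
  //
  //   long entries = MapFile.fix(fs, new Path("/data/users"),
  //       Text.class, IntWritable.class, true, conf);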
/**
* Class to merge multiple MapFiles of same Key and Value types to one MapFile
*/
public static class Merger {
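    // A minimal sketch; the paths are illustrative. All inputs must share the
    // same key and value classes, and each input is already sorted, so the
    // merge is a single sequential pass over the readers:
    //
    //   Path[] parts = { new Path("/data/part-0"), new Path("/data/part-1") };
    //   new MapFile.Merger(conf).merge(parts, false, new Path("/data/merged"));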
private Configuration conf;
private WritableComparator comparator = null;
private Reader[] inReaders;
private Writer outWriter;
private Class<Writable> valueClass = null;
private Class<WritableComparable> keyClass = null;
public Merger(Configuration conf) throws IOException {
this.conf = conf;
}
/**
     * Merge multiple MapFiles into one MapFile
*
* @param inMapFiles
* @param outMapFile
* @throws IOException
*/
public void merge(Path[] inMapFiles, boolean deleteInputs,
Path outMapFile) throws IOException {
try {
open(inMapFiles, outMapFile);
mergePass();
} finally {
close();
}
if (deleteInputs) {
for (int i = 0; i < inMapFiles.length; i++) {
Path path = inMapFiles[i];
delete(path.getFileSystem(conf), path.toString());
}
}
}
/*
     * Open all input files for reading and verify the key and value types, and
     * open the output file for writing.
*/
@SuppressWarnings("unchecked")
private void open(Path[] inMapFiles, Path outMapFile) throws IOException {
inReaders = new Reader[inMapFiles.length];
for (int i = 0; i < inMapFiles.length; i++) {
Reader reader = new Reader(inMapFiles[i], conf);
if (keyClass == null || valueClass == null) {
keyClass = (Class<WritableComparable>) reader.getKeyClass();
valueClass = (Class<Writable>) reader.getValueClass();
} else if (keyClass != reader.getKeyClass()
|| valueClass != reader.getValueClass()) {
throw new HadoopIllegalArgumentException(
"Input files cannot be merged as they"
+ " have different Key and Value classes");
}
inReaders[i] = reader;
}
if (comparator == null) {
Class<? extends WritableComparable> cls;
cls = keyClass.asSubclass(WritableComparable.class);
this.comparator = WritableComparator.get(cls, conf);
} else if (comparator.getKeyClass() != keyClass) {
throw new HadoopIllegalArgumentException(
"Input files cannot be merged as they"
+ " have different Key class compared to"
+ " specified comparator");
}
outWriter = new MapFile.Writer(conf, outMapFile,
MapFile.Writer.keyClass(keyClass),
MapFile.Writer.valueClass(valueClass));
}
/**
* Merge all input files to output map file.<br>
* 1. Read first key/value from all input files to keys/values array. <br>
* 2. Select the least key and corresponding value. <br>
* 3. Write the selected key and value to output file. <br>
* 4. Replace the already written key/value in keys/values arrays with the
* next key/value from the selected input <br>
* 5. Repeat step 2-4 till all keys are read. <br>
*/
private void mergePass() throws IOException {
// re-usable array
WritableComparable[] keys = new WritableComparable[inReaders.length];
Writable[] values = new Writable[inReaders.length];
// Read first key/value from all inputs
for (int i = 0; i < inReaders.length; i++) {
keys[i] = ReflectionUtils.newInstance(keyClass, null);
values[i] = ReflectionUtils.newInstance(valueClass, null);
if (!inReaders[i].next(keys[i], values[i])) {
// Handle empty files
keys[i] = null;
values[i] = null;
}
}
do {
int currentEntry = -1;
WritableComparable currentKey = null;
Writable currentValue = null;
for (int i = 0; i < keys.length; i++) {
if (keys[i] == null) {
// Skip Readers reached EOF
continue;
}
if (currentKey == null || comparator.compare(currentKey, keys[i]) > 0) {
currentEntry = i;
currentKey = keys[i];
currentValue = values[i];
}
}
if (currentKey == null) {
// Merge Complete
break;
}
// Write the selected key/value to merge stream
outWriter.append(currentKey, currentValue);
// Replace the already written key/value in keys/values arrays with the
// next key/value from the selected input
if (!inReaders[currentEntry].next(keys[currentEntry],
values[currentEntry])) {
// EOF for this file
keys[currentEntry] = null;
values[currentEntry] = null;
}
} while (true);
}
private void close() throws IOException {
for (int i = 0; i < inReaders.length; i++) {
IOUtils.closeStream(inReaders[i]);
inReaders[i] = null;
}
if (outWriter != null) {
outWriter.close();
outWriter = null;
}
}
}
public static void main(String[] args) throws Exception {
String usage = "Usage: MapFile inFile outFile";
if (args.length != 2) {
System.err.println(usage);
System.exit(-1);
}
String in = args[0];
String out = args[1];
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
MapFile.Reader reader = null;
MapFile.Writer writer = null;
try {
reader = new MapFile.Reader(fs, in, conf);
writer =
new MapFile.Writer(conf, fs, out,
reader.getKeyClass().asSubclass(WritableComparable.class),
reader.getValueClass());
WritableComparable key = ReflectionUtils.newInstance(reader.getKeyClass()
.asSubclass(WritableComparable.class), conf);
Writable value = ReflectionUtils.newInstance(reader.getValueClass()
.asSubclass(Writable.class), conf);
      while (reader.next(key, value)) { // copy all entries
        writer.append(key, value);
      }
} finally {
IOUtils.cleanup(LOG, writer, reader);
}
}
}
| 36,681 | 35.499502 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ArrayWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.lang.reflect.Array;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A Writable for arrays containing instances of a class. The elements of this
* writable must all be instances of the same class. If this writable will be
* the input for a Reducer, you will need to create a subclass that sets the
* value to be of the proper type.
*
* For example:
* <code>
* public class IntArrayWritable extends ArrayWritable {
* public IntArrayWritable() {
* super(IntWritable.class);
* }
* }
* </code>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ArrayWritable implements Writable {
private Class<? extends Writable> valueClass;
private Writable[] values;
public ArrayWritable(Class<? extends Writable> valueClass) {
if (valueClass == null) {
throw new IllegalArgumentException("null valueClass");
}
this.valueClass = valueClass;
}
public ArrayWritable(Class<? extends Writable> valueClass, Writable[] values) {
this(valueClass);
this.values = values;
}
public ArrayWritable(String[] strings) {
this(UTF8.class, new Writable[strings.length]);
for (int i = 0; i < strings.length; i++) {
values[i] = new UTF8(strings[i]);
}
}
public Class getValueClass() {
return valueClass;
}
public String[] toStrings() {
String[] strings = new String[values.length];
for (int i = 0; i < values.length; i++) {
strings[i] = values[i].toString();
}
return strings;
}
public Object toArray() {
Object result = Array.newInstance(valueClass, values.length);
for (int i = 0; i < values.length; i++) {
Array.set(result, i, values[i]);
}
return result;
}
public void set(Writable[] values) { this.values = values; }
public Writable[] get() { return values; }
@Override
public void readFields(DataInput in) throws IOException {
values = new Writable[in.readInt()]; // construct values
for (int i = 0; i < values.length; i++) {
Writable value = WritableFactories.newInstance(valueClass);
value.readFields(in); // read a value
values[i] = value; // store it in values
}
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(values.length); // write values
for (int i = 0; i < values.length; i++) {
values[i].write(out);
}
}
}
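// Usage sketch (illustrative, not part of the original source): serializing an
// ArrayWritable and reading it back, using DataOutputBuffer/DataInputBuffer
// from this package as the byte sink and source.
//
//   ArrayWritable aw = new ArrayWritable(IntWritable.class,
//       new Writable[] { new IntWritable(1), new IntWritable(2) });
//   DataOutputBuffer out = new DataOutputBuffer();
//   aw.write(out);
//   DataInputBuffer in = new DataInputBuffer();
//   in.reset(out.getData(), out.getLength());
//   ArrayWritable copy = new ArrayWritable(IntWritable.class);
//   copy.readFields(in);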
| 3,384 | 29.495495 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BooleanWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A WritableComparable for booleans.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BooleanWritable implements WritableComparable<BooleanWritable> {
private boolean value;
  /** Default constructor; the value is initially false. */
  public BooleanWritable() {}
  /** Construct a BooleanWritable with the given initial value. */
public BooleanWritable(boolean value) {
set(value);
}
/**
* Set the value of the BooleanWritable
*/
public void set(boolean value) {
this.value = value;
}
/**
* Returns the value of the BooleanWritable
*/
public boolean get() {
return value;
}
  /** Read the boolean value from <code>in</code>. */
@Override
public void readFields(DataInput in) throws IOException {
value = in.readBoolean();
}
  /** Write the boolean value to <code>out</code>. */
@Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(value);
}
  /** Two BooleanWritables are equal if and only if their values are equal. */
@Override
public boolean equals(Object o) {
if (!(o instanceof BooleanWritable)) {
return false;
}
BooleanWritable other = (BooleanWritable) o;
return this.value == other.value;
}
@Override
public int hashCode() {
return value ? 0 : 1;
}
  /** Compare two BooleanWritables; false sorts before true. */
@Override
public int compareTo(BooleanWritable o) {
boolean a = this.value;
boolean b = o.value;
return ((a == b) ? 0 : (a == false) ? -1 : 1);
}
@Override
public String toString() {
return Boolean.toString(get());
}
/**
* A Comparator optimized for BooleanWritable.
*/
public static class Comparator extends WritableComparator {
public Comparator() {
super(BooleanWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1, l1, b2, s2, l2);
}
}
static {
WritableComparator.define(BooleanWritable.class, new Comparator());
}
}
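// Usage sketch (illustrative, not part of the original source): obtaining the
// registered comparator and comparing two values; false sorts before true.
//
//   WritableComparator cmp = WritableComparator.get(BooleanWritable.class);
//   int c = cmp.compare(new BooleanWritable(false), new BooleanWritable(true));
//   // c < 0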
| 2,783 | 21.451613 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/CompareUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.Serializable;
import java.util.Comparator;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
class CompareUtils {
/**
* Prevent the instantiation of class.
*/
private CompareUtils() {
// nothing
}
/**
* A comparator to compare anything that implements {@link RawComparable}
* using a customized comparator.
*/
public static final class BytesComparator implements
Comparator<RawComparable> {
private RawComparator<Object> cmp;
public BytesComparator(RawComparator<Object> cmp) {
this.cmp = cmp;
}
@Override
public int compare(RawComparable o1, RawComparable o2) {
return compare(o1.buffer(), o1.offset(), o1.size(), o2.buffer(), o2
.offset(), o2.size());
}
public int compare(byte[] a, int off1, int len1, byte[] b, int off2,
int len2) {
return cmp.compare(a, off1, len1, b, off2, len2);
}
}
/**
   * Interface for all objects that have a single long magnitude.
*/
static interface Scalar {
long magnitude();
}
static final class ScalarLong implements Scalar {
private long magnitude;
public ScalarLong(long m) {
magnitude = m;
}
@Override
public long magnitude() {
return magnitude;
}
}
public static final class ScalarComparator implements Comparator<Scalar>, Serializable {
@Override
public int compare(Scalar o1, Scalar o2) {
long diff = o1.magnitude() - o2.magnitude();
if (diff < 0) return -1;
if (diff > 0) return 1;
return 0;
}
}
public static final class MemcmpRawComparator implements
RawComparator<Object>, Serializable {
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2);
}
@Override
public int compare(Object o1, Object o2) {
throw new RuntimeException("Object comparison not supported");
}
}
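  // Usage sketch (illustrative, not part of the original source): comparing two
  // raw byte ranges lexicographically with the memcmp-style comparator.
  //
  //   byte[] a = { 1, 2, 3 };
  //   byte[] b = { 1, 2, 4 };
  //   int c = new CompareUtils.MemcmpRawComparator()
  //       .compare(a, 0, a.length, b, 0, b.length); // c < 0, a sorts first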
}
| 2,861 | 27.62 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.file.tfile.BCFile.BlockRegion;
import org.apache.hadoop.io.file.tfile.BCFile.MetaIndexEntry;
import org.apache.hadoop.io.file.tfile.TFile.TFileIndexEntry;
import org.apache.hadoop.io.file.tfile.Utils.Version;
/**
* Dumping the information of a TFile.
*/
class TFileDumper {
static final Log LOG = LogFactory.getLog(TFileDumper.class);
private TFileDumper() {
// namespace object not constructable.
}
private enum Align {
LEFT, CENTER, RIGHT, ZERO_PADDED;
static String format(String s, int width, Align align) {
if (s.length() >= width) return s;
int room = width - s.length();
Align alignAdjusted = align;
if (room == 1) {
alignAdjusted = LEFT;
}
if (alignAdjusted == LEFT) {
return s + String.format("%" + room + "s", "");
}
if (alignAdjusted == RIGHT) {
return String.format("%" + room + "s", "") + s;
}
if (alignAdjusted == CENTER) {
int half = room / 2;
return String.format("%" + half + "s", "") + s
+ String.format("%" + (room - half) + "s", "");
}
throw new IllegalArgumentException("Unsupported alignment");
}
static String format(long l, int width, Align align) {
if (align == ZERO_PADDED) {
return String.format("%0" + width + "d", l);
}
return format(Long.toString(l), width, align);
}
static int calculateWidth(String caption, long max) {
return Math.max(caption.length(), Long.toString(max).length());
}
}
/**
* Dump information about TFile.
*
* @param file
* Path string of the TFile
* @param out
* PrintStream to output the information.
* @param conf
* The configuration object.
* @throws IOException
*/
static public void dumpInfo(String file, PrintStream out, Configuration conf)
throws IOException {
final int maxKeySampleLen = 16;
Path path = new Path(file);
FileSystem fs = path.getFileSystem(conf);
long length = fs.getFileStatus(path).getLen();
FSDataInputStream fsdis = fs.open(path);
TFile.Reader reader = new TFile.Reader(fsdis, length, conf);
try {
LinkedHashMap<String, String> properties =
new LinkedHashMap<String, String>();
int blockCnt = reader.readerBCF.getBlockCount();
int metaBlkCnt = reader.readerBCF.metaIndex.index.size();
properties.put("BCFile Version", reader.readerBCF.version.toString());
properties.put("TFile Version", reader.tfileMeta.version.toString());
properties.put("File Length", Long.toString(length));
properties.put("Data Compression", reader.readerBCF
.getDefaultCompressionName());
properties.put("Record Count", Long.toString(reader.getEntryCount()));
properties.put("Sorted", Boolean.toString(reader.isSorted()));
if (reader.isSorted()) {
properties.put("Comparator", reader.getComparatorName());
}
properties.put("Data Block Count", Integer.toString(blockCnt));
long dataSize = 0, dataSizeUncompressed = 0;
if (blockCnt > 0) {
for (int i = 0; i < blockCnt; ++i) {
BlockRegion region =
reader.readerBCF.dataIndex.getBlockRegionList().get(i);
dataSize += region.getCompressedSize();
dataSizeUncompressed += region.getRawSize();
}
properties.put("Data Block Bytes", Long.toString(dataSize));
        if (!"none".equals(reader.readerBCF.getDefaultCompressionName())) {
properties.put("Data Block Uncompressed Bytes", Long
.toString(dataSizeUncompressed));
properties.put("Data Block Compression Ratio", String.format(
"1:%.1f", (double) dataSizeUncompressed / dataSize));
}
}
properties.put("Meta Block Count", Integer.toString(metaBlkCnt));
long metaSize = 0, metaSizeUncompressed = 0;
if (metaBlkCnt > 0) {
Collection<MetaIndexEntry> metaBlks =
reader.readerBCF.metaIndex.index.values();
boolean calculateCompression = false;
for (Iterator<MetaIndexEntry> it = metaBlks.iterator(); it.hasNext();) {
MetaIndexEntry e = it.next();
metaSize += e.getRegion().getCompressedSize();
metaSizeUncompressed += e.getRegion().getRawSize();
if (e.getCompressionAlgorithm() != Compression.Algorithm.NONE) {
calculateCompression = true;
}
}
properties.put("Meta Block Bytes", Long.toString(metaSize));
if (calculateCompression) {
properties.put("Meta Block Uncompressed Bytes", Long
.toString(metaSizeUncompressed));
properties.put("Meta Block Compression Ratio", String.format(
"1:%.1f", (double) metaSizeUncompressed / metaSize));
}
}
properties.put("Meta-Data Size Ratio", String.format("1:%.1f",
(double) dataSize / metaSize));
long leftOverBytes = length - dataSize - metaSize;
long miscSize =
BCFile.Magic.size() * 2 + Long.SIZE / Byte.SIZE + Version.size();
long metaIndexSize = leftOverBytes - miscSize;
properties.put("Meta Block Index Bytes", Long.toString(metaIndexSize));
properties.put("Headers Etc Bytes", Long.toString(miscSize));
// Now output the properties table.
int maxKeyLength = 0;
Set<Map.Entry<String, String>> entrySet = properties.entrySet();
for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it
.hasNext();) {
Map.Entry<String, String> e = it.next();
if (e.getKey().length() > maxKeyLength) {
maxKeyLength = e.getKey().length();
}
}
for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it
.hasNext();) {
Map.Entry<String, String> e = it.next();
out.printf("%s : %s%n", Align.format(e.getKey(), maxKeyLength,
Align.LEFT), e.getValue());
}
out.println();
reader.checkTFileDataIndex();
if (blockCnt > 0) {
String blkID = "Data-Block";
int blkIDWidth = Align.calculateWidth(blkID, blockCnt);
int blkIDWidth2 = Align.calculateWidth("", blockCnt);
String offset = "Offset";
int offsetWidth = Align.calculateWidth(offset, length);
String blkLen = "Length";
int blkLenWidth =
Align.calculateWidth(blkLen, dataSize / blockCnt * 10);
String rawSize = "Raw-Size";
int rawSizeWidth =
Align.calculateWidth(rawSize, dataSizeUncompressed / blockCnt * 10);
String records = "Records";
int recordsWidth =
Align.calculateWidth(records, reader.getEntryCount() / blockCnt
* 10);
String endKey = "End-Key";
int endKeyWidth = Math.max(endKey.length(), maxKeySampleLen * 2 + 5);
out.printf("%s %s %s %s %s %s%n", Align.format(blkID, blkIDWidth,
Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER),
Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(
rawSize, rawSizeWidth, Align.CENTER), Align.format(records,
recordsWidth, Align.CENTER), Align.format(endKey, endKeyWidth,
Align.LEFT));
for (int i = 0; i < blockCnt; ++i) {
BlockRegion region =
reader.readerBCF.dataIndex.getBlockRegionList().get(i);
TFileIndexEntry indexEntry = reader.tfileIndex.getEntry(i);
out.printf("%s %s %s %s %s ", Align.format(Align.format(i,
blkIDWidth2, Align.ZERO_PADDED), blkIDWidth, Align.LEFT), Align
.format(region.getOffset(), offsetWidth, Align.LEFT), Align
.format(region.getCompressedSize(), blkLenWidth, Align.LEFT),
Align.format(region.getRawSize(), rawSizeWidth, Align.LEFT),
Align.format(indexEntry.kvEntries, recordsWidth, Align.LEFT));
byte[] key = indexEntry.key;
boolean asAscii = true;
int sampleLen = Math.min(maxKeySampleLen, key.length);
for (int j = 0; j < sampleLen; ++j) {
byte b = key[j];
if ((b < 32 && b != 9) || (b == 127)) {
asAscii = false;
}
}
if (!asAscii) {
out.print("0X");
for (int j = 0; j < sampleLen; ++j) {
              byte b = key[j];
out.printf("%X", b);
}
} else {
out.print(new String(key, 0, sampleLen, Charsets.UTF_8));
}
if (sampleLen < key.length) {
out.print("...");
}
out.println();
}
}
out.println();
if (metaBlkCnt > 0) {
String name = "Meta-Block";
int maxNameLen = 0;
Set<Map.Entry<String, MetaIndexEntry>> metaBlkEntrySet =
reader.readerBCF.metaIndex.index.entrySet();
for (Iterator<Map.Entry<String, MetaIndexEntry>> it =
metaBlkEntrySet.iterator(); it.hasNext();) {
Map.Entry<String, MetaIndexEntry> e = it.next();
if (e.getKey().length() > maxNameLen) {
maxNameLen = e.getKey().length();
}
}
int nameWidth = Math.max(name.length(), maxNameLen);
String offset = "Offset";
int offsetWidth = Align.calculateWidth(offset, length);
String blkLen = "Length";
int blkLenWidth =
Align.calculateWidth(blkLen, metaSize / metaBlkCnt * 10);
String rawSize = "Raw-Size";
int rawSizeWidth =
Align.calculateWidth(rawSize, metaSizeUncompressed / metaBlkCnt
* 10);
String compression = "Compression";
int compressionWidth = compression.length();
out.printf("%s %s %s %s %s%n", Align.format(name, nameWidth,
Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER),
Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(
rawSize, rawSizeWidth, Align.CENTER), Align.format(compression,
compressionWidth, Align.LEFT));
for (Iterator<Map.Entry<String, MetaIndexEntry>> it =
metaBlkEntrySet.iterator(); it.hasNext();) {
Map.Entry<String, MetaIndexEntry> e = it.next();
String blkName = e.getValue().getMetaName();
BlockRegion region = e.getValue().getRegion();
String blkCompression =
e.getValue().getCompressionAlgorithm().getName();
out.printf("%s %s %s %s %s%n", Align.format(blkName, nameWidth,
Align.LEFT), Align.format(region.getOffset(), offsetWidth,
Align.LEFT), Align.format(region.getCompressedSize(),
blkLenWidth, Align.LEFT), Align.format(region.getRawSize(),
rawSizeWidth, Align.LEFT), Align.format(blkCompression,
compressionWidth, Align.LEFT));
}
}
} finally {
IOUtils.cleanup(LOG, reader, fsdis);
}
}
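  // Usage sketch (illustrative, not part of the original source): dumping a
  // TFile from code; the path is assumed to name an existing TFile on the
  // FileSystem resolved from the Configuration.
  //
  //   TFileDumper.dumpInfo("/data/sample.tfile", System.out,
  //       new Configuration());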
}
| 12,363 | 40.62963 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * Compression algorithms and codec utilities used by TFile and BCFile.
*/
final class Compression {
static final Log LOG = LogFactory.getLog(Compression.class);
/**
* Prevent the instantiation of class.
*/
private Compression() {
// nothing
}
static class FinishOnFlushCompressionStream extends FilterOutputStream {
public FinishOnFlushCompressionStream(CompressionOutputStream cout) {
super(cout);
}
@Override
public void write(byte b[], int off, int len) throws IOException {
out.write(b, off, len);
}
@Override
public void flush() throws IOException {
CompressionOutputStream cout = (CompressionOutputStream) out;
cout.finish();
cout.flush();
cout.resetState();
}
}
/**
* Compression algorithms.
*/
static enum Algorithm {
LZO(TFile.COMPRESSION_LZO) {
private transient boolean checked = false;
private static final String defaultClazz =
"org.apache.hadoop.io.compress.LzoCodec";
private transient CompressionCodec codec = null;
@Override
public synchronized boolean isSupported() {
if (!checked) {
checked = true;
String extClazz =
(conf.get(CONF_LZO_CLASS) == null ? System
.getProperty(CONF_LZO_CLASS) : null);
String clazz = (extClazz != null) ? extClazz : defaultClazz;
try {
LOG.info("Trying to load Lzo codec class: " + clazz);
codec =
(CompressionCodec) ReflectionUtils.newInstance(Class
.forName(clazz), conf);
} catch (ClassNotFoundException e) {
// that is okay
}
}
return codec != null;
}
@Override
CompressionCodec getCodec() throws IOException {
if (!isSupported()) {
throw new IOException(
"LZO codec class not specified. Did you forget to set property "
+ CONF_LZO_CLASS + "?");
}
return codec;
}
@Override
public synchronized InputStream createDecompressionStream(
InputStream downStream, Decompressor decompressor,
int downStreamBufferSize) throws IOException {
if (!isSupported()) {
throw new IOException(
"LZO codec class not specified. Did you forget to set property "
+ CONF_LZO_CLASS + "?");
}
InputStream bis1 = null;
if (downStreamBufferSize > 0) {
bis1 = new BufferedInputStream(downStream, downStreamBufferSize);
} else {
bis1 = downStream;
}
conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
CompressionInputStream cis =
codec.createInputStream(bis1, decompressor);
BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
return bis2;
}
@Override
public synchronized OutputStream createCompressionStream(
OutputStream downStream, Compressor compressor,
int downStreamBufferSize) throws IOException {
if (!isSupported()) {
throw new IOException(
"LZO codec class not specified. Did you forget to set property "
+ CONF_LZO_CLASS + "?");
}
OutputStream bos1 = null;
if (downStreamBufferSize > 0) {
bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
} else {
bos1 = downStream;
}
conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
CompressionOutputStream cos =
codec.createOutputStream(bos1, compressor);
BufferedOutputStream bos2 =
new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
DATA_OBUF_SIZE);
return bos2;
}
},
GZ(TFile.COMPRESSION_GZ) {
private transient DefaultCodec codec;
@Override
CompressionCodec getCodec() {
if (codec == null) {
codec = new DefaultCodec();
codec.setConf(conf);
}
return codec;
}
@Override
public synchronized InputStream createDecompressionStream(
InputStream downStream, Decompressor decompressor,
int downStreamBufferSize) throws IOException {
// Set the internal buffer size to read from down stream.
if (downStreamBufferSize > 0) {
codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
}
CompressionInputStream cis =
codec.createInputStream(downStream, decompressor);
BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
return bis2;
}
@Override
public synchronized OutputStream createCompressionStream(
OutputStream downStream, Compressor compressor,
int downStreamBufferSize) throws IOException {
OutputStream bos1 = null;
if (downStreamBufferSize > 0) {
bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
} else {
bos1 = downStream;
}
codec.getConf().setInt("io.file.buffer.size", 32 * 1024);
CompressionOutputStream cos =
codec.createOutputStream(bos1, compressor);
BufferedOutputStream bos2 =
new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
DATA_OBUF_SIZE);
return bos2;
}
@Override
public boolean isSupported() {
return true;
}
},
NONE(TFile.COMPRESSION_NONE) {
@Override
CompressionCodec getCodec() {
return null;
}
@Override
public synchronized InputStream createDecompressionStream(
InputStream downStream, Decompressor decompressor,
int downStreamBufferSize) throws IOException {
if (downStreamBufferSize > 0) {
return new BufferedInputStream(downStream, downStreamBufferSize);
}
return downStream;
}
@Override
public synchronized OutputStream createCompressionStream(
OutputStream downStream, Compressor compressor,
int downStreamBufferSize) throws IOException {
if (downStreamBufferSize > 0) {
return new BufferedOutputStream(downStream, downStreamBufferSize);
}
return downStream;
}
@Override
public boolean isSupported() {
return true;
}
};
// We require that all compression related settings are configured
// statically in the Configuration object.
protected static final Configuration conf = new Configuration();
private final String compressName;
// data input buffer size to absorb small reads from application.
private static final int DATA_IBUF_SIZE = 1 * 1024;
// data output buffer size to absorb small writes from application.
private static final int DATA_OBUF_SIZE = 4 * 1024;
public static final String CONF_LZO_CLASS =
"io.compression.codec.lzo.class";
Algorithm(String name) {
this.compressName = name;
}
abstract CompressionCodec getCodec() throws IOException;
public abstract InputStream createDecompressionStream(
InputStream downStream, Decompressor decompressor,
int downStreamBufferSize) throws IOException;
public abstract OutputStream createCompressionStream(
OutputStream downStream, Compressor compressor, int downStreamBufferSize)
throws IOException;
public abstract boolean isSupported();
public Compressor getCompressor() throws IOException {
CompressionCodec codec = getCodec();
if (codec != null) {
Compressor compressor = CodecPool.getCompressor(codec);
if (compressor != null) {
if (compressor.finished()) {
// Somebody returns the compressor to CodecPool but is still using
// it.
LOG.warn("Compressor obtained from CodecPool already finished()");
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Got a compressor: " + compressor.hashCode());
}
}
/**
           * The following statement is necessary to get around bugs in 0.18
           * where a compressor is referenced after being returned to the
           * codec pool.
*/
compressor.reset();
}
return compressor;
}
return null;
}
public void returnCompressor(Compressor compressor) {
if (compressor != null) {
if(LOG.isDebugEnabled()) {
LOG.debug("Return a compressor: " + compressor.hashCode());
}
CodecPool.returnCompressor(compressor);
}
}
public Decompressor getDecompressor() throws IOException {
CompressionCodec codec = getCodec();
if (codec != null) {
Decompressor decompressor = CodecPool.getDecompressor(codec);
if (decompressor != null) {
if (decompressor.finished()) {
// Somebody returns the decompressor to CodecPool but is still using
// it.
LOG.warn("Deompressor obtained from CodecPool already finished()");
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Got a decompressor: " + decompressor.hashCode());
}
}
/**
           * The following statement is necessary to get around bugs in 0.18
           * where a decompressor is referenced after being returned to the
           * codec pool.
*/
decompressor.reset();
}
return decompressor;
}
return null;
}
public void returnDecompressor(Decompressor decompressor) {
if (decompressor != null) {
if(LOG.isDebugEnabled()) {
LOG.debug("Returned a decompressor: " + decompressor.hashCode());
}
CodecPool.returnDecompressor(decompressor);
}
}
public String getName() {
return compressName;
}
}
static Algorithm getCompressionAlgorithmByName(String compressName) {
Algorithm[] algos = Algorithm.class.getEnumConstants();
for (Algorithm a : algos) {
if (a.getName().equals(compressName)) {
return a;
}
}
throw new IllegalArgumentException(
"Unsupported compression algorithm name: " + compressName);
}
static String[] getSupportedAlgorithms() {
Algorithm[] algos = Algorithm.class.getEnumConstants();
ArrayList<String> ret = new ArrayList<String>();
for (Algorithm a : algos) {
if (a.isSupported()) {
ret.add(a.getName());
}
}
return ret.toArray(new String[ret.size()]);
}
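  // Usage sketch (illustrative, not part of the original source): resolving an
  // algorithm by name and wrapping an output stream with its compressor.
  // "rawOut" stands for any OutputStream supplied by the caller.
  //
  //   Compression.Algorithm algo =
  //       Compression.getCompressionAlgorithmByName(TFile.COMPRESSION_GZ);
  //   Compressor compressor = algo.getCompressor();
  //   OutputStream os = algo.createCompressionStream(rawOut, compressor, 0);
  //   // ... write and flush data through os ...
  //   algo.returnCompressor(compressor);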
}
| 12,261 | 32.140541 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Exception - Meta Block with the same name already exists.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MetaBlockAlreadyExists extends IOException {
/**
* Constructor
*
* @param s
* message.
*/
MetaBlockAlreadyExists(String s) {
super(s);
}
}
| 1,328 | 30.642857 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BCFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.Closeable;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.file.tfile.CompareUtils.Scalar;
import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarComparator;
import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
import org.apache.hadoop.io.file.tfile.Utils.Version;
/**
* Block Compressed file, the underlying physical storage layer for TFile.
* BCFile provides the basic block level compression for the data block and meta
* blocks. It is separated from TFile as it may be used for other
 * block-compressed file implementations.
*/
final class BCFile {
  // The current version of the BCFile implementation; increment the major or
  // minor number whenever significant or incompatible changes are made.
static final Version API_VERSION = new Version((short) 1, (short) 0);
static final Log LOG = LogFactory.getLog(BCFile.class);
/**
* Prevent the instantiation of BCFile objects.
*/
private BCFile() {
// nothing
}
/**
* BCFile writer, the entry point for creating a new BCFile.
*/
static public class Writer implements Closeable {
private final FSDataOutputStream out;
private final Configuration conf;
// the single meta block containing index of compressed data blocks
final DataIndex dataIndex;
// index for meta blocks
final MetaIndex metaIndex;
boolean blkInProgress = false;
private boolean metaBlkSeen = false;
private boolean closed = false;
long errorCount = 0;
// reusable buffers.
private BytesWritable fsOutputBuffer;
/**
* Call-back interface to register a block after a block is closed.
*/
private static interface BlockRegister {
/**
* Register a block that is fully closed.
*
* @param raw
* The size of block in terms of uncompressed bytes.
* @param offsetStart
* The start offset of the block.
* @param offsetEnd
* One byte after the end of the block. Compressed block size is
* offsetEnd - offsetStart.
*/
public void register(long raw, long offsetStart, long offsetEnd);
}
/**
* Intermediate class that maintain the state of a Writable Compression
* Block.
*/
private static final class WBlockState {
private final Algorithm compressAlgo;
private Compressor compressor; // !null only if using native
// Hadoop compression
private final FSDataOutputStream fsOut;
private final long posStart;
private final SimpleBufferedOutputStream fsBufferedOutput;
private OutputStream out;
/**
* @param compressionAlgo
       *          The compression algorithm to be used for compression.
* @throws IOException
*/
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
this.compressAlgo = compressionAlgo;
this.fsOut = fsOut;
this.posStart = fsOut.getPos();
fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));
this.fsBufferedOutput =
new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
this.compressor = compressAlgo.getCompressor();
try {
this.out =
compressionAlgo.createCompressionStream(fsBufferedOutput,
compressor, 0);
} catch (IOException e) {
compressAlgo.returnCompressor(compressor);
throw e;
}
}
/**
* Get the output stream for BlockAppender's consumption.
*
* @return the output stream suitable for writing block data.
*/
OutputStream getOutputStream() {
return out;
}
/**
* Get the current position in file.
*
* @return The current byte offset in underlying file.
* @throws IOException
*/
long getCurrentPos() throws IOException {
return fsOut.getPos() + fsBufferedOutput.size();
}
long getStartPos() {
return posStart;
}
/**
* Current size of compressed data.
*
       * @return the number of compressed bytes written so far.
* @throws IOException
*/
long getCompressedSize() throws IOException {
long ret = getCurrentPos() - posStart;
return ret;
}
/**
* Finishing up the current block.
*/
public void finish() throws IOException {
try {
if (out != null) {
out.flush();
out = null;
}
} finally {
compressAlgo.returnCompressor(compressor);
compressor = null;
}
}
}
/**
* Access point to stuff data into a block.
*
* TODO: Change DataOutputStream to something else that tracks the size as
* long instead of int. Currently, we will wrap around if the row block size
* is greater than 4GB.
*/
public class BlockAppender extends DataOutputStream {
private final BlockRegister blockRegister;
private final WBlockState wBlkState;
@SuppressWarnings("hiding")
private boolean closed = false;
/**
* Constructor
*
* @param register
* the block register, which is called when the block is closed.
* @param wbs
* The writable compression block state.
*/
BlockAppender(BlockRegister register, WBlockState wbs) {
super(wbs.getOutputStream());
this.blockRegister = register;
this.wBlkState = wbs;
}
/**
* Get the raw size of the block.
*
* @return the number of uncompressed bytes written through the
* BlockAppender so far.
* @throws IOException
*/
public long getRawSize() throws IOException {
        /**
         * The size() of a block is expected not to exceed 4GB. Since size()
         * wraps to a negative integer beyond 2GB, mask it back to an unsigned
         * 32-bit value.
         */
return size() & 0x00000000ffffffffL;
}
/**
* Get the compressed size of the block in progress.
*
* @return the number of compressed bytes written to the underlying FS
* file. The size may be smaller than actual need to compress the
* all data written due to internal buffering inside the
* compressor.
* @throws IOException
*/
public long getCompressedSize() throws IOException {
return wBlkState.getCompressedSize();
}
@Override
public void flush() {
// The down stream is a special kind of stream that finishes a
// compression block upon flush. So we disable flush() here.
}
/**
* Signaling the end of write to the block. The block register will be
* called for registering the finished block.
*/
@Override
public void close() throws IOException {
if (closed == true) {
return;
}
try {
++errorCount;
wBlkState.finish();
blockRegister.register(getRawSize(), wBlkState.getStartPos(),
wBlkState.getCurrentPos());
--errorCount;
} finally {
closed = true;
blkInProgress = false;
}
}
}
/**
* Constructor
*
* @param fout
* FS output stream.
* @param compressionName
* Name of the compression algorithm, which will be used for all
* data blocks.
* @throws IOException
* @see Compression#getSupportedAlgorithms
*/
public Writer(FSDataOutputStream fout, String compressionName,
Configuration conf) throws IOException {
if (fout.getPos() != 0) {
throw new IOException("Output file not at zero offset.");
}
this.out = fout;
this.conf = conf;
dataIndex = new DataIndex(compressionName);
metaIndex = new MetaIndex();
fsOutputBuffer = new BytesWritable();
Magic.write(fout);
}
/**
* Close the BCFile Writer. Attempting to use the Writer after calling
* <code>close</code> is not allowed and may lead to undetermined results.
*/
@Override
public void close() throws IOException {
if (closed == true) {
return;
}
try {
if (errorCount == 0) {
if (blkInProgress == true) {
throw new IllegalStateException(
"Close() called with active block appender.");
}
// add metaBCFileIndex to metaIndex as the last meta block
BlockAppender appender =
prepareMetaBlock(DataIndex.BLOCK_NAME,
getDefaultCompressionAlgorithm());
try {
dataIndex.write(appender);
} finally {
appender.close();
}
long offsetIndexMeta = out.getPos();
metaIndex.write(out);
// Meta Index and the trailing section are written out directly.
out.writeLong(offsetIndexMeta);
API_VERSION.write(out);
Magic.write(out);
out.flush();
}
} finally {
closed = true;
}
}
private Algorithm getDefaultCompressionAlgorithm() {
return dataIndex.getDefaultCompressionAlgorithm();
}
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
throws IOException, MetaBlockAlreadyExists {
if (blkInProgress == true) {
throw new IllegalStateException(
"Cannot create Meta Block until previous block is closed.");
}
if (metaIndex.getMetaByName(name) != null) {
throw new MetaBlockAlreadyExists("name=" + name);
}
MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
WBlockState wbs =
new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
BlockAppender ba = new BlockAppender(mbr, wbs);
blkInProgress = true;
metaBlkSeen = true;
return ba;
}
/**
* Create a Meta Block and obtain an output stream for adding data into the
* block. There can only be one BlockAppender stream active at any time.
* Regular Blocks may not be created after the first Meta Blocks. The caller
* must call BlockAppender.close() to conclude the block creation.
*
* @param name
* The name of the Meta Block. The name must not conflict with
* existing Meta Blocks.
* @param compressionName
* The name of the compression algorithm to be used.
* @return The BlockAppender stream
* @throws IOException
* @throws MetaBlockAlreadyExists
* If the meta block with the name already exists.
*/
public BlockAppender prepareMetaBlock(String name, String compressionName)
throws IOException, MetaBlockAlreadyExists {
return prepareMetaBlock(name, Compression
.getCompressionAlgorithmByName(compressionName));
}
/**
* Create a Meta Block and obtain an output stream for adding data into the
* block. The Meta Block will be compressed with the same compression
* algorithm as data blocks. There can only be one BlockAppender stream
* active at any time. Regular Blocks may not be created after the first
* Meta Blocks. The caller must call BlockAppender.close() to conclude the
* block creation.
*
* @param name
* The name of the Meta Block. The name must not conflict with
* existing Meta Blocks.
* @return The BlockAppender stream
* @throws MetaBlockAlreadyExists
* If the meta block with the name already exists.
* @throws IOException
*/
public BlockAppender prepareMetaBlock(String name) throws IOException,
MetaBlockAlreadyExists {
return prepareMetaBlock(name, getDefaultCompressionAlgorithm());
}
/**
* Create a Data Block and obtain an output stream for adding data into the
* block. There can only be one BlockAppender stream active at any time.
* Data Blocks may not be created after the first Meta Blocks. The caller
* must call BlockAppender.close() to conclude the block creation.
*
* @return The BlockAppender stream
* @throws IOException
*/
public BlockAppender prepareDataBlock() throws IOException {
if (blkInProgress == true) {
throw new IllegalStateException(
"Cannot create Data Block until previous block is closed.");
}
if (metaBlkSeen == true) {
throw new IllegalStateException(
"Cannot create Data Block after Meta Blocks.");
}
DataBlockRegister dbr = new DataBlockRegister();
WBlockState wbs =
new WBlockState(getDefaultCompressionAlgorithm(), out,
fsOutputBuffer, conf);
BlockAppender ba = new BlockAppender(dbr, wbs);
blkInProgress = true;
return ba;
}
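    // Usage sketch (illustrative, not part of the original source): writing one
    // data block followed by one meta block. "fsOut" stands for an
    // FSDataOutputStream positioned at offset zero; "payload" is caller data.
    //
    //   BCFile.Writer writer =
    //       new BCFile.Writer(fsOut, TFile.COMPRESSION_GZ, conf);
    //   BCFile.Writer.BlockAppender data = writer.prepareDataBlock();
    //   data.write(payload);
    //   data.close();
    //   BCFile.Writer.BlockAppender meta = writer.prepareMetaBlock("stats");
    //   meta.writeInt(payload.length);
    //   meta.close();
    //   writer.close();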
/**
* Callback to make sure a meta block is added to the internal list when its
* stream is closed.
*/
private class MetaBlockRegister implements BlockRegister {
private final String name;
private final Algorithm compressAlgo;
MetaBlockRegister(String name, Algorithm compressAlgo) {
this.name = name;
this.compressAlgo = compressAlgo;
}
@Override
public void register(long raw, long begin, long end) {
metaIndex.addEntry(new MetaIndexEntry(name, compressAlgo,
new BlockRegion(begin, end - begin, raw)));
}
}
/**
* Callback to make sure a data block is added to the internal list when
* it's being closed.
*
*/
private class DataBlockRegister implements BlockRegister {
DataBlockRegister() {
// do nothing
}
@Override
public void register(long raw, long begin, long end) {
dataIndex.addBlockRegion(new BlockRegion(begin, end - begin, raw));
}
}
}
/**
* BCFile Reader, interface to read the file's data and meta blocks.
*/
static public class Reader implements Closeable {
private final FSDataInputStream in;
private final Configuration conf;
final DataIndex dataIndex;
// Index for meta blocks
final MetaIndex metaIndex;
final Version version;
/**
* Intermediate class that maintain the state of a Readable Compression
* Block.
*/
static private final class RBlockState {
private final Algorithm compressAlgo;
private Decompressor decompressor;
private final BlockRegion region;
private final InputStream in;
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
BlockRegion region, Configuration conf) throws IOException {
this.compressAlgo = compressionAlgo;
this.region = region;
this.decompressor = compressionAlgo.getDecompressor();
try {
this.in =
compressAlgo
.createDecompressionStream(new BoundedRangeFileInputStream(
fsin, this.region.getOffset(), this.region
.getCompressedSize()), decompressor, TFile
.getFSInputBufferSize(conf));
} catch (IOException e) {
compressAlgo.returnDecompressor(decompressor);
throw e;
}
}
      /**
       * Get the input stream for BlockReader's consumption.
       *
       * @return the input stream suitable for reading block data.
       */
public InputStream getInputStream() {
return in;
}
public String getCompressionName() {
return compressAlgo.getName();
}
public BlockRegion getBlockRegion() {
return region;
}
public void finish() throws IOException {
try {
in.close();
} finally {
compressAlgo.returnDecompressor(decompressor);
decompressor = null;
}
}
}
/**
* Access point to read a block.
*/
public static class BlockReader extends DataInputStream {
private final RBlockState rBlkState;
private boolean closed = false;
BlockReader(RBlockState rbs) {
super(rbs.getInputStream());
rBlkState = rbs;
}
/**
* Finishing reading the block. Release all resources.
*/
@Override
public void close() throws IOException {
if (closed == true) {
return;
}
try {
// Do not set rBlkState to null. People may access stats after calling
// close().
rBlkState.finish();
} finally {
closed = true;
}
}
/**
* Get the name of the compression algorithm used to compress the block.
*
* @return name of the compression algorithm.
*/
public String getCompressionName() {
return rBlkState.getCompressionName();
}
/**
* Get the uncompressed size of the block.
*
* @return uncompressed size of the block.
*/
public long getRawSize() {
return rBlkState.getBlockRegion().getRawSize();
}
/**
* Get the compressed size of the block.
*
* @return compressed size of the block.
*/
public long getCompressedSize() {
return rBlkState.getBlockRegion().getCompressedSize();
}
/**
* Get the starting position of the block in the file.
*
* @return the starting position of the block in the file.
*/
public long getStartPos() {
return rBlkState.getBlockRegion().getOffset();
}
}
/**
* Constructor
*
* @param fin
* FS input stream.
* @param fileLength
* Length of the corresponding file
* @throws IOException
*/
public Reader(FSDataInputStream fin, long fileLength, Configuration conf)
throws IOException {
this.in = fin;
this.conf = conf;
// move the cursor to the beginning of the tail, containing: offset to the
// meta block index, version and magic
fin.seek(fileLength - Magic.size() - Version.size() - Long.SIZE
/ Byte.SIZE);
long offsetIndexMeta = fin.readLong();
version = new Version(fin);
Magic.readAndVerify(fin);
if (!version.compatibleWith(BCFile.API_VERSION)) {
throw new RuntimeException("Incompatible BCFile fileBCFileVersion.");
}
// read meta index
fin.seek(offsetIndexMeta);
metaIndex = new MetaIndex(fin);
// read data:BCFile.index, the data block index
BlockReader blockR = getMetaBlock(DataIndex.BLOCK_NAME);
try {
dataIndex = new DataIndex(blockR);
} finally {
blockR.close();
}
}
/**
* Get the name of the default compression algorithm.
*
* @return the name of the default compression algorithm.
*/
public String getDefaultCompressionName() {
return dataIndex.getDefaultCompressionAlgorithm().getName();
}
/**
* Get version of BCFile file being read.
*
* @return version of BCFile file being read.
*/
public Version getBCFileVersion() {
return version;
}
/**
* Get version of BCFile API.
*
* @return version of BCFile API.
*/
public Version getAPIVersion() {
return API_VERSION;
}
/**
* Finishing reading the BCFile. Release all resources.
*/
@Override
public void close() {
// nothing to be done now
}
/**
* Get the number of data blocks.
*
* @return the number of data blocks.
*/
public int getBlockCount() {
return dataIndex.getBlockRegionList().size();
}
/**
* Stream access to a Meta Block.
*
* @param name
* meta block name
* @return BlockReader input stream for reading the meta block.
* @throws IOException
* @throws MetaBlockDoesNotExist
* The Meta Block with the given name does not exist.
*/
public BlockReader getMetaBlock(String name) throws IOException,
MetaBlockDoesNotExist {
MetaIndexEntry imeBCIndex = metaIndex.getMetaByName(name);
if (imeBCIndex == null) {
throw new MetaBlockDoesNotExist("name=" + name);
}
BlockRegion region = imeBCIndex.getRegion();
return createReader(imeBCIndex.getCompressionAlgorithm(), region);
}
/**
* Stream access to a Data Block.
*
* @param blockIndex
* 0-based data block index.
* @return BlockReader input stream for reading the data block.
* @throws IOException
*/
public BlockReader getDataBlock(int blockIndex) throws IOException {
if (blockIndex < 0 || blockIndex >= getBlockCount()) {
throw new IndexOutOfBoundsException(String.format(
"blockIndex=%d, numBlocks=%d", blockIndex, getBlockCount()));
}
BlockRegion region = dataIndex.getBlockRegionList().get(blockIndex);
return createReader(dataIndex.getDefaultCompressionAlgorithm(), region);
}
private BlockReader createReader(Algorithm compressAlgo, BlockRegion region)
throws IOException {
RBlockState rbs = new RBlockState(compressAlgo, in, region, conf);
return new BlockReader(rbs);
}
/**
* Find the smallest Block index whose starting offset is greater than or
* equal to the specified offset.
*
* @param offset
* User-specific offset.
* @return the index to the data Block if such block exists; or -1
* otherwise.
*/
public int getBlockIndexNear(long offset) {
ArrayList<BlockRegion> list = dataIndex.getBlockRegionList();
int idx =
Utils
.lowerBound(list, new ScalarLong(offset), new ScalarComparator());
if (idx == list.size()) {
return -1;
}
return idx;
}
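    // Usage sketch (illustrative, not part of the original source): iterating
    // over all data blocks of an existing BCFile. "fsIn" and "fileLength"
    // describe the underlying file and are assumed to be supplied by the caller.
    //
    //   BCFile.Reader reader = new BCFile.Reader(fsIn, fileLength, conf);
    //   for (int i = 0; i < reader.getBlockCount(); ++i) {
    //     BCFile.Reader.BlockReader block = reader.getDataBlock(i);
    //     try {
    //       // consume the uncompressed block bytes from the stream
    //     } finally {
    //       block.close();
    //     }
    //   }
    //   reader.close();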
}
/**
* Index for all Meta blocks.
*/
static class MetaIndex {
// use a tree map, for getting a meta block entry by name
final Map<String, MetaIndexEntry> index;
// for write
public MetaIndex() {
index = new TreeMap<String, MetaIndexEntry>();
}
// for read, construct the map from the file
public MetaIndex(DataInput in) throws IOException {
int count = Utils.readVInt(in);
index = new TreeMap<String, MetaIndexEntry>();
for (int nx = 0; nx < count; nx++) {
MetaIndexEntry indexEntry = new MetaIndexEntry(in);
index.put(indexEntry.getMetaName(), indexEntry);
}
}
public void addEntry(MetaIndexEntry indexEntry) {
index.put(indexEntry.getMetaName(), indexEntry);
}
public MetaIndexEntry getMetaByName(String name) {
return index.get(name);
}
public void write(DataOutput out) throws IOException {
Utils.writeVInt(out, index.size());
for (MetaIndexEntry indexEntry : index.values()) {
indexEntry.write(out);
}
}
}
/**
* An entry describes a meta block in the MetaIndex.
*/
static final class MetaIndexEntry {
private final String metaName;
private final Algorithm compressionAlgorithm;
private final static String defaultPrefix = "data:";
private final BlockRegion region;
public MetaIndexEntry(DataInput in) throws IOException {
String fullMetaName = Utils.readString(in);
if (fullMetaName.startsWith(defaultPrefix)) {
metaName =
fullMetaName.substring(defaultPrefix.length(), fullMetaName
.length());
} else {
throw new IOException("Corrupted Meta region Index");
}
compressionAlgorithm =
Compression.getCompressionAlgorithmByName(Utils.readString(in));
region = new BlockRegion(in);
}
public MetaIndexEntry(String metaName, Algorithm compressionAlgorithm,
BlockRegion region) {
this.metaName = metaName;
this.compressionAlgorithm = compressionAlgorithm;
this.region = region;
}
public String getMetaName() {
return metaName;
}
public Algorithm getCompressionAlgorithm() {
return compressionAlgorithm;
}
public BlockRegion getRegion() {
return region;
}
public void write(DataOutput out) throws IOException {
Utils.writeString(out, defaultPrefix + metaName);
Utils.writeString(out, compressionAlgorithm.getName());
region.write(out);
}
}
/**
* Index of all compressed data blocks.
*/
static class DataIndex {
final static String BLOCK_NAME = "BCFile.index";
private final Algorithm defaultCompressionAlgorithm;
// for data blocks, each entry specifies a block's offset, compressed size
// and raw size
private final ArrayList<BlockRegion> listRegions;
// for read, deserialized from a file
public DataIndex(DataInput in) throws IOException {
defaultCompressionAlgorithm =
Compression.getCompressionAlgorithmByName(Utils.readString(in));
int n = Utils.readVInt(in);
listRegions = new ArrayList<BlockRegion>(n);
for (int i = 0; i < n; i++) {
BlockRegion region = new BlockRegion(in);
listRegions.add(region);
}
}
// for write
public DataIndex(String defaultCompressionAlgorithmName) {
this.defaultCompressionAlgorithm =
Compression
.getCompressionAlgorithmByName(defaultCompressionAlgorithmName);
listRegions = new ArrayList<BlockRegion>();
}
public Algorithm getDefaultCompressionAlgorithm() {
return defaultCompressionAlgorithm;
}
public ArrayList<BlockRegion> getBlockRegionList() {
return listRegions;
}
public void addBlockRegion(BlockRegion region) {
listRegions.add(region);
}
public void write(DataOutput out) throws IOException {
Utils.writeString(out, defaultCompressionAlgorithm.getName());
Utils.writeVInt(out, listRegions.size());
for (BlockRegion region : listRegions) {
region.write(out);
}
}
}
/**
* Magic number uniquely identifying a BCFile in the header/footer.
*/
static final class Magic {
private final static byte[] AB_MAGIC_BCFILE =
{
// ... total of 16 bytes
(byte) 0xd1, (byte) 0x11, (byte) 0xd3, (byte) 0x68, (byte) 0x91,
(byte) 0xb5, (byte) 0xd7, (byte) 0xb6, (byte) 0x39, (byte) 0xdf,
(byte) 0x41, (byte) 0x40, (byte) 0x92, (byte) 0xba, (byte) 0xe1,
(byte) 0x50 };
public static void readAndVerify(DataInput in) throws IOException {
byte[] abMagic = new byte[size()];
in.readFully(abMagic);
// check against AB_MAGIC_BCFILE, if not matching, throw an
// Exception
if (!Arrays.equals(abMagic, AB_MAGIC_BCFILE)) {
throw new IOException("Not a valid BCFile.");
}
}
public static void write(DataOutput out) throws IOException {
out.write(AB_MAGIC_BCFILE);
}
public static int size() {
return AB_MAGIC_BCFILE.length;
}
}
/**
* Block region.
*/
static final class BlockRegion implements Scalar {
private final long offset;
private final long compressedSize;
private final long rawSize;
public BlockRegion(DataInput in) throws IOException {
offset = Utils.readVLong(in);
compressedSize = Utils.readVLong(in);
rawSize = Utils.readVLong(in);
}
public BlockRegion(long offset, long compressedSize, long rawSize) {
this.offset = offset;
this.compressedSize = compressedSize;
this.rawSize = rawSize;
}
public void write(DataOutput out) throws IOException {
Utils.writeVLong(out, offset);
Utils.writeVLong(out, compressedSize);
Utils.writeVLong(out, rawSize);
}
public long getOffset() {
return offset;
}
public long getCompressedSize() {
return compressedSize;
}
public long getRawSize() {
return rawSize;
}
@Override
public long magnitude() {
return offset;
}
}
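// Illustrative sketch of the index serialization shown above (the DataOutput
// "out" and the "none" compression name are assumptions): a BlockRegion is
// written as three VLongs (offset, compressed size, raw size), and a
// DataIndex block is the default compression algorithm name, a VInt region
// count, then that many BlockRegions.
//
// DataIndex index = new DataIndex("none");
// index.addBlockRegion(new BlockRegion(0L, 1024L, 4096L));
// index.write(out); // "none", VInt(1), VLong 0, VLong 1024, VLong 4096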
}
| 29,762 | 29.246951 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
* A simplified BufferedOutputStream with a borrowed buffer, which allows users
* to see how much data has been buffered.
*/
class SimpleBufferedOutputStream extends FilterOutputStream {
protected byte buf[]; // the borrowed buffer
protected int count = 0; // bytes used in buffer.
// Constructor
public SimpleBufferedOutputStream(OutputStream out, byte[] buf) {
super(out);
this.buf = buf;
}
private void flushBuffer() throws IOException {
if (count > 0) {
out.write(buf, 0, count);
count = 0;
}
}
@Override
public void write(int b) throws IOException {
if (count >= buf.length) {
flushBuffer();
}
buf[count++] = (byte) b;
}
@Override
public void write(byte b[], int off, int len) throws IOException {
if (len >= buf.length) {
flushBuffer();
out.write(b, off, len);
return;
}
if (len > buf.length - count) {
flushBuffer();
}
System.arraycopy(b, off, buf, count, len);
count += len;
}
@Override
public synchronized void flush() throws IOException {
flushBuffer();
out.flush();
}
// Get the size of internal buffer being used.
public int size() {
return count;
}
}
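// Illustrative usage sketch, assuming a caller-provided OutputStream "fileOut"
// and payload bytes: the stream buffers small writes in the borrowed buffer
// and exposes how much is pending via size().
//
// byte[] borrowed = new byte[64 * 1024];
// SimpleBufferedOutputStream sbos =
// new SimpleBufferedOutputStream(fileOut, borrowed);
// sbos.write(payload); // small writes accumulate in "borrowed"
// int pending = sbos.size(); // bytes buffered but not yet flushed
// sbos.flush(); // pushes buffered bytes to fileOut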
| 2,151 | 26.589744 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
/**
* Supporting Utility classes used by TFile, and shared by users of TFile.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class Utils {
/**
* Prevent the instantiation of Utils.
*/
private Utils() {
// nothing
}
/**
* Encoding an integer into a variable-length encoding format. Synonymous to
* <code>Utils#writeVLong(out, n)</code>.
*
* @param out
* output stream
* @param n
* The integer to be encoded
* @throws IOException
* @see Utils#writeVLong(DataOutput, long)
*/
public static void writeVInt(DataOutput out, int n) throws IOException {
writeVLong(out, n);
}
/**
* Encoding a Long integer into a variable-length encoding format.
* <ul>
* <li>if n in [-32, 128): encode in one byte with the actual value.
* Otherwise,
* <li>if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52;
* byte[1]=n&0xff. Otherwise,
* <li>if n in [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 -
* 88; byte[1]=(n>>8)&0xff; byte[2]=n&0xff. Otherwise,
* <li>if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112;
* byte[1] = (n>>16)&0xff; byte[2] = (n>>8)&0xff; byte[3]=n&0xff. Otherwise:
* <li>if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] =
* (n>>24)&0xff; byte[2]=(n>>16)&0xff; byte[3]=(n>>8)&0xff; byte[4]=n&0xff;
* <li>if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] =
* (n>>32)&0xff; byte[2]=(n>>24)&0xff; byte[3]=(n>>16)&0xff;
* byte[4]=(n>>8)&0xff; byte[5]=n&0xff
* <li>if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] =
* (n>>40)&0xff; byte[2]=(n>>32)&0xff; byte[3]=(n>>24)&0xff;
* byte[4]=(n>>16)&0xff; byte[5]=(n>>8)&0xff; byte[6]=n&0xff;
* <li>if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] =
* (n>>48)&0xff; byte[2] = (n>>40)&0xff; byte[3]=(n>>32)&0xff;
* byte[4]=(n>>24)&0xff; byte[5]=(n>>16)&0xff; byte[6]=(n>>8)&0xff;
* byte[7]=n&0xff;
* <li>if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] =
* (n>>56)&0xff; byte[2] = (n>>48)&0xff; byte[3] = (n>>40)&0xff;
* byte[4]=(n>>32)&0xff; byte[5]=(n>>24)&0xff; byte[6]=(n>>16)&0xff;
* byte[7]=(n>>8)&0xff; byte[8]=n&0xff;
* </ul>
*
* @param out
* output stream
* @param n
* the integer number
* @throws IOException
*/
@SuppressWarnings("fallthrough")
public static void writeVLong(DataOutput out, long n) throws IOException {
if ((n < 128) && (n >= -32)) {
out.writeByte((int) n);
return;
}
long un = (n < 0) ? ~n : n;
// how many bytes do we need to represent the number with sign bit?
int len = (Long.SIZE - Long.numberOfLeadingZeros(un)) / 8 + 1;
int firstByte = (int) (n >> ((len - 1) * 8));
switch (len) {
case 1:
// fall it through to firstByte==-1, len=2.
firstByte >>= 8;
case 2:
if ((firstByte < 20) && (firstByte >= -20)) {
out.writeByte(firstByte - 52);
out.writeByte((int) n);
return;
}
// fall it through to firstByte==0/-1, len=3.
firstByte >>= 8;
case 3:
if ((firstByte < 16) && (firstByte >= -16)) {
out.writeByte(firstByte - 88);
out.writeShort((int) n);
return;
}
// fall it through to firstByte==0/-1, len=4.
firstByte >>= 8;
case 4:
if ((firstByte < 8) && (firstByte >= -8)) {
out.writeByte(firstByte - 112);
out.writeShort(((int) n) >>> 8);
out.writeByte((int) n);
return;
}
out.writeByte(len - 129);
out.writeInt((int) n);
return;
case 5:
out.writeByte(len - 129);
out.writeInt((int) (n >>> 8));
out.writeByte((int) n);
return;
case 6:
out.writeByte(len - 129);
out.writeInt((int) (n >>> 16));
out.writeShort((int) n);
return;
case 7:
out.writeByte(len - 129);
out.writeInt((int) (n >>> 24));
out.writeShort((int) (n >>> 8));
out.writeByte((int) n);
return;
case 8:
out.writeByte(len - 129);
out.writeLong(n);
return;
default:
throw new RuntimeException("Internal error");
}
}
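// Worked examples of the encoding rules above:
// writeVLong(out, 100) -> one byte: 100 (100 lies in [-32, 128)).
// writeVLong(out, -1) -> one byte: -1.
// writeVLong(out, 300) -> two bytes: (300 >> 8) - 52 = -51, then 300 & 0xff = 0x2c.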
/**
* Decoding the variable-length integer. Synonymous to
* <code>(int)Utils#readVLong(in)</code>.
*
* @param in
* input stream
* @return the decoded integer
* @throws IOException
*
* @see Utils#readVLong(DataInput)
*/
public static int readVInt(DataInput in) throws IOException {
long ret = readVLong(in);
if ((ret > Integer.MAX_VALUE) || (ret < Integer.MIN_VALUE)) {
throw new RuntimeException(
"Number too large to be represented as Integer");
}
return (int) ret;
}
/**
* Decoding the variable-length integer. Suppose the value of the first byte
* is FB, and the following bytes are NB[*].
* <ul>
* <li>if (FB >= -32), return (long)FB;
* <li>if (FB in [-72, -33]), return (FB+52)<<8 + NB[0]&0xff;
* <li>if (FB in [-104, -73]), return (FB+88)<<16 + (NB[0]&0xff)<<8 +
* NB[1]&0xff;
* <li>if (FB in [-120, -105]), return (FB+112)<<24 + (NB[0]&0xff)<<16 +
* (NB[1]&0xff)<<8 + NB[2]&0xff;
* <li>if (FB in [-128, -121]), interpret the next (FB+129) bytes as a signed
* big-endian integer and return it.
* </ul>
*
* @param in
* input stream
* @return the decoded long integer.
* @throws IOException
*/
public static long readVLong(DataInput in) throws IOException {
int firstByte = in.readByte();
if (firstByte >= -32) {
return firstByte;
}
switch ((firstByte + 128) / 8) {
case 11:
case 10:
case 9:
case 8:
case 7:
return ((firstByte + 52) << 8) | in.readUnsignedByte();
case 6:
case 5:
case 4:
case 3:
return ((firstByte + 88) << 16) | in.readUnsignedShort();
case 2:
case 1:
return ((firstByte + 112) << 24) | (in.readUnsignedShort() << 8)
| in.readUnsignedByte();
case 0:
int len = firstByte + 129;
switch (len) {
case 4:
return in.readInt();
case 5:
return ((long) in.readInt()) << 8 | in.readUnsignedByte();
case 6:
return ((long) in.readInt()) << 16 | in.readUnsignedShort();
case 7:
return ((long) in.readInt()) << 24 | (in.readUnsignedShort() << 8)
| in.readUnsignedByte();
case 8:
return in.readLong();
default:
throw new IOException("Corrupted VLong encoding");
}
default:
throw new RuntimeException("Internal error");
}
}
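// Worked example of the decoding rules above, using the two-byte encoding of
// 300 produced by writeVLong: for the bytes {-51, 0x2c}, -51 < -32 and
// (-51 + 128) / 8 = 9, so the result is ((-51 + 52) << 8) | 0x2c = 300.
// A first byte of 100 (>= -32) is returned directly as the value 100.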
/**
* Write a String as a VInt n, followed by n Bytes as in Text format.
*
* @param out
* @param s
* @throws IOException
*/
public static void writeString(DataOutput out, String s) throws IOException {
if (s != null) {
Text text = new Text(s);
byte[] buffer = text.getBytes();
int len = text.getLength();
writeVInt(out, len);
out.write(buffer, 0, len);
} else {
writeVInt(out, -1);
}
}
/**
* Read a String as a VInt n, followed by n Bytes in Text format.
*
* @param in
* The input stream.
* @return The string
* @throws IOException
*/
public static String readString(DataInput in) throws IOException {
int length = readVInt(in);
if (length == -1) return null;
byte[] buffer = new byte[length];
in.readFully(buffer);
return Text.decode(buffer);
}
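// Illustrative sketch of the string format (the DataOutput "out" is an
// assumption): writeString(out, "abc") emits VInt(3) followed by the three
// UTF-8 bytes of "abc"; writeString(out, null) emits VInt(-1), which
// readString() maps back to null.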
/**
* A generic Version class. We suggest applications built on top of TFile use
* this class to maintain version information in their meta blocks.
*
* A version number consists of a major version and a minor version. The
* suggested usage of major and minor version number is to increment major
* version number when the new storage format is not backward compatible, and
* increment the minor version otherwise.
*/
public static final class Version implements Comparable<Version> {
private final short major;
private final short minor;
/**
* Construct the Version object by reading from the input stream.
*
* @param in
* input stream
* @throws IOException
*/
public Version(DataInput in) throws IOException {
major = in.readShort();
minor = in.readShort();
}
/**
* Constructor.
*
* @param major
* major version.
* @param minor
* minor version.
*/
public Version(short major, short minor) {
this.major = major;
this.minor = minor;
}
/**
* Write the object to a DataOutput. The serialized format of the Version is
* major version followed by minor version, both as big-endian short
* integers.
*
* @param out
* The DataOutput object.
* @throws IOException
*/
public void write(DataOutput out) throws IOException {
out.writeShort(major);
out.writeShort(minor);
}
/**
* Get the major version.
*
* @return Major version.
*/
public int getMajor() {
return major;
}
/**
* Get the minor version.
*
* @return The minor version.
*/
public int getMinor() {
return minor;
}
/**
* Get the size of the serialized Version object.
*
* @return serialized size of the version object.
*/
public static int size() {
return (Short.SIZE + Short.SIZE) / Byte.SIZE;
}
/**
* Return a string representation of the version.
*/
@Override
public String toString() {
return new StringBuilder("v").append(major).append(".").append(minor)
.toString();
}
/**
* Test compatibility.
*
* @param other
* The Version object to test compatibility with.
* @return true if both versions have the same major version number; false
* otherwise.
*/
public boolean compatibleWith(Version other) {
return major == other.major;
}
/**
* Compare this version with another version.
*/
@Override
public int compareTo(Version that) {
if (major != that.major) {
return major - that.major;
}
return minor - that.minor;
}
@Override
public boolean equals(Object other) {
if (this == other) return true;
if (!(other instanceof Version)) return false;
return compareTo((Version) other) == 0;
}
@Override
public int hashCode() {
return (major << 16) + minor; // combine major (high bits) and minor
}
}
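// Illustrative sketch of the compatibility contract above: versions are
// compatible iff their major numbers match, e.g.
// new Version((short) 1, (short) 0).compatibleWith(new Version((short) 1, (short) 2)) // true
// new Version((short) 1, (short) 0).compatibleWith(new Version((short) 2, (short) 0)) // false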
/**
* Lower bound binary search. Find the index to the first element in the list
* that compares greater than or equal to key.
*
* @param <T>
* Type of the input key.
* @param list
* The list
* @param key
* The input key.
* @param cmp
* Comparator for the key.
* @return The index to the desired element if it exists; or list.size()
* otherwise.
*/
public static <T> int lowerBound(List<? extends T> list, T key,
Comparator<? super T> cmp) {
int low = 0;
int high = list.size();
while (low < high) {
int mid = (low + high) >>> 1;
T midVal = list.get(mid);
int ret = cmp.compare(midVal, key);
if (ret < 0)
low = mid + 1;
else high = mid;
}
return low;
}
/**
* Upper bound binary search. Find the index to the first element in the list
* that compares greater than the input key.
*
* @param <T>
* Type of the input key.
* @param list
* The list
* @param key
* The input key.
* @param cmp
* Comparator for the key.
* @return The index to the desired element if it exists; or list.size()
* otherwise.
*/
public static <T> int upperBound(List<? extends T> list, T key,
Comparator<? super T> cmp) {
int low = 0;
int high = list.size();
while (low < high) {
int mid = (low + high) >>> 1;
T midVal = list.get(mid);
int ret = cmp.compare(midVal, key);
if (ret <= 0)
low = mid + 1;
else high = mid;
}
return low;
}
/**
* Lower bound binary search. Find the index to the first element in the list
* that compares greater than or equal to key.
*
* @param <T>
* Type of the input key.
* @param list
* The list
* @param key
* The input key.
* @return The index to the desired element if it exists; or list.size()
* otherwise.
*/
public static <T> int lowerBound(List<? extends Comparable<? super T>> list,
T key) {
int low = 0;
int high = list.size();
while (low < high) {
int mid = (low + high) >>> 1;
Comparable<? super T> midVal = list.get(mid);
int ret = midVal.compareTo(key);
if (ret < 0)
low = mid + 1;
else high = mid;
}
return low;
}
/**
* Upper bound binary search. Find the index to the first element in the list
* that compares greater than the input key.
*
* @param <T>
* Type of the input key.
* @param list
* The list
* @param key
* The input key.
* @return The index to the desired element if it exists; or list.size()
* otherwise.
*/
public static <T> int upperBound(List<? extends Comparable<? super T>> list,
T key) {
int low = 0;
int high = list.size();
while (low < high) {
int mid = (low + high) >>> 1;
Comparable<? super T> midVal = list.get(mid);
int ret = midVal.compareTo(key);
if (ret <= 0)
low = mid + 1;
else high = mid;
}
return low;
}
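// Illustrative sketch of the bounds above on a sorted List<Integer>
// [1, 2, 2, 5]: lowerBound(list, 2) == 1 (first element >= 2),
// upperBound(list, 2) == 3 (first element > 2); for key 9 both return
// list.size() == 4.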
}
| 15,107 | 27.942529 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* Several related classes to support chunk-encoded sub-streams on top of a
* regular stream.
*/
final class Chunk {
/**
* Prevent the instantiation of class.
*/
private Chunk() {
// nothing
}
/**
* Decoding a chain of chunks encoded through ChunkEncoder or
* SingleChunkEncoder.
*/
static public class ChunkDecoder extends InputStream {
private DataInputStream in = null;
private boolean lastChunk;
private int remain = 0;
private boolean closed;
public ChunkDecoder() {
lastChunk = true;
closed = true;
}
public void reset(DataInputStream downStream) {
// no need to wind forward the old input.
in = downStream;
lastChunk = false;
remain = 0;
closed = false;
}
/**
* Constructor
*
* @param in
* The source input stream which contains chunk-encoded data
* stream.
*/
public ChunkDecoder(DataInputStream in) {
this.in = in;
lastChunk = false;
closed = false;
}
/**
* Have we reached the last chunk.
*
* @return true if we have reached the last chunk.
* @throws java.io.IOException
*/
public boolean isLastChunk() throws IOException {
checkEOF();
return lastChunk;
}
/**
* How many bytes remain in the current chunk?
*
* @return remaining bytes left in the current chunk.
* @throws java.io.IOException
*/
public int getRemain() throws IOException {
checkEOF();
return remain;
}
/**
* Reading the length of next chunk.
*
* @throws java.io.IOException
* when no more data is available.
*/
private void readLength() throws IOException {
remain = Utils.readVInt(in);
if (remain >= 0) {
lastChunk = true;
} else {
remain = -remain;
}
}
/**
* Check whether we reach the end of the stream.
*
* @return false if the chunk encoded stream has more data to read (in which
* case available() will be greater than 0); true otherwise.
* @throws java.io.IOException
* on I/O errors.
*/
private boolean checkEOF() throws IOException {
if (isClosed()) return true;
while (true) {
if (remain > 0) return false;
if (lastChunk) return true;
readLength();
}
}
@Override
/*
* This method never blocks the caller. Returning 0 does not mean we reach
* the end of the stream.
*/
public int available() {
return remain;
}
@Override
public int read() throws IOException {
if (checkEOF()) return -1;
int ret = in.read();
if (ret < 0) throw new IOException("Corrupted chunk encoding stream");
--remain;
return ret;
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
}
if (!checkEOF()) {
int n = Math.min(remain, len);
int ret = in.read(b, off, n);
if (ret < 0) throw new IOException("Corrupted chunk encoding stream");
remain -= ret;
return ret;
}
return -1;
}
@Override
public long skip(long n) throws IOException {
if (!checkEOF()) {
long ret = in.skip(Math.min(remain, n));
remain -= ret;
return ret;
}
return 0;
}
@Override
public boolean markSupported() {
return false;
}
public boolean isClosed() {
return closed;
}
@Override
public void close() throws IOException {
if (closed == false) {
try {
while (!checkEOF()) {
skip(Integer.MAX_VALUE);
}
} finally {
closed = true;
}
}
}
}
/**
* Chunk Encoder. Encoding the output data into a chain of chunks in the
* following sequences: -len1, byte[len1], -len2, byte[len2], ... len_n,
* byte[len_n]. Where len1, len2, ..., len_n are the lengths of the data
* chunks. Non-terminal chunks have their lengths negated. Non-terminal chunks
* cannot have length 0. All lengths are in the range of 0 to
* Integer.MAX_VALUE and are encoded in Utils.VInt format.
*/
static public class ChunkEncoder extends OutputStream {
/**
* The data output stream it connects to.
*/
private DataOutputStream out;
/**
* The internal buffer that is only used when we do not know the advertised
* size.
*/
private byte buf[];
/**
* The number of valid bytes in the buffer. This value is always in the
* range <tt>0</tt> through <tt>buf.length</tt>; elements <tt>buf[0]</tt>
* through <tt>buf[count-1]</tt> contain valid byte data.
*/
private int count;
/**
* Constructor.
*
* @param out
* the underlying output stream.
* @param buf
* user-supplied buffer. The buffer would be used exclusively by
* the ChunkEncoder during its life cycle.
*/
public ChunkEncoder(DataOutputStream out, byte[] buf) {
this.out = out;
this.buf = buf;
this.count = 0;
}
/**
* Write out a chunk.
*
* @param chunk
* The chunk buffer.
* @param offset
* Offset to chunk buffer for the beginning of chunk.
* @param len
* @param last
* Is this the last call to flushBuffer?
*/
private void writeChunk(byte[] chunk, int offset, int len, boolean last)
throws IOException {
if (last) { // always write out the length for the last chunk.
Utils.writeVInt(out, len);
if (len > 0) {
out.write(chunk, offset, len);
}
} else {
if (len > 0) {
Utils.writeVInt(out, -len);
out.write(chunk, offset, len);
}
}
}
/**
* Write out a chunk that is a concatenation of the internal buffer plus
* user supplied data. This will never be the last block.
*
* @param data
* User supplied data buffer.
* @param offset
* Offset to user data buffer.
* @param len
* User data buffer size.
*/
private void writeBufData(byte[] data, int offset, int len)
throws IOException {
if (count + len > 0) {
Utils.writeVInt(out, -(count + len));
out.write(buf, 0, count);
count = 0;
out.write(data, offset, len);
}
}
/**
* Flush the internal buffer.
*
* @throws java.io.IOException
*/
private void flushBuffer() throws IOException {
if (count > 0) {
writeChunk(buf, 0, count, false);
count = 0;
}
}
@Override
public void write(int b) throws IOException {
if (count >= buf.length) {
flushBuffer();
}
buf[count++] = (byte) b;
}
@Override
public void write(byte b[]) throws IOException {
write(b, 0, b.length);
}
@Override
public void write(byte b[], int off, int len) throws IOException {
if ((len + count) >= buf.length) {
/*
* If the input data do not fit in buffer, flush the output buffer and
* then write the data directly. In this way buffered streams will
* cascade harmlessly.
*/
writeBufData(b, off, len);
return;
}
System.arraycopy(b, off, buf, count, len);
count += len;
}
@Override
public void flush() throws IOException {
flushBuffer();
out.flush();
}
@Override
public void close() throws IOException {
if (buf != null) {
try {
writeChunk(buf, 0, count, true);
} finally {
buf = null;
out = null;
}
}
}
}
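// Worked example of the chunk framing above, assuming a 4-byte borrowed
// buffer: writing the 6 bytes "abcdef" through a ChunkEncoder and closing it
// emits VInt(-6), the 6 data bytes, then VInt(0) as the terminal chunk.
// ChunkDecoder consumes chunks until it reads a non-negative length.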
/**
* Encode the whole stream as a single chunk. Expecting to know the size of
* the chunk up-front.
*/
static public class SingleChunkEncoder extends OutputStream {
/**
* The data output stream it connects to.
*/
private final DataOutputStream out;
/**
* The remaining bytes to be written.
*/
private int remain;
private boolean closed = false;
/**
* Constructor.
*
* @param out
* the underlying output stream.
* @param size
* The total # of bytes to be written as a single chunk.
* @throws java.io.IOException
* if an I/O error occurs.
*/
public SingleChunkEncoder(DataOutputStream out, int size)
throws IOException {
this.out = out;
this.remain = size;
Utils.writeVInt(out, size);
}
@Override
public void write(int b) throws IOException {
if (remain > 0) {
out.write(b);
--remain;
} else {
throw new IOException("Writing more bytes than advertised size.");
}
}
@Override
public void write(byte b[]) throws IOException {
write(b, 0, b.length);
}
@Override
public void write(byte b[], int off, int len) throws IOException {
if (remain >= len) {
out.write(b, off, len);
remain -= len;
} else {
throw new IOException("Writing more bytes than advertised size.");
}
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
if (closed == true) {
return;
}
try {
if (remain > 0) {
throw new IOException("Writing less bytes than advertised size.");
}
} finally {
closed = true;
}
}
}
}
| 10,965 | 24.502326 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/ByteArray.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.BytesWritable;
/**
* Adaptor class to wrap byte-array backed objects (including java byte array)
* as RawComparable objects.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class ByteArray implements RawComparable {
private final byte[] buffer;
private final int offset;
private final int len;
/**
* Constructing a ByteArray from a {@link BytesWritable}.
*
* @param other
*/
public ByteArray(BytesWritable other) {
this(other.getBytes(), 0, other.getLength());
}
/**
* Wrap a whole byte array as a RawComparable.
*
* @param buffer
* the byte array buffer.
*/
public ByteArray(byte[] buffer) {
this(buffer, 0, buffer.length);
}
/**
* Wrap a partial byte array as a RawComparable.
*
* @param buffer
* the byte array buffer.
* @param offset
* the starting offset
* @param len
* the length of the consecutive bytes to be wrapped.
*/
public ByteArray(byte[] buffer, int offset, int len) {
if ((offset | len | (buffer.length - offset - len)) < 0) {
throw new IndexOutOfBoundsException();
}
this.buffer = buffer;
this.offset = offset;
this.len = len;
}
/**
* @return the underlying buffer.
*/
@Override
public byte[] buffer() {
return buffer;
}
/**
* @return the offset in the buffer.
*/
@Override
public int offset() {
return offset;
}
/**
* @return the size of the byte array.
*/
@Override
public int size() {
return len;
}
}
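// Illustrative sketch ("someKeyBytes" is an assumption): ByteArray adapts raw
// bytes to the RawComparable view consumed by TFile's key-based APIs.
//
// byte[] someKeyBytes = ...; // caller-provided
// RawComparable key = new ByteArray(someKeyBytes);
// int n = key.size(); // same as someKeyBytes.length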
| 2,549 | 25.28866 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Exception - No such Meta Block with the given name.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MetaBlockDoesNotExist extends IOException {
/**
* Constructor
*
* @param s
* message.
*/
MetaBlockDoesNotExist(String s) {
super(s);
}
}
| 1,320 | 30.452381 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Comparator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.file.tfile.BCFile.Reader.BlockReader;
import org.apache.hadoop.io.file.tfile.BCFile.Writer.BlockAppender;
import org.apache.hadoop.io.file.tfile.Chunk.ChunkDecoder;
import org.apache.hadoop.io.file.tfile.Chunk.ChunkEncoder;
import org.apache.hadoop.io.file.tfile.CompareUtils.BytesComparator;
import org.apache.hadoop.io.file.tfile.CompareUtils.MemcmpRawComparator;
import org.apache.hadoop.io.file.tfile.Utils.Version;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
/**
* A TFile is a container of key-value pairs. Both keys and values are type-less
* bytes. Keys are restricted to 64KB; value length is not restricted
* (practically limited to the available disk storage). TFile further provides
* the following features:
* <ul>
* <li>Block Compression.
* <li>Named meta data blocks.
* <li>Sorted or unsorted keys.
* <li>Seek by key or by file offset.
* </ul>
* The memory footprint of a TFile includes the following:
* <ul>
* <li>Some constant overhead of reading or writing a compressed block.
* <ul>
* <li>Each compressed block requires one compression/decompression codec for
* I/O.
* <li>Temporary space to buffer the key.
* <li>Temporary space to buffer the value (for TFile.Writer only). Values are
* chunk encoded, so that we buffer at most one chunk of user data. By default,
* the chunk buffer is 1MB. Reading chunked value does not require additional
* memory.
* </ul>
* <li>TFile index, which is proportional to the total number of Data Blocks.
* The total amount of memory needed to hold the index can be estimated as
* (56+AvgKeySize)*NumBlocks.
* <li>MetaBlock index, which is proportional to the total number of Meta
* Blocks.The total amount of memory needed to hold the index for Meta Blocks
* can be estimated as (40+AvgMetaBlockName)*NumMetaBlock.
* </ul>
* <p>
* The behavior of TFile can be customized by the following variables through
* Configuration:
* <ul>
* <li><b>tfile.io.chunk.size</b>: Value chunk size. Integer (in bytes). Default
* to 1MB. Values of the length less than the chunk size is guaranteed to have
* known value length in read time (See
* {@link TFile.Reader.Scanner.Entry#isValueLengthKnown()}).
* <li><b>tfile.fs.output.buffer.size</b>: Buffer size used for
* FSDataOutputStream. Integer (in bytes). Default to 256KB.
* <li><b>tfile.fs.input.buffer.size</b>: Buffer size used for
* FSDataInputStream. Integer (in bytes). Default to 256KB.
* </ul>
* <p>
* Suggestions on performance optimization.
* <ul>
* <li>Minimum block size. We recommend a setting of minimum block size between
* 256KB to 1MB for general usage. Larger block size is preferred if files are
* primarily for sequential access. However, it would lead to inefficient random
* access (because there are more data to decompress). Smaller blocks are good
* for random access, but require more memory to hold the block index, and may
* be slower to create (because we must flush the compressor stream at the
* conclusion of each data block, which leads to an FS I/O flush). Further, due
* to the internal caching in Compression codec, the smallest possible block
* size would be around 20KB-30KB.
* <li>The current implementation does not offer true multi-threading for
* reading. The implementation uses FSDataInputStream seek()+read(), which is
* shown to be much faster than positioned-read call in single thread mode.
* However, it also means that if multiple threads attempt to access the same
* TFile (using multiple scanners) simultaneously, the actual I/O is carried out
* sequentially even if they access different DFS blocks.
* <li>Compression codec. Use "none" if the data is not very compressible (by
* compressible, we mean a compression ratio of at least 2:1). Generally, use
* "lzo" as the starting point for experimenting. "gz" offers a slightly better
* compression ratio than "lzo", but requires 4x CPU to compress and 2x CPU to
* decompress, compared to "lzo".
* <li>File system buffering, if the underlying FSDataInputStream and
* FSDataOutputStream is already adequately buffered; or if applications
* reads/writes keys and values in large buffers, we can reduce the sizes of
* input/output buffering in TFile layer by setting the configuration parameters
* "tfile.fs.input.buffer.size" and "tfile.fs.output.buffer.size".
* </ul>
*
* Some design rationale behind TFile can be found at <a
* href=https://issues.apache.org/jira/browse/HADOOP-3315>Hadoop-3315</a>.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TFile {
static final Log LOG = LogFactory.getLog(TFile.class);
private static final String CHUNK_BUF_SIZE_ATTR = "tfile.io.chunk.size";
private static final String FS_INPUT_BUF_SIZE_ATTR =
"tfile.fs.input.buffer.size";
private static final String FS_OUTPUT_BUF_SIZE_ATTR =
"tfile.fs.output.buffer.size";
static int getChunkBufferSize(Configuration conf) {
int ret = conf.getInt(CHUNK_BUF_SIZE_ATTR, 1024 * 1024);
return (ret > 0) ? ret : 1024 * 1024;
}
static int getFSInputBufferSize(Configuration conf) {
return conf.getInt(FS_INPUT_BUF_SIZE_ATTR, 256 * 1024);
}
static int getFSOutputBufferSize(Configuration conf) {
return conf.getInt(FS_OUTPUT_BUF_SIZE_ATTR, 256 * 1024);
}
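// Illustrative sketch of the tuning knobs documented in the class javadoc;
// the values shown are simply the defaults used by the helpers above.
//
// Configuration conf = new Configuration();
// conf.setInt("tfile.io.chunk.size", 1024 * 1024); // value chunk size
// conf.setInt("tfile.fs.input.buffer.size", 256 * 1024); // FSDataInputStream buffering
// conf.setInt("tfile.fs.output.buffer.size", 256 * 1024); // FSDataOutputStream buffering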
private static final int MAX_KEY_SIZE = 64 * 1024; // 64KB
static final Version API_VERSION = new Version((short) 1, (short) 0);
/** compression: gzip */
public static final String COMPRESSION_GZ = "gz";
/** compression: lzo */
public static final String COMPRESSION_LZO = "lzo";
/** compression: none */
public static final String COMPRESSION_NONE = "none";
/** comparator: memcmp */
public static final String COMPARATOR_MEMCMP = "memcmp";
/** comparator prefix: java class */
public static final String COMPARATOR_JCLASS = "jclass:";
/**
* Make a raw comparator from a string name.
*
* @param name
* Comparator name
* @return A RawComparable comparator.
*/
static public Comparator<RawComparable> makeComparator(String name) {
return TFileMeta.makeComparator(name);
}
// Prevent the instantiation of TFiles
private TFile() {
// nothing
}
/**
* Get names of supported compression algorithms. The names are acceptable by
* TFile.Writer.
*
* @return Array of strings, each represents a supported compression
* algorithm. Currently, the following compression algorithms are
* supported.
* <ul>
* <li>"none" - No compression.
* <li>"lzo" - LZO compression.
* <li>"gz" - GZIP compression.
* </ul>
*/
public static String[] getSupportedCompressionAlgorithms() {
return Compression.getSupportedAlgorithms();
}
/**
* TFile Writer.
*/
@InterfaceStability.Evolving
public static class Writer implements Closeable {
// minimum compressed size for a block.
private final int sizeMinBlock;
// Meta blocks.
final TFileIndex tfileIndex;
final TFileMeta tfileMeta;
// reference to the underlying BCFile.
private BCFile.Writer writerBCF;
// current data block appender.
BlockAppender blkAppender;
long blkRecordCount;
// buffers for caching the key.
BoundedByteArrayOutputStream currentKeyBufferOS;
BoundedByteArrayOutputStream lastKeyBufferOS;
// buffer used by chunk codec
private byte[] valueBuffer;
/**
* Writer states. The state always transitions in a cycle: READY -> IN_KEY ->
* END_KEY -> IN_VALUE -> READY.
*/
private enum State {
READY, // Ready to start a new key-value pair insertion.
IN_KEY, // In the middle of key insertion.
END_KEY, // Key insertion complete, ready to insert value.
IN_VALUE, // In value insertion.
// ERROR, // Error encountered, cannot continue.
CLOSED, // TFile already closed.
};
// current state of Writer.
State state = State.READY;
Configuration conf;
long errorCount = 0;
/**
* Constructor
*
* @param fsdos
* output stream for writing. Must be at position 0.
* @param minBlockSize
* Minimum compressed block size in bytes. A compression block will
* not be closed until it reaches this size except for the last
* block.
* @param compressName
* Name of the compression algorithm. Must be one of the strings
* returned by {@link TFile#getSupportedCompressionAlgorithms()}.
* @param comparator
* Leave comparator as null or empty string if TFile is not sorted.
* Otherwise, provide the string name for the comparison algorithm
* for keys. Two kinds of comparators are supported.
* <ul>
* <li>Algorithmic comparator: binary comparators that are language
* independent. Currently, only "memcmp" is supported.
* <li>Language-specific comparator: binary comparators that can
* only be constructed in a specific language. For Java, the syntax
* is "jclass:", followed by the class name of the RawComparator.
* Currently, we only support RawComparators that can be
* constructed through the default constructor (with no
* parameters). Parameterized RawComparators such as
* {@link WritableComparator} or
* {@link JavaSerializationComparator} may not be directly used.
* One should write a wrapper class that inherits from such classes
* and use its default constructor to perform proper
* initialization.
* </ul>
* @param conf
* The configuration object.
* @throws IOException
*/
public Writer(FSDataOutputStream fsdos, int minBlockSize,
String compressName, String comparator, Configuration conf)
throws IOException {
sizeMinBlock = minBlockSize;
tfileMeta = new TFileMeta(comparator);
tfileIndex = new TFileIndex(tfileMeta.getComparator());
writerBCF = new BCFile.Writer(fsdos, compressName, conf);
currentKeyBufferOS = new BoundedByteArrayOutputStream(MAX_KEY_SIZE);
lastKeyBufferOS = new BoundedByteArrayOutputStream(MAX_KEY_SIZE);
this.conf = conf;
}
/**
* Close the Writer. Resources will be released regardless of the exceptions
* being thrown. Future close calls will have no effect.
*
* The underlying FSDataOutputStream is not closed.
*/
@Override
public void close() throws IOException {
if ((state == State.CLOSED)) {
return;
}
try {
// First try the normal finish.
// Terminate upon the first Exception.
if (errorCount == 0) {
if (state != State.READY) {
throw new IllegalStateException(
"Cannot close TFile in the middle of key-value insertion.");
}
finishDataBlock(true);
// first, write out data:TFile.meta
BlockAppender outMeta =
writerBCF
.prepareMetaBlock(TFileMeta.BLOCK_NAME, COMPRESSION_NONE);
try {
tfileMeta.write(outMeta);
} finally {
outMeta.close();
}
// second, write out data:TFile.index
BlockAppender outIndex =
writerBCF.prepareMetaBlock(TFileIndex.BLOCK_NAME);
try {
tfileIndex.write(outIndex);
} finally {
outIndex.close();
}
writerBCF.close();
}
} finally {
IOUtils.cleanup(LOG, blkAppender, writerBCF);
blkAppender = null;
writerBCF = null;
state = State.CLOSED;
}
}
/**
* Adding a new key-value pair to the TFile. This is synonymous to
* append(key, 0, key.length, value, 0, value.length)
*
* @param key
* Buffer for key.
* @param value
* Buffer for value.
* @throws IOException
*/
public void append(byte[] key, byte[] value) throws IOException {
append(key, 0, key.length, value, 0, value.length);
}
/**
* Adding a new key-value pair to TFile.
*
* @param key
* buffer for key.
* @param koff
* offset in key buffer.
* @param klen
* length of key.
* @param value
* buffer for value.
* @param voff
* offset in value buffer.
* @param vlen
* length of value.
* @throws IOException
* Upon IO errors.
* <p>
* If an exception is thrown, the TFile will be in an inconsistent
* state. The only legitimate call after that would be close
*/
public void append(byte[] key, int koff, int klen, byte[] value, int voff,
int vlen) throws IOException {
if ((koff | klen | (koff + klen) | (key.length - (koff + klen))) < 0) {
throw new IndexOutOfBoundsException(
"Bad key buffer offset-length combination.");
}
if ((voff | vlen | (voff + vlen) | (value.length - (voff + vlen))) < 0) {
throw new IndexOutOfBoundsException(
"Bad value buffer offset-length combination.");
}
try {
DataOutputStream dosKey = prepareAppendKey(klen);
try {
++errorCount;
dosKey.write(key, koff, klen);
--errorCount;
} finally {
dosKey.close();
}
DataOutputStream dosValue = prepareAppendValue(vlen);
try {
++errorCount;
dosValue.write(value, voff, vlen);
--errorCount;
} finally {
dosValue.close();
}
} finally {
state = State.READY;
}
}
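// Illustrative write-path sketch, assuming "fsdos" is an FSDataOutputStream
// positioned at 0, "conf" is a Configuration, and 64KB is an arbitrary
// minimum block size. The Writer does not close the underlying stream.
//
// TFile.Writer writer =
// new TFile.Writer(fsdos, 64 * 1024, TFile.COMPRESSION_NONE,
// TFile.COMPARATOR_MEMCMP, conf);
// writer.append("k1".getBytes(), "v1".getBytes());
// writer.append("k2".getBytes(), "v2".getBytes()); // keys must stay sorted
// writer.close();
// fsdos.close();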
/**
* Helper class to register key after close call on key append stream.
*/
private class KeyRegister extends DataOutputStream {
private final int expectedLength;
private boolean closed = false;
public KeyRegister(int len) {
super(currentKeyBufferOS);
if (len >= 0) {
currentKeyBufferOS.reset(len);
} else {
currentKeyBufferOS.reset();
}
expectedLength = len;
}
@Override
public void close() throws IOException {
if (closed == true) {
return;
}
try {
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
/**
* verify length.
*/
if (expectedLength >= 0 && expectedLength != len) {
throw new IOException("Incorrect key length: expected="
+ expectedLength + " actual=" + len);
}
Utils.writeVInt(blkAppender, len);
blkAppender.write(key, 0, len);
if (tfileIndex.getFirstKey() == null) {
tfileIndex.setFirstKey(key, 0, len);
}
if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
byte[] lastKey = lastKeyBufferOS.getBuffer();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
throw new IOException("Keys are not added in sorted order");
}
}
BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
currentKeyBufferOS = lastKeyBufferOS;
lastKeyBufferOS = tmp;
--errorCount;
} finally {
closed = true;
state = State.END_KEY;
}
}
}
/**
* Helper class to register value after close call on value append stream.
*/
private class ValueRegister extends DataOutputStream {
private boolean closed = false;
public ValueRegister(OutputStream os) {
super(os);
}
// Avoiding flushing call to down stream.
@Override
public void flush() {
// do nothing
}
@Override
public void close() throws IOException {
if (closed == true) {
return;
}
try {
++errorCount;
super.close();
blkRecordCount++;
// bump up the total record count in the whole file
tfileMeta.incRecordCount();
finishDataBlock(false);
--errorCount;
} finally {
closed = true;
state = State.READY;
}
}
}
/**
* Obtain an output stream for writing a key into TFile. This may only be
* called when there is no active Key appending stream or value appending
* stream.
*
* @param length
* The expected length of the key. If length of the key is not
* known, set length = -1. Otherwise, the application must write
* exactly as many bytes as specified here before calling close on
* the returned output stream.
* @return The key appending output stream.
* @throws IOException
*
*/
public DataOutputStream prepareAppendKey(int length) throws IOException {
if (state != State.READY) {
throw new IllegalStateException("Incorrect state to start a new key: "
+ state.name());
}
initDataBlock();
DataOutputStream ret = new KeyRegister(length);
state = State.IN_KEY;
return ret;
}
/**
* Obtain an output stream for writing a value into TFile. This may only be
* called right after a key appending operation (the key append stream must
* be closed).
*
* @param length
* The expected length of the value. If length of the value is not
* known, set length = -1. Otherwise, the application must write
* exactly as many bytes as specified here before calling close on
* the returned output stream. Advertising the value size up-front
* guarantees that the value is encoded in one chunk, and avoids
* intermediate chunk buffering.
* @throws IOException
*
*/
public DataOutputStream prepareAppendValue(int length) throws IOException {
if (state != State.END_KEY) {
throw new IllegalStateException(
"Incorrect state to start a new value: " + state.name());
}
DataOutputStream ret;
// unknown length
if (length < 0) {
if (valueBuffer == null) {
valueBuffer = new byte[getChunkBufferSize(conf)];
}
ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
} else {
ret =
new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
}
state = State.IN_VALUE;
return ret;
}
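// Illustrative streaming sketch ("writer", "keyBytes" and "valueBytes" are
// assumptions): advertising lengths up-front lets the value be encoded as a
// single chunk.
//
// DataOutputStream k = writer.prepareAppendKey(keyBytes.length);
// k.write(keyBytes);
// k.close(); // registers the key
// DataOutputStream v = writer.prepareAppendValue(valueBytes.length);
// v.write(valueBytes);
// v.close(); // registers the value; record complete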
/**
* Obtain an output stream for creating a meta block. This function may not
* be called when there is a key append stream or value append stream
* active. No more key-value insertion is allowed after a meta data block
* has been added to TFile.
*
* @param name
* Name of the meta block.
* @param compressName
* Name of the compression algorithm to be used. Must be one of the
* strings returned by
* {@link TFile#getSupportedCompressionAlgorithms()}.
* @return A DataOutputStream that can be used to write Meta Block data.
* Closing the stream would signal the ending of the block.
* @throws IOException
* @throws MetaBlockAlreadyExists
* the Meta Block with the same name already exists.
*/
public DataOutputStream prepareMetaBlock(String name, String compressName)
throws IOException, MetaBlockAlreadyExists {
if (state != State.READY) {
throw new IllegalStateException(
"Incorrect state to start a Meta Block: " + state.name());
}
finishDataBlock(true);
DataOutputStream outputStream =
writerBCF.prepareMetaBlock(name, compressName);
return outputStream;
}
/**
* Obtain an output stream for creating a meta block. This function may not
* be called when there is a key append stream or value append stream
* active. No more key-value insertion is allowed after a meta data block
* has been added to TFile. Data will be compressed using the default
* compressor as defined in Writer's constructor.
*
* @param name
* Name of the meta block.
* @return A DataOutputStream that can be used to write Meta Block data.
* Closing the stream would signal the ending of the block.
* @throws IOException
* @throws MetaBlockAlreadyExists
* the Meta Block with the same name already exists.
*/
public DataOutputStream prepareMetaBlock(String name) throws IOException,
MetaBlockAlreadyExists {
if (state != State.READY) {
throw new IllegalStateException(
"Incorrect state to start a Meta Block: " + state.name());
}
finishDataBlock(true);
return writerBCF.prepareMetaBlock(name);
}
/**
* Check if we need to start a new data block.
*
* @throws IOException
*/
private void initDataBlock() throws IOException {
// for each new block, get a new appender
if (blkAppender == null) {
blkAppender = writerBCF.prepareDataBlock();
}
}
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
/**
* TFile Reader. Users may only read TFiles by creating TFile.Reader.Scanner.
* objects. A scanner may scan the whole TFile ({@link Reader#createScanner()}
* ) , a portion of TFile based on byte offsets (
* {@link Reader#createScannerByByteRange(long, long)}), or a portion of TFile with keys
* fall in a certain key range (for sorted TFile only,
* {@link Reader#createScannerByKey(byte[], byte[])} or
* {@link Reader#createScannerByKey(RawComparable, RawComparable)}).
*/
@InterfaceStability.Evolving
public static class Reader implements Closeable {
// The underlying BCFile reader.
final BCFile.Reader readerBCF;
// TFile index, it is loaded lazily.
TFileIndex tfileIndex = null;
final TFileMeta tfileMeta;
final BytesComparator comparator;
// global begin and end locations.
private final Location begin;
private final Location end;
/**
* Location representing a virtual position in the TFile.
*/
static final class Location implements Comparable<Location>, Cloneable {
private int blockIndex;
// distance/offset from the beginning of the block
private long recordIndex;
Location(int blockIndex, long recordIndex) {
set(blockIndex, recordIndex);
}
void incRecordIndex() {
++recordIndex;
}
Location(Location other) {
set(other);
}
int getBlockIndex() {
return blockIndex;
}
long getRecordIndex() {
return recordIndex;
}
void set(int blockIndex, long recordIndex) {
if ((blockIndex | recordIndex) < 0) {
throw new IllegalArgumentException(
"Illegal parameter for BlockLocation.");
}
this.blockIndex = blockIndex;
this.recordIndex = recordIndex;
}
void set(Location other) {
set(other.blockIndex, other.recordIndex);
}
/**
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Location other) {
return compareTo(other.blockIndex, other.recordIndex);
}
int compareTo(int bid, long rid) {
if (this.blockIndex == bid) {
long ret = this.recordIndex - rid;
if (ret > 0) return 1;
if (ret < 0) return -1;
return 0;
}
return this.blockIndex - bid;
}
/**
* @see java.lang.Object#clone()
*/
@Override
protected Location clone() {
return new Location(blockIndex, recordIndex);
}
/**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = prime + blockIndex;
result = (int) (prime * result + recordIndex);
return result;
}
/**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
Location other = (Location) obj;
if (blockIndex != other.blockIndex) return false;
if (recordIndex != other.recordIndex) return false;
return true;
}
}
/**
* Constructor
*
* @param fsdis
* FS input stream of the TFile.
* @param fileLength
* The length of TFile. This is required because we have no easy
* way of knowing the actual size of the input file through the
* File input stream.
* @param conf
* @throws IOException
*/
public Reader(FSDataInputStream fsdis, long fileLength, Configuration conf)
throws IOException {
readerBCF = new BCFile.Reader(fsdis, fileLength, conf);
// first, read TFile meta
BlockReader brMeta = readerBCF.getMetaBlock(TFileMeta.BLOCK_NAME);
try {
tfileMeta = new TFileMeta(brMeta);
} finally {
brMeta.close();
}
comparator = tfileMeta.getComparator();
// Set begin and end locations.
begin = new Location(0, 0);
end = new Location(readerBCF.getBlockCount(), 0);
}
/**
* Close the reader. The state of the Reader object is undefined after
* close. Calling close() multiple times has no effect.
*/
@Override
public void close() throws IOException {
readerBCF.close();
}
/**
* Get the begin location of the TFile.
*
* @return If TFile is not empty, the location of the first key-value pair.
* Otherwise, it returns end().
*/
Location begin() {
return begin;
}
/**
* Get the end location of the TFile.
*
* @return The location right after the last key-value pair in TFile.
*/
Location end() {
return end;
}
/**
* Get the string representation of the comparator.
*
* @return If the TFile is not sorted by keys, an empty string will be
* returned. Otherwise, the actual comparator string that is
* provided during the TFile creation time will be returned.
*/
public String getComparatorName() {
return tfileMeta.getComparatorString();
}
/**
* Is the TFile sorted?
*
* @return true if TFile is sorted.
*/
public boolean isSorted() {
return tfileMeta.isSorted();
}
/**
* Get the number of key-value pair entries in TFile.
*
* @return the number of key-value pairs in TFile
*/
public long getEntryCount() {
return tfileMeta.getRecordCount();
}
/**
* Lazily loading the TFile index.
*
* @throws IOException
*/
synchronized void checkTFileDataIndex() throws IOException {
if (tfileIndex == null) {
BlockReader brIndex = readerBCF.getMetaBlock(TFileIndex.BLOCK_NAME);
try {
tfileIndex =
new TFileIndex(readerBCF.getBlockCount(), brIndex, tfileMeta
.getComparator());
} finally {
brIndex.close();
}
}
}
/**
* Get the first key in the TFile.
*
* @return The first key in the TFile.
* @throws IOException
*/
public RawComparable getFirstKey() throws IOException {
checkTFileDataIndex();
return tfileIndex.getFirstKey();
}
/**
* Get the last key in the TFile.
*
* @return The last key in the TFile.
* @throws IOException
*/
public RawComparable getLastKey() throws IOException {
checkTFileDataIndex();
return tfileIndex.getLastKey();
}
/**
* Get a Comparator object to compare Entries. It is useful when you want to
* store the entries in a collection (such as a PriorityQueue) and perform
* sorting or comparison among entries based on the keys without copying out
* the key.
*
* @return An Entry Comparator.
*/
public Comparator<Scanner.Entry> getEntryComparator() {
if (!isSorted()) {
throw new RuntimeException(
"Entries are not comparable for unsorted TFiles");
}
return new Comparator<Scanner.Entry>() {
/**
* Provide a customized comparator for Entries. This is useful if we
* have a collection of Entry objects. However, if the Entry objects
* come from different TFiles, users must ensure that those TFiles share
* the same RawComparator.
*/
@Override
public int compare(Scanner.Entry o1, Scanner.Entry o2) {
return comparator.compare(o1.getKeyBuffer(), 0, o1.getKeyLength(), o2
.getKeyBuffer(), 0, o2.getKeyLength());
}
};
}
/**
* Get an instance of the RawComparator that is constructed based on the
* string comparator representation.
*
* @return a Comparator that can compare RawComparable's.
*/
public Comparator<RawComparable> getComparator() {
return comparator;
}
/**
* Stream access to a meta block.
*
* @param name
* The name of the meta block.
* @return The input stream.
* @throws IOException
* on I/O error.
* @throws MetaBlockDoesNotExist
* If the meta block with the name does not exist.
*/
public DataInputStream getMetaBlock(String name) throws IOException,
MetaBlockDoesNotExist {
return readerBCF.getMetaBlock(name);
}
/**
* If greater is true, returns the beginning location of the first block
* containing a key strictly greater than the input key. If greater is false,
* returns the beginning location of the first block containing a key greater
* than or equal to the input key.
*
* @param key
* the input key
* @param greater
* boolean flag
* @return the location of the qualifying block; or end() if no such block
* exists.
* @throws IOException
*/
Location getBlockContainsKey(RawComparable key, boolean greater)
throws IOException {
if (!isSorted()) {
throw new RuntimeException("Seeking in unsorted TFile");
}
checkTFileDataIndex();
int blkIndex =
(greater) ? tfileIndex.upperBound(key) : tfileIndex.lowerBound(key);
if (blkIndex < 0) return end;
return new Location(blkIndex, 0);
}
Location getLocationByRecordNum(long recNum) throws IOException {
checkTFileDataIndex();
return tfileIndex.getLocationByRecordNum(recNum);
}
long getRecordNumByLocation(Location location) throws IOException {
checkTFileDataIndex();
return tfileIndex.getRecordNumByLocation(location);
}
int compareKeys(byte[] a, int o1, int l1, byte[] b, int o2, int l2) {
if (!isSorted()) {
throw new RuntimeException("Cannot compare keys for unsorted TFiles.");
}
return comparator.compare(a, o1, l1, b, o2, l2);
}
int compareKeys(RawComparable a, RawComparable b) {
if (!isSorted()) {
throw new RuntimeException("Cannot compare keys for unsorted TFiles.");
}
return comparator.compare(a, b);
}
/**
* Get the location pointing to the beginning of the first key-value pair in
* a compressed block whose byte offset in the TFile is greater than or
* equal to the specified offset.
*
* @param offset
* the user supplied offset.
* @return the location to the corresponding entry; or end() if no such
* entry exists.
*/
Location getLocationNear(long offset) {
int blockIndex = readerBCF.getBlockIndexNear(offset);
if (blockIndex == -1) return end;
return new Location(blockIndex, 0);
}
/**
* Get the RecordNum for the first key-value pair in a compressed block
* whose byte offset in the TFile is greater than or equal to the specified
* offset.
*
* @param offset
* the user supplied offset.
* @return the RecordNum to the corresponding entry. If no such entry
* exists, it returns the total entry count.
* @throws IOException
*/
public long getRecordNumNear(long offset) throws IOException {
return getRecordNumByLocation(getLocationNear(offset));
}
/**
* Get a sample key that is within a block whose starting offset is greater
* than or equal to the specified offset.
*
* @param offset
* The file offset.
* @return the key that fits the requirement; or null if no such key exists
* (which could happen if the offset is close to the end of the
* TFile).
* @throws IOException
*/
public RawComparable getKeyNear(long offset) throws IOException {
int blockIndex = readerBCF.getBlockIndexNear(offset);
if (blockIndex == -1) return null;
checkTFileDataIndex();
return new ByteArray(tfileIndex.getEntry(blockIndex).key);
}
/**
     * Get a scanner that can scan the whole TFile.
*
* @return The scanner object. A valid Scanner is always returned even if
* the TFile is empty.
* @throws IOException
*/
public Scanner createScanner() throws IOException {
return new Scanner(this, begin, end);
}
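    // A minimal usage sketch, assuming a Reader named "reader" has already been
    // opened elsewhere: iterate every key-value pair with a whole-file scanner.
    //
    //   Scanner scanner = reader.createScanner();
    //   try {
    //     BytesWritable key = new BytesWritable();
    //     BytesWritable value = new BytesWritable();
    //     while (!scanner.atEnd()) {
    //       scanner.entry().get(key, value);   // copy current key and value
    //       // ... process key/value ...
    //       scanner.advance();
    //     }
    //   } finally {
    //     scanner.close();
    //   }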
/**
* Get a scanner that covers a portion of TFile based on byte offsets.
*
* @param offset
* The beginning byte offset in the TFile.
* @param length
* The length of the region.
     * @return The actual coverage of the returned scanner tries to match the
     *         specified byte-region but always rounds up to the compression
     *         block boundaries. It is possible that the returned scanner
     *         contains zero key-value pairs even if length is positive.
* @throws IOException
*/
public Scanner createScannerByByteRange(long offset, long length) throws IOException {
return new Scanner(this, offset, offset + length);
}
/**
* Get a scanner that covers a portion of TFile based on keys.
*
* @param beginKey
* Begin key of the scan (inclusive). If null, scan from the first
* key-value entry of the TFile.
* @param endKey
* End key of the scan (exclusive). If null, scan up to the last
* key-value entry of the TFile.
* @return The actual coverage of the returned scanner will cover all keys
* greater than or equal to the beginKey and less than the endKey.
* @throws IOException
*
* @deprecated Use {@link #createScannerByKey(byte[], byte[])} instead.
*/
@Deprecated
public Scanner createScanner(byte[] beginKey, byte[] endKey)
throws IOException {
return createScannerByKey(beginKey, endKey);
}
/**
* Get a scanner that covers a portion of TFile based on keys.
*
* @param beginKey
* Begin key of the scan (inclusive). If null, scan from the first
* key-value entry of the TFile.
* @param endKey
* End key of the scan (exclusive). If null, scan up to the last
* key-value entry of the TFile.
* @return The actual coverage of the returned scanner will cover all keys
* greater than or equal to the beginKey and less than the endKey.
* @throws IOException
*/
public Scanner createScannerByKey(byte[] beginKey, byte[] endKey)
throws IOException {
return createScannerByKey((beginKey == null) ? null : new ByteArray(beginKey,
0, beginKey.length), (endKey == null) ? null : new ByteArray(endKey,
0, endKey.length));
}
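    // A hedged sketch of a key-range scan; the keys "row100"/"row200" and the
    // java.nio.charset.StandardCharsets import are assumptions for
    // illustration only.
    //
    //   byte[] beginKey = "row100".getBytes(StandardCharsets.UTF_8);
    //   byte[] endKey = "row200".getBytes(StandardCharsets.UTF_8);
    //   Scanner range = reader.createScannerByKey(beginKey, endKey);
    //   try {
    //     for (; !range.atEnd(); range.advance()) {
    //       // ... examine range.entry() ...
    //     }
    //   } finally {
    //     range.close();
    //   }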
/**
* Get a scanner that covers a specific key range.
*
* @param beginKey
* Begin key of the scan (inclusive). If null, scan from the first
* key-value entry of the TFile.
* @param endKey
* End key of the scan (exclusive). If null, scan up to the last
* key-value entry of the TFile.
* @return The actual coverage of the returned scanner will cover all keys
* greater than or equal to the beginKey and less than the endKey.
* @throws IOException
*
* @deprecated Use {@link #createScannerByKey(RawComparable, RawComparable)}
* instead.
*/
@Deprecated
public Scanner createScanner(RawComparable beginKey, RawComparable endKey)
throws IOException {
return createScannerByKey(beginKey, endKey);
}
/**
* Get a scanner that covers a specific key range.
*
* @param beginKey
* Begin key of the scan (inclusive). If null, scan from the first
* key-value entry of the TFile.
* @param endKey
* End key of the scan (exclusive). If null, scan up to the last
* key-value entry of the TFile.
* @return The actual coverage of the returned scanner will cover all keys
* greater than or equal to the beginKey and less than the endKey.
* @throws IOException
*/
public Scanner createScannerByKey(RawComparable beginKey, RawComparable endKey)
throws IOException {
if ((beginKey != null) && (endKey != null)
&& (compareKeys(beginKey, endKey) >= 0)) {
return new Scanner(this, beginKey, beginKey);
}
return new Scanner(this, beginKey, endKey);
}
/**
* Create a scanner that covers a range of records.
*
* @param beginRecNum
* The RecordNum for the first record (inclusive).
* @param endRecNum
* The RecordNum for the last record (exclusive). To scan the whole
* file, either specify endRecNum==-1 or endRecNum==getEntryCount().
* @return The TFile scanner that covers the specified range of records.
* @throws IOException
*/
public Scanner createScannerByRecordNum(long beginRecNum, long endRecNum)
throws IOException {
if (beginRecNum < 0) beginRecNum = 0;
if (endRecNum < 0 || endRecNum > getEntryCount()) {
endRecNum = getEntryCount();
}
return new Scanner(this, getLocationByRecordNum(beginRecNum),
getLocationByRecordNum(endRecNum));
}
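    // A minimal sketch, assuming "reader" is an open Reader: split the file
    // into two record-number ranges, e.g. to process halves in parallel.
    //
    //   long mid = reader.getEntryCount() / 2;
    //   Scanner firstHalf = reader.createScannerByRecordNum(0, mid);
    //   Scanner secondHalf = reader.createScannerByRecordNum(mid, -1);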
/**
* The TFile Scanner. The Scanner has an implicit cursor, which, upon
* creation, points to the first key-value pair in the scan range. If the
* scan range is empty, the cursor will point to the end of the scan range.
* <p>
* Use {@link Scanner#atEnd()} to test whether the cursor is at the end
* location of the scanner.
* <p>
* Use {@link Scanner#advance()} to move the cursor to the next key-value
* pair (or end if none exists). Use seekTo methods (
* {@link Scanner#seekTo(byte[])} or
* {@link Scanner#seekTo(byte[], int, int)}) to seek to any arbitrary
* location in the covered range (including backward seeking). Use
* {@link Scanner#rewind()} to seek back to the beginning of the scanner.
* Use {@link Scanner#seekToEnd()} to seek to the end of the scanner.
* <p>
* Actual keys and values may be obtained through {@link Scanner.Entry}
* object, which is obtained through {@link Scanner#entry()}.
*/
public static class Scanner implements Closeable {
// The underlying TFile reader.
final Reader reader;
// current block (null if reaching end)
private BlockReader blkReader;
Location beginLocation;
Location endLocation;
Location currentLocation;
// flag to ensure value is only examined once.
boolean valueChecked = false;
// reusable buffer for keys.
final byte[] keyBuffer;
// length of key, -1 means key is invalid.
int klen = -1;
static final int MAX_VAL_TRANSFER_BUF_SIZE = 128 * 1024;
BytesWritable valTransferBuffer;
DataInputBuffer keyDataInputStream;
ChunkDecoder valueBufferInputStream;
DataInputStream valueDataInputStream;
// vlen == -1 if unknown.
int vlen;
/**
* Constructor
*
* @param reader
* The TFile reader object.
* @param offBegin
* Begin byte-offset of the scan.
* @param offEnd
* End byte-offset of the scan.
* @throws IOException
*
* The offsets will be rounded to the beginning of a compressed
* block whose offset is greater than or equal to the specified
* offset.
*/
protected Scanner(Reader reader, long offBegin, long offEnd)
throws IOException {
this(reader, reader.getLocationNear(offBegin), reader
.getLocationNear(offEnd));
}
/**
* Constructor
*
* @param reader
* The TFile reader object.
* @param begin
* Begin location of the scan.
* @param end
* End location of the scan.
* @throws IOException
*/
Scanner(Reader reader, Location begin, Location end) throws IOException {
this.reader = reader;
// ensure the TFile index is loaded throughout the life of scanner.
reader.checkTFileDataIndex();
beginLocation = begin;
endLocation = end;
valTransferBuffer = new BytesWritable();
// TODO: remember the longest key in a TFile, and use it to replace
// MAX_KEY_SIZE.
keyBuffer = new byte[MAX_KEY_SIZE];
keyDataInputStream = new DataInputBuffer();
valueBufferInputStream = new ChunkDecoder();
valueDataInputStream = new DataInputStream(valueBufferInputStream);
if (beginLocation.compareTo(endLocation) >= 0) {
currentLocation = new Location(endLocation);
} else {
currentLocation = new Location(0, 0);
initBlock(beginLocation.getBlockIndex());
inBlockAdvance(beginLocation.getRecordIndex());
}
}
/**
* Constructor
*
* @param reader
* The TFile reader object.
* @param beginKey
* Begin key of the scan. If null, scan from the first <K,V>
* entry of the TFile.
* @param endKey
* End key of the scan. If null, scan up to the last <K, V> entry
* of the TFile.
* @throws IOException
*/
protected Scanner(Reader reader, RawComparable beginKey,
RawComparable endKey) throws IOException {
this(reader, (beginKey == null) ? reader.begin() : reader
.getBlockContainsKey(beginKey, false), reader.end());
if (beginKey != null) {
inBlockAdvance(beginKey, false);
beginLocation.set(currentLocation);
}
if (endKey != null) {
seekTo(endKey, false);
endLocation.set(currentLocation);
seekTo(beginLocation);
}
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. Synonymous to seekTo(key, 0, key.length). The entry
* returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @return true if we find an equal key.
* @throws IOException
*/
public boolean seekTo(byte[] key) throws IOException {
return seekTo(key, 0, key.length);
}
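      // A small sketch, assuming "scanner" covers the whole file and "key" is
      // a byte[] encoded consistently with the TFile's comparator: point
      // lookup of a single key.
      //
      //   if (scanner.seekTo(key)) {
      //     BytesWritable value = new BytesWritable();
      //     scanner.entry().getValue(value);
      //   }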
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. The entry returned by the previous entry() call will
* be invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @return true if we find an equal key; false otherwise.
* @throws IOException
*/
public boolean seekTo(byte[] key, int keyOffset, int keyLen)
throws IOException {
return seekTo(new ByteArray(key, keyOffset, keyLen), false);
}
private boolean seekTo(RawComparable key, boolean beyond)
throws IOException {
Location l = reader.getBlockContainsKey(key, beyond);
if (l.compareTo(beginLocation) < 0) {
l = beginLocation;
} else if (l.compareTo(endLocation) >= 0) {
seekTo(endLocation);
return false;
}
// check if what we are seeking is in the later part of the current
// block.
if (atEnd() || (l.getBlockIndex() != currentLocation.getBlockIndex())
|| (compareCursorKeyTo(key) >= 0)) {
// sorry, we must seek to a different location first.
seekTo(l);
}
return inBlockAdvance(key, beyond);
}
/**
* Move the cursor to the new location. The entry returned by the previous
* entry() call will be invalid.
*
* @param l
* new cursor location. It must fall between the begin and end
* location of the scanner.
* @throws IOException
*/
private void seekTo(Location l) throws IOException {
if (l.compareTo(beginLocation) < 0) {
throw new IllegalArgumentException(
"Attempt to seek before the begin location.");
}
if (l.compareTo(endLocation) > 0) {
throw new IllegalArgumentException(
"Attempt to seek after the end location.");
}
if (l.compareTo(endLocation) == 0) {
parkCursorAtEnd();
return;
}
if (l.getBlockIndex() != currentLocation.getBlockIndex()) {
// going to a totally different block
initBlock(l.getBlockIndex());
} else {
if (valueChecked) {
          // may temporarily go beyond the last record in the block (in which
          // case the next if condition will always be true).
inBlockAdvance(1);
}
if (l.getRecordIndex() < currentLocation.getRecordIndex()) {
initBlock(l.getBlockIndex());
}
}
inBlockAdvance(l.getRecordIndex() - currentLocation.getRecordIndex());
return;
}
/**
* Rewind to the first entry in the scanner. The entry returned by the
* previous entry() call will be invalid.
*
* @throws IOException
*/
public void rewind() throws IOException {
seekTo(beginLocation);
}
/**
* Seek to the end of the scanner. The entry returned by the previous
* entry() call will be invalid.
*
* @throws IOException
*/
public void seekToEnd() throws IOException {
parkCursorAtEnd();
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. Synonymous to lowerBound(key, 0, key.length). The
* entry returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @throws IOException
*/
public void lowerBound(byte[] key) throws IOException {
lowerBound(key, 0, key.length);
}
/**
* Move the cursor to the first entry whose key is greater than or equal
* to the input key. The entry returned by the previous entry() call will
* be invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @throws IOException
*/
public void lowerBound(byte[] key, int keyOffset, int keyLen)
throws IOException {
seekTo(new ByteArray(key, keyOffset, keyLen), false);
}
/**
* Move the cursor to the first entry whose key is strictly greater than
* the input key. Synonymous to upperBound(key, 0, key.length). The entry
* returned by the previous entry() call will be invalid.
*
* @param key
* The input key
* @throws IOException
*/
public void upperBound(byte[] key) throws IOException {
upperBound(key, 0, key.length);
}
/**
* Move the cursor to the first entry whose key is strictly greater than
* the input key. The entry returned by the previous entry() call will be
* invalid.
*
* @param key
* The input key
* @param keyOffset
* offset in the key buffer.
* @param keyLen
* key buffer length.
* @throws IOException
*/
public void upperBound(byte[] key, int keyOffset, int keyLen)
throws IOException {
seekTo(new ByteArray(key, keyOffset, keyLen), true);
}
/**
* Move the cursor to the next key-value pair. The entry returned by the
* previous entry() call will be invalid.
*
* @return true if the cursor successfully moves. False when cursor is
* already at the end location and cannot be advanced.
* @throws IOException
*/
public boolean advance() throws IOException {
if (atEnd()) {
return false;
}
int curBid = currentLocation.getBlockIndex();
long curRid = currentLocation.getRecordIndex();
long entriesInBlock = reader.getBlockEntryCount(curBid);
if (curRid + 1 >= entriesInBlock) {
if (endLocation.compareTo(curBid + 1, 0) <= 0) {
// last entry in TFile.
parkCursorAtEnd();
} else {
// last entry in Block.
initBlock(curBid + 1);
}
} else {
inBlockAdvance(1);
}
return true;
}
/**
       * Load a compressed block for reading. Expects blockIndex to be valid.
*
* @throws IOException
*/
private void initBlock(int blockIndex) throws IOException {
klen = -1;
if (blkReader != null) {
try {
blkReader.close();
} finally {
blkReader = null;
}
}
blkReader = reader.getBlockReader(blockIndex);
currentLocation.set(blockIndex, 0);
}
private void parkCursorAtEnd() throws IOException {
klen = -1;
currentLocation.set(endLocation);
if (blkReader != null) {
try {
blkReader.close();
} finally {
blkReader = null;
}
}
}
/**
* Close the scanner. Release all resources. The behavior of using the
* scanner after calling close is not defined. The entry returned by the
* previous entry() call will be invalid.
*/
@Override
public void close() throws IOException {
parkCursorAtEnd();
}
/**
* Is cursor at the end location?
*
* @return true if the cursor is at the end location.
*/
public boolean atEnd() {
return (currentLocation.compareTo(endLocation) >= 0);
}
/**
* check whether we have already successfully obtained the key. It also
* initializes the valueInputStream.
*/
void checkKey() throws IOException {
if (klen >= 0) return;
if (atEnd()) {
throw new EOFException("No key-value to read");
}
klen = -1;
vlen = -1;
valueChecked = false;
klen = Utils.readVInt(blkReader);
blkReader.readFully(keyBuffer, 0, klen);
valueBufferInputStream.reset(blkReader);
if (valueBufferInputStream.isLastChunk()) {
vlen = valueBufferInputStream.getRemain();
}
}
/**
* Get an entry to access the key and value.
*
* @return The Entry object to access the key and value.
* @throws IOException
*/
public Entry entry() throws IOException {
checkKey();
return new Entry();
}
/**
* Get the RecordNum corresponding to the entry pointed by the cursor.
* @return The RecordNum corresponding to the entry pointed by the cursor.
* @throws IOException
*/
public long getRecordNum() throws IOException {
return reader.getRecordNumByLocation(currentLocation);
}
/**
* Internal API. Comparing the key at cursor to user-specified key.
*
* @param other
* user-specified key.
       * @return negative if the key at cursor is smaller than the user key; 0
       *         if equal; and positive if the key at cursor is greater than
       *         the user key.
* @throws IOException
*/
int compareCursorKeyTo(RawComparable other) throws IOException {
checkKey();
return reader.compareKeys(keyBuffer, 0, klen, other.buffer(), other
.offset(), other.size());
}
/**
* Entry to a <Key, Value> pair.
*/
public class Entry implements Comparable<RawComparable> {
/**
* Get the length of the key.
*
* @return the length of the key.
*/
public int getKeyLength() {
return klen;
}
byte[] getKeyBuffer() {
return keyBuffer;
}
/**
* Copy the key and value in one shot into BytesWritables. This is
* equivalent to getKey(key); getValue(value);
*
* @param key
* BytesWritable to hold key.
* @param value
* BytesWritable to hold value
* @throws IOException
*/
public void get(BytesWritable key, BytesWritable value)
throws IOException {
getKey(key);
getValue(value);
}
/**
* Copy the key into BytesWritable. The input BytesWritable will be
* automatically resized to the actual key size.
*
* @param key
* BytesWritable to hold the key.
* @throws IOException
*/
public int getKey(BytesWritable key) throws IOException {
key.setSize(getKeyLength());
getKey(key.getBytes());
return key.getLength();
}
/**
* Copy the value into BytesWritable. The input BytesWritable will be
* automatically resized to the actual value size. The implementation
* directly uses the buffer inside BytesWritable for storing the value.
* The call does not require the value length to be known.
*
* @param value
* @throws IOException
*/
public long getValue(BytesWritable value) throws IOException {
DataInputStream dis = getValueStream();
int size = 0;
try {
int remain;
while ((remain = valueBufferInputStream.getRemain()) > 0) {
value.setSize(size + remain);
dis.readFully(value.getBytes(), size, remain);
size += remain;
}
return value.getLength();
} finally {
dis.close();
}
}
/**
* Writing the key to the output stream. This method avoids copying key
* buffer from Scanner into user buffer, then writing to the output
* stream.
*
* @param out
* The output stream
* @return the length of the key.
* @throws IOException
*/
public int writeKey(OutputStream out) throws IOException {
out.write(keyBuffer, 0, klen);
return klen;
}
/**
* Writing the value to the output stream. This method avoids copying
* value data from Scanner into user buffer, then writing to the output
* stream. It does not require the value length to be known.
*
* @param out
* The output stream
* @return the length of the value
* @throws IOException
*/
public long writeValue(OutputStream out) throws IOException {
DataInputStream dis = getValueStream();
long size = 0;
try {
int chunkSize;
while ((chunkSize = valueBufferInputStream.getRemain()) > 0) {
chunkSize = Math.min(chunkSize, MAX_VAL_TRANSFER_BUF_SIZE);
valTransferBuffer.setSize(chunkSize);
dis.readFully(valTransferBuffer.getBytes(), 0, chunkSize);
out.write(valTransferBuffer.getBytes(), 0, chunkSize);
size += chunkSize;
}
return size;
} finally {
dis.close();
}
}
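        // A hedged sketch, assuming "out" is any OutputStream supplied by the
        // caller: stream a large value straight to the output without
        // materializing it in memory, even when its length is unknown.
        //
        //   Scanner.Entry e = scanner.entry();
        //   e.writeKey(out);
        //   long bytesCopied = e.writeValue(out);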
/**
* Copy the key into user supplied buffer.
*
* @param buf
* The buffer supplied by user. The length of the buffer must
* not be shorter than the key length.
* @return The length of the key.
*
* @throws IOException
*/
public int getKey(byte[] buf) throws IOException {
return getKey(buf, 0);
}
/**
* Copy the key into user supplied buffer.
*
* @param buf
* The buffer supplied by user.
         * @param offset
         *            The starting offset of the user buffer where we should
         *            copy the key into. The key length plus the offset must be
         *            no greater than the buffer length.
* @return The length of the key.
* @throws IOException
*/
public int getKey(byte[] buf, int offset) throws IOException {
if ((offset | (buf.length - offset - klen)) < 0) {
throw new IndexOutOfBoundsException(
"Bufer not enough to store the key");
}
System.arraycopy(keyBuffer, 0, buf, offset, klen);
return klen;
}
/**
         * Streaming access to the key. Useful for deserializing the key into
* user objects.
*
* @return The input stream.
*/
public DataInputStream getKeyStream() {
keyDataInputStream.reset(keyBuffer, klen);
return keyDataInputStream;
}
/**
         * Get the length of the value. isValueLengthKnown() must return true
         * before this is called.
*
* @return the length of the value.
*/
public int getValueLength() {
if (vlen >= 0) {
return vlen;
}
throw new RuntimeException("Value length unknown.");
}
/**
* Copy value into user-supplied buffer. User supplied buffer must be
* large enough to hold the whole value. The value part of the key-value
* pair pointed by the current cursor is not cached and can only be
* examined once. Calling any of the following functions more than once
* without moving the cursor will result in exception:
* {@link #getValue(byte[])}, {@link #getValue(byte[], int)},
* {@link #getValueStream}.
*
* @return the length of the value. Does not require
* isValueLengthKnown() to be true.
* @throws IOException
*
*/
public int getValue(byte[] buf) throws IOException {
return getValue(buf, 0);
}
/**
* Copy value into user-supplied buffer. User supplied buffer must be
* large enough to hold the whole value (starting from the offset). The
* value part of the key-value pair pointed by the current cursor is not
* cached and can only be examined once. Calling any of the following
* functions more than once without moving the cursor will result in
* exception: {@link #getValue(byte[])}, {@link #getValue(byte[], int)},
* {@link #getValueStream}.
*
* @return the length of the value. Does not require
* isValueLengthKnown() to be true.
* @throws IOException
*/
public int getValue(byte[] buf, int offset) throws IOException {
DataInputStream dis = getValueStream();
try {
if (isValueLengthKnown()) {
if ((offset | (buf.length - offset - vlen)) < 0) {
throw new IndexOutOfBoundsException(
"Buffer too small to hold value");
}
dis.readFully(buf, offset, vlen);
return vlen;
}
int nextOffset = offset;
while (nextOffset < buf.length) {
int n = dis.read(buf, nextOffset, buf.length - nextOffset);
if (n < 0) {
break;
}
nextOffset += n;
}
if (dis.read() >= 0) {
// attempt to read one more byte to determine whether we reached
// the
// end or not.
throw new IndexOutOfBoundsException(
"Buffer too small to hold value");
}
return nextOffset - offset;
} finally {
dis.close();
}
}
/**
* Stream access to value. The value part of the key-value pair pointed
* by the current cursor is not cached and can only be examined once.
* Calling any of the following functions more than once without moving
* the cursor will result in exception: {@link #getValue(byte[])},
* {@link #getValue(byte[], int)}, {@link #getValueStream}.
*
* @return The input stream for reading the value.
* @throws IOException
*/
public DataInputStream getValueStream() throws IOException {
if (valueChecked == true) {
throw new IllegalStateException(
"Attempt to examine value multiple times.");
}
valueChecked = true;
return valueDataInputStream;
}
/**
* Check whether it is safe to call getValueLength().
*
         * @return true if value length is known beforehand. Values less than
         *         the chunk size will always have their lengths known
         *         beforehand. Values that are written out as a whole (with
         *         advertised length up-front) will always have their lengths
         *         known when read.
*/
public boolean isValueLengthKnown() {
return (vlen >= 0);
}
/**
* Compare the entry key to another key. Synonymous to compareTo(key, 0,
* key.length).
*
* @param buf
* The key buffer.
* @return comparison result between the entry key with the input key.
*/
public int compareTo(byte[] buf) {
return compareTo(buf, 0, buf.length);
}
/**
         * Compare the entry key to another key. Synonymous to compareTo(new
         * ByteArray(buf, offset, length)).
*
* @param buf
* The key buffer
* @param offset
* offset into the key buffer.
* @param length
* the length of the key.
* @return comparison result between the entry key with the input key.
*/
public int compareTo(byte[] buf, int offset, int length) {
return compareTo(new ByteArray(buf, offset, length));
}
/**
* Compare an entry with a RawComparable object. This is useful when
* Entries are stored in a collection, and we want to compare a user
* supplied key.
*/
@Override
public int compareTo(RawComparable key) {
return reader.compareKeys(keyBuffer, 0, getKeyLength(), key.buffer(),
key.offset(), key.size());
}
/**
         * Compare whether this and other point to the same key value.
*/
@Override
public boolean equals(Object other) {
if (this == other) return true;
if (!(other instanceof Entry)) return false;
return ((Entry) other).compareTo(keyBuffer, 0, getKeyLength()) == 0;
}
@Override
public int hashCode() {
return WritableComparator.hashBytes(keyBuffer, 0, getKeyLength());
}
}
/**
* Advance cursor by n positions within the block.
*
* @param n
* Number of key-value pairs to skip in block.
* @throws IOException
*/
private void inBlockAdvance(long n) throws IOException {
for (long i = 0; i < n; ++i) {
checkKey();
if (!valueBufferInputStream.isClosed()) {
valueBufferInputStream.close();
}
klen = -1;
currentLocation.incRecordIndex();
}
}
/**
* Advance cursor in block until we find a key that is greater than or
* equal to the input key.
*
* @param key
* Key to compare.
* @param greater
* advance until we find a key greater than the input key.
       * @return true if we find an equal key.
* @throws IOException
*/
private boolean inBlockAdvance(RawComparable key, boolean greater)
throws IOException {
int curBid = currentLocation.getBlockIndex();
long entryInBlock = reader.getBlockEntryCount(curBid);
if (curBid == endLocation.getBlockIndex()) {
entryInBlock = endLocation.getRecordIndex();
}
while (currentLocation.getRecordIndex() < entryInBlock) {
int cmp = compareCursorKeyTo(key);
if (cmp > 0) return false;
if (cmp == 0 && !greater) return true;
if (!valueBufferInputStream.isClosed()) {
valueBufferInputStream.close();
}
klen = -1;
currentLocation.incRecordIndex();
}
throw new RuntimeException("Cannot find matching key in block.");
}
}
long getBlockEntryCount(int curBid) {
return tfileIndex.getEntry(curBid).entries();
}
BlockReader getBlockReader(int blockIndex) throws IOException {
return readerBCF.getDataBlock(blockIndex);
}
}
/**
* Data structure representing "TFile.meta" meta block.
*/
static final class TFileMeta {
final static String BLOCK_NAME = "TFile.meta";
final Version version;
private long recordCount;
private final String strComparator;
private final BytesComparator comparator;
// ctor for writes
public TFileMeta(String comparator) {
// set fileVersion to API version when we create it.
version = TFile.API_VERSION;
recordCount = 0;
strComparator = (comparator == null) ? "" : comparator;
this.comparator = makeComparator(strComparator);
}
// ctor for reads
public TFileMeta(DataInput in) throws IOException {
version = new Version(in);
if (!version.compatibleWith(TFile.API_VERSION)) {
throw new RuntimeException("Incompatible TFile fileVersion.");
}
recordCount = Utils.readVLong(in);
strComparator = Utils.readString(in);
comparator = makeComparator(strComparator);
}
@SuppressWarnings("unchecked")
static BytesComparator makeComparator(String comparator) {
if (comparator.length() == 0) {
// unsorted keys
return null;
}
if (comparator.equals(COMPARATOR_MEMCMP)) {
// default comparator
return new BytesComparator(new MemcmpRawComparator());
} else if (comparator.startsWith(COMPARATOR_JCLASS)) {
String compClassName =
comparator.substring(COMPARATOR_JCLASS.length()).trim();
try {
Class compClass = Class.forName(compClassName);
// use its default ctor to create an instance
return new BytesComparator((RawComparator<Object>) compClass
.newInstance());
} catch (Exception e) {
throw new IllegalArgumentException(
"Failed to instantiate comparator: " + comparator + "("
+ e.toString() + ")");
}
} else {
throw new IllegalArgumentException("Unsupported comparator: "
+ comparator);
}
}
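    // A hedged illustration of the accepted comparator names (the class name
    // below is hypothetical): COMPARATOR_MEMCMP selects the built-in byte-wise
    // ordering, a COMPARATOR_JCLASS-prefixed name loads a user-supplied
    // RawComparator by reflection, and an empty string means unsorted keys.
    //
    //   makeComparator(COMPARATOR_MEMCMP);                          // memcmp
    //   makeComparator(COMPARATOR_JCLASS + "org.example.MyRawCmp"); // custom
    //   makeComparator("");                                         // null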
public void write(DataOutput out) throws IOException {
TFile.API_VERSION.write(out);
Utils.writeVLong(out, recordCount);
Utils.writeString(out, strComparator);
}
public long getRecordCount() {
return recordCount;
}
public void incRecordCount() {
++recordCount;
}
public boolean isSorted() {
return !strComparator.isEmpty();
}
public String getComparatorString() {
return strComparator;
}
public BytesComparator getComparator() {
return comparator;
}
public Version getVersion() {
return version;
}
  } // END: class TFileMeta
/**
* Data structure representing "TFile.index" meta block.
*/
static class TFileIndex {
final static String BLOCK_NAME = "TFile.index";
private ByteArray firstKey;
private final ArrayList<TFileIndexEntry> index;
private final ArrayList<Long> recordNumIndex;
private final BytesComparator comparator;
private long sum = 0;
/**
* For reading from file.
*
* @throws IOException
*/
public TFileIndex(int entryCount, DataInput in, BytesComparator comparator)
throws IOException {
index = new ArrayList<TFileIndexEntry>(entryCount);
recordNumIndex = new ArrayList<Long>(entryCount);
int size = Utils.readVInt(in); // size for the first key entry.
if (size > 0) {
byte[] buffer = new byte[size];
in.readFully(buffer);
DataInputStream firstKeyInputStream =
new DataInputStream(new ByteArrayInputStream(buffer, 0, size));
int firstKeyLength = Utils.readVInt(firstKeyInputStream);
firstKey = new ByteArray(new byte[firstKeyLength]);
firstKeyInputStream.readFully(firstKey.buffer());
for (int i = 0; i < entryCount; i++) {
size = Utils.readVInt(in);
if (buffer.length < size) {
buffer = new byte[size];
}
in.readFully(buffer, 0, size);
TFileIndexEntry idx =
new TFileIndexEntry(new DataInputStream(new ByteArrayInputStream(
buffer, 0, size)));
index.add(idx);
sum += idx.entries();
recordNumIndex.add(sum);
}
} else {
if (entryCount != 0) {
throw new RuntimeException("Internal error");
}
}
this.comparator = comparator;
}
/**
* @param key
* input key.
* @return the ID of the first block that contains key >= input key. Or -1
* if no such block exists.
*/
public int lowerBound(RawComparable key) {
if (comparator == null) {
throw new RuntimeException("Cannot search in unsorted TFile");
}
if (firstKey == null) {
return -1; // not found
}
int ret = Utils.lowerBound(index, key, comparator);
if (ret == index.size()) {
return -1;
}
return ret;
}
/**
* @param key
* input key.
* @return the ID of the first block that contains key > input key. Or -1
* if no such block exists.
*/
public int upperBound(RawComparable key) {
if (comparator == null) {
throw new RuntimeException("Cannot search in unsorted TFile");
}
if (firstKey == null) {
return -1; // not found
}
int ret = Utils.upperBound(index, key, comparator);
if (ret == index.size()) {
return -1;
}
return ret;
}
/**
* For writing to file.
*/
public TFileIndex(BytesComparator comparator) {
index = new ArrayList<TFileIndexEntry>();
recordNumIndex = new ArrayList<Long>();
this.comparator = comparator;
}
public RawComparable getFirstKey() {
return firstKey;
}
public Reader.Location getLocationByRecordNum(long recNum) {
int idx = Utils.upperBound(recordNumIndex, recNum);
long lastRecNum = (idx == 0)? 0: recordNumIndex.get(idx-1);
return new Reader.Location(idx, recNum-lastRecNum);
}
public long getRecordNumByLocation(Reader.Location location) {
int blkIndex = location.getBlockIndex();
long lastRecNum = (blkIndex == 0) ? 0: recordNumIndex.get(blkIndex-1);
return lastRecNum + location.getRecordIndex();
}
public void setFirstKey(byte[] key, int offset, int length) {
firstKey = new ByteArray(new byte[length]);
System.arraycopy(key, offset, firstKey.buffer(), 0, length);
}
public RawComparable getLastKey() {
if (index.size() == 0) {
return null;
}
return new ByteArray(index.get(index.size() - 1).buffer());
}
public void addEntry(TFileIndexEntry keyEntry) {
index.add(keyEntry);
sum += keyEntry.entries();
recordNumIndex.add(sum);
}
public TFileIndexEntry getEntry(int bid) {
return index.get(bid);
}
public void write(DataOutput out) throws IOException {
if (firstKey == null) {
Utils.writeVInt(out, 0);
return;
}
DataOutputBuffer dob = new DataOutputBuffer();
Utils.writeVInt(dob, firstKey.size());
dob.write(firstKey.buffer());
Utils.writeVInt(out, dob.size());
out.write(dob.getData(), 0, dob.getLength());
for (TFileIndexEntry entry : index) {
dob.reset();
entry.write(dob);
Utils.writeVInt(out, dob.getLength());
out.write(dob.getData(), 0, dob.getLength());
}
}
}
/**
* TFile Data Index entry. We should try to make the memory footprint of each
* index entry as small as possible.
*/
static final class TFileIndexEntry implements RawComparable {
final byte[] key;
// count of <key, value> entries in the block.
final long kvEntries;
public TFileIndexEntry(DataInput in) throws IOException {
int len = Utils.readVInt(in);
key = new byte[len];
in.readFully(key, 0, len);
kvEntries = Utils.readVLong(in);
}
// default entry, without any padding
public TFileIndexEntry(byte[] newkey, int offset, int len, long entries) {
key = new byte[len];
System.arraycopy(newkey, offset, key, 0, len);
this.kvEntries = entries;
}
@Override
public byte[] buffer() {
return key;
}
@Override
public int offset() {
return 0;
}
@Override
public int size() {
return key.length;
}
long entries() {
return kvEntries;
}
public void write(DataOutput out) throws IOException {
Utils.writeVInt(out, key.length);
out.write(key, 0, key.length);
Utils.writeVLong(out, kvEntries);
}
}
/**
* Dumping the TFile information.
*
* @param args
* A list of TFile paths.
*/
public static void main(String[] args) {
System.out.printf("TFile Dumper (TFile %s, BCFile %s)%n", TFile.API_VERSION
.toString(), BCFile.API_VERSION.toString());
if (args.length == 0) {
System.out
.println("Usage: java ... org.apache.hadoop.io.file.tfile.TFile tfile-path [tfile-path ...]");
System.exit(0);
}
Configuration conf = new Configuration();
for (String file : args) {
System.out.println("===" + file + "===");
try {
TFileDumper.dumpInfo(file, System.out, conf);
} catch (IOException e) {
e.printStackTrace(System.err);
}
}
}
}
| 79,247 | 32.537029 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.fs.FSDataInputStream;
/**
 * BoundedRangeFileInputStream abstracts a contiguous region of a Hadoop
 * FSDataInputStream as a regular input stream. Multiple
 * BoundedRangeFileInputStreams can be created on top of the same
 * FSDataInputStream without interfering with each other.
*/
class BoundedRangeFileInputStream extends InputStream {
private FSDataInputStream in;
private long pos;
private long end;
private long mark;
private final byte[] oneByte = new byte[1];
/**
* Constructor
*
* @param in
* The FSDataInputStream we connect to.
   * @param offset
   *          Beginning offset of the region.
* @param length
* Length of the region.
*
   *          The actual length of the region may be smaller if (offset +
   *          length) goes beyond the end of the FS input stream.
*/
public BoundedRangeFileInputStream(FSDataInputStream in, long offset,
long length) {
if (offset < 0 || length < 0) {
throw new IndexOutOfBoundsException("Invalid offset/length: " + offset
+ "/" + length);
}
this.in = in;
this.pos = offset;
this.end = offset + length;
this.mark = -1;
}
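  // A minimal sketch, assuming "fsdis" is an FSDataInputStream opened by the
  // caller: two independent bounded views over the same underlying stream, one
  // per region, which do not interfere with each other.
  //
  //   InputStream blockA = new BoundedRangeFileInputStream(fsdis, 0, 4096);
  //   InputStream blockB = new BoundedRangeFileInputStream(fsdis, 4096, 8192);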
@Override
public int available() throws IOException {
int avail = in.available();
if (pos + avail > end) {
avail = (int) (end - pos);
}
return avail;
}
@Override
public int read() throws IOException {
int ret = read(oneByte);
if (ret == 1) return oneByte[0] & 0xff;
return -1;
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
}
int n = (int) Math.min(Integer.MAX_VALUE, Math.min(len, (end - pos)));
if (n == 0) return -1;
int ret = 0;
synchronized (in) {
in.seek(pos);
ret = in.read(b, off, n);
}
if (ret < 0) {
end = pos;
return -1;
}
pos += ret;
return ret;
}
@Override
/*
* We may skip beyond the end of the file.
*/
public long skip(long n) throws IOException {
long len = Math.min(n, end - pos);
pos += len;
return len;
}
@Override
public synchronized void mark(int readlimit) {
mark = pos;
}
@Override
public synchronized void reset() throws IOException {
if (mark < 0) throw new IOException("Resetting to invalid mark");
pos = mark;
}
@Override
public boolean markSupported() {
return true;
}
@Override
public void close() {
// Invalidate the state of the stream.
in = null;
pos = end;
mark = -1;
}
}
| 3,672 | 24.866197 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/RawComparable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.util.Collections;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.RawComparator;
/**
* Interface for objects that can be compared through {@link RawComparator}.
* This is useful in places where we need a single object reference to specify a
* range of bytes in a byte array, such as {@link Comparable} or
* {@link Collections#binarySearch(java.util.List, Object, Comparator)}
*
 * The actual comparison among RawComparables requires an external
 * RawComparator, and it is the application's responsibility to ensure that
 * two RawComparables are semantically comparable under the same
 * RawComparator.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface RawComparable {
/**
* Get the underlying byte array.
*
* @return The underlying byte array.
*/
abstract byte[] buffer();
/**
* Get the offset of the first byte in the byte array.
*
* @return The offset of the first byte in the byte array.
*/
abstract int offset();
/**
* Get the size of the byte range in the byte array.
*
* @return The size of the byte range in the byte array.
*/
abstract int size();
}
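// A minimal sketch of an implementation that wraps a whole byte array; the
// class and field names below are illustrative assumptions, not part of the
// library.
//
//   class WholeArray implements RawComparable {
//     private final byte[] data;
//     WholeArray(byte[] data) { this.data = data; }
//     @Override public byte[] buffer() { return data; }
//     @Override public int offset() { return 0; }
//     @Override public int size() { return data.length; }
//   }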
| 2,132 | 33.403226 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.io.retry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,056 | 44.956522 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.Closeable;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An implementer of this interface is capable of providing proxy objects for
* use in IPC communication, and potentially modifying these objects or creating
* entirely new ones in the event of certain types of failures. The
* determination of whether or not to fail over is handled by
* {@link RetryPolicy}.
*/
@InterfaceStability.Evolving
public interface FailoverProxyProvider<T> extends Closeable {
public static final class ProxyInfo<T> {
public final T proxy;
/*
* The information (e.g., the IP address) of the current proxy object. It
* provides information for debugging purposes.
*/
public final String proxyInfo;
public ProxyInfo(T proxy, String proxyInfo) {
this.proxy = proxy;
this.proxyInfo = proxyInfo;
}
}
/**
* Get the proxy object which should be used until the next failover event
* occurs.
*
* @return the proxy object to invoke methods upon
*/
public ProxyInfo<T> getProxy();
/**
* Called whenever the associated {@link RetryPolicy} determines that an error
* warrants failing over.
*
* @param currentProxy
* the proxy object which was being used before this failover event
*/
public void performFailover(T currentProxy);
/**
* Return a reference to the interface this provider's proxy objects actually
* implement. If any of the methods on this interface are annotated as being
* {@link Idempotent} or {@link AtMostOnce}, then this fact will be passed to
* the {@link RetryPolicy#shouldRetry(Exception, int, int, boolean)} method on
* error, for use in determining whether or not failover should be attempted.
*
* @return the interface implemented by the proxy objects returned by
* {@link FailoverProxyProvider#getProxy()}
*/
public Class<T> getInterface();
}
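// A hedged sketch of a trivial provider that never actually fails over; the
// class name and constructor below are illustrative assumptions only.
//
//   class SingleProxyProvider<T> implements FailoverProxyProvider<T> {
//     private final Class<T> iface;
//     private final T proxy;
//     SingleProxyProvider(Class<T> iface, T proxy) {
//       this.iface = iface;
//       this.proxy = proxy;
//     }
//     @Override public ProxyInfo<T> getProxy() {
//       return new ProxyInfo<T>(proxy, "static");
//     }
//     @Override public void performFailover(T currentProxy) { /* no-op */ }
//     @Override public Class<T> getInterface() { return iface; }
//     @Override public void close() throws java.io.IOException { /* nothing to release */ }
//   }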
| 2,767 | 36.405405 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Used to mark certain methods of an interface with at-most-once semantics.
*
 * The server must guarantee that methods are executed at most once, by keeping
 * a retry cache. The previous response must be returned when duplicate
 * requests are received. Because of this guarantee, a client can retry a
 * request on failover and other network failure conditions.
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@InterfaceStability.Evolving
public @interface AtMostOnce {
}
| 1,620 | 37.595238 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Specifies a policy for retrying method failures.
* Implementations of this interface should be immutable.
* </p>
*/
@InterfaceStability.Evolving
public interface RetryPolicy {
/**
* Returned by {@link RetryPolicy#shouldRetry(Exception, int, int, boolean)}.
*/
@InterfaceStability.Evolving
public static class RetryAction {
// A few common retry policies, with no delays.
public static final RetryAction FAIL =
new RetryAction(RetryDecision.FAIL);
public static final RetryAction RETRY =
new RetryAction(RetryDecision.RETRY);
public static final RetryAction FAILOVER_AND_RETRY =
new RetryAction(RetryDecision.FAILOVER_AND_RETRY);
public final RetryDecision action;
public final long delayMillis;
public final String reason;
public RetryAction(RetryDecision action) {
this(action, 0, null);
}
public RetryAction(RetryDecision action, long delayTime) {
this(action, delayTime, null);
}
public RetryAction(RetryDecision action, long delayTime, String reason) {
this.action = action;
this.delayMillis = delayTime;
this.reason = reason;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(action=" + action
+ ", delayMillis=" + delayMillis + ", reason=" + reason + ")";
}
public enum RetryDecision {
FAIL,
RETRY,
FAILOVER_AND_RETRY
}
}
/**
* <p>
* Determines whether the framework should retry a method for the given
* exception, and the number of retries that have been made for that operation
* so far.
* </p>
*
* @param e The exception that caused the method to fail
* @param retries The number of times the method has been retried
* @param failovers The number of times the method has failed over to a
* different backend implementation
* @param isIdempotentOrAtMostOnce <code>true</code> if the method is
* {@link Idempotent} or {@link AtMostOnce} and so can reasonably be
* retried on failover when we don't know if the previous attempt
* reached the server or not
   * @return a {@link RetryAction} telling the framework whether to retry the
   *         method, fail over and retry, or fail with the exception
* @throws Exception The re-thrown exception <code>e</code> indicating that
* the method failed and should not be retried further
*/
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception;
}
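// A minimal sketch of a custom policy, assuming a fixed retry budget and a
// constant delay (both illustrative): retry idempotent or at-most-once calls
// up to maxRetries times, otherwise fail.
//
//   class FixedRetryPolicy implements RetryPolicy {
//     private final int maxRetries;
//     private final long delayMillis;
//     FixedRetryPolicy(int maxRetries, long delayMillis) {
//       this.maxRetries = maxRetries;
//       this.delayMillis = delayMillis;
//     }
//     @Override
//     public RetryAction shouldRetry(Exception e, int retries, int failovers,
//         boolean isIdempotentOrAtMostOnce) throws Exception {
//       if (isIdempotentOrAtMostOnce && retries < maxRetries) {
//         return new RetryAction(RetryAction.RetryDecision.RETRY, delayMillis);
//       }
//       return RetryAction.FAIL;
//     }
//   }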
| 3,598 | 34.99 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RetriableException;
/**
 * A dummy invocation handler extending RetryInvocationHandler. It drops the
 * first N responses. This invocation handler is only used for testing.
*/
@InterfaceAudience.Private
public class LossyRetryInvocationHandler<T> extends RetryInvocationHandler<T> {
private final int numToDrop;
private static final ThreadLocal<Integer> RetryCount =
new ThreadLocal<Integer>();
public LossyRetryInvocationHandler(int numToDrop,
FailoverProxyProvider<T> proxyProvider, RetryPolicy retryPolicy) {
super(proxyProvider, retryPolicy);
this.numToDrop = numToDrop;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
RetryCount.set(0);
return super.invoke(proxy, method, args);
}
@Override
protected Object invokeMethod(Method method, Object[] args) throws Throwable {
Object result = super.invokeMethod(method, args);
int retryCount = RetryCount.get();
if (retryCount < this.numToDrop) {
RetryCount.set(++retryCount);
if (LOG.isDebugEnabled()) {
LOG.debug("Drop the response. Current retryCount == " + retryCount);
}
throw new RetriableException("Fake Exception");
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("retryCount == " + retryCount
+ ". It's time to normally process the response");
}
return result;
}
}
}
| 2,395 | 34.761194 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RemoteException;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.ipc.RetriableException;
public class RetryUtils {
public static final Log LOG = LogFactory.getLog(RetryUtils.class);
/**
* Return the default retry policy set in conf.
*
* If the value retryPolicyEnabledKey is set to false in conf,
* use TRY_ONCE_THEN_FAIL.
*
* Otherwise, get the MultipleLinearRandomRetry policy specified in the conf
* and then
* (1) use multipleLinearRandomRetry for
* - remoteExceptionToRetry, or
* - IOException other than RemoteException, or
* - ServiceException; and
* (2) use TRY_ONCE_THEN_FAIL for
* - non-remoteExceptionToRetry RemoteException, or
* - non-IOException.
*
*
* @param conf
* @param retryPolicyEnabledKey conf property key for enabling retry
* @param defaultRetryPolicyEnabled default retryPolicyEnabledKey conf value
* @param retryPolicySpecKey conf property key for retry policy spec
* @param defaultRetryPolicySpec default retryPolicySpecKey conf value
* @param remoteExceptionToRetry The particular RemoteException to retry
* @return the default retry policy.
*/
public static RetryPolicy getDefaultRetryPolicy(
Configuration conf,
String retryPolicyEnabledKey,
boolean defaultRetryPolicyEnabled,
String retryPolicySpecKey,
String defaultRetryPolicySpec,
final String remoteExceptionToRetry
) {
final RetryPolicy multipleLinearRandomRetry =
getMultipleLinearRandomRetry(
conf,
retryPolicyEnabledKey, defaultRetryPolicyEnabled,
retryPolicySpecKey, defaultRetryPolicySpec
);
if (LOG.isDebugEnabled()) {
LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
}
if (multipleLinearRandomRetry == null) {
//no retry
return RetryPolicies.TRY_ONCE_THEN_FAIL;
} else {
return new RetryPolicy() {
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
if (e instanceof ServiceException) {
//unwrap ServiceException
final Throwable cause = e.getCause();
if (cause != null && cause instanceof Exception) {
e = (Exception)cause;
}
}
//see (1) and (2) in the javadoc of this method.
final RetryPolicy p;
if (e instanceof RetriableException
|| RetryPolicies.getWrappedRetriableException(e) != null) {
// RetriableException or RetriableException wrapped
p = multipleLinearRandomRetry;
} else if (e instanceof RemoteException) {
final RemoteException re = (RemoteException)e;
p = remoteExceptionToRetry.equals(re.getClassName())?
multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
} else if (e instanceof IOException || e instanceof ServiceException) {
p = multipleLinearRandomRetry;
} else { //non-IOException
p = RetryPolicies.TRY_ONCE_THEN_FAIL;
}
if (LOG.isDebugEnabled()) {
LOG.debug("RETRY " + retries + ") policy="
+ p.getClass().getSimpleName() + ", exception=" + e);
}
return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
}
@Override
public String toString() {
return "RetryPolicy[" + multipleLinearRandomRetry + ", "
+ RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName()
+ "]";
}
};
}
}
/**
* Return the MultipleLinearRandomRetry policy specified in the conf,
* or null if the feature is disabled.
* If the policy is specified in the conf but the policy cannot be parsed,
* the default policy is returned.
*
* Retry policy spec:
* N pairs of sleep-time and number-of-retries "s1,n1,s2,n2,..."
*
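   * For example (illustrative keys and values), with the spec
   * "10000,6,60000,10" the returned policy retries up to 6 times sleeping
   * about 10 seconds each, then up to 10 more times sleeping about 60
   * seconds each:
   * <pre>{@code
   * Configuration conf = new Configuration();
   * RetryPolicy policy = RetryUtils.getMultipleLinearRandomRetry(conf,
   *     "my.client.retry.policy.enabled", true,
   *     "my.client.retry.policy.spec", "10000,6,60000,10");
   * }</pre>
   *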
   * @param conf the Configuration to read the retry settings from
* @param retryPolicyEnabledKey conf property key for enabling retry
* @param defaultRetryPolicyEnabled default retryPolicyEnabledKey conf value
* @param retryPolicySpecKey conf property key for retry policy spec
* @param defaultRetryPolicySpec default retryPolicySpecKey conf value
* @return the MultipleLinearRandomRetry policy specified in the conf,
* or null if the feature is disabled.
*/
public static RetryPolicy getMultipleLinearRandomRetry(
Configuration conf,
String retryPolicyEnabledKey,
boolean defaultRetryPolicyEnabled,
String retryPolicySpecKey,
String defaultRetryPolicySpec
) {
final boolean enabled =
conf.getBoolean(retryPolicyEnabledKey, defaultRetryPolicyEnabled);
if (!enabled) {
return null;
}
final String policy = conf.get(retryPolicySpecKey, defaultRetryPolicySpec);
final RetryPolicy r =
RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(
policy);
return (r != null) ?
r :
RetryPolicies.MultipleLinearRandomRetry.parseCommaSeparatedString(
defaultRetryPolicySpec);
}
}
| 6,312 | 36.577381 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/Idempotent.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.lang.annotation.ElementType;
import java.lang.annotation.Inherited;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Used to mark certain methods of an interface as being idempotent, and
 * therefore safe to retry on failover.
*/
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
@InterfaceStability.Evolving
public @interface Idempotent {}
| 1,369 | 37.055556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import java.net.ConnectException;
import java.net.NoRouteToHostException;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
/**
* <p>
* A collection of useful implementations of {@link RetryPolicy}.
* </p>
*/
public class RetryPolicies {
public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
/**
* <p>
* Try once, and fail by re-throwing the exception.
* This corresponds to having no retry mechanism in place.
* </p>
*/
public static final RetryPolicy TRY_ONCE_THEN_FAIL = new TryOnceThenFail();
/**
* <p>
* Keep trying forever.
* </p>
*/
public static final RetryPolicy RETRY_FOREVER = new RetryForever();
/**
* <p>
* Keep trying a limited number of times, waiting a fixed time between attempts,
* and then fail by re-throwing the exception.
* </p>
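   * <p>
   * For example, a policy that retries up to 10 times, sleeping one second
   * between attempts (the values are illustrative):
   * </p>
   * <pre>{@code
   * RetryPolicy policy =
   *     RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1, TimeUnit.SECONDS);
   * }</pre>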
*/
public static final RetryPolicy retryUpToMaximumCountWithFixedSleep(int maxRetries, long sleepTime, TimeUnit timeUnit) {
return new RetryUpToMaximumCountWithFixedSleep(maxRetries, sleepTime, timeUnit);
}
/**
* <p>
* Keep trying for a maximum time, waiting a fixed time between attempts,
* and then fail by re-throwing the exception.
* </p>
*/
public static final RetryPolicy retryUpToMaximumTimeWithFixedSleep(long maxTime, long sleepTime, TimeUnit timeUnit) {
return new RetryUpToMaximumTimeWithFixedSleep(maxTime, sleepTime, timeUnit);
}
/**
* <p>
* Keep trying a limited number of times, waiting a growing amount of time between attempts,
* and then fail by re-throwing the exception.
   * The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
* </p>
*/
public static final RetryPolicy retryUpToMaximumCountWithProportionalSleep(int maxRetries, long sleepTime, TimeUnit timeUnit) {
return new RetryUpToMaximumCountWithProportionalSleep(maxRetries, sleepTime, timeUnit);
}
/**
* <p>
* Keep trying a limited number of times, waiting a growing amount of time between attempts,
* and then fail by re-throwing the exception.
   * The time between attempts is <code>sleepTime</code> multiplied by a random
   * number in the range of [0, 2^(number of retries)).
* </p>
*/
public static final RetryPolicy exponentialBackoffRetry(
int maxRetries, long sleepTime, TimeUnit timeUnit) {
return new ExponentialBackoffRetry(maxRetries, sleepTime, timeUnit);
}
/**
* <p>
* Set a default policy with some explicit handlers for specific exceptions.
* </p>
*/
public static final RetryPolicy retryByException(RetryPolicy defaultPolicy,
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
return new ExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
}
/**
* <p>
   * A retry policy for RemoteException.
   * Set a default policy with some explicit handlers for specific exceptions.
* </p>
*/
public static final RetryPolicy retryByRemoteException(
RetryPolicy defaultPolicy,
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
return new RemoteExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
}
public static final RetryPolicy failoverOnNetworkException(int maxFailovers) {
return failoverOnNetworkException(TRY_ONCE_THEN_FAIL, maxFailovers);
}
public static final RetryPolicy failoverOnNetworkException(
RetryPolicy fallbackPolicy, int maxFailovers) {
return failoverOnNetworkException(fallbackPolicy, maxFailovers, 0, 0);
}
public static final RetryPolicy failoverOnNetworkException(
RetryPolicy fallbackPolicy, int maxFailovers, long delayMillis,
long maxDelayBase) {
return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
delayMillis, maxDelayBase);
}
public static final RetryPolicy failoverOnNetworkException(
RetryPolicy fallbackPolicy, int maxFailovers, int maxRetries,
long delayMillis, long maxDelayBase) {
return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
maxRetries, delayMillis, maxDelayBase);
}
static class TryOnceThenFail implements RetryPolicy {
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception {
return RetryAction.FAIL;
}
}
static class RetryForever implements RetryPolicy {
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception {
return RetryAction.RETRY;
}
}
/**
* Retry up to maxRetries.
* The actual sleep time of the n-th retry is f(n, sleepTime),
* where f is a function provided by the subclass implementation.
*
   * Objects of the subclasses should be immutable;
* otherwise, the subclass must override hashCode(), equals(..) and toString().
*/
static abstract class RetryLimited implements RetryPolicy {
final int maxRetries;
final long sleepTime;
final TimeUnit timeUnit;
private String myString;
RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
if (maxRetries < 0) {
throw new IllegalArgumentException("maxRetries = " + maxRetries+" < 0");
}
if (sleepTime < 0) {
throw new IllegalArgumentException("sleepTime = " + sleepTime + " < 0");
}
this.maxRetries = maxRetries;
this.sleepTime = sleepTime;
this.timeUnit = timeUnit;
}
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception {
if (retries >= maxRetries) {
return RetryAction.FAIL;
}
return new RetryAction(RetryAction.RetryDecision.RETRY,
timeUnit.toMillis(calculateSleepTime(retries)));
}
protected abstract long calculateSleepTime(int retries);
@Override
public int hashCode() {
return toString().hashCode();
}
@Override
public boolean equals(final Object that) {
if (this == that) {
return true;
} else if (that == null || this.getClass() != that.getClass()) {
return false;
}
return this.toString().equals(that.toString());
}
@Override
public String toString() {
if (myString == null) {
myString = getClass().getSimpleName() + "(maxRetries=" + maxRetries
+ ", sleepTime=" + sleepTime + " " + timeUnit + ")";
}
return myString;
}
}
static class RetryUpToMaximumCountWithFixedSleep extends RetryLimited {
public RetryUpToMaximumCountWithFixedSleep(int maxRetries, long sleepTime, TimeUnit timeUnit) {
super(maxRetries, sleepTime, timeUnit);
}
@Override
protected long calculateSleepTime(int retries) {
return sleepTime;
}
}
static class RetryUpToMaximumTimeWithFixedSleep extends RetryUpToMaximumCountWithFixedSleep {
public RetryUpToMaximumTimeWithFixedSleep(long maxTime, long sleepTime, TimeUnit timeUnit) {
super((int) (maxTime / sleepTime), sleepTime, timeUnit);
}
}
static class RetryUpToMaximumCountWithProportionalSleep extends RetryLimited {
public RetryUpToMaximumCountWithProportionalSleep(int maxRetries, long sleepTime, TimeUnit timeUnit) {
super(maxRetries, sleepTime, timeUnit);
}
@Override
protected long calculateSleepTime(int retries) {
return sleepTime * (retries + 1);
}
}
/**
* Given pairs of number of retries and sleep time (n0, t0), (n1, t1), ...,
* the first n0 retries sleep t0 milliseconds on average,
* the following n1 retries sleep t1 milliseconds on average, and so on.
*
   * For each sleep, the actual sleep time is uniformly distributed
   * in the interval [0.5t, 1.5t), where t is the sleep time specified.
*
* The objects of this class are immutable.
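   *
   * A construction sketch (the values are illustrative):
   * <pre>{@code
   * List<Pair> pairs = new ArrayList<Pair>();
   * pairs.add(new Pair(6, 10000));   // first 6 retries sleep about 10s each
   * pairs.add(new Pair(10, 60000));  // next 10 retries sleep about 60s each
   * RetryPolicy policy = new MultipleLinearRandomRetry(pairs);
   * // the equivalent spec string is "10000,6,60000,10"
   * }</pre>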
*/
public static class MultipleLinearRandomRetry implements RetryPolicy {
    /** Pairs of numRetries and sleepMillis */
public static class Pair {
final int numRetries;
final int sleepMillis;
public Pair(final int numRetries, final int sleepMillis) {
if (numRetries < 0) {
throw new IllegalArgumentException("numRetries = " + numRetries+" < 0");
}
if (sleepMillis < 0) {
throw new IllegalArgumentException("sleepMillis = " + sleepMillis + " < 0");
}
this.numRetries = numRetries;
this.sleepMillis = sleepMillis;
}
@Override
public String toString() {
return numRetries + "x" + sleepMillis + "ms";
}
}
private final List<Pair> pairs;
private String myString;
public MultipleLinearRandomRetry(List<Pair> pairs) {
if (pairs == null || pairs.isEmpty()) {
throw new IllegalArgumentException("pairs must be neither null nor empty.");
}
this.pairs = Collections.unmodifiableList(pairs);
}
@Override
public RetryAction shouldRetry(Exception e, int curRetry, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception {
final Pair p = searchPair(curRetry);
if (p == null) {
//no more retries.
return RetryAction.FAIL;
}
//calculate sleep time and return.
      // ratio is uniformly distributed in [0.5, 1.5)
final double ratio = ThreadLocalRandom.current().nextDouble() + 0.5;
final long sleepTime = Math.round(p.sleepMillis * ratio);
return new RetryAction(RetryAction.RetryDecision.RETRY, sleepTime);
}
/**
     * Given the current retry count, search for the corresponding pair.
     * @return the corresponding pair,
     *   or null if the current retry count exceeds the maximum number of retries.
*/
private Pair searchPair(int curRetry) {
int i = 0;
for(; i < pairs.size() && curRetry > pairs.get(i).numRetries; i++) {
curRetry -= pairs.get(i).numRetries;
}
return i == pairs.size()? null: pairs.get(i);
}
@Override
public int hashCode() {
return toString().hashCode();
}
@Override
public boolean equals(final Object that) {
if (this == that) {
return true;
} else if (that == null || this.getClass() != that.getClass()) {
return false;
}
return this.toString().equals(that.toString());
}
@Override
public String toString() {
if (myString == null) {
myString = getClass().getSimpleName() + pairs;
}
return myString;
}
/**
* Parse the given string as a MultipleLinearRandomRetry object.
* The format of the string is "t_1, n_1, t_2, n_2, ...",
     * where t_i and n_i are the i-th pair of sleep time and number of retries.
* Note that the white spaces in the string are ignored.
*
* @return the parsed object, or null if the parsing fails.
*/
public static MultipleLinearRandomRetry parseCommaSeparatedString(String s) {
final String[] elements = s.split(",");
if (elements.length == 0) {
LOG.warn("Illegal value: there is no element in \"" + s + "\".");
return null;
}
if (elements.length % 2 != 0) {
LOG.warn("Illegal value: the number of elements in \"" + s + "\" is "
+ elements.length + " but an even number of elements is expected.");
return null;
}
final List<RetryPolicies.MultipleLinearRandomRetry.Pair> pairs
= new ArrayList<RetryPolicies.MultipleLinearRandomRetry.Pair>();
for(int i = 0; i < elements.length; ) {
//parse the i-th sleep-time
final int sleep = parsePositiveInt(elements, i++, s);
if (sleep == -1) {
return null; //parse fails
}
//parse the i-th number-of-retries
final int retries = parsePositiveInt(elements, i++, s);
if (retries == -1) {
return null; //parse fails
}
pairs.add(new RetryPolicies.MultipleLinearRandomRetry.Pair(retries, sleep));
}
return new RetryPolicies.MultipleLinearRandomRetry(pairs);
}
/**
* Parse the i-th element as an integer.
* @return -1 if the parsing fails or the parsed value <= 0;
* otherwise, return the parsed value.
*/
private static int parsePositiveInt(final String[] elements,
final int i, final String originalString) {
final String s = elements[i].trim();
final int n;
try {
n = Integer.parseInt(s);
} catch(NumberFormatException nfe) {
LOG.warn("Failed to parse \"" + s + "\", which is the index " + i
+ " element in \"" + originalString + "\"", nfe);
return -1;
}
if (n <= 0) {
LOG.warn("The value " + n + " <= 0: it is parsed from the string \""
+ s + "\" which is the index " + i + " element in \""
+ originalString + "\"");
return -1;
}
return n;
}
}
static class ExceptionDependentRetry implements RetryPolicy {
RetryPolicy defaultPolicy;
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap;
public ExceptionDependentRetry(RetryPolicy defaultPolicy,
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
this.defaultPolicy = defaultPolicy;
this.exceptionToPolicyMap = exceptionToPolicyMap;
}
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception {
RetryPolicy policy = exceptionToPolicyMap.get(e.getClass());
if (policy == null) {
policy = defaultPolicy;
}
return policy.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
}
}
static class RemoteExceptionDependentRetry implements RetryPolicy {
RetryPolicy defaultPolicy;
Map<String, RetryPolicy> exceptionNameToPolicyMap;
public RemoteExceptionDependentRetry(RetryPolicy defaultPolicy,
Map<Class<? extends Exception>,
RetryPolicy> exceptionToPolicyMap) {
this.defaultPolicy = defaultPolicy;
this.exceptionNameToPolicyMap = new HashMap<String, RetryPolicy>();
for (Entry<Class<? extends Exception>, RetryPolicy> e :
exceptionToPolicyMap.entrySet()) {
exceptionNameToPolicyMap.put(e.getKey().getName(), e.getValue());
}
}
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isIdempotentOrAtMostOnce) throws Exception {
RetryPolicy policy = null;
if (e instanceof RemoteException) {
policy = exceptionNameToPolicyMap.get(
((RemoteException) e).getClassName());
}
if (policy == null) {
policy = defaultPolicy;
}
return policy.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
}
}
static class ExponentialBackoffRetry extends RetryLimited {
public ExponentialBackoffRetry(
int maxRetries, long sleepTime, TimeUnit timeUnit) {
super(maxRetries, sleepTime, timeUnit);
if (maxRetries < 0) {
throw new IllegalArgumentException("maxRetries = " + maxRetries + " < 0");
} else if (maxRetries >= Long.SIZE - 1) {
//calculateSleepTime may overflow.
throw new IllegalArgumentException("maxRetries = " + maxRetries
+ " >= " + (Long.SIZE - 1));
}
}
@Override
protected long calculateSleepTime(int retries) {
return calculateExponentialTime(sleepTime, retries + 1);
}
}
/**
* Fail over and retry in the case of:
* Remote StandbyException (server is up, but is not the active server)
* Immediate socket exceptions (e.g. no route to host, econnrefused)
* Socket exceptions after initial connection when operation is idempotent
*
* The first failover is immediate, while all subsequent failovers wait an
* exponentially-increasing random amount of time.
*
* Fail immediately in the case of:
* Socket exceptions after initial connection when operation is not idempotent
*
* Fall back on underlying retry policy otherwise.
*/
static class FailoverOnNetworkExceptionRetry implements RetryPolicy {
private RetryPolicy fallbackPolicy;
private int maxFailovers;
private int maxRetries;
private long delayMillis;
private long maxDelayBase;
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
int maxFailovers) {
this(fallbackPolicy, maxFailovers, 0, 0, 0);
}
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
int maxFailovers, long delayMillis, long maxDelayBase) {
this(fallbackPolicy, maxFailovers, 0, delayMillis, maxDelayBase);
}
public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
int maxFailovers, int maxRetries, long delayMillis, long maxDelayBase) {
this.fallbackPolicy = fallbackPolicy;
this.maxFailovers = maxFailovers;
this.maxRetries = maxRetries;
this.delayMillis = delayMillis;
this.maxDelayBase = maxDelayBase;
}
/**
     * @return 0 if this is the first failover/retry (i.e., retry immediately);
     *         otherwise, an exponentially growing sleep time
*/
private long getFailoverOrRetrySleepTime(int times) {
return times == 0 ? 0 :
calculateExponentialTime(delayMillis, times, maxDelayBase);
}
@Override
public RetryAction shouldRetry(Exception e, int retries,
int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
if (failovers >= maxFailovers) {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
"failovers (" + failovers + ") exceeded maximum allowed ("
+ maxFailovers + ")");
}
if (retries - failovers > maxRetries) {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
+ retries + ") exceeded maximum allowed (" + maxRetries + ")");
}
if (e instanceof ConnectException ||
e instanceof NoRouteToHostException ||
e instanceof UnknownHostException ||
e instanceof StandbyException ||
e instanceof ConnectTimeoutException ||
isWrappedStandbyException(e)) {
return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
getFailoverOrRetrySleepTime(failovers));
} else if (e instanceof RetriableException
|| getWrappedRetriableException(e) != null) {
// RetriableException or RetriableException wrapped
return new RetryAction(RetryAction.RetryDecision.RETRY,
getFailoverOrRetrySleepTime(retries));
} else if (e instanceof InvalidToken) {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
"Invalid or Cancelled Token");
} else if (e instanceof SocketException
|| (e instanceof IOException && !(e instanceof RemoteException))) {
if (isIdempotentOrAtMostOnce) {
return RetryAction.FAILOVER_AND_RETRY;
} else {
return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
"the invoked method is not idempotent, and unable to determine "
+ "whether it was invoked");
}
} else {
return fallbackPolicy.shouldRetry(e, retries, failovers,
isIdempotentOrAtMostOnce);
}
}
}
/**
   * Return a value equal to <code>time</code> increased exponentially as a
   * function of <code>retries</code>, plus or minus 0%-50% of that value,
   * chosen randomly.
*
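   * For example (illustrative numbers): with time = 1000ms, retries = 3 and a
   * large cap, the base is 1000 * 2^3 = 8000ms, so the returned value is
   * uniformly distributed in [4000ms, 12000ms).
   *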
* @param time the base amount of time to work with
   * @param retries the number of retries that have occurred so far
* @param cap value at which to cap the base sleep time
* @return an amount of time to sleep
*/
private static long calculateExponentialTime(long time, int retries,
long cap) {
long baseTime = Math.min(time * (1L << retries), cap);
return (long) (baseTime * (ThreadLocalRandom.current().nextDouble() + 0.5));
}
private static long calculateExponentialTime(long time, int retries) {
return calculateExponentialTime(time, retries, Long.MAX_VALUE);
}
private static boolean isWrappedStandbyException(Exception e) {
if (!(e instanceof RemoteException)) {
return false;
}
Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
StandbyException.class);
return unwrapped instanceof StandbyException;
}
static RetriableException getWrappedRetriableException(Exception e) {
if (!(e instanceof RemoteException)) {
return null;
}
Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
RetriableException.class);
return unwrapped instanceof RetriableException ?
(RetriableException) unwrapped : null;
}
}
| 22,599 | 34.478807 | 129 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.lang.reflect.Proxy;
import java.util.Map;
/**
* <p>
* A factory for creating retry proxies.
* </p>
*/
public class RetryProxy {
/**
* <p>
* Create a proxy for an interface of an implementation class
* using the same retry policy for each method in the interface.
* </p>
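   * <p>
   * A usage sketch ({@code MyProtocol} and {@code myImpl} are illustrative
   * placeholders, not part of this API):
   * </p>
   * <pre>{@code
   * MyProtocol retrying = (MyProtocol) RetryProxy.create(
   *     MyProtocol.class, myImpl, RetryPolicies.RETRY_FOREVER);
   * }</pre>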
* @param iface the interface that the retry will implement
* @param implementation the instance whose methods should be retried
* @param retryPolicy the policy for retrying method call failures
* @return the retry proxy
*/
public static <T> Object create(Class<T> iface, T implementation,
RetryPolicy retryPolicy) {
return RetryProxy.create(iface,
new DefaultFailoverProxyProvider<T>(iface, implementation),
retryPolicy);
}
/**
* Create a proxy for an interface of implementations of that interface using
* the given {@link FailoverProxyProvider} and the same retry policy for each
* method in the interface.
*
* @param iface the interface that the retry will implement
* @param proxyProvider provides implementation instances whose methods should be retried
* @param retryPolicy the policy for retrying or failing over method call failures
* @return the retry proxy
*/
public static <T> Object create(Class<T> iface,
FailoverProxyProvider<T> proxyProvider, RetryPolicy retryPolicy) {
return Proxy.newProxyInstance(
proxyProvider.getInterface().getClassLoader(),
new Class<?>[] { iface },
new RetryInvocationHandler<T>(proxyProvider, retryPolicy)
);
}
/**
* Create a proxy for an interface of an implementation class
   * using a set of retry policies specified by method name.
* If no retry policy is defined for a method then a default of
* {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
*
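   * A usage sketch ({@code MyProtocol}, {@code myImpl} and the method name
   * "rename" are illustrative placeholders):
   * <pre>{@code
   * Map<String, RetryPolicy> policies = new HashMap<String, RetryPolicy>();
   * policies.put("rename", RetryPolicies.RETRY_FOREVER);
   * MyProtocol retrying = (MyProtocol) RetryProxy.create(
   *     MyProtocol.class, myImpl, policies);
   * }</pre>
   *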
* @param iface the interface that the retry will implement
* @param implementation the instance whose methods should be retried
* @param methodNameToPolicyMap a map of method names to retry policies
* @return the retry proxy
*/
public static <T> Object create(Class<T> iface, T implementation,
Map<String,RetryPolicy> methodNameToPolicyMap) {
return create(iface,
new DefaultFailoverProxyProvider<T>(iface, implementation),
methodNameToPolicyMap,
RetryPolicies.TRY_ONCE_THEN_FAIL);
}
/**
* Create a proxy for an interface of implementations of that interface using
   * the given {@link FailoverProxyProvider} and a set of retry policies
   * specified by method name. If no retry policy is defined for a method
   * then the given <code>defaultPolicy</code> is used.
   *
   * @param iface the interface that the retry will implement
   * @param proxyProvider provides implementation instances whose methods should be retried
   * @param methodNameToPolicyMap a map of method names to retry policies
   * @param defaultPolicy the retry policy to use for methods not found in the map
* @return the retry proxy
*/
public static <T> Object create(Class<T> iface,
FailoverProxyProvider<T> proxyProvider,
Map<String,RetryPolicy> methodNameToPolicyMap,
RetryPolicy defaultPolicy) {
return Proxy.newProxyInstance(
proxyProvider.getInterface().getClassLoader(),
new Class<?>[] { iface },
new RetryInvocationHandler<T>(proxyProvider, defaultPolicy,
methodNameToPolicyMap)
);
}
}
| 4,276 | 38.971963 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcConstants;
import org.apache.hadoop.ipc.RpcInvocationHandler;
import com.google.common.annotations.VisibleForTesting;
/**
* This class implements RpcInvocationHandler and supports retry on the client
* side.
*/
@InterfaceAudience.Private
public class RetryInvocationHandler<T> implements RpcInvocationHandler {
public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
private final FailoverProxyProvider<T> proxyProvider;
/**
* The number of times the associated proxyProvider has ever been failed over.
*/
private long proxyProviderFailoverCount = 0;
private volatile boolean hasMadeASuccessfulCall = false;
private final RetryPolicy defaultPolicy;
private final Map<String,RetryPolicy> methodNameToPolicyMap;
private ProxyInfo<T> currentProxy;
protected RetryInvocationHandler(FailoverProxyProvider<T> proxyProvider,
RetryPolicy retryPolicy) {
this(proxyProvider, retryPolicy, Collections.<String, RetryPolicy>emptyMap());
}
protected RetryInvocationHandler(FailoverProxyProvider<T> proxyProvider,
RetryPolicy defaultPolicy,
Map<String, RetryPolicy> methodNameToPolicyMap) {
this.proxyProvider = proxyProvider;
this.defaultPolicy = defaultPolicy;
this.methodNameToPolicyMap = methodNameToPolicyMap;
this.currentProxy = proxyProvider.getProxy();
}
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
if (policy == null) {
policy = defaultPolicy;
}
// The number of times this method invocation has been failed over.
int invocationFailoverCount = 0;
final boolean isRpc = isRpcInvocation(currentProxy.proxy);
final int callId = isRpc? Client.nextCallId(): RpcConstants.INVALID_CALL_ID;
int retries = 0;
while (true) {
// The number of times this invocation handler has ever been failed over,
// before this method invocation attempt. Used to prevent concurrent
// failed method invocations from triggering multiple failover attempts.
long invocationAttemptFailoverCount;
synchronized (proxyProvider) {
invocationAttemptFailoverCount = proxyProviderFailoverCount;
}
if (isRpc) {
Client.setCallIdAndRetryCount(callId, retries);
}
try {
Object ret = invokeMethod(method, args);
hasMadeASuccessfulCall = true;
return ret;
} catch (Exception ex) {
boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
.getMethod(method.getName(), method.getParameterTypes())
.isAnnotationPresent(Idempotent.class);
if (!isIdempotentOrAtMostOnce) {
isIdempotentOrAtMostOnce = proxyProvider.getInterface()
.getMethod(method.getName(), method.getParameterTypes())
.isAnnotationPresent(AtMostOnce.class);
}
List<RetryAction> actions = extractActions(policy, ex, retries++,
invocationFailoverCount, isIdempotentOrAtMostOnce);
RetryAction failAction = getFailAction(actions);
if (failAction != null) {
if (failAction.reason != null) {
LOG.warn("Exception while invoking " + currentProxy.proxy.getClass()
+ "." + method.getName() + " over " + currentProxy.proxyInfo
+ ". Not retrying because " + failAction.reason, ex);
}
throw ex;
} else { // retry or failover
// avoid logging the failover if this is the first call on this
// proxy object, and we successfully achieve the failover without
// any flip-flopping
boolean worthLogging =
!(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
worthLogging |= LOG.isDebugEnabled();
RetryAction failOverAction = getFailOverAction(actions);
long delay = getDelayMillis(actions);
if (failOverAction != null && worthLogging) {
String msg = "Exception while invoking " + method.getName()
+ " of class " + currentProxy.proxy.getClass().getSimpleName()
+ " over " + currentProxy.proxyInfo;
if (invocationFailoverCount > 0) {
msg += " after " + invocationFailoverCount + " fail over attempts";
}
msg += ". Trying to fail over " + formatSleepMessage(delay);
LOG.info(msg, ex);
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Exception while invoking " + method.getName()
+ " of class " + currentProxy.proxy.getClass().getSimpleName()
+ " over " + currentProxy.proxyInfo + ". Retrying "
+ formatSleepMessage(delay), ex);
}
}
if (delay > 0) {
Thread.sleep(delay);
}
if (failOverAction != null) {
// Make sure that concurrent failed method invocations only cause a
// single actual fail over.
synchronized (proxyProvider) {
if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
proxyProvider.performFailover(currentProxy.proxy);
proxyProviderFailoverCount++;
} else {
LOG.warn("A failover has occurred since the start of this method"
+ " invocation attempt.");
}
currentProxy = proxyProvider.getProxy();
}
invocationFailoverCount++;
}
}
}
}
}
/**
   * Obtain a retry delay from a list of RetryActions.
*/
private long getDelayMillis(List<RetryAction> actions) {
long retVal = 0;
for (RetryAction action : actions) {
if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY ||
action.action == RetryAction.RetryDecision.RETRY) {
if (action.delayMillis > retVal) {
retVal = action.delayMillis;
}
}
}
return retVal;
}
/**
* Return the first FAILOVER_AND_RETRY action.
*/
private RetryAction getFailOverAction(List<RetryAction> actions) {
for (RetryAction action : actions) {
if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
return action;
}
}
return null;
}
/**
   * Return the last FAIL action, but only if there are no RETRY actions.
*/
private RetryAction getFailAction(List<RetryAction> actions) {
RetryAction fAction = null;
for (RetryAction action : actions) {
if (action.action == RetryAction.RetryDecision.FAIL) {
fAction = action;
} else {
        // At least one RETRY action
return null;
}
}
return fAction;
}
private List<RetryAction> extractActions(RetryPolicy policy, Exception ex,
int i, int invocationFailoverCount,
boolean isIdempotentOrAtMostOnce)
throws Exception {
List<RetryAction> actions = new LinkedList<>();
if (ex instanceof MultiException) {
for (Exception th : ((MultiException) ex).getExceptions().values()) {
actions.add(policy.shouldRetry(th, i, invocationFailoverCount,
isIdempotentOrAtMostOnce));
}
} else {
actions.add(policy.shouldRetry(ex, i,
invocationFailoverCount, isIdempotentOrAtMostOnce));
}
return actions;
}
private static String formatSleepMessage(long millis) {
if (millis > 0) {
return "after sleeping for " + millis + "ms.";
} else {
return "immediately.";
}
}
protected Object invokeMethod(Method method, Object[] args) throws Throwable {
try {
if (!method.isAccessible()) {
method.setAccessible(true);
}
return method.invoke(currentProxy.proxy, args);
} catch (InvocationTargetException e) {
throw e.getCause();
}
}
@VisibleForTesting
static boolean isRpcInvocation(Object proxy) {
if (proxy instanceof ProtocolTranslator) {
proxy = ((ProtocolTranslator) proxy).getUnderlyingProxyObject();
}
if (!Proxy.isProxyClass(proxy.getClass())) {
return false;
}
final InvocationHandler ih = Proxy.getInvocationHandler(proxy);
return ih instanceof RpcInvocationHandler;
}
@Override
public void close() throws IOException {
proxyProvider.close();
}
@Override //RpcInvocationHandler
public ConnectionId getConnectionId() {
return RPC.getConnectionIdForProxy(currentProxy.proxy);
}
}
| 10,285 | 35.604982 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/DefaultFailoverProxyProvider.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ipc.RPC;
/**
* An implementation of {@link FailoverProxyProvider} which does nothing in the
* event of failover, and always returns the same proxy object.
*/
@InterfaceStability.Evolving
public class DefaultFailoverProxyProvider<T> implements FailoverProxyProvider<T> {
private T proxy;
private Class<T> iface;
public DefaultFailoverProxyProvider(Class<T> iface, T proxy) {
this.proxy = proxy;
this.iface = iface;
}
@Override
public Class<T> getInterface() {
return iface;
}
@Override
public ProxyInfo<T> getProxy() {
return new ProxyInfo<T>(proxy, null);
}
@Override
public void performFailover(T currentProxy) {
// Nothing to do.
}
@Override
public void close() throws IOException {
RPC.stopProxy(proxy);
}
}
| 1,740 | 27.540984 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/MultiException.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.io.retry;
import java.io.IOException;
import java.util.Map;
/**
* Holder class that clients can use to return multiple exceptions.
*/
public class MultiException extends IOException {
private final Map<String, Exception> exes;
public MultiException(Map<String, Exception> exes) {
this.exes = exes;
}
public Map<String, Exception> getExceptions() {
return exes;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("{");
for (Exception e : exes.values()) {
sb.append(e.toString()).append(", ");
}
sb.append("}");
return "MultiException[" + sb.toString() + "]";
}
}
| 1,482 | 28.66 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.nativeio;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,021 | 41.583333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import sun.misc.Unsafe;
import com.google.common.annotations.VisibleForTesting;
/**
* JNI wrappers for various native IO-related calls not available in Java.
* These functions should generally be used alongside a fallback to another
* more portable mechanism.
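 *
 * A minimal guard-and-fallback sketch (the fallback shown is illustrative):
 * <pre>{@code
 * if (NativeIO.isAvailable()) {
 *   // use a native call, e.g. NativeIO.POSIX.getFstat(fd)
 * } else {
 *   // fall back to a more portable mechanism, e.g. java.nio or shell utilities
 * }
 * }</pre>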
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NativeIO {
public static class POSIX {
// Flags for open() call from bits/fcntl.h - Set by JNI
public static int O_RDONLY = -1;
public static int O_WRONLY = -1;
public static int O_RDWR = -1;
public static int O_CREAT = -1;
public static int O_EXCL = -1;
public static int O_NOCTTY = -1;
public static int O_TRUNC = -1;
public static int O_APPEND = -1;
public static int O_NONBLOCK = -1;
public static int O_SYNC = -1;
// Flags for posix_fadvise() from bits/fcntl.h - Set by JNI
/* No further special treatment. */
public static int POSIX_FADV_NORMAL = -1;
/* Expect random page references. */
public static int POSIX_FADV_RANDOM = -1;
/* Expect sequential page references. */
public static int POSIX_FADV_SEQUENTIAL = -1;
/* Will need these pages. */
public static int POSIX_FADV_WILLNEED = -1;
/* Don't need these pages. */
public static int POSIX_FADV_DONTNEED = -1;
/* Data will be accessed once. */
public static int POSIX_FADV_NOREUSE = -1;
// Updated by JNI when supported by glibc. Leave defaults in case kernel
// supports sync_file_range, but glibc does not.
/* Wait upon writeout of all pages
in the range before performing the
write. */
public static int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
/* Initiate writeout of all those
dirty pages in the range which are
not presently under writeback. */
public static int SYNC_FILE_RANGE_WRITE = 2;
/* Wait upon writeout of all pages in
the range after performing the
write. */
public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
private static final Log LOG = LogFactory.getLog(NativeIO.class);
// Set to true via JNI if possible
public static boolean fadvisePossible = false;
private static boolean nativeLoaded = false;
private static boolean syncFileRangePossible = true;
static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY =
"hadoop.workaround.non.threadsafe.getpwuid";
static final boolean WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT = true;
private static long cacheTimeout = -1;
private static CacheManipulator cacheManipulator = new CacheManipulator();
public static CacheManipulator getCacheManipulator() {
return cacheManipulator;
}
public static void setCacheManipulator(CacheManipulator cacheManipulator) {
POSIX.cacheManipulator = cacheManipulator;
}
/**
* Used to manipulate the operating system cache.
*/
@VisibleForTesting
public static class CacheManipulator {
public void mlock(String identifier, ByteBuffer buffer,
long len) throws IOException {
POSIX.mlock(buffer, len);
}
public long getMemlockLimit() {
return NativeIO.getMemlockLimit();
}
public long getOperatingSystemPageSize() {
return NativeIO.getOperatingSystemPageSize();
}
public void posixFadviseIfPossible(String identifier,
FileDescriptor fd, long offset, long len, int flags)
throws NativeIOException {
NativeIO.POSIX.posixFadviseIfPossible(identifier, fd, offset,
len, flags);
}
public boolean verifyCanMlock() {
return NativeIO.isAvailable();
}
}
/**
* A CacheManipulator used for testing which does not actually call mlock.
* This allows many tests to be run even when the operating system does not
* allow mlock, or only allows limited mlocking.
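     *
     * A test-setup sketch (where this is installed is up to the test):
     * <pre>{@code
     * NativeIO.POSIX.setCacheManipulator(
     *     new NativeIO.POSIX.NoMlockCacheManipulator());
     * }</pre>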
*/
@VisibleForTesting
public static class NoMlockCacheManipulator extends CacheManipulator {
public void mlock(String identifier, ByteBuffer buffer,
long len) throws IOException {
LOG.info("mlocking " + identifier);
}
public long getMemlockLimit() {
return 1125899906842624L;
}
public long getOperatingSystemPageSize() {
return 4096;
}
public boolean verifyCanMlock() {
return true;
}
}
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
Configuration conf = new Configuration();
workaroundNonThreadSafePasswdCalls = conf.getBoolean(
WORKAROUND_NON_THREADSAFE_CALLS_KEY,
WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT);
initNative();
nativeLoaded = true;
cacheTimeout = conf.getLong(
CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY,
CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT) *
1000;
LOG.debug("Initialized cache for IDs to User/Group mapping with a " +
" cache timeout of " + cacheTimeout/1000 + " seconds.");
} catch (Throwable t) {
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}
/**
* Return true if the JNI-based native IO extensions are available.
*/
public static boolean isAvailable() {
return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded;
}
private static void assertCodeLoaded() throws IOException {
if (!isAvailable()) {
throw new IOException("NativeIO was not loaded");
}
}
/** Wrapper around open(2) */
public static native FileDescriptor open(String path, int flags, int mode) throws IOException;
/** Wrapper around fstat(2) */
private static native Stat fstat(FileDescriptor fd) throws IOException;
/** Native chmod implementation. On UNIX, it is a wrapper around chmod(2) */
private static native void chmodImpl(String path, int mode) throws IOException;
public static void chmod(String path, int mode) throws IOException {
if (!Shell.WINDOWS) {
chmodImpl(path, mode);
} else {
try {
chmodImpl(path, mode);
} catch (NativeIOException nioe) {
if (nioe.getErrorCode() == 3) {
throw new NativeIOException("No such file or directory",
Errno.ENOENT);
} else {
LOG.warn(String.format("NativeIO.chmod error (%d): %s",
nioe.getErrorCode(), nioe.getMessage()));
throw new NativeIOException("Unknown error", Errno.UNKNOWN);
}
}
}
}
/** Wrapper around posix_fadvise(2) */
static native void posix_fadvise(
FileDescriptor fd, long offset, long len, int flags) throws NativeIOException;
/** Wrapper around sync_file_range(2) */
static native void sync_file_range(
FileDescriptor fd, long offset, long nbytes, int flags) throws NativeIOException;
/**
* Call posix_fadvise on the given file descriptor. See the manpage
* for this syscall for more information. On systems where this
* call is not available, does nothing.
*
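     * A sketch of a typical call through the cache manipulator ({@code name},
     * {@code fd} and {@code len} are placeholders):
     * <pre>{@code
     * NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
     *     name, fd, 0, len, NativeIO.POSIX.POSIX_FADV_DONTNEED);
     * }</pre>
     *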
* @throws NativeIOException if there is an error with the syscall
*/
static void posixFadviseIfPossible(String identifier,
FileDescriptor fd, long offset, long len, int flags)
throws NativeIOException {
if (nativeLoaded && fadvisePossible) {
try {
posix_fadvise(fd, offset, len, flags);
} catch (UnsatisfiedLinkError ule) {
fadvisePossible = false;
}
}
}
/**
* Call sync_file_range on the given file descriptor. See the manpage
* for this syscall for more information. On systems where this
* call is not available, does nothing.
*
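     * For example ({@code fd}, {@code offset} and {@code nbytes} are
     * placeholders):
     * <pre>{@code
     * NativeIO.POSIX.syncFileRangeIfPossible(fd, offset, nbytes,
     *     NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
     * }</pre>
     *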
* @throws NativeIOException if there is an error with the syscall
*/
public static void syncFileRangeIfPossible(
FileDescriptor fd, long offset, long nbytes, int flags)
throws NativeIOException {
if (nativeLoaded && syncFileRangePossible) {
try {
sync_file_range(fd, offset, nbytes, flags);
} catch (UnsupportedOperationException uoe) {
syncFileRangePossible = false;
} catch (UnsatisfiedLinkError ule) {
syncFileRangePossible = false;
}
}
}
static native void mlock_native(
ByteBuffer buffer, long len) throws NativeIOException;
/**
* Locks the provided direct ByteBuffer into memory, preventing it from
* swapping out. After a buffer is locked, future accesses will not incur
* a page fault.
*
* See the mlock(2) man page for more information.
*
* @throws NativeIOException
*/
static void mlock(ByteBuffer buffer, long len)
throws IOException {
assertCodeLoaded();
if (!buffer.isDirect()) {
throw new IOException("Cannot mlock a non-direct ByteBuffer");
}
mlock_native(buffer, len);
}
/**
* Unmaps the block from memory. See munmap(2).
*
* There isn't any portable way to unmap a memory region in Java.
* So we use the sun.nio method here.
* Note that unmapping a memory region could cause crashes if code
     * continues to reference the unmapped region. However, if we don't
* manually unmap the memory, we are dependent on the finalizer to
* do it, and we have no idea when the finalizer will run.
*
* @param buffer The buffer to unmap.
*/
public static void munmap(MappedByteBuffer buffer) {
if (buffer instanceof sun.nio.ch.DirectBuffer) {
sun.misc.Cleaner cleaner =
((sun.nio.ch.DirectBuffer)buffer).cleaner();
cleaner.clean();
}
}
/** Linux only methods used for getOwner() implementation */
private static native long getUIDforFDOwnerforOwner(FileDescriptor fd) throws IOException;
private static native String getUserName(long uid) throws IOException;
/**
* Result type of the fstat call
*/
public static class Stat {
private int ownerId, groupId;
private String owner, group;
private int mode;
// Mode constants - Set by JNI
public static int S_IFMT = -1; /* type of file */
public static int S_IFIFO = -1; /* named pipe (fifo) */
public static int S_IFCHR = -1; /* character special */
public static int S_IFDIR = -1; /* directory */
public static int S_IFBLK = -1; /* block special */
public static int S_IFREG = -1; /* regular */
public static int S_IFLNK = -1; /* symbolic link */
public static int S_IFSOCK = -1; /* socket */
public static int S_ISUID = -1; /* set user id on execution */
public static int S_ISGID = -1; /* set group id on execution */
public static int S_ISVTX = -1; /* save swapped text even after use */
public static int S_IRUSR = -1; /* read permission, owner */
public static int S_IWUSR = -1; /* write permission, owner */
public static int S_IXUSR = -1; /* execute/search permission, owner */
Stat(int ownerId, int groupId, int mode) {
this.ownerId = ownerId;
this.groupId = groupId;
this.mode = mode;
}
Stat(String owner, String group, int mode) {
if (!Shell.WINDOWS) {
this.owner = owner;
} else {
this.owner = stripDomain(owner);
}
if (!Shell.WINDOWS) {
this.group = group;
} else {
this.group = stripDomain(group);
}
this.mode = mode;
}
@Override
public String toString() {
return "Stat(owner='" + owner + "', group='" + group + "'" +
", mode=" + mode + ")";
}
public String getOwner() {
return owner;
}
public String getGroup() {
return group;
}
public int getMode() {
return mode;
}
}
/**
* Returns the file stat for a file descriptor.
*
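     * A usage sketch ({@code file} is a placeholder {@link java.io.File}):
     * <pre>{@code
     * FileInputStream in = new FileInputStream(file);
     * Stat stat = NativeIO.POSIX.getFstat(in.getFD());
     * String owner = stat.getOwner();
     * }</pre>
     *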
* @param fd file descriptor.
* @return the file descriptor file stat.
* @throws IOException thrown if there was an IO error while obtaining the file stat.
*/
public static Stat getFstat(FileDescriptor fd) throws IOException {
Stat stat = null;
if (!Shell.WINDOWS) {
stat = fstat(fd);
stat.owner = getName(IdCache.USER, stat.ownerId);
stat.group = getName(IdCache.GROUP, stat.groupId);
} else {
try {
stat = fstat(fd);
} catch (NativeIOException nioe) {
if (nioe.getErrorCode() == 6) {
throw new NativeIOException("The handle is invalid.",
Errno.EBADF);
} else {
LOG.warn(String.format("NativeIO.getFstat error (%d): %s",
nioe.getErrorCode(), nioe.getMessage()));
throw new NativeIOException("Unknown error", Errno.UNKNOWN);
}
}
}
return stat;
}
private static String getName(IdCache domain, int id) throws IOException {
Map<Integer, CachedName> idNameCache = (domain == IdCache.USER)
? USER_ID_NAME_CACHE : GROUP_ID_NAME_CACHE;
String name;
CachedName cachedName = idNameCache.get(id);
long now = System.currentTimeMillis();
if (cachedName != null && (cachedName.timestamp + cacheTimeout) > now) {
name = cachedName.name;
} else {
name = (domain == IdCache.USER) ? getUserName(id) : getGroupName(id);
if (LOG.isDebugEnabled()) {
String type = (domain == IdCache.USER) ? "UserName" : "GroupName";
LOG.debug("Got " + type + " " + name + " for ID " + id +
" from the native implementation");
}
cachedName = new CachedName(name, now);
idNameCache.put(id, cachedName);
}
return name;
}
static native String getUserName(int uid) throws IOException;
static native String getGroupName(int uid) throws IOException;
private static class CachedName {
final long timestamp;
final String name;
public CachedName(String name, long timestamp) {
this.name = name;
this.timestamp = timestamp;
}
}
private static final Map<Integer, CachedName> USER_ID_NAME_CACHE =
new ConcurrentHashMap<Integer, CachedName>();
private static final Map<Integer, CachedName> GROUP_ID_NAME_CACHE =
new ConcurrentHashMap<Integer, CachedName>();
private enum IdCache { USER, GROUP }
public final static int MMAP_PROT_READ = 0x1;
public final static int MMAP_PROT_WRITE = 0x2;
public final static int MMAP_PROT_EXEC = 0x4;
public static native long mmap(FileDescriptor fd, int prot,
boolean shared, long length) throws IOException;
public static native void munmap(long addr, long length)
throws IOException;
}
private static boolean workaroundNonThreadSafePasswdCalls = false;
public static class Windows {
// Flags for CreateFile() call on Windows
public static final long GENERIC_READ = 0x80000000L;
public static final long GENERIC_WRITE = 0x40000000L;
public static final long FILE_SHARE_READ = 0x00000001L;
public static final long FILE_SHARE_WRITE = 0x00000002L;
public static final long FILE_SHARE_DELETE = 0x00000004L;
public static final long CREATE_NEW = 1;
public static final long CREATE_ALWAYS = 2;
public static final long OPEN_EXISTING = 3;
public static final long OPEN_ALWAYS = 4;
public static final long TRUNCATE_EXISTING = 5;
public static final long FILE_BEGIN = 0;
public static final long FILE_CURRENT = 1;
public static final long FILE_END = 2;
public static final long FILE_ATTRIBUTE_NORMAL = 0x00000080L;
/**
* Create a directory with permissions set to the specified mode. By setting
* permissions at creation time, we avoid issues related to the user lacking
* WRITE_DAC rights on subsequent chmod calls. One example where this can
* occur is writing to an SMB share where the user does not have Full Control
* rights, and therefore WRITE_DAC is denied.
*
* @param path directory to create
* @param mode permissions of new directory
* @throws IOException if there is an I/O error
*/
public static void createDirectoryWithMode(File path, int mode)
throws IOException {
createDirectoryWithMode0(path.getAbsolutePath(), mode);
}
/** Wrapper around CreateDirectory() on Windows */
private static native void createDirectoryWithMode0(String path, int mode)
throws NativeIOException;
/** Wrapper around CreateFile() on Windows */
public static native FileDescriptor createFile(String path,
long desiredAccess, long shareMode, long creationDisposition)
throws IOException;
/**
* Create a file for write with permissions set to the specified mode. By
* setting permissions at creation time, we avoid issues related to the user
* lacking WRITE_DAC rights on subsequent chmod calls. One example where
* this can occur is writing to an SMB share where the user does not have
* Full Control rights, and therefore WRITE_DAC is denied.
*
* This method mimics the semantics implemented by the JDK in
* {@link java.io.FileOutputStream}. The file is opened for truncate or
* append, the sharing mode allows other readers and writers, and paths
* longer than MAX_PATH are supported. (See io_util_md.c in the JDK.)
*
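     * For example ({@code file} is a placeholder and 0644 an illustrative
     * permission mode):
     * <pre>{@code
     * FileOutputStream out =
     *     NativeIO.Windows.createFileOutputStreamWithMode(file, false, 0644);
     * }</pre>
     *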
* @param path file to create
* @param append if true, then open file for append
     * @param mode permissions of the new file
* @return FileOutputStream of opened file
* @throws IOException if there is an I/O error
*/
public static FileOutputStream createFileOutputStreamWithMode(File path,
boolean append, int mode) throws IOException {
long desiredAccess = GENERIC_WRITE;
long shareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
long creationDisposition = append ? OPEN_ALWAYS : CREATE_ALWAYS;
return new FileOutputStream(createFileWithMode0(path.getAbsolutePath(),
desiredAccess, shareMode, creationDisposition, mode));
}
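// Illustrative usage sketch (not part of the original source); the path, mode
// and payload are hypothetical:
//
//   File logFile = new File("D:\\hadoop\\logs\\container.log");
//   try (FileOutputStream os =
//       NativeIO.Windows.createFileOutputStreamWithMode(logFile, true, 0640)) {
//     os.write(payload);   // payload is a hypothetical byte[]
//   }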
/** Wrapper around CreateFile() with security descriptor on Windows */
private static native FileDescriptor createFileWithMode0(String path,
long desiredAccess, long shareMode, long creationDisposition, int mode)
throws NativeIOException;
/** Wrapper around SetFilePointer() on Windows */
public static native long setFilePointer(FileDescriptor fd,
long distanceToMove, long moveMethod) throws IOException;
/** Windows only methods used for getOwner() implementation */
private static native String getOwner(FileDescriptor fd) throws IOException;
/** Supported list of Windows access right flags */
public static enum AccessRight {
ACCESS_READ (0x0001), // FILE_READ_DATA
ACCESS_WRITE (0x0002), // FILE_WRITE_DATA
ACCESS_EXECUTE (0x0020); // FILE_EXECUTE
private final int accessRight;
AccessRight(int access) {
accessRight = access;
}
public int accessRight() {
return accessRight;
}
};
/** Windows only method used to check if the current process has requested
* access rights on the given path. */
private static native boolean access0(String path, int requestedAccess);
/**
* Checks whether the current process has desired access rights on
* the given path.
*
* In the longer term this native function can be substituted with the JDK7
* functions Files#isReadable, isWritable, and isExecutable.
*
* @param path input path
* @param desiredAccess ACCESS_READ, ACCESS_WRITE or ACCESS_EXECUTE
* @return true if access is allowed
* @throws IOException I/O exception on error
*/
public static boolean access(String path, AccessRight desiredAccess)
throws IOException {
return access0(path, desiredAccess.accessRight());
}
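// Illustrative usage sketch (not part of the original source); the path is
// hypothetical:
//
//   if (NativeIO.Windows.access("D:\\hadoop\\data", AccessRight.ACCESS_READ)) {
//     // the current process may read the directory
//   }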
/**
* Extends both the minimum and maximum working set size of the current
* process. This method gets the current minimum and maximum working set
* size, adds the requested amount to each and then sets the minimum and
* maximum working set size to the new values. Controlling the working set
* size of the process also controls the amount of memory it can lock.
*
* @param delta amount to increment minimum and maximum working set size
* @throws IOException for any error
* @see POSIX#mlock(ByteBuffer, long)
*/
public static native void extendWorkingSetSize(long delta) throws IOException;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
initNative();
nativeLoaded = true;
} catch (Throwable t) {
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}
}
private static final Log LOG = LogFactory.getLog(NativeIO.class);
private static boolean nativeLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
initNative();
nativeLoaded = true;
} catch (Throwable t) {
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}
/**
* Return true if the JNI-based native IO extensions are available.
*/
public static boolean isAvailable() {
return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded;
}
/** Initialize the JNI method ID and class ID cache */
private static native void initNative();
/**
* Get the maximum number of bytes that can be locked into memory at any
* given point.
*
* @return 0 if no bytes can be locked into memory;
* Long.MAX_VALUE if there is no limit;
* The number of bytes that can be locked into memory otherwise.
*/
static long getMemlockLimit() {
return isAvailable() ? getMemlockLimit0() : 0;
}
private static native long getMemlockLimit0();
/**
* @return the operating system's page size.
*/
static long getOperatingSystemPageSize() {
try {
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
Unsafe unsafe = (Unsafe)f.get(null);
return unsafe.pageSize();
} catch (Throwable e) {
LOG.warn("Unable to get operating system page size. Guessing 4096.", e);
return 4096;
}
}
private static class CachedUid {
final long timestamp;
final String username;
public CachedUid(String username, long timestamp) {
this.timestamp = timestamp;
this.username = username;
}
}
private static final Map<Long, CachedUid> uidCache =
new ConcurrentHashMap<Long, CachedUid>();
private static long cacheTimeout;
private static boolean initialized = false;
/**
* The Windows logon name has two parts, the NetBIOS domain name and
* the user account name, in the format DOMAIN\UserName. This method
* will remove the domain part of the full logon name.
*
* @param name the full principal name containing the domain
* @return name with domain removed
*/
private static String stripDomain(String name) {
int i = name.indexOf('\\');
if (i != -1)
name = name.substring(i + 1);
return name;
}
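// For example (illustrative only): stripDomain("MYDOMAIN\\alice") returns
// "alice", while a name without a domain, such as "alice", is returned unchanged.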
public static String getOwner(FileDescriptor fd) throws IOException {
ensureInitialized();
if (Shell.WINDOWS) {
String owner = Windows.getOwner(fd);
owner = stripDomain(owner);
return owner;
} else {
long uid = POSIX.getUIDforFDOwnerforOwner(fd);
CachedUid cUid = uidCache.get(uid);
long now = System.currentTimeMillis();
if (cUid != null && (cUid.timestamp + cacheTimeout) > now) {
return cUid.username;
}
String user = POSIX.getUserName(uid);
LOG.info("Got UserName " + user + " for UID " + uid
+ " from the native implementation");
cUid = new CachedUid(user, now);
uidCache.put(uid, cUid);
return user;
}
}
/**
* Create a FileInputStream that shares delete permission on the
* file opened, i.e. other processes can delete the file the
* FileInputStream is reading. Only the Windows implementation uses
* the native interface.
*/
public static FileInputStream getShareDeleteFileInputStream(File f)
throws IOException {
if (!Shell.WINDOWS) {
// On Linux the default FileInputStream shares delete permission
// on the file opened.
//
return new FileInputStream(f);
} else {
// Use Windows native interface to create a FileInputStream that
// shares delete permission on the file opened.
//
FileDescriptor fd = Windows.createFile(
f.getAbsolutePath(),
Windows.GENERIC_READ,
Windows.FILE_SHARE_READ |
Windows.FILE_SHARE_WRITE |
Windows.FILE_SHARE_DELETE,
Windows.OPEN_EXISTING);
return new FileInputStream(fd);
}
}
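// Illustrative usage sketch (not part of the original source); the file name
// is hypothetical. The returned stream can keep reading even if another
// process deletes the file:
//
//   try (FileInputStream in =
//       NativeIO.getShareDeleteFileInputStream(new File("edits_inprogress"))) {
//     // read while the file may be concurrently deleted
//   }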
/**
* Create a FileInputStream that shares delete permission on the
* file opened at a given offset, i.e. other processes can delete
* the file the FileInputStream is reading. Only the Windows implementation
* uses the native interface.
*/
public static FileInputStream getShareDeleteFileInputStream(File f, long seekOffset)
throws IOException {
if (!Shell.WINDOWS) {
RandomAccessFile rf = new RandomAccessFile(f, "r");
if (seekOffset > 0) {
rf.seek(seekOffset);
}
return new FileInputStream(rf.getFD());
} else {
// Use Windows native interface to create a FileInputStream that
// shares delete permission on the file opened, and set it to the
// given offset.
//
FileDescriptor fd = NativeIO.Windows.createFile(
f.getAbsolutePath(),
NativeIO.Windows.GENERIC_READ,
NativeIO.Windows.FILE_SHARE_READ |
NativeIO.Windows.FILE_SHARE_WRITE |
NativeIO.Windows.FILE_SHARE_DELETE,
NativeIO.Windows.OPEN_EXISTING);
if (seekOffset > 0)
NativeIO.Windows.setFilePointer(fd, seekOffset, NativeIO.Windows.FILE_BEGIN);
return new FileInputStream(fd);
}
}
/**
* Create the specified File for write access, ensuring that it does not exist.
* @param f the file that we want to create
* @param permissions we want to have on the file (if security is enabled)
*
* @throws AlreadyExistsException if the file already exists
* @throws IOException if any other error occurred
*/
public static FileOutputStream getCreateForWriteFileOutputStream(File f, int permissions)
throws IOException {
if (!Shell.WINDOWS) {
// Use the native wrapper around open(2)
try {
FileDescriptor fd = NativeIO.POSIX.open(f.getAbsolutePath(),
NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT
| NativeIO.POSIX.O_EXCL, permissions);
return new FileOutputStream(fd);
} catch (NativeIOException nioe) {
if (nioe.getErrno() == Errno.EEXIST) {
throw new AlreadyExistsException(nioe);
}
throw nioe;
}
} else {
// Use the Windows native APIs to create equivalent FileOutputStream
try {
FileDescriptor fd = NativeIO.Windows.createFile(f.getCanonicalPath(),
NativeIO.Windows.GENERIC_WRITE,
NativeIO.Windows.FILE_SHARE_DELETE
| NativeIO.Windows.FILE_SHARE_READ
| NativeIO.Windows.FILE_SHARE_WRITE,
NativeIO.Windows.CREATE_NEW);
NativeIO.POSIX.chmod(f.getCanonicalPath(), permissions);
return new FileOutputStream(fd);
} catch (NativeIOException nioe) {
if (nioe.getErrorCode() == 80) {
// ERROR_FILE_EXISTS
// 80 (0x50)
// The file exists
throw new AlreadyExistsException(nioe);
}
throw nioe;
}
}
}
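// Illustrative usage sketch (not part of the original source); the path and
// permissions are hypothetical:
//
//   try (FileOutputStream out =
//       NativeIO.getCreateForWriteFileOutputStream(new File("block.tmp"), 0644)) {
//     // write to the newly created file
//   } catch (AlreadyExistsException e) {
//     // another writer created the file first
//   }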
private synchronized static void ensureInitialized() {
if (!initialized) {
cacheTimeout =
new Configuration().getLong("hadoop.security.uid.cache.secs",
4*60*60) * 1000;
LOG.info("Initialized cache for UID to User mapping with a cache" +
" timeout of " + cacheTimeout/1000 + " seconds.");
initialized = true;
}
}
/**
* A version of renameTo that throws a descriptive exception when it fails.
*
* @param src The source path
* @param dst The destination path
*
* @throws NativeIOException On failure.
*/
public static void renameTo(File src, File dst)
throws IOException {
if (!nativeLoaded) {
if (!src.renameTo(dst)) {
throw new IOException("renameTo(src=" + src + ", dst=" +
dst + ") failed.");
}
} else {
renameTo0(src.getAbsolutePath(), dst.getAbsolutePath());
}
}
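// Illustrative usage sketch (not part of the original source); tmpFile and
// finalFile are hypothetical. On failure the thrown NativeIOException carries
// the errno (POSIX) or system error code (Windows):
//
//   try {
//     NativeIO.renameTo(tmpFile, finalFile);
//   } catch (NativeIOException e) {
//     LOG.error("rename failed: " + e.getErrno(), e);
//   }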
/**
* Creates a hardlink "dst" that points to "src".
*
* This is deprecated since JDK7 NIO can create hardlinks via the
* {@link java.nio.file.Files} API.
*
* @param src source file
* @param dst hardlink location
* @throws IOException
*/
@Deprecated
public static void link(File src, File dst) throws IOException {
if (!nativeLoaded) {
HardLink.createHardLink(src, dst);
} else {
link0(src.getAbsolutePath(), dst.getAbsolutePath());
}
}
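// As noted in the deprecation comment above, on JDK7+ the same effect can be
// achieved without native code (illustrative sketch, not part of the original
// source); note the argument order of Files#createLink(link, existing):
//
//   java.nio.file.Files.createLink(dst.toPath(), src.toPath());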
/**
* A version of renameTo that throws a descriptive exception when it fails.
*
* @param src The source path
* @param dst The destination path
*
* @throws NativeIOException On failure.
*/
private static native void renameTo0(String src, String dst)
throws NativeIOException;
private static native void link0(String src, String dst)
throws NativeIOException;
/**
* Unbuffered file copy from src to dst without tainting OS buffer cache
*
* On POSIX platforms:
* It uses FileChannel#transferTo(), which internally attempts
* unbuffered IO on OSes with native sendfile64() support and falls back to
* buffered IO otherwise.
*
* It minimizes the number of FileChannel#transferTo() calls by passing the
* src file size directly instead of a smaller size as the 3rd parameter.
* This reduces the number of sendfile64() system calls when native sendfile64()
* is supported. In the two fallback cases where sendfile is not supported,
* FileChannel#transferTo already has its own batching of 8 MB and 8 KB,
* respectively.
*
* On Windows:
* It uses its own native wrapper around CopyFileEx with the
* COPY_FILE_NO_BUFFERING flag, which is supported on Windows Server 2008 and
* above.
*
* Ideally, we should use FileChannel#transferTo() on both POSIX and Windows
* platforms. Unfortunately, the wrapper (Java_sun_nio_ch_FileChannelImpl_transferTo0)
* used by FileChannel#transferTo for unbuffered IO is not implemented on Windows.
* Based on OpenJDK 6/7/8 source code, Java_sun_nio_ch_FileChannelImpl_transferTo0
* on Windows simply returns IOS_UNSUPPORTED.
*
* Note: This simple native wrapper does minimal parameter checking before the
* copy and minimal consistency checking (e.g., size) after the copy.
* It is recommended to use a wrapper function such as
* Storage#nativeCopyFileUnbuffered() in hadoop-hdfs, which adds pre/post copy
* checks.
*
* @param src The source path
* @param dst The destination path
* @throws IOException
*/
public static void copyFileUnbuffered(File src, File dst) throws IOException {
if (nativeLoaded && Shell.WINDOWS) {
copyFileUnbuffered0(src.getAbsolutePath(), dst.getAbsolutePath());
} else {
FileInputStream fis = null;
FileOutputStream fos = null;
FileChannel input = null;
FileChannel output = null;
try {
fis = new FileInputStream(src);
fos = new FileOutputStream(dst);
input = fis.getChannel();
output = fos.getChannel();
long remaining = input.size();
long position = 0;
long transferred = 0;
while (remaining > 0) {
transferred = input.transferTo(position, remaining, output);
remaining -= transferred;
position += transferred;
}
} finally {
IOUtils.cleanup(LOG, output);
IOUtils.cleanup(LOG, fos);
IOUtils.cleanup(LOG, input);
IOUtils.cleanup(LOG, fis);
}
}
}
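// Illustrative usage sketch (not part of the original source); the paths are
// hypothetical, and callers are expected to add their own pre/post checks:
//
//   NativeIO.copyFileUnbuffered(new File("blk_1001"), new File("blk_1001.copy"));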
private static native void copyFileUnbuffered0(String src, String dst)
throws NativeIOException;
}
| 35,117 | 34.761711 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;
import java.io.IOException;
import org.apache.hadoop.util.Shell;
/**
* An exception generated by a call to the native IO code.
*
* These exceptions simply wrap <i>errno</i> result codes on Linux,
* or the System Error Code on Windows.
*/
public class NativeIOException extends IOException {
private static final long serialVersionUID = 1L;
private Errno errno;
// Java has no unsigned primitive error code. Use a signed 32-bit
// integer to hold the unsigned 32-bit integer.
private int errorCode;
public NativeIOException(String msg, Errno errno) {
super(msg);
this.errno = errno;
// Windows error code is always set to ERROR_SUCCESS on Linux,
// i.e. no failure on Windows
this.errorCode = 0;
}
public NativeIOException(String msg, int errorCode) {
super(msg);
this.errorCode = errorCode;
this.errno = Errno.UNKNOWN;
}
public long getErrorCode() {
return errorCode;
}
public Errno getErrno() {
return errno;
}
@Override
public String toString() {
if (Shell.WINDOWS)
return errorCode + ": " + super.getMessage();
else
return errno.toString() + ": " + super.getMessage();
}
}
| 2,030 | 28.014286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/Errno.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;
/**
* Enum representing POSIX errno values.
*/
public enum Errno {
EPERM,
ENOENT,
ESRCH,
EINTR,
EIO,
ENXIO,
E2BIG,
ENOEXEC,
EBADF,
ECHILD,
EAGAIN,
ENOMEM,
EACCES,
EFAULT,
ENOTBLK,
EBUSY,
EEXIST,
EXDEV,
ENODEV,
ENOTDIR,
EISDIR,
EINVAL,
ENFILE,
EMFILE,
ENOTTY,
ETXTBSY,
EFBIG,
ENOSPC,
ESPIPE,
EROFS,
EMLINK,
EPIPE,
EDOM,
ERANGE,
ELOOP,
ENAMETOOLONG,
ENOTEMPTY,
EOVERFLOW,
UNKNOWN;
}
| 1,309 | 19.153846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.nativeio;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.FileDescriptor;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A factory for creating shared file descriptors inside a given directory.
* Typically, the directory will be /dev/shm or /tmp.
*
* We will hand out file descriptors that correspond to unlinked files residing
* in that directory. These file descriptors are suitable for sharing across
* multiple processes and are both readable and writable.
*
* Because we unlink the temporary files right after creating them, a JVM crash
* usually does not leave behind any temporary files in the directory. However,
* it may happen that we crash right after creating the file and before
* unlinking it. In the constructor, we attempt to clean up after any such
* remnants by trying to unlink any temporary files created by previous
* SharedFileDescriptorFactory instances that also used our prefix.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SharedFileDescriptorFactory {
public static final Log LOG = LogFactory.getLog(SharedFileDescriptorFactory.class);
private final String prefix;
private final String path;
public static String getLoadingFailureReason() {
if (!NativeIO.isAvailable()) {
return "NativeIO is not available.";
}
if (!SystemUtils.IS_OS_UNIX) {
return "The OS is not UNIX.";
}
return null;
}
/**
* Create a new SharedFileDescriptorFactory.
*
* @param prefix The prefix to prepend to all the file names created
* by this factory.
* @param paths An array of paths to use. We will try each path in
* succession, and return a factory using the first
* usable path.
* @return The factory.
* @throws IOException If a factory could not be created for any reason.
*/
public static SharedFileDescriptorFactory create(String prefix,
String paths[]) throws IOException {
String loadingFailureReason = getLoadingFailureReason();
if (loadingFailureReason != null) {
throw new IOException(loadingFailureReason);
}
if (paths.length == 0) {
throw new IOException("no SharedFileDescriptorFactory paths were " +
"configured.");
}
StringBuilder errors = new StringBuilder();
String strPrefix = "";
for (String path : paths) {
try {
FileInputStream fis =
new FileInputStream(createDescriptor0(prefix + "test", path, 1));
fis.close();
deleteStaleTemporaryFiles0(prefix, path);
return new SharedFileDescriptorFactory(prefix, path);
} catch (IOException e) {
errors.append(strPrefix).append("Error creating file descriptor in ").
append(path).append(": ").append(e.getMessage());
strPrefix = ", ";
}
}
throw new IOException(errors.toString());
}
/**
* Create a SharedFileDescriptorFactory.
*
* @param prefix Prefix to add to all file names we use.
* @param path Path to use.
*/
private SharedFileDescriptorFactory(String prefix, String path) {
this.prefix = prefix;
this.path = path;
}
public String getPath() {
return path;
}
/**
* Create a shared file descriptor which will be both readable and writable.
*
* @param info Information to include in the path of the
* generated descriptor.
* @param length The starting file length.
*
* @return The file descriptor, wrapped in a FileInputStream.
* @throws IOException If there was an I/O or configuration error creating
* the descriptor.
*/
public FileInputStream createDescriptor(String info, int length)
throws IOException {
return new FileInputStream(
createDescriptor0(prefix + info, path, length));
}
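// Illustrative usage sketch (not part of the original source); the prefix,
// paths, descriptor name and length are hypothetical:
//
//   SharedFileDescriptorFactory factory = SharedFileDescriptorFactory.create(
//       "shm_", new String[] {"/dev/shm", "/tmp"});
//   try (FileInputStream stream = factory.createDescriptor("slot0", 8192)) {
//     // share stream.getFD() with another process
//   }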
/**
* Delete temporary files in the directory, NOT following symlinks.
*/
private static native void deleteStaleTemporaryFiles0(String prefix,
String path) throws IOException;
/**
* Create a file with O_EXCL, and then resize it to the desired size.
*/
private static native FileDescriptor createDescriptor0(String prefix,
String path, int length) throws IOException;
}
| 5,396 | 36.22069 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/WritableSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A {@link Serialization} for {@link Writable}s that delegates to
* {@link Writable#write(java.io.DataOutput)} and
* {@link Writable#readFields(java.io.DataInput)}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class WritableSerialization extends Configured
implements Serialization<Writable> {
static class WritableDeserializer extends Configured
implements Deserializer<Writable> {
private Class<?> writableClass;
private DataInputStream dataIn;
public WritableDeserializer(Configuration conf, Class<?> c) {
setConf(conf);
this.writableClass = c;
}
@Override
public void open(InputStream in) {
if (in instanceof DataInputStream) {
dataIn = (DataInputStream) in;
} else {
dataIn = new DataInputStream(in);
}
}
@Override
public Writable deserialize(Writable w) throws IOException {
Writable writable;
if (w == null) {
writable
= (Writable) ReflectionUtils.newInstance(writableClass, getConf());
} else {
writable = w;
}
writable.readFields(dataIn);
return writable;
}
@Override
public void close() throws IOException {
dataIn.close();
}
}
static class WritableSerializer extends Configured implements
Serializer<Writable> {
private DataOutputStream dataOut;
@Override
public void open(OutputStream out) {
if (out instanceof DataOutputStream) {
dataOut = (DataOutputStream) out;
} else {
dataOut = new DataOutputStream(out);
}
}
@Override
public void serialize(Writable w) throws IOException {
w.write(dataOut);
}
@Override
public void close() throws IOException {
dataOut.close();
}
}
@InterfaceAudience.Private
@Override
public boolean accept(Class<?> c) {
return Writable.class.isAssignableFrom(c);
}
@InterfaceAudience.Private
@Override
public Serializer<Writable> getSerializer(Class<Writable> c) {
return new WritableSerializer();
}
@InterfaceAudience.Private
@Override
public Deserializer<Writable> getDeserializer(Class<Writable> c) {
return new WritableDeserializer(getConf(), c);
}
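// Illustrative round-trip sketch (not part of the original source); the
// output stream and the IntWritable value are hypothetical:
//
//   WritableSerialization serialization = new WritableSerialization();
//   serialization.setConf(new Configuration());
//   Serializer<Writable> ser = serialization.getSerializer(Writable.class);
//   ser.open(byteArrayOutputStream);   // byteArrayOutputStream is hypothetical
//   ser.serialize(new IntWritable(42));
//   ser.close();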
}
| 3,560 | 27.03937 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
import org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization;
import org.apache.hadoop.util.ReflectionUtils;
/**
* <p>
* A factory for {@link Serialization}s.
* </p>
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SerializationFactory extends Configured {
private static final Log LOG =
LogFactory.getLog(SerializationFactory.class.getName());
private List<Serialization<?>> serializations = new ArrayList<Serialization<?>>();
/**
* <p>
* Serializations are found by reading the <code>io.serializations</code>
* property from <code>conf</code>, which is a comma-delimited list of
* classnames.
* </p>
*/
public SerializationFactory(Configuration conf) {
super(conf);
for (String serializerName : conf.getTrimmedStrings(
CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
new String[]{WritableSerialization.class.getName(),
AvroSpecificSerialization.class.getName(),
AvroReflectSerialization.class.getName()})) {
add(conf, serializerName);
}
}
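// Illustrative configuration sketch (not part of the original source):
// registering a custom serialization (com.example.MySerialization and MyType
// are hypothetical) ahead of the defaults via the io.serializations key, then
// looking up a serializer:
//
//   Configuration conf = new Configuration();
//   conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
//       "com.example.MySerialization," +
//       "org.apache.hadoop.io.serializer.WritableSerialization");
//   SerializationFactory factory = new SerializationFactory(conf);
//   Serializer<MyType> serializer = factory.getSerializer(MyType.class);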
@SuppressWarnings("unchecked")
private void add(Configuration conf, String serializationName) {
try {
Class<? extends Serialization> serializationClass =
(Class<? extends Serialization>) conf.getClassByName(serializationName);
serializations.add((Serialization)
ReflectionUtils.newInstance(serializationClass, getConf()));
} catch (ClassNotFoundException e) {
LOG.warn("Serialization class not found: ", e);
}
}
public <T> Serializer<T> getSerializer(Class<T> c) {
Serialization<T> serializer = getSerialization(c);
if (serializer != null) {
return serializer.getSerializer(c);
}
return null;
}
public <T> Deserializer<T> getDeserializer(Class<T> c) {
Serialization<T> serializer = getSerialization(c);
if (serializer != null) {
return serializer.getDeserializer(c);
}
return null;
}
@SuppressWarnings("unchecked")
public <T> Serialization<T> getSerialization(Class<T> c) {
for (Serialization serialization : serializations) {
if (serialization.accept(c)) {
return (Serialization<T>) serialization;
}
}
return null;
}
}
| 3,573 | 32.716981 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Deserializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Provides a facility for deserializing objects of type <T> from an
* {@link InputStream}.
* </p>
*
* <p>
* Deserializers are stateful, but must not buffer the input since
* other producers may read from the input between calls to
* {@link #deserialize(Object)}.
* </p>
* @param <T>
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface Deserializer<T> {
/**
* <p>Prepare the deserializer for reading.</p>
*/
void open(InputStream in) throws IOException;
/**
* <p>
* Deserialize the next object from the underlying input stream.
* If the object <code>t</code> is non-null then this deserializer
* <i>may</i> set its internal state to the next object read from the input
* stream. Otherwise, if the object <code>t</code> is null a new
* deserialized object will be created.
* </p>
* @return the deserialized object
*/
T deserialize(T t) throws IOException;
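// Illustrative reuse pattern (not part of the original source): passing the
// previously returned object back in lets implementations avoid allocation;
// moreInput and process are hypothetical.
//
//   T record = null;
//   while (moreInput) {
//     record = deserializer.deserialize(record);
//     process(record);
//   }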
/**
* <p>Close the underlying input stream and clear up any resources.</p>
*/
void close() throws IOException;
}
| 2,128 | 31.753846 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Provides a facility for serializing objects of type <T> to an
* {@link OutputStream}.
* </p>
*
* <p>
* Serializers are stateful, but must not buffer the output since
* other producers may write to the output between calls to
* {@link #serialize(Object)}.
* </p>
* @param <T>
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface Serializer<T> {
/**
* <p>Prepare the serializer for writing.</p>
*/
void open(OutputStream out) throws IOException;
/**
* <p>Serialize <code>t</code> to the underlying output stream.</p>
*/
void serialize(T t) throws IOException;
/**
* <p>Close the underlying output stream and clear up any resources.</p>
*/
void close() throws IOException;
}
| 1,816 | 30.327586 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerializationComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.IOException;
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.RawComparator;
/**
* <p>
* A {@link RawComparator} that uses a {@link JavaSerialization}
* {@link Deserializer} to deserialize objects that are then compared via
* their {@link Comparable} interfaces.
* </p>
* @param <T>
* @see JavaSerialization
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class JavaSerializationComparator<T extends Serializable&Comparable<T>>
extends DeserializerComparator<T> {
@InterfaceAudience.Private
public JavaSerializationComparator() throws IOException {
super(new JavaSerialization.JavaSerializationDeserializer<T>());
}
@Override
@InterfaceAudience.Private
public int compare(T o1, T o2) {
return o1.compareTo(o2);
}
}
| 1,761 | 31.62963 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/DeserializerComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.IOException;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.InputBuffer;
import org.apache.hadoop.io.RawComparator;
/**
* <p>
* A {@link RawComparator} that uses a {@link Deserializer} to deserialize
* the objects to be compared so that the standard {@link Comparator} can
* be used to compare them.
* </p>
* <p>
* One may optimize compare-intensive operations by using a custom
* implementation of {@link RawComparator} that operates directly
* on byte representations.
* </p>
* @param <T>
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class DeserializerComparator<T> implements RawComparator<T> {
private InputBuffer buffer = new InputBuffer();
private Deserializer<T> deserializer;
private T key1;
private T key2;
protected DeserializerComparator(Deserializer<T> deserializer)
throws IOException {
this.deserializer = deserializer;
this.deserializer.open(buffer);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
try {
buffer.reset(b1, s1, l1);
key1 = deserializer.deserialize(key1);
buffer.reset(b2, s2, l2);
key2 = deserializer.deserialize(key2);
} catch (IOException e) {
throw new RuntimeException(e);
}
return compare(key1, key2);
}
}
| 2,359 | 30.052632 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/JavaSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* An experimental {@link Serialization} for Java {@link Serializable} classes.
* </p>
* @see JavaSerializationComparator
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class JavaSerialization implements Serialization<Serializable> {
static class JavaSerializationDeserializer<T extends Serializable>
implements Deserializer<T> {
private ObjectInputStream ois;
@Override
public void open(InputStream in) throws IOException {
ois = new ObjectInputStream(in) {
@Override protected void readStreamHeader() {
// no header
}
};
}
@Override
@SuppressWarnings("unchecked")
public T deserialize(T object) throws IOException {
try {
// ignore passed-in object
return (T) ois.readObject();
} catch (ClassNotFoundException e) {
throw new IOException(e.toString());
}
}
@Override
public void close() throws IOException {
ois.close();
}
}
static class JavaSerializationSerializer
implements Serializer<Serializable> {
private ObjectOutputStream oos;
@Override
public void open(OutputStream out) throws IOException {
oos = new ObjectOutputStream(out) {
@Override protected void writeStreamHeader() {
// no header
}
};
}
@Override
public void serialize(Serializable object) throws IOException {
oos.reset(); // clear (class) back-references
oos.writeObject(object);
}
@Override
public void close() throws IOException {
oos.close();
}
}
@Override
@InterfaceAudience.Private
public boolean accept(Class<?> c) {
return Serializable.class.isAssignableFrom(c);
}
@Override
@InterfaceAudience.Private
public Deserializer<Serializable> getDeserializer(Class<Serializable> c) {
return new JavaSerializationDeserializer<Serializable>();
}
@Override
@InterfaceAudience.Private
public Serializer<Serializable> getSerializer(Class<Serializable> c) {
return new JavaSerializationSerializer();
}
}
| 3,252 | 26.567797 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/Serialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <p>
* Encapsulates a {@link Serializer}/{@link Deserializer} pair.
* </p>
* @param <T>
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface Serialization<T> {
/**
* Allows clients to test whether this {@link Serialization}
* supports the given class.
*/
boolean accept(Class<?> c);
/**
* @return a {@link Serializer} for the given class.
*/
Serializer<T> getSerializer(Class<T> c);
/**
* @return a {@link Deserializer} for the given class.
*/
Deserializer<T> getDeserializer(Class<T> c);
}
| 1,564 | 30.3 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSpecificSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer.avro;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificDatumReader;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.avro.specific.SpecificRecord;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Serialization for Avro Specific classes. This serialization is to be used
* for classes generated by Avro's 'specific' compiler.
*/
@SuppressWarnings("unchecked")
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AvroSpecificSerialization
extends AvroSerialization<SpecificRecord>{
@InterfaceAudience.Private
@Override
public boolean accept(Class<?> c) {
return SpecificRecord.class.isAssignableFrom(c);
}
@InterfaceAudience.Private
@Override
public DatumReader getReader(Class<SpecificRecord> clazz) {
try {
return new SpecificDatumReader(clazz.newInstance().getSchema());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@InterfaceAudience.Private
@Override
public Schema getSchema(SpecificRecord t) {
return t.getSchema();
}
@InterfaceAudience.Private
@Override
public DatumWriter getWriter(Class<SpecificRecord> clazz) {
return new SpecificDatumWriter();
}
}
| 2,239 | 31.463768 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroReflectSerializable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer.avro;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Tag interface for Avro 'reflect' serializable classes. Classes implementing
* this interface can be serialized/deserialized using
* {@link AvroReflectSerialization}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface AvroReflectSerializable {
}
| 1,257 | 36 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer.avro;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.Serializer;
/**
* Base class for providing serialization to Avro types.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class AvroSerialization<T> extends Configured
implements Serialization<T>{
@InterfaceAudience.Private
public static final String AVRO_SCHEMA_KEY = "Avro-Schema";
@Override
@InterfaceAudience.Private
public Deserializer<T> getDeserializer(Class<T> c) {
return new AvroDeserializer(c);
}
@Override
@InterfaceAudience.Private
public Serializer<T> getSerializer(Class<T> c) {
return new AvroSerializer(c);
}
/**
* Return an Avro Schema instance for the given class.
*/
@InterfaceAudience.Private
public abstract Schema getSchema(T t);
/**
* Create and return Avro DatumWriter for the given class.
*/
@InterfaceAudience.Private
public abstract DatumWriter<T> getWriter(Class<T> clazz);
/**
* Create and return Avro DatumReader for the given class.
*/
@InterfaceAudience.Private
public abstract DatumReader<T> getReader(Class<T> clazz);
class AvroSerializer implements Serializer<T> {
private DatumWriter<T> writer;
private BinaryEncoder encoder;
private OutputStream outStream;
AvroSerializer(Class<T> clazz) {
this.writer = getWriter(clazz);
}
@Override
public void close() throws IOException {
encoder.flush();
outStream.close();
}
@Override
public void open(OutputStream out) throws IOException {
outStream = out;
encoder = EncoderFactory.get().binaryEncoder(out, encoder);
}
@Override
public void serialize(T t) throws IOException {
writer.setSchema(getSchema(t));
writer.write(t, encoder);
}
}
class AvroDeserializer implements Deserializer<T> {
private DatumReader<T> reader;
private BinaryDecoder decoder;
private InputStream inStream;
AvroDeserializer(Class<T> clazz) {
this.reader = getReader(clazz);
}
@Override
public void close() throws IOException {
inStream.close();
}
@Override
public T deserialize(T t) throws IOException {
return reader.read(t, decoder);
}
@Override
public void open(InputStream in) throws IOException {
inStream = in;
decoder = DecoderFactory.get().binaryDecoder(in, decoder);
}
}
}
| 3,866 | 26.820144 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/avro/AvroReflectSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.serializer.avro;
import java.util.HashSet;
import java.util.Set;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Serialization for Avro Reflect classes. For a class to be accepted by this
* serialization, it must either be in the package list configured via
* <code>avro.reflect.pkgs</code> or implement
* the {@link AvroReflectSerializable} interface.
*
*/
@SuppressWarnings("unchecked")
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AvroReflectSerialization extends AvroSerialization<Object>{
/**
* Key to configure packages that contain classes to be serialized and
* deserialized using this class. Multiple packages can be specified using
* a comma-separated list.
*/
@InterfaceAudience.Private
public static final String AVRO_REFLECT_PACKAGES = "avro.reflect.pkgs";
private Set<String> packages;
@InterfaceAudience.Private
@Override
public synchronized boolean accept(Class<?> c) {
if (packages == null) {
getPackages();
}
return AvroReflectSerializable.class.isAssignableFrom(c) ||
(c.getPackage() != null && packages.contains(c.getPackage().getName()));
}
private void getPackages() {
String[] pkgList = getConf().getStrings(AVRO_REFLECT_PACKAGES);
packages = new HashSet<String>();
if (pkgList != null) {
for (String pkg : pkgList) {
packages.add(pkg.trim());
}
}
}
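// Illustrative configuration sketch (not part of the original source); the
// package names are hypothetical:
//
//   conf.set(AvroReflectSerialization.AVRO_REFLECT_PACKAGES,
//       "com.example.records, com.example.events");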
@InterfaceAudience.Private
@Override
public DatumReader getReader(Class<Object> clazz) {
try {
return new ReflectDatumReader(clazz);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@InterfaceAudience.Private
@Override
public Schema getSchema(Object t) {
return ReflectData.get().getSchema(t.getClass());
}
@InterfaceAudience.Private
@Override
public DatumWriter getWriter(Class<Object> clazz) {
return new ReflectDatumWriter();
}
}
| 3,074 | 30.377551 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.compress.bzip2.BZip2Constants;
import org.apache.hadoop.io.compress.bzip2.CBZip2InputStream;
import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;
import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
/**
* This class provides output and input streams for bzip2 compression
* and decompression. It uses the native bzip2 library on the system
* if possible, else it uses a pure-Java implementation of the bzip2
* algorithm. The configuration parameter
* io.compression.codec.bzip2.library can be used to control this
* behavior.
*
* In the pure-Java mode, the Compressor and Decompressor interfaces
* are not implemented. Therefore, in that mode, those methods of
* CompressionCodec which have a Compressor or Decompressor type
* argument, throw UnsupportedOperationException.
*
* Currently, support for splittability is available only in the
* pure-Java mode; therefore, if a SplitCompressionInputStream is
* requested, the pure-Java implementation is used, regardless of the
* setting of the configuration parameter mentioned above.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BZip2Codec implements Configurable, SplittableCompressionCodec {
private static final String HEADER = "BZ";
private static final int HEADER_LEN = HEADER.length();
private static final String SUB_HEADER = "h9";
private static final int SUB_HEADER_LEN = SUB_HEADER.length();
private Configuration conf;
/**
* Set the configuration to be used by this object.
*
* @param conf the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Return the configuration used by this object.
*
* @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
}
/**
* Creates a new instance of BZip2Codec.
*/
public BZip2Codec() { }
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream}.
*
* @param out the location for the final output stream
* @return a stream the user can write uncompressed data to, to have it
* compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to, to have it
* compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return Bzip2Factory.isNativeBzip2Loaded(conf) ?
new CompressorStream(out, compressor,
conf.getInt("io.file.buffer.size", 4*1024)) :
new BZip2CompressionOutputStream(out);
}
/**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
return Bzip2Factory.getBzip2CompressorType(conf);
}
/**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
return Bzip2Factory.getBzip2Compressor(conf);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* input stream and return a stream for uncompressed data.
*
* @param in the stream to read compressed bytes from
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}, and return a
* stream for uncompressed data.
*
* @param in the stream to read compressed bytes from
* @param decompressor decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return Bzip2Factory.isNativeBzip2Loaded(conf) ?
new DecompressorStream(in, decompressor,
conf.getInt("io.file.buffer.size", 4*1024)) :
new BZip2CompressionInputStream(in);
}
/**
   * Creates a CompressionInputStream to be used to read off uncompressed data
   * in one of the two reading modes, i.e. continuous or blocked reading mode.
*
* @param seekableIn The InputStream
* @param start The start offset into the compressed stream
* @param end The end offset into the compressed stream
* @param readMode Controls whether progress is reported continuously or
* only at block boundaries.
*
* @return CompressionInputStream for BZip2 aligned at block boundaries
*/
public SplitCompressionInputStream createInputStream(InputStream seekableIn,
Decompressor decompressor, long start, long end, READ_MODE readMode)
throws IOException {
if (!(seekableIn instanceof Seekable)) {
throw new IOException("seekableIn must be an instance of " +
Seekable.class.getName());
}
//find the position of first BZip2 start up marker
((Seekable)seekableIn).seek(0);
    // BZip2 start-of-block markers are 6 bytes long, but the very first block
    // also has "BZh9", making it 10 bytes. This is the common case, but at
    // times the stream might start without a leading BZ.
final long FIRST_BZIP2_BLOCK_MARKER_POSITION =
CBZip2InputStream.numberOfBytesTillNextMarker(seekableIn);
long adjStart = Math.max(0L, start - FIRST_BZIP2_BLOCK_MARKER_POSITION);
((Seekable)seekableIn).seek(adjStart);
SplitCompressionInputStream in =
new BZip2CompressionInputStream(seekableIn, adjStart, end, readMode);
// The following if clause handles the following case:
// Assume the following scenario in BZip2 compressed stream where
// . represent compressed data.
// .....[48 bit Block].....[48 bit Block].....[48 bit Block]...
// ........................[47 bits][1 bit].....[48 bit Block]...
// ................................^[Assume a Byte alignment here]
// ........................................^^[current position of stream]
// .....................^^[We go back 10 Bytes in stream and find a Block marker]
// ........................................^^[We align at wrong position!]
// ...........................................................^^[While this pos is correct]
if (in.getPos() < start) {
((Seekable)seekableIn).seek(start);
in = new BZip2CompressionInputStream(seekableIn, start, end, readMode);
}
return in;
}
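  /*
   * Illustrative sketch of how a split reader can use the method above,
   * assuming "fs", "path", "start" and "end" describe one input split and
   * "codec" is a configured BZip2Codec (these names are not defined in this
   * file). The adjusted offsets reported by the returned stream, not the raw
   * split offsets, decide which records belong to this split.
   *
   *   FSDataInputStream raw = fs.open(path);
   *   SplitCompressionInputStream in = codec.createInputStream(raw,
   *       codec.createDecompressor(), start, end,
   *       SplittableCompressionCodec.READ_MODE.BYBLOCK);
   *   long adjStart = in.getAdjustedStart();
   *   long adjEnd = in.getAdjustedEnd();
   *   // read records while in.getPos() <= adjEnd, as LineRecordReader does
   */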
/**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
@Override
public Class<? extends Decompressor> getDecompressorType() {
return Bzip2Factory.getBzip2DecompressorType(conf);
}
/**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
return Bzip2Factory.getBzip2Decompressor(conf);
}
/**
* .bz2 is recognized as the default extension for compressed BZip2 files
*
* @return A String telling the default bzip2 file extension
*/
@Override
public String getDefaultExtension() {
return ".bz2";
}
private static class BZip2CompressionOutputStream extends
CompressionOutputStream {
// class data starts here//
private CBZip2OutputStream output;
private boolean needsReset;
// class data ends here//
public BZip2CompressionOutputStream(OutputStream out)
throws IOException {
super(out);
needsReset = true;
}
private void writeStreamHeader() throws IOException {
if (super.out != null) {
// The compressed bzip2 stream should start with the
// identifying characters BZ. Caller of CBZip2OutputStream
// i.e. this class must write these characters.
out.write(HEADER.getBytes(Charsets.UTF_8));
}
}
public void finish() throws IOException {
if (needsReset) {
// In the case that nothing is written to this stream, we still need to
// write out the header before closing, otherwise the stream won't be
// recognized by BZip2CompressionInputStream.
internalReset();
}
this.output.finish();
needsReset = true;
}
private void internalReset() throws IOException {
if (needsReset) {
needsReset = false;
writeStreamHeader();
this.output = new CBZip2OutputStream(out);
}
}
public void resetState() throws IOException {
// Cannot write to out at this point because out might not be ready
// yet, as in SequenceFile.Writer implementation.
needsReset = true;
}
public void write(int b) throws IOException {
if (needsReset) {
internalReset();
}
this.output.write(b);
}
public void write(byte[] b, int off, int len) throws IOException {
if (needsReset) {
internalReset();
}
this.output.write(b, off, len);
}
public void close() throws IOException {
if (needsReset) {
// In the case that nothing is written to this stream, we still need to
// write out the header before closing, otherwise the stream won't be
// recognized by BZip2CompressionInputStream.
internalReset();
}
this.output.flush();
this.output.close();
needsReset = true;
}
}// end of class BZip2CompressionOutputStream
/**
   * This class can decompress BZip2 data in two modes, CONTINUOUS and
   * BYBLOCK. BYBLOCK mode makes it possible to start decompression at any
   * arbitrary position in the stream.
   *
   * This facility can therefore be used to parallelize decompression of a
   * large BZip2 file for performance reasons. (The Hadoop framework does
   * exactly that; see LineRecordReader for an example.) One can break the
   * file (logically) into chunks for parallel processing. These "splits"
   * should be like default Hadoop splits (e.g. as in FileInputFormat's
   * getSplits method). This code is designed and tested for
   * FileInputFormat's way of splitting only.
*/
private static class BZip2CompressionInputStream extends
SplitCompressionInputStream {
// class data starts here//
private CBZip2InputStream input;
boolean needsReset;
private BufferedInputStream bufferedIn;
private boolean isHeaderStripped = false;
private boolean isSubHeaderStripped = false;
private READ_MODE readMode = READ_MODE.CONTINUOUS;
private long startingPos = 0L;
// Following state machine handles different states of compressed stream
// position
// HOLD : Don't advertise compressed stream position
// ADVERTISE : Read 1 more character and advertise stream position
// See more comments about it before updatePos method.
private enum POS_ADVERTISEMENT_STATE_MACHINE {
HOLD, ADVERTISE
};
POS_ADVERTISEMENT_STATE_MACHINE posSM = POS_ADVERTISEMENT_STATE_MACHINE.HOLD;
long compressedStreamPosition = 0;
// class data ends here//
public BZip2CompressionInputStream(InputStream in) throws IOException {
this(in, 0L, Long.MAX_VALUE, READ_MODE.CONTINUOUS);
}
public BZip2CompressionInputStream(InputStream in, long start, long end,
READ_MODE readMode) throws IOException {
super(in, start, end);
needsReset = false;
bufferedIn = new BufferedInputStream(super.in);
this.startingPos = super.getPos();
this.readMode = readMode;
if (this.startingPos == 0) {
// We only strip header if it is start of file
bufferedIn = readStreamHeader();
}
input = new CBZip2InputStream(bufferedIn, readMode);
if (this.isHeaderStripped) {
input.updateReportedByteCount(HEADER_LEN);
}
if (this.isSubHeaderStripped) {
input.updateReportedByteCount(SUB_HEADER_LEN);
}
this.updatePos(false);
}
private BufferedInputStream readStreamHeader() throws IOException {
      // We are flexible enough to allow the compressed stream not to
      // start with the header of BZ. So it works fine whether we have
      // the header or not.
if (super.in != null) {
bufferedIn.mark(HEADER_LEN);
byte[] headerBytes = new byte[HEADER_LEN];
int actualRead = bufferedIn.read(headerBytes, 0, HEADER_LEN);
if (actualRead != -1) {
String header = new String(headerBytes, Charsets.UTF_8);
if (header.compareTo(HEADER) != 0) {
bufferedIn.reset();
} else {
this.isHeaderStripped = true;
// In case of BYBLOCK mode, we also want to strip off
// remaining two character of the header.
if (this.readMode == READ_MODE.BYBLOCK) {
actualRead = bufferedIn.read(headerBytes, 0,
SUB_HEADER_LEN);
if (actualRead != -1) {
this.isSubHeaderStripped = true;
}
}
}
}
}
if (bufferedIn == null) {
throw new IOException("Failed to read bzip2 stream.");
}
return bufferedIn;
}// end of method
public void close() throws IOException {
if (!needsReset) {
input.close();
needsReset = true;
}
}
/**
     * This method updates the compressed stream position exactly when the
     * client of this code has read off at least one byte past any BZip2
     * end of block marker.
*
* This mechanism is very helpful to deal with data level record
* boundaries. Please see constructor and next methods of
* org.apache.hadoop.mapred.LineRecordReader as an example usage of this
* feature. We elaborate it with an example in the following:
*
* Assume two different scenarios of the BZip2 compressed stream, where
* [m] represent end of block, \n is line delimiter and . represent compressed
* data.
*
* ............[m]......\n.......
*
* ..........\n[m]......\n.......
*
     * Assume that end is right after [m]. In the first case the reading
     * will stop at \n and there is no need to read one more line. (The
     * reason for reading one more line in the next() method is explained
     * in LineRecordReader.) In the second example, however, LineRecordReader
     * needs to read one more line (till the second \n). Now since BZip2Codec
     * only updates the position once at least one byte past a marker has been
     * read, it is straightforward to differentiate between the two cases.
*
*/
public int read(byte[] b, int off, int len) throws IOException {
if (needsReset) {
internalReset();
}
int result = 0;
result = this.input.read(b, off, len);
if (result == BZip2Constants.END_OF_BLOCK) {
this.posSM = POS_ADVERTISEMENT_STATE_MACHINE.ADVERTISE;
}
if (this.posSM == POS_ADVERTISEMENT_STATE_MACHINE.ADVERTISE) {
result = this.input.read(b, off, off + 1);
// This is the precise time to update compressed stream position
// to the client of this code.
this.updatePos(true);
this.posSM = POS_ADVERTISEMENT_STATE_MACHINE.HOLD;
}
return result;
}
public int read() throws IOException {
byte b[] = new byte[1];
int result = this.read(b, 0, 1);
return (result < 0) ? result : (b[0] & 0xff);
}
private void internalReset() throws IOException {
if (needsReset) {
needsReset = false;
BufferedInputStream bufferedIn = readStreamHeader();
input = new CBZip2InputStream(bufferedIn, this.readMode);
}
}
public void resetState() throws IOException {
// Cannot read from bufferedIn at this point because bufferedIn
// might not be ready
// yet, as in SequenceFile.Reader implementation.
needsReset = true;
}
public long getPos() {
return this.compressedStreamPosition;
}
/*
     * As the comments before the read method explain, the compressed
     * stream position is advertised once at least one byte past EOB has
     * been read off. But there is an exception to this rule: when we
     * construct the stream, we advertise the position exactly at EOB.
     * In the following method the shouldAddOn boolean captures this
     * exception.
*
*/
private void updatePos(boolean shouldAddOn) {
int addOn = shouldAddOn ? 1 : 0;
this.compressedStreamPosition = this.startingPos
+ this.input.getProcessedByteCount() + addOn;
}
}// end of BZip2CompressionInputStream
}
| 18,926 | 33.601463 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.compress.Decompressor;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DecompressorStream extends CompressionInputStream {
protected Decompressor decompressor = null;
protected byte[] buffer;
protected boolean eof = false;
protected boolean closed = false;
private int lastBytesSent = 0;
public DecompressorStream(InputStream in, Decompressor decompressor,
int bufferSize)
throws IOException {
super(in);
if (decompressor == null) {
throw new NullPointerException();
} else if (bufferSize <= 0) {
throw new IllegalArgumentException("Illegal bufferSize");
}
this.decompressor = decompressor;
buffer = new byte[bufferSize];
}
public DecompressorStream(InputStream in, Decompressor decompressor)
throws IOException {
this(in, decompressor, 512);
}
/**
* Allow derived classes to directly set the underlying stream.
*
* @param in Underlying input stream.
* @throws IOException
*/
protected DecompressorStream(InputStream in) throws IOException {
super(in);
}
private byte[] oneByte = new byte[1];
@Override
public int read() throws IOException {
checkStream();
return (read(oneByte, 0, oneByte.length) == -1) ? -1 : (oneByte[0] & 0xff);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
checkStream();
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
return decompress(b, off, len);
}
protected int decompress(byte[] b, int off, int len) throws IOException {
int n = 0;
while ((n = decompressor.decompress(b, off, len)) == 0) {
if (decompressor.needsDictionary()) {
eof = true;
return -1;
}
if (decompressor.finished()) {
// First see if there was any leftover buffered input from previous
// stream; if not, attempt to refill buffer. If refill -> EOF, we're
// all done; else reset, fix up input buffer, and get ready for next
// concatenated substream/"member".
int nRemaining = decompressor.getRemaining();
if (nRemaining == 0) {
int m = getCompressedData();
if (m == -1) {
// apparently the previous end-of-stream was also end-of-file:
// return success, as if we had never called getCompressedData()
eof = true;
return -1;
}
decompressor.reset();
decompressor.setInput(buffer, 0, m);
lastBytesSent = m;
} else {
// looks like it's a concatenated stream: reset low-level zlib (or
// other engine) and buffers, then "resend" remaining input data
decompressor.reset();
int leftoverOffset = lastBytesSent - nRemaining;
assert (leftoverOffset >= 0);
// this recopies userBuf -> direct buffer if using native libraries:
decompressor.setInput(buffer, leftoverOffset, nRemaining);
// NOTE: this is the one place we do NOT want to save the number
// of bytes sent (nRemaining here) into lastBytesSent: since we
// are resending what we've already sent before, offset is nonzero
// in general (only way it could be zero is if it already equals
// nRemaining), which would then screw up the offset calculation
// _next_ time around. IOW, getRemaining() is in terms of the
// original, zero-offset bufferload, so lastBytesSent must be as
// well. Cheesy ASCII art:
//
// <------------ m, lastBytesSent ----------->
// +===============================================+
// buffer: |1111111111|22222222222222222|333333333333| |
// +===============================================+
// #1: <-- off -->|<-------- nRemaining --------->
// #2: <----------- off ----------->|<-- nRem. -->
// #3: (final substream: nRemaining == 0; eof = true)
//
// If lastBytesSent is anything other than m, as shown, then "off"
// will be calculated incorrectly.
}
} else if (decompressor.needsInput()) {
int m = getCompressedData();
if (m == -1) {
throw new EOFException("Unexpected end of input stream");
}
decompressor.setInput(buffer, 0, m);
lastBytesSent = m;
}
}
return n;
}
protected int getCompressedData() throws IOException {
checkStream();
// note that the _caller_ is now required to call setInput() or throw
return in.read(buffer, 0, buffer.length);
}
protected void checkStream() throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
}
@Override
public void resetState() throws IOException {
decompressor.reset();
}
private byte[] skipBytes = new byte[512];
@Override
public long skip(long n) throws IOException {
// Sanity checks
if (n < 0) {
throw new IllegalArgumentException("negative skip length");
}
checkStream();
// Read 'n' bytes
int skipped = 0;
while (skipped < n) {
int len = Math.min(((int)n - skipped), skipBytes.length);
len = read(skipBytes, 0, len);
if (len == -1) {
eof = true;
break;
}
skipped += len;
}
return skipped;
}
@Override
public int available() throws IOException {
checkStream();
return (eof) ? 0 : 1;
}
@Override
public void close() throws IOException {
if (!closed) {
in.close();
closed = true;
}
}
@Override
public boolean markSupported() {
return false;
}
@Override
public synchronized void mark(int readlimit) {
}
@Override
public synchronized void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
}
| 7,104 | 30.577778 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A {@link org.apache.hadoop.io.compress.CompressorStream} which works
 * with 'block-based' compression algorithms, as opposed to
* 'stream-based' compression algorithms.
*
* It should be noted that this wrapper does not guarantee that blocks will
* be sized for the compressor. If the
* {@link org.apache.hadoop.io.compress.Compressor} requires buffering to
* effect meaningful compression, it is responsible for it.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BlockCompressorStream extends CompressorStream {
// The 'maximum' size of input data to be compressed, to account
// for the overhead of the compression algorithm.
private final int MAX_INPUT_SIZE;
/**
* Create a {@link BlockCompressorStream}.
*
* @param out stream
* @param compressor compressor to be used
* @param bufferSize size of buffer
* @param compressionOverhead maximum 'overhead' of the compression
* algorithm with given bufferSize
*/
public BlockCompressorStream(OutputStream out, Compressor compressor,
int bufferSize, int compressionOverhead) {
super(out, compressor, bufferSize);
MAX_INPUT_SIZE = bufferSize - compressionOverhead;
}
/**
* Create a {@link BlockCompressorStream} with given output-stream and
* compressor.
* Use default of 512 as bufferSize and compressionOverhead of
* (1% of bufferSize + 12 bytes) = 18 bytes (zlib algorithm).
*
* @param out stream
* @param compressor compressor to be used
*/
public BlockCompressorStream(OutputStream out, Compressor compressor) {
this(out, compressor, 512, 18);
}
/**
* Write the data provided to the compression codec, compressing no more
* than the buffer size less the compression overhead as specified during
* construction for each block.
*
* Each block contains the uncompressed length for the block, followed by
* one or more length-prefixed blocks of compressed data.
*/
@Override
public void write(byte[] b, int off, int len) throws IOException {
// Sanity checks
if (compressor.finished()) {
throw new IOException("write beyond end of stream");
}
if (b == null) {
throw new NullPointerException();
} else if ((off < 0) || (off > b.length) || (len < 0) ||
((off + len) > b.length)) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
long limlen = compressor.getBytesRead();
if (len + limlen > MAX_INPUT_SIZE && limlen > 0) {
// Adding this segment would exceed the maximum size.
// Flush data if we have it.
finish();
compressor.reset();
}
if (len > MAX_INPUT_SIZE) {
// The data we're given exceeds the maximum size. Any data
      // we had has been flushed, so we write out this chunk in segments
// not exceeding the maximum size until it is exhausted.
rawWriteInt(len);
do {
int bufLen = Math.min(len, MAX_INPUT_SIZE);
compressor.setInput(b, off, bufLen);
compressor.finish();
while (!compressor.finished()) {
compress();
}
compressor.reset();
off += bufLen;
len -= bufLen;
} while (len > 0);
return;
}
// Give data to the compressor
compressor.setInput(b, off, len);
if (!compressor.needsInput()) {
// compressor buffer size might be smaller than the maximum
// size, so we permit it to flush if required.
rawWriteInt((int)compressor.getBytesRead());
do {
compress();
} while (!compressor.needsInput());
}
}
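  /*
   * Illustrative on-the-wire layout produced by the method above together
   * with compress() and finish(); every integer is a 4-byte big-endian value
   * written by rawWriteInt():
   *
   *   [uncompressed length of block]
   *   [compressed chunk length][compressed chunk bytes]
   *   [compressed chunk length][compressed chunk bytes]   // repeated as needed
   *
   * BlockDecompressorStream reads this framing back when decompressing.
   */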
@Override
public void finish() throws IOException {
if (!compressor.finished()) {
rawWriteInt((int)compressor.getBytesRead());
compressor.finish();
while (!compressor.finished()) {
compress();
}
}
}
@Override
protected void compress() throws IOException {
int len = compressor.compress(buffer, 0, buffer.length);
if (len > 0) {
// Write out the compressed chunk
rawWriteInt(len);
out.write(buffer, 0, len);
}
}
private void rawWriteInt(int v) throws IOException {
out.write((v >>> 24) & 0xFF);
out.write((v >>> 16) & 0xFF);
out.write((v >>> 8) & 0xFF);
out.write((v >>> 0) & 0xFF);
}
}
| 5,409 | 31.787879 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specification of a stream-based 'de-compressor' which can be
 * plugged into a {@link CompressionInputStream} to decompress data.
* This is modelled after {@link java.util.zip.Inflater}
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Decompressor {
/**
* Sets input data for decompression.
* This should be called if and only if {@link #needsInput()} returns
* <code>true</code> indicating that more input data is required.
* (Both native and non-native versions of various Decompressors require
* that the data passed in via <code>b[]</code> remain unmodified until
* the caller is explicitly notified--via {@link #needsInput()}--that the
* buffer may be safely modified. With this requirement, an extra
* buffer-copy can be avoided.)
*
* @param b Input data
* @param off Start offset
* @param len Length
*/
public void setInput(byte[] b, int off, int len);
/**
* Returns <code>true</code> if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called to
* provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
* {@link #setInput(byte[], int, int)} should be called in
* order to provide more input.
*/
public boolean needsInput();
/**
   * Sets preset dictionary for decompression. A preset dictionary
* is used when the history buffer can be predetermined.
*
* @param b Dictionary data bytes
* @param off Start offset
* @param len Length
*/
public void setDictionary(byte[] b, int off, int len);
/**
* Returns <code>true</code> if a preset dictionary is needed for decompression.
* @return <code>true</code> if a preset dictionary is needed for decompression
*/
public boolean needsDictionary();
/**
* Returns <code>true</code> if the end of the decompressed
* data output stream has been reached. Indicates a concatenated data stream
* when finished() returns <code>true</code> and {@link #getRemaining()}
* returns a positive value. finished() will be reset with the
* {@link #reset()} method.
* @return <code>true</code> if the end of the decompressed
* data output stream has been reached.
*/
public boolean finished();
/**
* Fills specified buffer with uncompressed data. Returns actual number
* of bytes of uncompressed data. A return value of 0 indicates that
* {@link #needsInput()} should be called in order to determine if more
* input data is required.
*
   * @param b Buffer for the uncompressed data
* @param off Start offset of the data
* @param len Size of the buffer
   * @return The actual number of bytes of uncompressed data.
* @throws IOException
*/
public int decompress(byte[] b, int off, int len) throws IOException;
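  /*
   * Minimal sketch of the calling contract (illustrative only), assuming
   * "decompressor" was obtained from a codec and "compressed" holds one
   * complete compressed stream; both names are assumptions, not part of
   * this interface:
   *
   *   decompressor.setInput(compressed, 0, compressed.length);
   *   byte[] out = new byte[8 * 1024];
   *   while (!decompressor.finished()) {
   *     int n = decompressor.decompress(out, 0, out.length);
   *     if (n == 0 && decompressor.needsInput()) {
   *       break; // caller must supply more input via setInput()
   *     }
   *     // consume n uncompressed bytes from out
   *   }
   */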
/**
* Returns the number of bytes remaining in the compressed data buffer.
* Indicates a concatenated data stream if {@link #finished()} returns
   * <code>true</code> and getRemaining() returns a positive value. If
   * {@link #finished()} returns <code>true</code> and getRemaining() returns
   * zero, the end of the data stream has been reached and it is not a
   * concatenated data stream.
* @return The number of bytes remaining in the compressed data buffer.
*/
public int getRemaining();
/**
* Resets decompressor and input and output buffers so that a new set of
   * input data can be processed. If {@link #finished()} returns
* <code>true</code> and {@link #getRemaining()} returns a positive value,
* reset() is called before processing of the next data stream in the
* concatenated data stream. {@link #finished()} will be reset and will
* return <code>false</code> when reset() is called.
*/
public void reset();
/**
* Closes the decompressor and discards any unprocessed input.
*/
public void end();
}
| 4,915 | 37.108527 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
/**
* A compression input stream.
*
* <p>Implementations are assumed to be buffered. This permits clients to
* reposition the underlying input stream then call {@link #resetState()},
* without having to also synchronize client buffers.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class CompressionInputStream extends InputStream implements Seekable {
/**
   * The input stream serving the compressed data.
*/
protected final InputStream in;
protected long maxAvailableData = 0L;
private Decompressor trackedDecompressor;
/**
* Create a compression input stream that reads
* the decompressed bytes from the given stream.
*
* @param in The input stream to be compressed.
* @throws IOException
*/
protected CompressionInputStream(InputStream in) throws IOException {
if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)) {
this.maxAvailableData = in.available();
}
this.in = in;
}
@Override
public void close() throws IOException {
in.close();
if (trackedDecompressor != null) {
CodecPool.returnDecompressor(trackedDecompressor);
trackedDecompressor = null;
}
}
/**
* Read bytes from the stream.
* Made abstract to prevent leakage to underlying stream.
*/
@Override
public abstract int read(byte[] b, int off, int len) throws IOException;
/**
* Reset the decompressor to its initial state and discard any buffered data,
* as the underlying stream may have been repositioned.
*/
public abstract void resetState() throws IOException;
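  /*
   * Sketch of the reposition-then-reset pattern this contract enables,
   * assuming "seekable" is the Seekable stream wrapped by this object and
   * "compressedIn" is this CompressionInputStream (illustrative names only):
   *
   *   seekable.seek(recordBoundaryOffset);
   *   compressedIn.resetState(); // discard buffered data, restart decoding
   */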
/**
* This method returns the current position in the stream.
*
* @return Current position in stream as a long
*/
@Override
public long getPos() throws IOException {
if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)){
      //This way of getting the current position will not work for files
      //whose size cannot fit in an int and hence cannot be returned by
      //the available method.
return (this.maxAvailableData - this.in.available());
}
else{
return ((Seekable)this.in).getPos();
}
}
/**
   * This method is currently not supported.
*
* @throws UnsupportedOperationException
*/
@Override
public void seek(long pos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
/**
   * This method is currently not supported.
*
* @throws UnsupportedOperationException
*/
@Override
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
void setTrackedDecompressor(Decompressor decompressor) {
trackedDecompressor = decompressor;
}
}
| 3,862 | 29.65873 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CodecPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Set;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
* A global compressor/decompressor pool used to save and reuse
* (possibly native) compression/decompression codecs.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CodecPool {
private static final Log LOG = LogFactory.getLog(CodecPool.class);
/**
* A global compressor pool used to save the expensive
   * construction/destruction of (possibly native) compression codecs.
*/
private static final Map<Class<Compressor>, Set<Compressor>> compressorPool =
new HashMap<Class<Compressor>, Set<Compressor>>();
/**
* A global decompressor pool used to save the expensive
* construction/destruction of (possibly native) decompression codecs.
*/
private static final Map<Class<Decompressor>, Set<Decompressor>> decompressorPool =
new HashMap<Class<Decompressor>, Set<Decompressor>>();
private static <T> LoadingCache<Class<T>, AtomicInteger> createCache(
Class<T> klass) {
return CacheBuilder.newBuilder().build(
new CacheLoader<Class<T>, AtomicInteger>() {
@Override
public AtomicInteger load(Class<T> key) throws Exception {
return new AtomicInteger();
}
});
}
/**
* Map to track the number of leased compressors
*/
private static final LoadingCache<Class<Compressor>, AtomicInteger> compressorCounts =
createCache(Compressor.class);
/**
   * Map to track the number of leased decompressors
*/
private static final LoadingCache<Class<Decompressor>, AtomicInteger> decompressorCounts =
createCache(Decompressor.class);
private static <T> T borrow(Map<Class<T>, Set<T>> pool,
Class<? extends T> codecClass) {
T codec = null;
// Check if an appropriate codec is available
Set<T> codecSet;
synchronized (pool) {
codecSet = pool.get(codecClass);
}
if (codecSet != null) {
synchronized (codecSet) {
if (!codecSet.isEmpty()) {
codec = codecSet.iterator().next();
codecSet.remove(codec);
}
}
}
return codec;
}
private static <T> boolean payback(Map<Class<T>, Set<T>> pool, T codec) {
if (codec != null) {
Class<T> codecClass = ReflectionUtils.getClass(codec);
Set<T> codecSet;
synchronized (pool) {
codecSet = pool.get(codecClass);
if (codecSet == null) {
codecSet = new HashSet<T>();
pool.put(codecClass, codecSet);
}
}
synchronized (codecSet) {
return codecSet.add(codec);
}
}
return false;
}
@SuppressWarnings("unchecked")
private static <T> int getLeaseCount(
LoadingCache<Class<T>, AtomicInteger> usageCounts,
Class<? extends T> codecClass) {
return usageCounts.getUnchecked((Class<T>) codecClass).get();
}
private static <T> void updateLeaseCount(
LoadingCache<Class<T>, AtomicInteger> usageCounts, T codec, int delta) {
if (codec != null) {
Class<T> codecClass = ReflectionUtils.getClass(codec);
usageCounts.getUnchecked(codecClass).addAndGet(delta);
}
}
/**
* Get a {@link Compressor} for the given {@link CompressionCodec} from the
* pool or a new one.
*
* @param codec the <code>CompressionCodec</code> for which to get the
* <code>Compressor</code>
   * @param conf the <code>Configuration</code> object which contains settings for creating or reinitializing the compressor
* @return <code>Compressor</code> for the given
* <code>CompressionCodec</code> from the pool or a new one
*/
public static Compressor getCompressor(CompressionCodec codec, Configuration conf) {
Compressor compressor = borrow(compressorPool, codec.getCompressorType());
if (compressor == null) {
compressor = codec.createCompressor();
LOG.info("Got brand-new compressor ["+codec.getDefaultExtension()+"]");
} else {
compressor.reinit(conf);
if(LOG.isDebugEnabled()) {
LOG.debug("Got recycled compressor");
}
}
updateLeaseCount(compressorCounts, compressor, 1);
return compressor;
}
public static Compressor getCompressor(CompressionCodec codec) {
return getCompressor(codec, null);
}
/**
* Get a {@link Decompressor} for the given {@link CompressionCodec} from the
* pool or a new one.
*
* @param codec the <code>CompressionCodec</code> for which to get the
* <code>Decompressor</code>
* @return <code>Decompressor</code> for the given
   * <code>CompressionCodec</code> from the pool or a new one
*/
public static Decompressor getDecompressor(CompressionCodec codec) {
Decompressor decompressor = borrow(decompressorPool, codec.getDecompressorType());
if (decompressor == null) {
decompressor = codec.createDecompressor();
LOG.info("Got brand-new decompressor ["+codec.getDefaultExtension()+"]");
} else {
if(LOG.isDebugEnabled()) {
LOG.debug("Got recycled decompressor");
}
}
updateLeaseCount(decompressorCounts, decompressor, 1);
return decompressor;
}
/**
* Return the {@link Compressor} to the pool.
*
* @param compressor the <code>Compressor</code> to be returned to the pool
*/
public static void returnCompressor(Compressor compressor) {
if (compressor == null) {
return;
}
// if the compressor can't be reused, don't pool it.
if (compressor.getClass().isAnnotationPresent(DoNotPool.class)) {
return;
}
compressor.reset();
if (payback(compressorPool, compressor)) {
updateLeaseCount(compressorCounts, compressor, -1);
}
}
/**
* Return the {@link Decompressor} to the pool.
*
* @param decompressor the <code>Decompressor</code> to be returned to the
* pool
*/
public static void returnDecompressor(Decompressor decompressor) {
if (decompressor == null) {
return;
}
// if the decompressor can't be reused, don't pool it.
if (decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
return;
}
decompressor.reset();
if (payback(decompressorPool, decompressor)) {
updateLeaseCount(decompressorCounts, decompressor, -1);
}
}
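  /*
   * Typical borrow/use/return pattern (illustrative sketch), assuming
   * "codec", "conf" and "fileOut" are supplied by the caller:
   *
   *   Compressor compressor = CodecPool.getCompressor(codec, conf);
   *   try {
   *     CompressionOutputStream out =
   *         codec.createOutputStream(fileOut, compressor);
   *     // ... write to out, then out.finish() ...
   *   } finally {
   *     CodecPool.returnCompressor(compressor);
   *   }
   */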
/**
* Return the number of leased {@link Compressor}s for this
* {@link CompressionCodec}
*/
public static int getLeasedCompressorsCount(CompressionCodec codec) {
return (codec == null) ? 0 : getLeaseCount(compressorCounts,
codec.getCompressorType());
}
/**
* Return the number of leased {@link Decompressor}s for this
* {@link CompressionCodec}
*/
public static int getLeasedDecompressorsCount(CompressionCodec codec) {
return (codec == null) ? 0 : getLeaseCount(decompressorCounts,
codec.getDecompressorType());
}
}
| 8,269 | 32.346774 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SplittableCompressionCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This interface is meant to be implemented by those compression codecs
 * which are capable of compressing/decompressing a stream starting at any
* arbitrary position.
*
 * In particular, the process of decompressing a stream starting at some
 * arbitrary position is challenging. Most codecs are only able to
 * successfully decompress a stream if they read it from the very beginning
 * to the end. One of the reasons is the state stored at the beginning of
 * the stream, which is crucial for decompression.
 *
 * Yet there are a few codecs which do not save the whole state at the
 * beginning of the stream and hence can be used to decompress a stream
 * starting at arbitrary points. This interface is meant to be used by
 * such codecs. Such
* codecs are highly valuable, especially in the context of Hadoop, because
* an input compressed file can be split and hence can be worked on by multiple
* machines in parallel.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface SplittableCompressionCodec extends CompressionCodec {
/**
* During decompression, data can be read off from the decompressor in two
   * modes, namely continuous and blocked. A few codecs (e.g. BZip2) are
   * capable of compressing data in blocks and then decompressing the blocks.
   * In blocked reading mode such codecs inform their caller of 'end of block'
   * events, while in continuous mode the caller is unaware of the blocks and
   * uncompressed data is spilled out like a continuous stream.
*/
public enum READ_MODE {CONTINUOUS, BYBLOCK};
/**
* Create a stream as dictated by the readMode. This method is used when
   * the codec wants the ability to work with the underlying stream positions.
*
* @param seekableIn The seekable input stream (seeks in compressed data)
* @param start The start offset into the compressed stream. May be changed
* by the underlying codec.
* @param end The end offset into the compressed stream. May be changed by
* the underlying codec.
   * @param readMode Controls whether the stream position is reported
   * continuously from the compressed stream or only at block boundaries.
* @return a stream to read uncompressed bytes from
*/
SplitCompressionInputStream createInputStream(InputStream seekableIn,
Decompressor decompressor, long start, long end, READ_MODE readMode)
throws IOException;
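  /*
   * Illustrative call (names other than READ_MODE are assumptions), showing
   * that callers should adopt the adjusted offsets afterwards:
   *
   *   SplitCompressionInputStream in = codec.createInputStream(seekableIn,
   *       codec.createDecompressor(), start, end, READ_MODE.BYBLOCK);
   *   start = in.getAdjustedStart();
   *   end = in.getAdjustedEnd();
   */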
}
| 3,489 | 43.74359 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DeflateCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
/**
* Alias class for DefaultCodec to enable codec discovery by 'deflate' name.
*/
public class DeflateCodec extends DefaultCodec {
}
| 982 | 38.32 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SplitCompressionInputStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An InputStream covering a range of compressed data. The start and end
* offsets requested by a client may be modified by the codec to fit block
* boundaries or other algorithm-dependent requirements.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class SplitCompressionInputStream
extends CompressionInputStream {
private long start;
private long end;
public SplitCompressionInputStream(InputStream in, long start, long end)
throws IOException {
super(in);
this.start = start;
this.end = end;
}
protected void setStart(long start) {
this.start = start;
}
protected void setEnd(long end) {
this.end = end;
}
/**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of start.
* @return The changed value of start
*/
public long getAdjustedStart() {
return start;
}
/**
* After calling createInputStream, the values of start or end
* might change. So this method can be used to get the new value of end.
* @return The changed value of end
*/
public long getAdjustedEnd() {
return end;
}
}
| 2,217 | 29.805556 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A {@link org.apache.hadoop.io.compress.DecompressorStream} which works
 * with 'block-based' compression algorithms, as opposed to
* 'stream-based' compression algorithms.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BlockDecompressorStream extends DecompressorStream {
private int originalBlockSize = 0;
private int noUncompressedBytes = 0;
/**
* Create a {@link BlockDecompressorStream}.
*
* @param in input stream
* @param decompressor decompressor to use
* @param bufferSize size of buffer
* @throws IOException
*/
public BlockDecompressorStream(InputStream in, Decompressor decompressor,
int bufferSize) throws IOException {
super(in, decompressor, bufferSize);
}
/**
* Create a {@link BlockDecompressorStream}.
*
* @param in input stream
* @param decompressor decompressor to use
* @throws IOException
*/
public BlockDecompressorStream(InputStream in, Decompressor decompressor) throws IOException {
super(in, decompressor);
}
protected BlockDecompressorStream(InputStream in) throws IOException {
super(in);
}
@Override
protected int decompress(byte[] b, int off, int len) throws IOException {
// Check if we are the beginning of a block
if (noUncompressedBytes == originalBlockSize) {
// Get original data size
try {
originalBlockSize = rawReadInt();
} catch (IOException ioe) {
return -1;
}
noUncompressedBytes = 0;
// EOF if originalBlockSize is 0
      // This will occur only when decompressing a previously compressed empty file
if (originalBlockSize == 0) {
eof = true;
return -1;
}
}
int n = 0;
while ((n = decompressor.decompress(b, off, len)) == 0) {
if (decompressor.finished() || decompressor.needsDictionary()) {
if (noUncompressedBytes >= originalBlockSize) {
eof = true;
return -1;
}
}
if (decompressor.needsInput()) {
int m;
try {
m = getCompressedData();
} catch (EOFException e) {
eof = true;
return -1;
}
// Send the read data to the decompressor
decompressor.setInput(buffer, 0, m);
}
}
// Note the no. of decompressed bytes read from 'current' block
noUncompressedBytes += n;
return n;
}
@Override
protected int getCompressedData() throws IOException {
checkStream();
// Get the size of the compressed chunk (always non-negative)
int len = rawReadInt();
// Read len bytes from underlying stream
if (len > buffer.length) {
buffer = new byte[len];
}
int n = 0, off = 0;
while (n < len) {
int count = in.read(buffer, off + n, len - n);
if (count < 0) {
throw new EOFException("Unexpected end of block in input stream");
}
n += count;
}
return len;
}
@Override
public void resetState() throws IOException {
originalBlockSize = 0;
noUncompressedBytes = 0;
super.resetState();
}
private int rawReadInt() throws IOException {
int b1 = in.read();
int b2 = in.read();
int b3 = in.read();
int b4 = in.read();
if ((b1 | b2 | b3 | b4) < 0)
throw new EOFException();
return ((b1 << 24) + (b2 << 16) + (b3 << 8) + (b4 << 0));
}
}
| 4,458 | 27.954545 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DirectDecompressionCodec.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class encapsulates a codec which can decompress direct bytebuffers.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface DirectDecompressionCodec extends CompressionCodec {
/**
* Create a new {@link DirectDecompressor} for use by this {@link DirectDecompressionCodec}.
*
* @return a new direct decompressor for use by this codec
*/
DirectDecompressor createDirectDecompressor();
}
| 1,399 | 37.888889 | 94 |
java
|