repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineSequenceFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* Input format that is a <code>CombineFileInputFormat</code>-equivalent for
* <code>SequenceFileInputFormat</code>.
*
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineSequenceFileInputFormat<K,V>
extends CombineFileInputFormat<K,V> {
@SuppressWarnings({ "rawtypes", "unchecked" })
public RecordReader<K,V> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
return new CombineFileRecordReader((CombineFileSplit)split, context,
SequenceFileRecordReaderWrapper.class);
}
/**
* A record reader that may be passed to <code>CombineFileRecordReader</code>
* so that it can be used in a <code>CombineFileInputFormat</code>-equivalent
* for <code>SequenceFileInputFormat</code>.
*
* @see CombineFileRecordReader
* @see CombineFileInputFormat
* @see SequenceFileInputFormat
*/
private static class SequenceFileRecordReaderWrapper<K,V>
extends CombineFileRecordReaderWrapper<K,V> {
// this constructor signature is required by CombineFileRecordReader
public SequenceFileRecordReaderWrapper(CombineFileSplit split,
TaskAttemptContext context, Integer idx)
throws IOException, InterruptedException {
super(new SequenceFileInputFormat<K,V>(), split, context, idx);
}
}
}
| 2,500 | 38.078125 | 79 |
java
|
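The CombineSequenceFileInputFormat file above is wired into a job like any other input format. Below is a minimal, hedged job-setup sketch (not from the Hadoop sources): the input path, job name, and the 256 MB split cap are illustrative assumptions.

// Illustrative setup only; input path, job name and split cap are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.CombineSequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class CombineSeqFileJobSketch {
  public static Job newJob() throws Exception {
    Job job = Job.getInstance(new Configuration(), "combine-seqfile-sketch");
    // Pack many small SequenceFiles under this directory into fewer splits,
    // so fewer map tasks are launched.
    job.setInputFormatClass(CombineSequenceFileInputFormat.class);
    FileInputFormat.addInputPath(job, new Path("/data/small-seqfiles"));
    // Optionally cap how much data is combined into a single split.
    FileInputFormat.setMaxInputSplitSize(job, 256L * 1024 * 1024);
    return job;
  }
}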
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/** A {@link RecordReader} for {@link SequenceFile}s. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileRecordReader<K, V> extends RecordReader<K, V> {
private SequenceFile.Reader in;
private long start;
private long end;
private boolean more = true;
private K key = null;
private V value = null;
protected Configuration conf;
@Override
public void initialize(InputSplit split,
TaskAttemptContext context
) throws IOException, InterruptedException {
FileSplit fileSplit = (FileSplit) split;
conf = context.getConfiguration();
Path path = fileSplit.getPath();
FileSystem fs = path.getFileSystem(conf);
this.in = new SequenceFile.Reader(fs, path, conf);
this.end = fileSplit.getStart() + fileSplit.getLength();
if (fileSplit.getStart() > in.getPosition()) {
in.sync(fileSplit.getStart()); // sync to start
}
this.start = in.getPosition();
more = start < end;
}
@Override
@SuppressWarnings("unchecked")
public boolean nextKeyValue() throws IOException, InterruptedException {
if (!more) {
return false;
}
long pos = in.getPosition();
key = (K) in.next(key);
if (key == null || (pos >= end && in.syncSeen())) {
more = false;
key = null;
value = null;
} else {
value = (V) in.getCurrentValue(value);
}
return more;
}
@Override
public K getCurrentKey() {
return key;
}
@Override
public V getCurrentValue() {
return value;
}
/**
* Return the progress within the input split
* @return 0.0 to 1.0 of the input byte range
*/
public float getProgress() throws IOException {
if (end == start) {
return 0.0f;
} else {
return Math.min(1.0f, (in.getPosition() - start) / (float)(end - start));
}
}
public synchronized void close() throws IOException { in.close(); }
}
| 3,279 | 29.091743 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
 * This class is similar to SequenceFileInputFormat, except that it generates
 * SequenceFileAsTextRecordReader, which converts the input keys and values
 * to their String forms by calling the toString() method.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsTextInputFormat
extends SequenceFileInputFormat<Text, Text> {
public SequenceFileAsTextInputFormat() {
super();
}
public RecordReader<Text, Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
context.setStatus(split.toString());
return new SequenceFileAsTextRecordReader();
}
}
| 1,830 | 35.62 | 75 |
java
|
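A hedged illustration of what the format above implies for user code: the mapper sees both key and value as Text, already converted via toString(). The mapper and helper class below are hypothetical, not part of Hadoop.

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat;

public class SeqFileAsTextSketch {
  // Hypothetical mapper: keys and values arrive as the String forms of the
  // Writables stored in the SequenceFile.
  public static class EchoMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text key, Text value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  public static void configure(Job job) {
    job.setInputFormatClass(SequenceFileAsTextInputFormat.class);
    job.setMapperClass(EchoMapper.class);
  }
}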
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/KeyValueTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* An {@link InputFormat} for plain text files. Files are broken into lines.
 * Either line feed or carriage-return is used to signal the end of a line.
 * Each line is divided into key and value parts by a separator byte. If no
 * such byte exists, the key will be the entire line and the value will be empty.
 * The separator byte can be specified in the config file under the attribute
 * name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The
 * default is the tab character ('\t').
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyValueTextInputFormat extends FileInputFormat<Text, Text> {
@Override
protected boolean isSplitable(JobContext context, Path file) {
final CompressionCodec codec =
new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
if (null == codec) {
return true;
}
return codec instanceof SplittableCompressionCodec;
}
public RecordReader<Text, Text> createRecordReader(InputSplit genericSplit,
TaskAttemptContext context) throws IOException {
context.setStatus(genericSplit.toString());
return new KeyValueLineRecordReader(context.getConfiguration());
}
}
| 2,694 | 39.223881 | 79 |
java
|
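The separator property named in the javadoc above is set on the job configuration before the format is used. A small sketch follows; the '|' separator, job name, and input path are arbitrary example choices.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;

public class KeyValueTextJobSketch {
  public static Job newJob() throws Exception {
    Configuration conf = new Configuration();
    // Split each input line at the first '|' instead of the default tab.
    conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", "|");
    Job job = Job.getInstance(conf, "kv-text-sketch");
    job.setInputFormatClass(KeyValueTextInputFormat.class);
    FileInputFormat.addInputPath(job, new Path("/data/kv-lines"));
    return job;
  }
}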
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CompressedSplitLineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.SplitCompressionInputStream;
/**
* Line reader for compressed splits
*
 * Reading records from a compressed split is tricky, as the
 * LineRecordReader uses the reported compressed input stream
 * position directly to determine when a split has ended. In addition, the
 * compressed input stream usually fakes the actual byte position, often
 * updating it only after the first compressed block after the split is
 * accessed.
*
* Depending upon where the last compressed block of the split ends relative
* to the record delimiters it can be easy to accidentally drop the last
* record or duplicate the last record between this split and the next.
*
* Split end scenarios:
*
* 1) Last block of split ends in the middle of a record
* Nothing special that needs to be done here, since the compressed input
* stream will report a position after the split end once the record
* is fully read. The consumer of the next split will discard the
* partial record at the start of the split normally, and no data is lost
* or duplicated between the splits.
*
* 2) Last block of split ends in the middle of a delimiter
* The line reader will continue to consume bytes into the next block to
* locate the end of the delimiter. If a custom delimiter is being used
* then the next record must be read by this split or it will be dropped.
* The consumer of the next split will not recognize the partial
* delimiter at the beginning of its split and will discard it along with
* the next record.
*
* However for the default delimiter processing there is a special case
* because CR, LF, and CRLF are all valid record delimiters. If the
* block ends with a CR then the reader must peek at the next byte to see
* if it is an LF and therefore part of the same record delimiter.
* Peeking at the next byte is an access to the next block and triggers
* the stream to report the end of the split. There are two cases based
* on the next byte:
*
* A) The next byte is LF
* The split needs to end after the current record is returned. The
* consumer of the next split will discard the first record, which
* is degenerate since LF is itself a delimiter, and start consuming
* records after that byte. If the current split tries to read
* another record then the record will be duplicated between splits.
*
* B) The next byte is not LF
* The current record will be returned but the stream will report
* the split has ended due to the peek into the next block. If the
* next record is not read then it will be lost, as the consumer of
* the next split will discard it before processing subsequent
* records. Therefore the next record beyond the reported split end
* must be consumed by this split to avoid data loss.
*
* 3) Last block of split ends at the beginning of a delimiter
* This is equivalent to case 1, as the reader will consume bytes into
* the next block and trigger the end of the split. No further records
* should be read as the consumer of the next split will discard the
* (degenerate) record at the beginning of its split.
*
* 4) Last block of split ends at the end of a delimiter
* Nothing special needs to be done here. The reader will not start
* examining the bytes into the next block until the next record is read,
* so the stream will not report the end of the split just yet. Once the
* next record is read then the next block will be accessed and the
* stream will indicate the end of the split. The consumer of the next
* split will correctly discard the first record of its split, and no
* data is lost or duplicated.
*
* If the default delimiter is used and the block ends at a CR then this
* is treated as case 2 since the reader does not yet know without
* looking at subsequent bytes whether the delimiter has ended.
*
* NOTE: It is assumed that compressed input streams *never* return bytes from
* multiple compressed blocks from a single read. Failure to do so will
* violate the buffering performed by this class, as it will access
* bytes into the next block after the split before returning all of the
* records from the previous block.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class CompressedSplitLineReader extends SplitLineReader {
SplitCompressionInputStream scin;
private boolean usingCRLF;
private boolean needAdditionalRecord = false;
private boolean finished = false;
public CompressedSplitLineReader(SplitCompressionInputStream in,
Configuration conf,
byte[] recordDelimiterBytes)
throws IOException {
super(in, conf, recordDelimiterBytes);
scin = in;
usingCRLF = (recordDelimiterBytes == null);
}
@Override
protected int fillBuffer(InputStream in, byte[] buffer, boolean inDelimiter)
throws IOException {
int bytesRead = in.read(buffer);
// If the split ended in the middle of a record delimiter then we need
// to read one additional record, as the consumer of the next split will
// not recognize the partial delimiter as a record.
// However if using the default delimiter and the next character is a
// linefeed then next split will treat it as a delimiter all by itself
// and the additional record read should not be performed.
if (inDelimiter && bytesRead > 0) {
if (usingCRLF) {
needAdditionalRecord = (buffer[0] != '\n');
} else {
needAdditionalRecord = true;
}
}
return bytesRead;
}
@Override
public int readLine(Text str, int maxLineLength, int maxBytesToConsume)
throws IOException {
int bytesRead = 0;
if (!finished) {
// only allow at most one more record to be read after the stream
// reports the split ended
if (scin.getPos() > scin.getAdjustedEnd()) {
finished = true;
}
bytesRead = super.readLine(str, maxLineLength, maxBytesToConsume);
}
return bytesRead;
}
@Override
public boolean needAdditionalRecordAfterSplit() {
return !finished && needAdditionalRecord;
}
}
| 7,578 | 43.846154 | 78 |
java
|
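The scenarios in the long comment above reduce to a small decision rule, which the standalone sketch below restates outside of Hadoop. It is only an illustration of the logic in fillBuffer, not code from the project; the class and method names are invented.

// Standalone restatement of the rule in CompressedSplitLineReader.fillBuffer.
public class SplitDelimiterRuleSketch {
  /**
   * @param inDelimiter the split ended partway through a record delimiter
   * @param usingCRLF   the default CR/LF/CRLF delimiter handling is in use
   * @param nextByte    the first byte read from the block after the split
   * @return whether this split must consume one additional record
   */
  static boolean needAdditionalRecord(boolean inDelimiter, boolean usingCRLF,
                                      byte nextByte) {
    if (!inDelimiter) {
      return false;      // the split did not end in the middle of a delimiter
    }
    if (usingCRLF) {
      // Case 2A vs 2B above: a lone LF is a delimiter by itself for the next
      // split, so only a non-LF byte forces this split to read one more record.
      return nextByte != '\n';
    }
    return true;         // case 2: a custom delimiter split across blocks
  }

  public static void main(String[] args) {
    System.out.println(needAdditionalRecord(true, true, (byte) '\n')); // false
    System.out.println(needAdditionalRecord(true, true, (byte) 'x'));  // true
    System.out.println(needAdditionalRecord(false, true, (byte) 'x')); // false
  }
}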
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/InvalidInputException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.List;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class wraps a list of problems with the input, so that the user
* can get a list of problems together instead of finding and fixing them one
* by one.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InvalidInputException extends IOException {
private static final long serialVersionUID = -380668190578456802L;
private List<IOException> problems;
/**
* Create the exception with the given list.
* @param probs the list of problems to report. this list is not copied.
*/
public InvalidInputException(List<IOException> probs) {
problems = probs;
}
/**
* Get the complete list of the problems reported.
* @return the list of problems, which must not be modified
*/
public List<IOException> getProblems() {
return problems;
}
/**
* Get a summary message of the problems found.
* @return the concatenated messages from all of the problems.
*/
public String getMessage() {
StringBuffer result = new StringBuffer();
Iterator<IOException> itr = problems.iterator();
while(itr.hasNext()) {
result.append(itr.next().getMessage());
if (itr.hasNext()) {
result.append("\n");
}
}
return result.toString();
}
}
| 2,299 | 31.857143 | 78 |
java
|
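A small, hypothetical illustration of how the aggregate message of the class above behaves; the paths in the IOException messages are invented.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapreduce.lib.input.InvalidInputException;

public class InvalidInputSketch {
  public static void main(String[] args) {
    List<IOException> problems = Arrays.asList(
        new IOException("Input path does not exist: /data/a"),
        new IOException("Input path does not exist: /data/b"));
    InvalidInputException e = new InvalidInputException(problems);
    // getMessage() concatenates the individual messages, separated by newlines.
    System.out.println(e.getMessage());
  }
}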
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.*;
import java.lang.reflect.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A generic RecordReader that can hand out different recordReaders
* for each chunk in a {@link CombineFileSplit}.
* A CombineFileSplit can combine data chunks from multiple files.
* This class allows using different RecordReaders for processing
* these data chunks from different files.
* @see CombineFileSplit
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineFileRecordReader<K, V> extends RecordReader<K, V> {
static final Class [] constructorSignature = new Class []
{CombineFileSplit.class,
TaskAttemptContext.class,
Integer.class};
protected CombineFileSplit split;
protected Constructor<? extends RecordReader<K,V>> rrConstructor;
protected TaskAttemptContext context;
protected int idx;
protected long progress;
protected RecordReader<K, V> curReader;
public void initialize(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
this.split = (CombineFileSplit)split;
this.context = context;
if (null != this.curReader) {
this.curReader.initialize(split, context);
}
}
public boolean nextKeyValue() throws IOException, InterruptedException {
while ((curReader == null) || !curReader.nextKeyValue()) {
if (!initNextRecordReader()) {
return false;
}
}
return true;
}
public K getCurrentKey() throws IOException, InterruptedException {
return curReader.getCurrentKey();
}
public V getCurrentValue() throws IOException, InterruptedException {
return curReader.getCurrentValue();
}
public void close() throws IOException {
if (curReader != null) {
curReader.close();
curReader = null;
}
}
/**
* return progress based on the amount of data processed so far.
*/
public float getProgress() throws IOException, InterruptedException {
long subprogress = 0; // bytes processed in current split
if (null != curReader) {
// idx is always one past the current subsplit's true index.
subprogress = (long)(curReader.getProgress() * split.getLength(idx - 1));
}
return Math.min(1.0f, (progress + subprogress)/(float)(split.getLength()));
}
/**
* A generic RecordReader that can hand out different recordReaders
* for each chunk in the CombineFileSplit.
*/
public CombineFileRecordReader(CombineFileSplit split,
TaskAttemptContext context,
Class<? extends RecordReader<K,V>> rrClass)
throws IOException {
this.split = split;
this.context = context;
this.idx = 0;
this.curReader = null;
this.progress = 0;
try {
rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
rrConstructor.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException(rrClass.getName() +
" does not have valid constructor", e);
}
initNextRecordReader();
}
/**
* Get the record reader for the next chunk in this CombineFileSplit.
*/
protected boolean initNextRecordReader() throws IOException {
if (curReader != null) {
curReader.close();
curReader = null;
if (idx > 0) {
progress += split.getLength(idx-1); // done processing so far
}
}
// if all chunks have been processed, nothing more to do.
if (idx == split.getNumPaths()) {
return false;
}
context.progress();
// get a record reader for the idx-th chunk
try {
Configuration conf = context.getConfiguration();
// setup some helper config variables.
conf.set(MRJobConfig.MAP_INPUT_FILE, split.getPath(idx).toString());
conf.setLong(MRJobConfig.MAP_INPUT_START, split.getOffset(idx));
conf.setLong(MRJobConfig.MAP_INPUT_PATH, split.getLength(idx));
curReader = rrConstructor.newInstance(new Object []
{split, context, Integer.valueOf(idx)});
if (idx > 0) {
// initialize() for the first RecordReader will be called by MapTask;
// we're responsible for initializing subsequent RecordReaders.
curReader.initialize(split, context);
}
} catch (Exception e) {
throw new RuntimeException (e);
}
idx++;
return true;
}
}
| 5,540 | 32.179641 | 80 |
java
|
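The constructor signature that CombineFileRecordReader looks up reflectively is the same one used by the wrapper classes elsewhere in this package. The sketch below applies that pattern to KeyValueTextInputFormat; the class names are illustrative and this particular combination is not shipped with Hadoop.

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReaderWrapper;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;

public class CombineKeyValueTextInputFormat
    extends CombineFileInputFormat<Text, Text> {
  @Override
  public RecordReader<Text, Text> createRecordReader(InputSplit split,
      TaskAttemptContext context) throws IOException {
    return new CombineFileRecordReader<Text, Text>(
        (CombineFileSplit) split, context, KeyValueTextWrapper.class);
  }

  // This three-argument constructor is what CombineFileRecordReader finds via
  // reflection and invokes once per chunk of the combined split.
  private static class KeyValueTextWrapper
      extends CombineFileRecordReaderWrapper<Text, Text> {
    public KeyValueTextWrapper(CombineFileSplit split,
        TaskAttemptContext context, Integer idx)
        throws IOException, InterruptedException {
      super(new KeyValueTextInputFormat(), split, context, idx);
    }
  }
}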
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsTextRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
 * This class converts the input keys and values to their String forms by
 * calling the toString() method. This class is to SequenceFileAsTextInputFormat
 * as LineRecordReader is to TextInputFormat.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsTextRecordReader
extends RecordReader<Text, Text> {
private final SequenceFileRecordReader<WritableComparable<?>, Writable>
sequenceFileRecordReader;
private Text key;
private Text value;
public SequenceFileAsTextRecordReader()
throws IOException {
sequenceFileRecordReader =
new SequenceFileRecordReader<WritableComparable<?>, Writable>();
}
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
sequenceFileRecordReader.initialize(split, context);
}
@Override
public Text getCurrentKey()
throws IOException, InterruptedException {
return key;
}
@Override
public Text getCurrentValue()
throws IOException, InterruptedException {
return value;
}
/** Read key/value pair in a line. */
public synchronized boolean nextKeyValue()
throws IOException, InterruptedException {
if (!sequenceFileRecordReader.nextKeyValue()) {
return false;
}
if (key == null) {
key = new Text();
}
if (value == null) {
value = new Text();
}
key.set(sequenceFileRecordReader.getCurrentKey().toString());
value.set(sequenceFileRecordReader.getCurrentValue().toString());
return true;
}
public float getProgress() throws IOException, InterruptedException {
return sequenceFileRecordReader.getProgress();
}
public synchronized void close() throws IOException {
sequenceFileRecordReader.close();
}
}
| 3,087 | 31.166667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An {@link InputFormat} that delegates behavior of paths to multiple other
* InputFormats.
*
* @see MultipleInputs#addInputPath(Job, Path, Class, Class)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegatingInputFormat<K, V> extends InputFormat<K, V> {
@SuppressWarnings("unchecked")
public List<InputSplit> getSplits(JobContext job)
throws IOException, InterruptedException {
Configuration conf = job.getConfiguration();
Job jobCopy = Job.getInstance(conf);
List<InputSplit> splits = new ArrayList<InputSplit>();
Map<Path, InputFormat> formatMap =
MultipleInputs.getInputFormatMap(job);
Map<Path, Class<? extends Mapper>> mapperMap = MultipleInputs
.getMapperTypeMap(job);
Map<Class<? extends InputFormat>, List<Path>> formatPaths
= new HashMap<Class<? extends InputFormat>, List<Path>>();
// First, build a map of InputFormats to Paths
for (Entry<Path, InputFormat> entry : formatMap.entrySet()) {
if (!formatPaths.containsKey(entry.getValue().getClass())) {
formatPaths.put(entry.getValue().getClass(), new LinkedList<Path>());
}
formatPaths.get(entry.getValue().getClass()).add(entry.getKey());
}
for (Entry<Class<? extends InputFormat>, List<Path>> formatEntry :
formatPaths.entrySet()) {
Class<? extends InputFormat> formatClass = formatEntry.getKey();
InputFormat format = (InputFormat) ReflectionUtils.newInstance(
formatClass, conf);
List<Path> paths = formatEntry.getValue();
Map<Class<? extends Mapper>, List<Path>> mapperPaths
= new HashMap<Class<? extends Mapper>, List<Path>>();
// Now, for each set of paths that have a common InputFormat, build
// a map of Mappers to the paths they're used for
for (Path path : paths) {
Class<? extends Mapper> mapperClass = mapperMap.get(path);
if (!mapperPaths.containsKey(mapperClass)) {
mapperPaths.put(mapperClass, new LinkedList<Path>());
}
mapperPaths.get(mapperClass).add(path);
}
// Now each set of paths that has a common InputFormat and Mapper can
// be added to the same job, and split together.
for (Entry<Class<? extends Mapper>, List<Path>> mapEntry :
mapperPaths.entrySet()) {
paths = mapEntry.getValue();
Class<? extends Mapper> mapperClass = mapEntry.getKey();
if (mapperClass == null) {
try {
mapperClass = job.getMapperClass();
} catch (ClassNotFoundException e) {
throw new IOException("Mapper class is not found", e);
}
}
FileInputFormat.setInputPaths(jobCopy, paths.toArray(new Path[paths
.size()]));
// Get splits for each input path and tag with InputFormat
// and Mapper types by wrapping in a TaggedInputSplit.
List<InputSplit> pathSplits = format.getSplits(jobCopy);
for (InputSplit pathSplit : pathSplits) {
splits.add(new TaggedInputSplit(pathSplit, conf, format.getClass(),
mapperClass));
}
}
}
return splits;
}
@Override
public RecordReader<K, V> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
return new DelegatingRecordReader<K, V>(split, context);
}
}
| 4,989 | 36.80303 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.util.LineReader;
/**
* NLineInputFormat which splits N lines of input as one split.
*
 * In many "pleasantly" parallel applications, each process/mapper
 * processes the same input file(s), but the computations are
 * controlled by different parameters (referred to as "parameter sweeps").
 * One way to achieve this is to specify a set of parameters
 * (one set per line) as input in a control file
 * (which is the input path to the map-reduce application,
 * whereas the input dataset is specified
 * via a config variable in JobConf).
 *
 * NLineInputFormat can be used in such applications: it splits
 * the input file such that, by default, one line is fed as
 * the value to one map task, and the key is the offset,
 * i.e. (k, v) is (LongWritable, Text).
 * The location hints will span the whole mapred cluster.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NLineInputFormat extends FileInputFormat<LongWritable, Text> {
public static final String LINES_PER_MAP =
"mapreduce.input.lineinputformat.linespermap";
public RecordReader<LongWritable, Text> createRecordReader(
InputSplit genericSplit, TaskAttemptContext context)
throws IOException {
context.setStatus(genericSplit.toString());
return new LineRecordReader();
}
/**
* Logically splits the set of input files for the job, splits N lines
* of the input as one split.
*
* @see FileInputFormat#getSplits(JobContext)
*/
public List<InputSplit> getSplits(JobContext job)
throws IOException {
List<InputSplit> splits = new ArrayList<InputSplit>();
int numLinesPerSplit = getNumLinesPerSplit(job);
for (FileStatus status : listStatus(job)) {
splits.addAll(getSplitsForFile(status,
job.getConfiguration(), numLinesPerSplit));
}
return splits;
}
public static List<FileSplit> getSplitsForFile(FileStatus status,
Configuration conf, int numLinesPerSplit) throws IOException {
List<FileSplit> splits = new ArrayList<FileSplit> ();
Path fileName = status.getPath();
if (status.isDirectory()) {
throw new IOException("Not a file: " + fileName);
}
FileSystem fs = fileName.getFileSystem(conf);
LineReader lr = null;
try {
FSDataInputStream in = fs.open(fileName);
lr = new LineReader(in, conf);
Text line = new Text();
int numLines = 0;
long begin = 0;
long length = 0;
int num = -1;
while ((num = lr.readLine(line)) > 0) {
numLines++;
length += num;
if (numLines == numLinesPerSplit) {
splits.add(createFileSplit(fileName, begin, length));
begin += length;
length = 0;
numLines = 0;
}
}
if (numLines != 0) {
splits.add(createFileSplit(fileName, begin, length));
}
} finally {
if (lr != null) {
lr.close();
}
}
return splits;
}
/**
* NLineInputFormat uses LineRecordReader, which always reads
* (and consumes) at least one character out of its upper split
* boundary. So to make sure that each mapper gets N lines, we
* move back the upper split limits of each split
* by one character here.
* @param fileName Path of file
* @param begin the position of the first byte in the file to process
* @param length number of bytes in InputSplit
* @return FileSplit
*/
protected static FileSplit createFileSplit(Path fileName, long begin, long length) {
return (begin == 0)
? new FileSplit(fileName, begin, length - 1, new String[] {})
: new FileSplit(fileName, begin - 1, length, new String[] {});
}
/**
* Set the number of lines per split
* @param job the job to modify
* @param numLines the number of lines per split
*/
public static void setNumLinesPerSplit(Job job, int numLines) {
job.getConfiguration().setInt(LINES_PER_MAP, numLines);
}
/**
* Get the number of lines per split
* @param job the job
* @return the number of lines per split
*/
public static int getNumLinesPerSplit(JobContext job) {
return job.getConfiguration().getInt(LINES_PER_MAP, 1);
}
}
| 5,773 | 34.641975 | 86 |
java
|
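A hedged sketch of the configuration described in the file above; ten lines per mapper, the job name, and the control-file path are arbitrary example values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;

public class NLineJobSketch {
  public static Job newJob() throws Exception {
    Job job = Job.getInstance(new Configuration(), "parameter-sweep-sketch");
    job.setInputFormatClass(NLineInputFormat.class);
    // Each map task receives ten lines of the control file as its input.
    NLineInputFormat.setNumLinesPerSplit(job, 10);
    FileInputFormat.addInputPath(job, new Path("/data/parameters.txt"));
    return job;
  }
}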
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/** An {@link InputFormat} for {@link SequenceFile}s. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileInputFormat<K, V> extends FileInputFormat<K, V> {
@Override
public RecordReader<K, V> createRecordReader(InputSplit split,
TaskAttemptContext context
) throws IOException {
return new SequenceFileRecordReader<K,V>();
}
@Override
protected long getFormatMinSplitSize() {
return SequenceFile.SYNC_INTERVAL;
}
@Override
protected List<FileStatus> listStatus(JobContext job
)throws IOException {
List<FileStatus> files = super.listStatus(job);
int len = files.size();
for(int i=0; i < len; ++i) {
FileStatus file = files.get(i);
if (file.isDirectory()) { // it's a MapFile
Path p = file.getPath();
FileSystem fs = p.getFileSystem(job.getConfiguration());
// use the data file
files.set(i, fs.getFileStatus(new Path(p, MapFile.DATA_FILE_NAME)));
}
}
return files;
}
}
| 2,605 | 34.216216 | 76 |
java
|
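For completeness, a hypothetical mapper for the plain SequenceFileInputFormat above: the key and value type parameters must match the Writable classes stored in the file, which are assumed here to be LongWritable and Text.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;

public class SequenceFileJobSketch {
  // Assumes the SequenceFiles store LongWritable keys and Text values.
  public static class PassThroughMapper
      extends Mapper<LongWritable, Text, LongWritable, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  public static void configure(Job job) {
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setMapperClass(PassThroughMapper.class);
  }
}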
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* Input format that is a <code>CombineFileInputFormat</code>-equivalent for
* <code>TextInputFormat</code>.
*
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineTextInputFormat
extends CombineFileInputFormat<LongWritable,Text> {
public RecordReader<LongWritable,Text> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException {
return new CombineFileRecordReader<LongWritable,Text>(
(CombineFileSplit)split, context, TextRecordReaderWrapper.class);
}
/**
* A record reader that may be passed to <code>CombineFileRecordReader</code>
* so that it can be used in a <code>CombineFileInputFormat</code>-equivalent
* for <code>TextInputFormat</code>.
*
* @see CombineFileRecordReader
* @see CombineFileInputFormat
* @see TextInputFormat
*/
private static class TextRecordReaderWrapper
extends CombineFileRecordReaderWrapper<LongWritable,Text> {
// this constructor signature is required by CombineFileRecordReader
public TextRecordReaderWrapper(CombineFileSplit split,
TaskAttemptContext context, Integer idx)
throws IOException, InterruptedException {
super(new TextInputFormat(), split, context, idx);
}
}
}
| 2,511 | 37.060606 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.io.Text;
/**
* SplitLineReader for uncompressed files.
 * This class can split the file correctly even if the delimiter is multi-byte.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class UncompressedSplitLineReader extends SplitLineReader {
private boolean needAdditionalRecord = false;
private long splitLength;
/** Total bytes read from the input stream. */
private long totalBytesRead = 0;
private boolean finished = false;
private boolean usingCRLF;
private int unusedBytes = 0;
private int lastBytesRead = 0;
public UncompressedSplitLineReader(FSDataInputStream in, Configuration conf,
byte[] recordDelimiterBytes, long splitLength) throws IOException {
super(in, conf, recordDelimiterBytes);
this.splitLength = splitLength;
usingCRLF = (recordDelimiterBytes == null);
}
@Override
protected int fillBuffer(InputStream in, byte[] buffer, boolean inDelimiter)
throws IOException {
int maxBytesToRead = buffer.length;
if (totalBytesRead < splitLength) {
maxBytesToRead = Math.min(maxBytesToRead,
(int)(splitLength - totalBytesRead));
}
int bytesRead = in.read(buffer, 0, maxBytesToRead);
lastBytesRead = bytesRead;
// If the split ended in the middle of a record delimiter then we need
// to read one additional record, as the consumer of the next split will
// not recognize the partial delimiter as a record.
// However if using the default delimiter and the next character is a
// linefeed then next split will treat it as a delimiter all by itself
// and the additional record read should not be performed.
if (totalBytesRead == splitLength && inDelimiter && bytesRead > 0) {
if (usingCRLF) {
needAdditionalRecord = (buffer[0] != '\n');
} else {
needAdditionalRecord = true;
}
}
if (bytesRead > 0) {
totalBytesRead += bytesRead;
}
return bytesRead;
}
@Override
public int readLine(Text str, int maxLineLength, int maxBytesToConsume)
throws IOException {
long bytesRead = 0;
if (!finished) {
// only allow at most one more record to be read after the stream
// reports the split ended
if (totalBytesRead > splitLength) {
finished = true;
}
bytesRead = totalBytesRead;
int bytesConsumed = super.readLine(str, maxLineLength, maxBytesToConsume);
bytesRead = totalBytesRead - bytesRead;
// No records left.
if (bytesConsumed == 0 && bytesRead == 0) {
return 0;
}
int bufferSize = getBufferSize();
// Add the remaining buffer size not used for the last call
// of fillBuffer method.
if (lastBytesRead <= 0) {
bytesRead += bufferSize;
} else if (bytesRead > 0) {
bytesRead += bufferSize - lastBytesRead;
}
// Adjust the size of the buffer not used for this record.
// The size is carried over for the next calculation.
bytesRead += unusedBytes;
unusedBytes = bufferSize - getBufferPosn();
bytesRead -= unusedBytes;
}
return (int) bytesRead;
}
@Override
public boolean needAdditionalRecordAfterSplit() {
return !finished && needAdditionalRecord;
}
}
| 4,418 | 34.071429 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This class supports MapReduce jobs that have multiple input paths with
* a different {@link InputFormat} and {@link Mapper} for each path
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleInputs {
public static final String DIR_FORMATS =
"mapreduce.input.multipleinputs.dir.formats";
public static final String DIR_MAPPERS =
"mapreduce.input.multipleinputs.dir.mappers";
/**
* Add a {@link Path} with a custom {@link InputFormat} to the list of
* inputs for the map-reduce job.
*
* @param job The {@link Job}
* @param path {@link Path} to be added to the list of inputs for the job
* @param inputFormatClass {@link InputFormat} class to use for this path
*/
@SuppressWarnings("unchecked")
public static void addInputPath(Job job, Path path,
Class<? extends InputFormat> inputFormatClass) {
String inputFormatMapping = path.toString() + ";"
+ inputFormatClass.getName();
Configuration conf = job.getConfiguration();
String inputFormats = conf.get(DIR_FORMATS);
conf.set(DIR_FORMATS,
inputFormats == null ? inputFormatMapping : inputFormats + ","
+ inputFormatMapping);
job.setInputFormatClass(DelegatingInputFormat.class);
}
/**
* Add a {@link Path} with a custom {@link InputFormat} and
* {@link Mapper} to the list of inputs for the map-reduce job.
*
* @param job The {@link Job}
* @param path {@link Path} to be added to the list of inputs for the job
* @param inputFormatClass {@link InputFormat} class to use for this path
* @param mapperClass {@link Mapper} class to use for this path
*/
@SuppressWarnings("unchecked")
public static void addInputPath(Job job, Path path,
Class<? extends InputFormat> inputFormatClass,
Class<? extends Mapper> mapperClass) {
addInputPath(job, path, inputFormatClass);
Configuration conf = job.getConfiguration();
String mapperMapping = path.toString() + ";" + mapperClass.getName();
String mappers = conf.get(DIR_MAPPERS);
conf.set(DIR_MAPPERS, mappers == null ? mapperMapping
: mappers + "," + mapperMapping);
job.setMapperClass(DelegatingMapper.class);
}
/**
* Retrieves a map of {@link Path}s to the {@link InputFormat} class
* that should be used for them.
*
* @param job The {@link JobContext}
 * @see #addInputPath(Job, Path, Class)
* @return A map of paths to inputformats for the job
*/
@SuppressWarnings("unchecked")
static Map<Path, InputFormat> getInputFormatMap(JobContext job) {
Map<Path, InputFormat> m = new HashMap<Path, InputFormat>();
Configuration conf = job.getConfiguration();
String[] pathMappings = conf.get(DIR_FORMATS).split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
InputFormat inputFormat;
try {
inputFormat = (InputFormat) ReflectionUtils.newInstance(conf
.getClassByName(split[1]), conf);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), inputFormat);
}
return m;
}
/**
* Retrieves a map of {@link Path}s to the {@link Mapper} class that
* should be used for them.
*
* @param job The {@link JobContext}
 * @see #addInputPath(Job, Path, Class, Class)
* @return A map of paths to mappers for the job
*/
@SuppressWarnings("unchecked")
static Map<Path, Class<? extends Mapper>>
getMapperTypeMap(JobContext job) {
Configuration conf = job.getConfiguration();
if (conf.get(DIR_MAPPERS) == null) {
return Collections.emptyMap();
}
Map<Path, Class<? extends Mapper>> m =
new HashMap<Path, Class<? extends Mapper>>();
String[] pathMappings = conf.get(DIR_MAPPERS).split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
Class<? extends Mapper> mapClass;
try {
mapClass =
(Class<? extends Mapper>) conf.getClassByName(split[1]);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), mapClass);
}
return m;
}
}
| 5,579 | 35.953642 | 75 |
java
|
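The two addInputPath overloads in the file above are typically used together as in the hedged sketch below. The paths and mapper classes are invented, and the mapper bodies are placeholders; both branches must emit the same intermediate key/value types.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class MultipleInputsSketch {
  // Hypothetical per-path mappers with placeholder logic.
  public static class KvMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text key, Text value, Context ctx)
        throws IOException, InterruptedException {
      ctx.write(key, value);
    }
  }

  public static class LineMapper extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable offset, Text line, Context ctx)
        throws IOException, InterruptedException {
      ctx.write(new Text("line"), line);
    }
  }

  public static void configure(Job job) {
    MultipleInputs.addInputPath(job, new Path("/data/kv"),
        KeyValueTextInputFormat.class, KvMapper.class);
    MultipleInputs.addInputPath(job, new Path("/data/lines"),
        TextInputFormat.class, LineMapper.class);
    // addInputPath installs DelegatingInputFormat and DelegatingMapper on the
    // job, so no job-level input format or mapper needs to be set here.
  }
}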
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
// Counters used by Task classes
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum FileInputFormatCounter {
BYTES_READ
}
| 1,115 | 37.482759 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FixedLengthInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* FixedLengthInputFormat is an input format used to read input files
* which contain fixed length records. The content of a record need not be
* text. It can be arbitrary binary data. Users must configure the record
* length property by calling:
* FixedLengthInputFormat.setRecordLength(conf, recordLength);<br><br> or
* conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength);
* <br><br>
* @see FixedLengthRecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FixedLengthInputFormat
extends FileInputFormat<LongWritable, BytesWritable> {
public static final String FIXED_RECORD_LENGTH =
"fixedlengthinputformat.record.length";
/**
* Set the length of each record
* @param conf configuration
* @param recordLength the length of a record
*/
public static void setRecordLength(Configuration conf, int recordLength) {
conf.setInt(FIXED_RECORD_LENGTH, recordLength);
}
/**
* Get record length value
* @param conf configuration
* @return the record length, zero means none was set
*/
public static int getRecordLength(Configuration conf) {
return conf.getInt(FIXED_RECORD_LENGTH, 0);
}
@Override
public RecordReader<LongWritable, BytesWritable>
createRecordReader(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
int recordLength = getRecordLength(context.getConfiguration());
if (recordLength <= 0) {
throw new IOException("Fixed record length " + recordLength
+ " is invalid. It should be set to a value greater than zero");
}
return new FixedLengthRecordReader(recordLength);
}
@Override
protected boolean isSplitable(JobContext context, Path file) {
final CompressionCodec codec =
new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
return (null == codec);
}
}
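
/*
 * Illustrative usage sketch (not part of the original source): configuring a
 * job to read fixed-length records. The 100-byte record length is a made-up
 * example value.
 */
class FixedLengthInputFormatUsageSketch {
  static void configure(org.apache.hadoop.mapreduce.Job job) {
    // Every record in the input is exactly 100 bytes of arbitrary binary data.
    FixedLengthInputFormat.setRecordLength(job.getConfiguration(), 100);
    // Keys are byte offsets (LongWritable); values are the raw record bytes
    // (BytesWritable).
    job.setInputFormatClass(FixedLengthInputFormat.class);
  }
}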
| 3,399 | 36.362637 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SplitLineReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SplitLineReader extends org.apache.hadoop.util.LineReader {
public SplitLineReader(InputStream in, byte[] recordDelimiterBytes) {
super(in, recordDelimiterBytes);
}
public SplitLineReader(InputStream in, Configuration conf,
byte[] recordDelimiterBytes) throws IOException {
super(in, conf, recordDelimiterBytes);
}
public boolean needAdditionalRecordAfterSplit() {
return false;
}
}
| 1,562 | 34.522727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
/**
* An {@link InputSplit} that tags another InputSplit with extra data for use
* by {@link DelegatingInputFormat}s and {@link DelegatingMapper}s.
*/
class TaggedInputSplit extends InputSplit implements Configurable, Writable {
private Class<? extends InputSplit> inputSplitClass;
private InputSplit inputSplit;
@SuppressWarnings("unchecked")
private Class<? extends InputFormat> inputFormatClass;
@SuppressWarnings("unchecked")
private Class<? extends Mapper> mapperClass;
private Configuration conf;
public TaggedInputSplit() {
// Default constructor.
}
/**
* Creates a new TaggedInputSplit.
*
* @param inputSplit The InputSplit to be tagged
* @param conf The configuration to use
* @param inputFormatClass The InputFormat class to use for this job
* @param mapperClass The Mapper class to use for this job
*/
@SuppressWarnings("unchecked")
public TaggedInputSplit(InputSplit inputSplit, Configuration conf,
Class<? extends InputFormat> inputFormatClass,
Class<? extends Mapper> mapperClass) {
this.inputSplitClass = inputSplit.getClass();
this.inputSplit = inputSplit;
this.conf = conf;
this.inputFormatClass = inputFormatClass;
this.mapperClass = mapperClass;
}
/**
* Retrieves the original InputSplit.
*
* @return The InputSplit that was tagged
*/
public InputSplit getInputSplit() {
return inputSplit;
}
/**
* Retrieves the InputFormat class to use for this split.
*
* @return The InputFormat class to use
*/
@SuppressWarnings("unchecked")
public Class<? extends InputFormat> getInputFormatClass() {
return inputFormatClass;
}
/**
* Retrieves the Mapper class to use for this split.
*
* @return The Mapper class to use
*/
@SuppressWarnings("unchecked")
public Class<? extends Mapper> getMapperClass() {
return mapperClass;
}
public long getLength() throws IOException, InterruptedException {
return inputSplit.getLength();
}
public String[] getLocations() throws IOException, InterruptedException {
return inputSplit.getLocations();
}
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
inputSplitClass = (Class<? extends InputSplit>) readClass(in);
inputFormatClass = (Class<? extends InputFormat<?, ?>>) readClass(in);
mapperClass = (Class<? extends Mapper<?, ?, ?, ?>>) readClass(in);
inputSplit = (InputSplit) ReflectionUtils
.newInstance(inputSplitClass, conf);
SerializationFactory factory = new SerializationFactory(conf);
Deserializer deserializer = factory.getDeserializer(inputSplitClass);
deserializer.open((DataInputStream)in);
inputSplit = (InputSplit)deserializer.deserialize(inputSplit);
}
private Class<?> readClass(DataInput in) throws IOException {
String className = StringInterner.weakIntern(Text.readString(in));
try {
return conf.getClassByName(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException("readObject can't find class", e);
}
}
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
Text.writeString(out, inputSplitClass.getName());
Text.writeString(out, inputFormatClass.getName());
Text.writeString(out, mapperClass.getName());
SerializationFactory factory = new SerializationFactory(conf);
Serializer serializer =
factory.getSerializer(inputSplitClass);
serializer.open((DataOutputStream)out);
serializer.serialize(inputSplit);
}
public Configuration getConf() {
return conf;
}
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public String toString() {
return inputSplit.toString();
}
}
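
/*
 * Illustrative sketch (not part of the original source, and package-private
 * like the class above): a write()/readFields() round trip showing that a
 * tagged split carries its delegate split plus the InputFormat and Mapper
 * classes. The path and lengths are made-up example values.
 */
class TaggedInputSplitRoundTripSketch {
  static TaggedInputSplit roundTrip(Configuration conf) throws IOException {
    InputSplit delegate = new FileSplit(
        new org.apache.hadoop.fs.Path("/example/input"), 0L, 128L,
        new String[0]);
    TaggedInputSplit original = new TaggedInputSplit(delegate, conf,
        TextInputFormat.class, Mapper.class);
    // Serialize with write(), then rebuild a copy through readFields().
    org.apache.hadoop.io.DataOutputBuffer out =
        new org.apache.hadoop.io.DataOutputBuffer();
    original.write(out);
    org.apache.hadoop.io.DataInputBuffer in =
        new org.apache.hadoop.io.DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    TaggedInputSplit copy = new TaggedInputSplit();
    copy.setConf(conf); // readFields needs the conf to resolve class names
    copy.readFields(in);
    return copy;
  }
}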
| 5,347 | 31.216867 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.SplitCompressionInputStream;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
/**
 * Treats keys as byte offsets into the file and values as lines of text.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce", "Pig"})
@InterfaceStability.Evolving
public class LineRecordReader extends RecordReader<LongWritable, Text> {
private static final Log LOG = LogFactory.getLog(LineRecordReader.class);
public static final String MAX_LINE_LENGTH =
"mapreduce.input.linerecordreader.line.maxlength";
private long start;
private long pos;
private long end;
private SplitLineReader in;
private FSDataInputStream fileIn;
private Seekable filePosition;
private int maxLineLength;
private LongWritable key;
private Text value;
private boolean isCompressedInput;
private Decompressor decompressor;
private byte[] recordDelimiterBytes;
public LineRecordReader() {
}
public LineRecordReader(byte[] recordDelimiter) {
this.recordDelimiterBytes = recordDelimiter;
}
public void initialize(InputSplit genericSplit,
TaskAttemptContext context) throws IOException {
FileSplit split = (FileSplit) genericSplit;
Configuration job = context.getConfiguration();
this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
start = split.getStart();
end = start + split.getLength();
final Path file = split.getPath();
// open the file and seek to the start of the split
final FileSystem fs = file.getFileSystem(job);
fileIn = fs.open(file);
CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
if (null!=codec) {
isCompressedInput = true;
decompressor = CodecPool.getDecompressor(codec);
if (codec instanceof SplittableCompressionCodec) {
final SplitCompressionInputStream cIn =
((SplittableCompressionCodec)codec).createInputStream(
fileIn, decompressor, start, end,
SplittableCompressionCodec.READ_MODE.BYBLOCK);
in = new CompressedSplitLineReader(cIn, job,
this.recordDelimiterBytes);
start = cIn.getAdjustedStart();
end = cIn.getAdjustedEnd();
filePosition = cIn;
} else {
if (start != 0) {
// So we have a split that is only part of a file stored using
// a Compression codec that cannot be split.
throw new IOException("Cannot seek in " +
codec.getClass().getSimpleName() + " compressed stream");
}
in = new SplitLineReader(codec.createInputStream(fileIn,
decompressor), job, this.recordDelimiterBytes);
filePosition = fileIn;
}
} else {
fileIn.seek(start);
in = new UncompressedSplitLineReader(
fileIn, job, this.recordDelimiterBytes, split.getLength());
filePosition = fileIn;
}
    // If this is not the first split, we always throw away the first record
    // because we always (except for the last split) read one extra line in
    // the next() method.
if (start != 0) {
start += in.readLine(new Text(), 0, maxBytesToConsume(start));
}
this.pos = start;
}
private int maxBytesToConsume(long pos) {
return isCompressedInput
? Integer.MAX_VALUE
: (int) Math.max(Math.min(Integer.MAX_VALUE, end - pos), maxLineLength);
}
private long getFilePosition() throws IOException {
long retVal;
if (isCompressedInput && null != filePosition) {
retVal = filePosition.getPos();
} else {
retVal = pos;
}
return retVal;
}
private int skipUtfByteOrderMark() throws IOException {
// Strip BOM(Byte Order Mark)
    // Text only supports UTF-8, so we only need to check for the UTF-8 BOM
    // (0xEF,0xBB,0xBF) at the start of the text stream.
int newMaxLineLength = (int) Math.min(3L + (long) maxLineLength,
Integer.MAX_VALUE);
int newSize = in.readLine(value, newMaxLineLength, maxBytesToConsume(pos));
    // Even though we read 3 extra bytes for the first line,
    // we won't alter existing behavior (no backwards incompatibility issue).
// Because the newSize is less than maxLineLength and
// the number of bytes copied to Text is always no more than newSize.
// If the return size from readLine is not less than maxLineLength,
// we will discard the current line and read the next line.
pos += newSize;
int textLength = value.getLength();
byte[] textBytes = value.getBytes();
if ((textLength >= 3) && (textBytes[0] == (byte)0xEF) &&
(textBytes[1] == (byte)0xBB) && (textBytes[2] == (byte)0xBF)) {
// find UTF-8 BOM, strip it.
LOG.info("Found UTF-8 BOM and skipped it");
textLength -= 3;
newSize -= 3;
if (textLength > 0) {
// It may work to use the same buffer and not do the copyBytes
textBytes = value.copyBytes();
value.set(textBytes, 3, textLength);
} else {
value.clear();
}
}
return newSize;
}
public boolean nextKeyValue() throws IOException {
if (key == null) {
key = new LongWritable();
}
key.set(pos);
if (value == null) {
value = new Text();
}
int newSize = 0;
// We always read one extra line, which lies outside the upper
// split limit i.e. (end - 1)
while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
if (pos == 0) {
newSize = skipUtfByteOrderMark();
} else {
newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
pos += newSize;
}
if ((newSize == 0) || (newSize < maxLineLength)) {
break;
}
// line too long. try again
LOG.info("Skipped line of size " + newSize + " at pos " +
(pos - newSize));
}
if (newSize == 0) {
key = null;
value = null;
return false;
} else {
return true;
}
}
@Override
public LongWritable getCurrentKey() {
return key;
}
@Override
public Text getCurrentValue() {
return value;
}
/**
* Get the progress within the split
*/
public float getProgress() throws IOException {
if (start == end) {
return 0.0f;
} else {
return Math.min(1.0f, (getFilePosition() - start) / (float)(end - start));
}
}
public synchronized void close() throws IOException {
try {
if (in != null) {
in.close();
}
} finally {
if (decompressor != null) {
CodecPool.returnDecompressor(decompressor);
decompressor = null;
}
}
}
}
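
/*
 * Illustrative sketch (not part of the original source): capping the line
 * length for jobs whose input format uses this reader (for example
 * TextInputFormat). The 1 MB limit is a made-up example value; lines at or
 * above the limit are skipped rather than returned.
 */
class LineRecordReaderConfigSketch {
  static void capLineLength(Configuration conf) {
    // Lines longer than this many bytes are logged and discarded by the
    // reader instead of being emitted as values.
    conf.setInt(LineRecordReader.MAX_LINE_LENGTH, 1024 * 1024);
  }
}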
| 8,313 | 32.524194 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/DelegatingMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * A {@link Mapper} that delegates the processing of each input split to the
 * mapper configured for that split's input path.
*
* @see MultipleInputs#addInputPath(Job, Path, Class, Class)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegatingMapper<K1, V1, K2, V2> extends Mapper<K1, V1, K2, V2> {
private Mapper<K1, V1, K2, V2> mapper;
@SuppressWarnings("unchecked")
protected void setup(Context context)
throws IOException, InterruptedException {
// Find the Mapper from the TaggedInputSplit.
TaggedInputSplit inputSplit = (TaggedInputSplit) context.getInputSplit();
mapper = (Mapper<K1, V1, K2, V2>) ReflectionUtils.newInstance(inputSplit
.getMapperClass(), context.getConfiguration());
}
@SuppressWarnings("unchecked")
public void run(Context context)
throws IOException, InterruptedException {
setup(context);
mapper.run(context);
cleanup(context);
}
}
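
/*
 * Illustrative sketch (not part of the original source): wiring two input
 * paths to two different mappers via MultipleInputs, which is what causes
 * DelegatingMapper and TaggedInputSplit to be used at runtime. The paths are
 * made-up example values and the mapper classes are supplied by the caller.
 */
class DelegatingMapperUsageSketch {
  static void configure(org.apache.hadoop.mapreduce.Job job,
      Class<? extends Mapper> textMapper,
      Class<? extends Mapper> seqMapper) {
    MultipleInputs.addInputPath(job, new Path("/data/text"),
        TextInputFormat.class, textMapper);
    MultipleInputs.addInputPath(job, new Path("/data/seq"),
        SequenceFileInputFormat.class, seqMapper);
  }
}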
| 2,047 | 33.711864 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import org.apache.hadoop.mapred.SplitLocationInfo;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
/** A section of an input file. Returned by {@link
* InputFormat#getSplits(JobContext)} and passed to
* {@link InputFormat#createRecordReader(InputSplit,TaskAttemptContext)}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileSplit extends InputSplit implements Writable {
private Path file;
private long start;
private long length;
private String[] hosts;
private SplitLocationInfo[] hostInfos;
public FileSplit() {}
/** Constructs a split with host information
*
* @param file the file name
* @param start the position of the first byte in the file to process
* @param length the number of bytes in the file to process
* @param hosts the list of hosts containing the block, possibly null
*/
public FileSplit(Path file, long start, long length, String[] hosts) {
this.file = file;
this.start = start;
this.length = length;
this.hosts = hosts;
}
/** Constructs a split with host and cached-blocks information
*
* @param file the file name
* @param start the position of the first byte in the file to process
* @param length the number of bytes in the file to process
* @param hosts the list of hosts containing the block
* @param inMemoryHosts the list of hosts containing the block in memory
*/
public FileSplit(Path file, long start, long length, String[] hosts,
String[] inMemoryHosts) {
this(file, start, length, hosts);
hostInfos = new SplitLocationInfo[hosts.length];
for (int i = 0; i < hosts.length; i++) {
// because N will be tiny, scanning is probably faster than a HashSet
boolean inMemory = false;
for (String inMemoryHost : inMemoryHosts) {
if (inMemoryHost.equals(hosts[i])) {
inMemory = true;
break;
}
}
hostInfos[i] = new SplitLocationInfo(hosts[i], inMemory);
}
}
/** The file containing this split's data. */
public Path getPath() { return file; }
/** The position of the first byte in the file to process. */
public long getStart() { return start; }
/** The number of bytes in the file to process. */
@Override
public long getLength() { return length; }
@Override
public String toString() { return file + ":" + start + "+" + length; }
////////////////////////////////////////////
// Writable methods
////////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, file.toString());
out.writeLong(start);
out.writeLong(length);
}
@Override
public void readFields(DataInput in) throws IOException {
file = new Path(Text.readString(in));
start = in.readLong();
length = in.readLong();
hosts = null;
}
@Override
public String[] getLocations() throws IOException {
if (this.hosts == null) {
return new String[]{};
} else {
return this.hosts;
}
}
@Override
@Evolving
public SplitLocationInfo[] getLocationInfo() throws IOException {
return hostInfos;
}
}
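
/*
 * Illustrative sketch (not part of the original source): building a split
 * that records which replicas are cached in memory. The host names, path,
 * and 128 MB length are made-up example values.
 */
class FileSplitUsageSketch {
  static FileSplit exampleSplit() {
    String[] hosts = {"host1", "host2", "host3"};
    String[] inMemoryHosts = {"host2"}; // host2 holds an in-memory replica
    return new FileSplit(new Path("/example/part-00000"), 0L,
        128L * 1024 * 1024, hosts, inMemoryHosts);
  }
}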
| 4,467 | 31.852941 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
/**
* A sub-collection of input files.
*
 * Unlike {@link FileSplit}, the CombineFileSplit class does not represent
 * a split of a single file, but a split of the input files into smaller sets.
 * A split may contain blocks from different files, but all
 * the blocks in the same split are likely to be local to some rack. <br>
 * CombineFileSplit can be used to implement {@link RecordReader}s
 * that read one record per file.
*
* @see FileSplit
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CombineFileSplit extends InputSplit implements Writable {
private Path[] paths;
private long[] startoffset;
private long[] lengths;
private String[] locations;
private long totLength;
/**
* default constructor
*/
public CombineFileSplit() {}
public CombineFileSplit(Path[] files, long[] start,
long[] lengths, String[] locations) {
initSplit(files, start, lengths, locations);
}
public CombineFileSplit(Path[] files, long[] lengths) {
long[] startoffset = new long[files.length];
for (int i = 0; i < startoffset.length; i++) {
startoffset[i] = 0;
}
String[] locations = new String[files.length];
for (int i = 0; i < locations.length; i++) {
locations[i] = "";
}
initSplit(files, startoffset, lengths, locations);
}
private void initSplit(Path[] files, long[] start,
long[] lengths, String[] locations) {
this.startoffset = start;
this.lengths = lengths;
this.paths = files;
this.totLength = 0;
this.locations = locations;
for(long length : lengths) {
totLength += length;
}
}
/**
* Copy constructor
*/
public CombineFileSplit(CombineFileSplit old) throws IOException {
this(old.getPaths(), old.getStartOffsets(),
old.getLengths(), old.getLocations());
}
public long getLength() {
return totLength;
}
/** Returns an array containing the start offsets of the files in the split*/
public long[] getStartOffsets() {
return startoffset;
}
/** Returns an array containing the lengths of the files in the split*/
public long[] getLengths() {
return lengths;
}
/** Returns the start offset of the i<sup>th</sup> Path */
public long getOffset(int i) {
return startoffset[i];
}
/** Returns the length of the i<sup>th</sup> Path */
public long getLength(int i) {
return lengths[i];
}
/** Returns the number of Paths in the split */
public int getNumPaths() {
return paths.length;
}
/** Returns the i<sup>th</sup> Path */
public Path getPath(int i) {
return paths[i];
}
/** Returns all the Paths in the split */
public Path[] getPaths() {
return paths;
}
/** Returns all the Paths where this input-split resides */
public String[] getLocations() throws IOException {
return locations;
}
public void readFields(DataInput in) throws IOException {
totLength = in.readLong();
int arrLength = in.readInt();
lengths = new long[arrLength];
for(int i=0; i<arrLength;i++) {
lengths[i] = in.readLong();
}
int filesLength = in.readInt();
paths = new Path[filesLength];
for(int i=0; i<filesLength;i++) {
paths[i] = new Path(Text.readString(in));
}
arrLength = in.readInt();
startoffset = new long[arrLength];
for(int i=0; i<arrLength;i++) {
startoffset[i] = in.readLong();
}
}
public void write(DataOutput out) throws IOException {
out.writeLong(totLength);
out.writeInt(lengths.length);
for(long length : lengths) {
out.writeLong(length);
}
out.writeInt(paths.length);
for(Path p : paths) {
Text.writeString(out, p.toString());
}
out.writeInt(startoffset.length);
for(long length : startoffset) {
out.writeLong(length);
}
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < paths.length; i++) {
if (i == 0 ) {
sb.append("Paths:");
}
sb.append(paths[i].toUri().getPath() + ":" + startoffset[i] +
"+" + lengths[i]);
if (i < paths.length -1) {
sb.append(",");
}
}
    if (locations != null) {
      StringBuffer locsb = new StringBuffer();
      for (int i = 0; i < locations.length; i++) {
        locsb.append(locations[i] + ":");
      }
      sb.append(" Locations:" + locsb.toString() + "; ");
    }
return sb.toString();
}
}
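
/*
 * Illustrative sketch (not part of the original source): a split that packs
 * chunks of two small files together. The paths and lengths are made-up
 * example values; with this constructor offsets default to 0 and locations
 * to the empty string.
 */
class CombineFileSplitUsageSketch {
  static CombineFileSplit exampleSplit() {
    Path[] files = {new Path("/logs/a.log"), new Path("/logs/b.log")};
    long[] lengths = {4096L, 8192L};
    CombineFileSplit split = new CombineFileSplit(files, lengths);
    // getLength() is the sum of the per-file lengths: 12288 bytes here.
    assert split.getLength() == 12288L;
    return split;
  }
}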
| 5,825 | 27.985075 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.HashSet;
import java.util.List;
import java.util.HashMap;
import java.util.Set;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.NetworkTopology;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
/**
 * An abstract {@link InputFormat} that returns {@link CombineFileSplit}s from
 * its {@link InputFormat#getSplits(JobContext)} method.
*
* Splits are constructed from the files under the input paths.
* A split cannot have files from different pools.
* Each split returned may contain blocks from different files.
* If a maxSplitSize is specified, then blocks on the same node are
* combined to form a single split. Blocks that are left over are
* then combined with other blocks in the same rack.
* If maxSplitSize is not specified, then blocks from the same rack
* are combined in a single split; no attempt is made to create
* node-local splits.
* If the maxSplitSize is equal to the block size, then this class
* is similar to the default splitting behavior in Hadoop: each
* block is a locally processed split.
* Subclasses implement
* {@link InputFormat#createRecordReader(InputSplit, TaskAttemptContext)}
 * to construct <code>RecordReader</code>s for
 * <code>CombineFileSplit</code>s.
*
* @see CombineFileSplit
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CombineFileInputFormat<K, V>
extends FileInputFormat<K, V> {
private static final Log LOG = LogFactory.getLog(CombineFileInputFormat.class);
public static final String SPLIT_MINSIZE_PERNODE =
"mapreduce.input.fileinputformat.split.minsize.per.node";
public static final String SPLIT_MINSIZE_PERRACK =
"mapreduce.input.fileinputformat.split.minsize.per.rack";
// ability to limit the size of a single split
private long maxSplitSize = 0;
private long minSplitSizeNode = 0;
private long minSplitSizeRack = 0;
// A pool of input paths filters. A split cannot have blocks from files
// across multiple pools.
private ArrayList<MultiPathFilter> pools = new ArrayList<MultiPathFilter>();
// mapping from a rack name to the set of Nodes in the rack
private HashMap<String, Set<String>> rackToNodes =
new HashMap<String, Set<String>>();
/**
* Specify the maximum size (in bytes) of each split. Each split is
* approximately equal to the specified size.
*/
protected void setMaxSplitSize(long maxSplitSize) {
this.maxSplitSize = maxSplitSize;
}
/**
* Specify the minimum size (in bytes) of each split per node.
* This applies to data that is left over after combining data on a single
* node into splits that are of maximum size specified by maxSplitSize.
* This leftover data will be combined into its own split if its size
* exceeds minSplitSizeNode.
*/
protected void setMinSplitSizeNode(long minSplitSizeNode) {
this.minSplitSizeNode = minSplitSizeNode;
}
/**
* Specify the minimum size (in bytes) of each split per rack.
* This applies to data that is left over after combining data on a single
* rack into splits that are of maximum size specified by maxSplitSize.
* This leftover data will be combined into its own split if its size
* exceeds minSplitSizeRack.
*/
protected void setMinSplitSizeRack(long minSplitSizeRack) {
this.minSplitSizeRack = minSplitSizeRack;
}
/**
* Create a new pool and add the filters to it.
* A split cannot have files from different pools.
*/
protected void createPool(List<PathFilter> filters) {
pools.add(new MultiPathFilter(filters));
}
/**
* Create a new pool and add the filters to it.
* A pathname can satisfy any one of the specified filters.
* A split cannot have files from different pools.
*/
protected void createPool(PathFilter... filters) {
MultiPathFilter multi = new MultiPathFilter();
for (PathFilter f: filters) {
multi.add(f);
}
pools.add(multi);
}
@Override
protected boolean isSplitable(JobContext context, Path file) {
final CompressionCodec codec =
new CompressionCodecFactory(context.getConfiguration()).getCodec(file);
if (null == codec) {
return true;
}
return codec instanceof SplittableCompressionCodec;
}
/**
* default constructor
*/
public CombineFileInputFormat() {
}
@Override
public List<InputSplit> getSplits(JobContext job)
throws IOException {
long minSizeNode = 0;
long minSizeRack = 0;
long maxSize = 0;
Configuration conf = job.getConfiguration();
    // the values specified by the setXxxSplitSize() methods take precedence
    // over the values that might have been specified in the config
if (minSplitSizeNode != 0) {
minSizeNode = minSplitSizeNode;
} else {
minSizeNode = conf.getLong(SPLIT_MINSIZE_PERNODE, 0);
}
if (minSplitSizeRack != 0) {
minSizeRack = minSplitSizeRack;
} else {
minSizeRack = conf.getLong(SPLIT_MINSIZE_PERRACK, 0);
}
if (maxSplitSize != 0) {
maxSize = maxSplitSize;
} else {
maxSize = conf.getLong("mapreduce.input.fileinputformat.split.maxsize", 0);
// If maxSize is not configured, a single split will be generated per
// node.
}
if (minSizeNode != 0 && maxSize != 0 && minSizeNode > maxSize) {
throw new IOException("Minimum split size pernode " + minSizeNode +
" cannot be larger than maximum split size " +
maxSize);
}
if (minSizeRack != 0 && maxSize != 0 && minSizeRack > maxSize) {
throw new IOException("Minimum split size per rack " + minSizeRack +
" cannot be larger than maximum split size " +
maxSize);
}
if (minSizeRack != 0 && minSizeNode > minSizeRack) {
throw new IOException("Minimum split size per node " + minSizeNode +
" cannot be larger than minimum split " +
"size per rack " + minSizeRack);
}
// all the files in input set
List<FileStatus> stats = listStatus(job);
List<InputSplit> splits = new ArrayList<InputSplit>();
if (stats.size() == 0) {
return splits;
}
// In one single iteration, process all the paths in a single pool.
// Processing one pool at a time ensures that a split contains paths
// from a single pool only.
for (MultiPathFilter onepool : pools) {
ArrayList<FileStatus> myPaths = new ArrayList<FileStatus>();
// pick one input path. If it matches all the filters in a pool,
// add it to the output set
for (Iterator<FileStatus> iter = stats.iterator(); iter.hasNext();) {
FileStatus p = iter.next();
if (onepool.accept(p.getPath())) {
myPaths.add(p); // add it to my output set
iter.remove();
}
}
// create splits for all files in this pool.
getMoreSplits(job, myPaths, maxSize, minSizeNode, minSizeRack, splits);
}
// create splits for all files that are not in any pool.
getMoreSplits(job, stats, maxSize, minSizeNode, minSizeRack, splits);
// free up rackToNodes map
rackToNodes.clear();
return splits;
}
/**
* Return all the splits in the specified set of paths
*/
private void getMoreSplits(JobContext job, List<FileStatus> stats,
long maxSize, long minSizeNode, long minSizeRack,
List<InputSplit> splits)
throws IOException {
Configuration conf = job.getConfiguration();
// all blocks for all the files in input set
OneFileInfo[] files;
// mapping from a rack name to the list of blocks it has
HashMap<String, List<OneBlockInfo>> rackToBlocks =
new HashMap<String, List<OneBlockInfo>>();
// mapping from a block to the nodes on which it has replicas
HashMap<OneBlockInfo, String[]> blockToNodes =
new HashMap<OneBlockInfo, String[]>();
// mapping from a node to the list of blocks that it contains
HashMap<String, Set<OneBlockInfo>> nodeToBlocks =
new HashMap<String, Set<OneBlockInfo>>();
files = new OneFileInfo[stats.size()];
if (stats.size() == 0) {
return;
}
// populate all the blocks for all files
long totLength = 0;
int i = 0;
    for (FileStatus stat : stats) {
      files[i] = new OneFileInfo(stat, conf, isSplitable(job, stat.getPath()),
                                 rackToBlocks, blockToNodes, nodeToBlocks,
                                 rackToNodes, maxSize);
      totLength += files[i].getLength();
      i++; // advance the index so each file gets its own OneFileInfo entry
    }
createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,
maxSize, minSizeNode, minSizeRack, splits);
}
/**
* Process all the nodes and create splits that are local to a node.
* Generate one split per node iteration, and walk over nodes multiple times
* to distribute the splits across nodes.
* <p>
   * Note: The order in which nodes are processed is undetermined because
   * nodeToBlocks is a {@link java.util.HashMap} and the order of its entries
   * is undetermined.
* @param nodeToBlocks Mapping from a node to the list of blocks that
* it contains.
* @param blockToNodes Mapping from a block to the nodes on which
* it has replicas.
* @param rackToBlocks Mapping from a rack name to the list of blocks it has.
* @param totLength Total length of the input files.
   * @param maxSize Max size of each split.
   *                If set to 0, the per-split size limit is disabled.
* @param minSizeNode Minimum split size per node.
* @param minSizeRack Minimum split size per rack.
* @param splits New splits created by this method are added to the list.
*/
@VisibleForTesting
void createSplits(Map<String, Set<OneBlockInfo>> nodeToBlocks,
Map<OneBlockInfo, String[]> blockToNodes,
Map<String, List<OneBlockInfo>> rackToBlocks,
long totLength,
long maxSize,
long minSizeNode,
long minSizeRack,
List<InputSplit> splits
) {
ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
long curSplitSize = 0;
int totalNodes = nodeToBlocks.size();
long totalLength = totLength;
Multiset<String> splitsPerNode = HashMultiset.create();
Set<String> completedNodes = new HashSet<String>();
while(true) {
for (Iterator<Map.Entry<String, Set<OneBlockInfo>>> iter = nodeToBlocks
.entrySet().iterator(); iter.hasNext();) {
Map.Entry<String, Set<OneBlockInfo>> one = iter.next();
String node = one.getKey();
// Skip the node if it has previously been marked as completed.
if (completedNodes.contains(node)) {
continue;
}
Set<OneBlockInfo> blocksInCurrentNode = one.getValue();
// for each block, copy it into validBlocks. Delete it from
// blockToNodes so that the same block does not appear in
// two different splits.
Iterator<OneBlockInfo> oneBlockIter = blocksInCurrentNode.iterator();
while (oneBlockIter.hasNext()) {
OneBlockInfo oneblock = oneBlockIter.next();
// Remove all blocks which may already have been assigned to other
// splits.
if(!blockToNodes.containsKey(oneblock)) {
oneBlockIter.remove();
continue;
}
validBlocks.add(oneblock);
blockToNodes.remove(oneblock);
curSplitSize += oneblock.length;
// if the accumulated split size exceeds the maximum, then
// create this split.
if (maxSize != 0 && curSplitSize >= maxSize) {
// create an input split and add it to the splits array
addCreatedSplit(splits, Collections.singleton(node), validBlocks);
totalLength -= curSplitSize;
curSplitSize = 0;
splitsPerNode.add(node);
// Remove entries from blocksInNode so that we don't walk these
// again.
blocksInCurrentNode.removeAll(validBlocks);
validBlocks.clear();
// Done creating a single split for this node. Move on to the next
// node so that splits are distributed across nodes.
break;
}
}
if (validBlocks.size() != 0) {
// This implies that the last few blocks (or all in case maxSize=0)
// were not part of a split. The node is complete.
// if there were any blocks left over and their combined size is
// larger than minSplitNode, then combine them into one split.
// Otherwise add them back to the unprocessed pool. It is likely
// that they will be combined with other blocks from the
// same rack later on.
// This condition also kicks in when max split size is not set. All
// blocks on a node will be grouped together into a single split.
if (minSizeNode != 0 && curSplitSize >= minSizeNode
&& splitsPerNode.count(node) == 0) {
// haven't created any split on this machine. so its ok to add a
// smaller one for parallelism. Otherwise group it in the rack for
// balanced size create an input split and add it to the splits
// array
addCreatedSplit(splits, Collections.singleton(node), validBlocks);
totalLength -= curSplitSize;
splitsPerNode.add(node);
// Remove entries from blocksInNode so that we don't walk this again.
blocksInCurrentNode.removeAll(validBlocks);
// The node is done. This was the last set of blocks for this node.
} else {
// Put the unplaced blocks back into the pool for later rack-allocation.
for (OneBlockInfo oneblock : validBlocks) {
blockToNodes.put(oneblock, oneblock.hosts);
}
}
validBlocks.clear();
curSplitSize = 0;
completedNodes.add(node);
} else { // No in-flight blocks.
if (blocksInCurrentNode.size() == 0) {
// Node is done. All blocks were fit into node-local splits.
completedNodes.add(node);
} // else Run through the node again.
}
}
// Check if node-local assignments are complete.
if (completedNodes.size() == totalNodes || totalLength == 0) {
// All nodes have been walked over and marked as completed or all blocks
        // have been assigned. The rest should be handled via rack-local assignment.
        LOG.info("Terminated node allocation with: completedNodes: "
+ completedNodes.size() + ", size left: " + totalLength);
break;
}
}
// if blocks in a rack are below the specified minimum size, then keep them
// in 'overflow'. After the processing of all racks is complete, these
// overflow blocks will be combined into splits.
ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
Set<String> racks = new HashSet<String>();
// Process all racks over and over again until there is no more work to do.
while (blockToNodes.size() > 0) {
// Create one split for this rack before moving over to the next rack.
// Come back to this rack after creating a single split for each of the
// remaining racks.
// Process one rack location at a time, Combine all possible blocks that
// reside on this rack as one split. (constrained by minimum and maximum
// split size).
// iterate over all racks
for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter =
rackToBlocks.entrySet().iterator(); iter.hasNext();) {
Map.Entry<String, List<OneBlockInfo>> one = iter.next();
racks.add(one.getKey());
List<OneBlockInfo> blocks = one.getValue();
// for each block, copy it into validBlocks. Delete it from
// blockToNodes so that the same block does not appear in
// two different splits.
boolean createdSplit = false;
for (OneBlockInfo oneblock : blocks) {
if (blockToNodes.containsKey(oneblock)) {
validBlocks.add(oneblock);
blockToNodes.remove(oneblock);
curSplitSize += oneblock.length;
// if the accumulated split size exceeds the maximum, then
// create this split.
if (maxSize != 0 && curSplitSize >= maxSize) {
// create an input split and add it to the splits array
addCreatedSplit(splits, getHosts(racks), validBlocks);
createdSplit = true;
break;
}
}
}
// if we created a split, then just go to the next rack
if (createdSplit) {
curSplitSize = 0;
validBlocks.clear();
racks.clear();
continue;
}
if (!validBlocks.isEmpty()) {
if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
// if there is a minimum size specified, then create a single split
// otherwise, store these blocks into overflow data structure
addCreatedSplit(splits, getHosts(racks), validBlocks);
} else {
// There were a few blocks in this rack that
// remained to be processed. Keep them in 'overflow' block list.
// These will be combined later.
overflowBlocks.addAll(validBlocks);
}
}
curSplitSize = 0;
validBlocks.clear();
racks.clear();
}
}
assert blockToNodes.isEmpty();
assert curSplitSize == 0;
assert validBlocks.isEmpty();
assert racks.isEmpty();
// Process all overflow blocks
for (OneBlockInfo oneblock : overflowBlocks) {
validBlocks.add(oneblock);
curSplitSize += oneblock.length;
      // This might cause an existing rack location to be re-added,
// but it should be ok.
for (int i = 0; i < oneblock.racks.length; i++) {
racks.add(oneblock.racks[i]);
}
// if the accumulated split size exceeds the maximum, then
// create this split.
if (maxSize != 0 && curSplitSize >= maxSize) {
// create an input split and add it to the splits array
addCreatedSplit(splits, getHosts(racks), validBlocks);
curSplitSize = 0;
validBlocks.clear();
racks.clear();
}
}
// Process any remaining blocks, if any.
if (!validBlocks.isEmpty()) {
addCreatedSplit(splits, getHosts(racks), validBlocks);
}
}
/**
   * Create a single split from the list of blocks specified in validBlocks.
   * Add this new split into splitList.
*/
private void addCreatedSplit(List<InputSplit> splitList,
Collection<String> locations,
ArrayList<OneBlockInfo> validBlocks) {
// create an input split
Path[] fl = new Path[validBlocks.size()];
long[] offset = new long[validBlocks.size()];
long[] length = new long[validBlocks.size()];
for (int i = 0; i < validBlocks.size(); i++) {
fl[i] = validBlocks.get(i).onepath;
offset[i] = validBlocks.get(i).offset;
length[i] = validBlocks.get(i).length;
}
// add this split to the list that is returned
CombineFileSplit thissplit = new CombineFileSplit(fl, offset,
length, locations.toArray(new String[0]));
splitList.add(thissplit);
}
/**
   * Subclasses must implement this to construct a {@link RecordReader}
   * for a {@link CombineFileSplit}.
*/
public abstract RecordReader<K, V> createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException;
/**
* information about one file from the File System
*/
@VisibleForTesting
static class OneFileInfo {
private long fileSize; // size of the file
private OneBlockInfo[] blocks; // all blocks in this file
OneFileInfo(FileStatus stat, Configuration conf,
boolean isSplitable,
HashMap<String, List<OneBlockInfo>> rackToBlocks,
HashMap<OneBlockInfo, String[]> blockToNodes,
HashMap<String, Set<OneBlockInfo>> nodeToBlocks,
HashMap<String, Set<String>> rackToNodes,
long maxSize)
throws IOException {
this.fileSize = 0;
// get block locations from file system
BlockLocation[] locations;
if (stat instanceof LocatedFileStatus) {
locations = ((LocatedFileStatus) stat).getBlockLocations();
} else {
FileSystem fs = stat.getPath().getFileSystem(conf);
locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
}
// create a list of all block and their locations
if (locations == null) {
blocks = new OneBlockInfo[0];
} else {
if(locations.length == 0 && !stat.isDirectory()) {
locations = new BlockLocation[] { new BlockLocation() };
}
if (!isSplitable) {
// if the file is not splitable, just create the one block with
// full file length
blocks = new OneBlockInfo[1];
fileSize = stat.getLen();
blocks[0] = new OneBlockInfo(stat.getPath(), 0, fileSize,
locations[0].getHosts(), locations[0].getTopologyPaths());
} else {
ArrayList<OneBlockInfo> blocksList = new ArrayList<OneBlockInfo>(
locations.length);
for (int i = 0; i < locations.length; i++) {
fileSize += locations[i].getLength();
// each split can be a maximum of maxSize
long left = locations[i].getLength();
long myOffset = locations[i].getOffset();
long myLength = 0;
do {
if (maxSize == 0) {
myLength = left;
} else {
if (left > maxSize && left < 2 * maxSize) {
// if remainder is between max and 2*max - then
// instead of creating splits of size max, left-max we
// create splits of size left/2 and left/2. This is
// a heuristic to avoid creating really really small
// splits.
myLength = left / 2;
} else {
myLength = Math.min(maxSize, left);
}
}
OneBlockInfo oneblock = new OneBlockInfo(stat.getPath(),
myOffset, myLength, locations[i].getHosts(),
locations[i].getTopologyPaths());
left -= myLength;
myOffset += myLength;
blocksList.add(oneblock);
} while (left > 0);
}
blocks = blocksList.toArray(new OneBlockInfo[blocksList.size()]);
}
populateBlockInfo(blocks, rackToBlocks, blockToNodes,
nodeToBlocks, rackToNodes);
}
}
@VisibleForTesting
static void populateBlockInfo(OneBlockInfo[] blocks,
Map<String, List<OneBlockInfo>> rackToBlocks,
Map<OneBlockInfo, String[]> blockToNodes,
Map<String, Set<OneBlockInfo>> nodeToBlocks,
Map<String, Set<String>> rackToNodes) {
for (OneBlockInfo oneblock : blocks) {
// add this block to the block --> node locations map
blockToNodes.put(oneblock, oneblock.hosts);
// For blocks that do not have host/rack information,
// assign to default rack.
String[] racks = null;
if (oneblock.hosts.length == 0) {
racks = new String[]{NetworkTopology.DEFAULT_RACK};
} else {
racks = oneblock.racks;
}
// add this block to the rack --> block map
for (int j = 0; j < racks.length; j++) {
String rack = racks[j];
List<OneBlockInfo> blklist = rackToBlocks.get(rack);
if (blklist == null) {
blklist = new ArrayList<OneBlockInfo>();
rackToBlocks.put(rack, blklist);
}
blklist.add(oneblock);
if (!racks[j].equals(NetworkTopology.DEFAULT_RACK)) {
// Add this host to rackToNodes map
addHostToRack(rackToNodes, racks[j], oneblock.hosts[j]);
}
}
// add this block to the node --> block map
for (int j = 0; j < oneblock.hosts.length; j++) {
String node = oneblock.hosts[j];
Set<OneBlockInfo> blklist = nodeToBlocks.get(node);
if (blklist == null) {
blklist = new LinkedHashSet<OneBlockInfo>();
nodeToBlocks.put(node, blklist);
}
blklist.add(oneblock);
}
}
}
long getLength() {
return fileSize;
}
OneBlockInfo[] getBlocks() {
return blocks;
}
}
/**
* information about one block from the File System
*/
@VisibleForTesting
static class OneBlockInfo {
Path onepath; // name of this file
long offset; // offset in file
long length; // length of this block
String[] hosts; // nodes on which this block resides
String[] racks; // network topology of hosts
OneBlockInfo(Path path, long offset, long len,
String[] hosts, String[] topologyPaths) {
this.onepath = path;
this.offset = offset;
this.hosts = hosts;
this.length = len;
assert (hosts.length == topologyPaths.length ||
topologyPaths.length == 0);
// if the file system does not have any rack information, then
// use dummy rack location.
if (topologyPaths.length == 0) {
topologyPaths = new String[hosts.length];
for (int i = 0; i < topologyPaths.length; i++) {
topologyPaths[i] = (new NodeBase(hosts[i],
NetworkTopology.DEFAULT_RACK)).toString();
}
}
// The topology paths have the host name included as the last
// component. Strip it.
this.racks = new String[topologyPaths.length];
for (int i = 0; i < topologyPaths.length; i++) {
this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
}
}
}
protected BlockLocation[] getFileBlockLocations(
FileSystem fs, FileStatus stat) throws IOException {
if (stat instanceof LocatedFileStatus) {
return ((LocatedFileStatus) stat).getBlockLocations();
}
return fs.getFileBlockLocations(stat, 0, stat.getLen());
}
private static void addHostToRack(Map<String, Set<String>> rackToNodes,
String rack, String host) {
Set<String> hosts = rackToNodes.get(rack);
if (hosts == null) {
hosts = new HashSet<String>();
rackToNodes.put(rack, hosts);
}
hosts.add(host);
}
private Set<String> getHosts(Set<String> racks) {
Set<String> hosts = new HashSet<String>();
for (String rack : racks) {
if (rackToNodes.containsKey(rack)) {
hosts.addAll(rackToNodes.get(rack));
}
}
return hosts;
}
/**
   * Accept a path only if at least one of the filters given in the
   * constructor accepts it.
*/
private static class MultiPathFilter implements PathFilter {
private List<PathFilter> filters;
public MultiPathFilter() {
this.filters = new ArrayList<PathFilter>();
}
public MultiPathFilter(List<PathFilter> filters) {
this.filters = filters;
}
public void add(PathFilter one) {
filters.add(one);
}
public boolean accept(Path path) {
for (PathFilter filter : filters) {
if (filter.accept(path)) {
return true;
}
}
return false;
}
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[");
for (PathFilter f: filters) {
buf.append(f);
buf.append(",");
}
buf.append("]");
return buf.toString();
}
}
}
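
/*
 * Illustrative sketch (not part of the original source): tuning the combine
 * behaviour from a driver. The byte sizes are made-up example values; the
 * max-size key is the same one read in getSplits() above.
 */
class CombineFileInputFormatTuningSketch {
  static void tune(Configuration conf) {
    // Upper bound on the size of a single combined split.
    conf.setLong("mapreduce.input.fileinputformat.split.maxsize",
        256L * 1024 * 1024);
    // Leftover blocks on a node smaller than this are deferred to the
    // rack-level pass rather than forming a tiny node-local split.
    conf.setLong(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE,
        64L * 1024 * 1024);
    // Leftover blocks on a rack smaller than this go to the overflow pass.
    conf.setLong(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK,
        128L * 1024 * 1024);
  }
}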
| 30,507 | 36.571429 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileAsBinaryInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.input;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
* InputFormat reading keys, values from SequenceFiles in binary (raw)
* format.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsBinaryInputFormat
extends SequenceFileInputFormat<BytesWritable,BytesWritable> {
public SequenceFileAsBinaryInputFormat() {
super();
}
public RecordReader<BytesWritable,BytesWritable> createRecordReader(
InputSplit split, TaskAttemptContext context)
throws IOException {
return new SequenceFileAsBinaryRecordReader();
}
/**
* Read records from a SequenceFile as binary (raw) bytes.
*/
public static class SequenceFileAsBinaryRecordReader
extends RecordReader<BytesWritable,BytesWritable> {
private SequenceFile.Reader in;
private long start;
private long end;
private boolean done = false;
private DataOutputBuffer buffer = new DataOutputBuffer();
private SequenceFile.ValueBytes vbytes;
private BytesWritable key = null;
private BytesWritable value = null;
public void initialize(InputSplit split, TaskAttemptContext context)
throws IOException, InterruptedException {
Path path = ((FileSplit)split).getPath();
Configuration conf = context.getConfiguration();
FileSystem fs = path.getFileSystem(conf);
this.in = new SequenceFile.Reader(fs, path, conf);
this.end = ((FileSplit)split).getStart() + split.getLength();
if (((FileSplit)split).getStart() > in.getPosition()) {
in.sync(((FileSplit)split).getStart()); // sync to start
}
this.start = in.getPosition();
vbytes = in.createValueBytes();
done = start >= end;
}
@Override
public BytesWritable getCurrentKey()
throws IOException, InterruptedException {
return key;
}
@Override
public BytesWritable getCurrentValue()
throws IOException, InterruptedException {
return value;
}
/**
* Retrieve the name of the key class for this SequenceFile.
* @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName
*/
public String getKeyClassName() {
return in.getKeyClassName();
}
/**
* Retrieve the name of the value class for this SequenceFile.
* @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName
*/
public String getValueClassName() {
return in.getValueClassName();
}
/**
* Read raw bytes from a SequenceFile.
*/
public synchronized boolean nextKeyValue()
throws IOException, InterruptedException {
if (done) {
return false;
}
long pos = in.getPosition();
boolean eof = -1 == in.nextRawKey(buffer);
if (!eof) {
if (key == null) {
key = new BytesWritable();
}
if (value == null) {
value = new BytesWritable();
}
key.set(buffer.getData(), 0, buffer.getLength());
buffer.reset();
in.nextRawValue(vbytes);
vbytes.writeUncompressedBytes(buffer);
value.set(buffer.getData(), 0, buffer.getLength());
buffer.reset();
}
return !(done = (eof || (pos >= end && in.syncSeen())));
}
public void close() throws IOException {
in.close();
}
/**
* Return the progress within the input split
* @return 0.0 to 1.0 of the input byte range
*/
public float getProgress() throws IOException, InterruptedException {
if (end == start) {
return 0.0f;
} else {
return Math.min(1.0f, (float)((in.getPosition() - start) /
(double)(end - start)));
}
}
}
}
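
// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original Hadoop file).
// It shows how this input format is typically wired into a job driver so the
// mapper receives raw, uninterpreted key/value bytes. Fully qualified names
// are used for classes this file does not import, and the class name
// SequenceFileAsBinaryUsageSketch is hypothetical.
class SequenceFileAsBinaryUsageSketch {
  static org.apache.hadoop.mapreduce.Job createJob(Configuration conf, Path input)
      throws IOException {
    org.apache.hadoop.mapreduce.Job job =
        org.apache.hadoop.mapreduce.Job.getInstance(conf, "read-sequence-file-raw");
    // Keys and values arrive in the mapper as BytesWritable, regardless of the
    // classes the SequenceFile was written with.
    job.setInputFormatClass(SequenceFileAsBinaryInputFormat.class);
    FileInputFormat.addInputPath(job, input);
    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(BytesWritable.class);
    return job;
  }
}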
| 5,064 | 31.677419 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueHistogram.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that computes the
* histogram of a sequence of strings.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueHistogram implements ValueAggregator<String> {
TreeMap<Object, Object> items = null;
public ValueHistogram() {
items = new TreeMap<Object, Object>();
}
/**
* add the given val to the aggregator.
*
* @param val the value to be added. It is expected to be a string
* in the form of xxxx\tnum, meaning xxxx has num occurrences.
*/
public void addNextValue(Object val) {
String valCountStr = val.toString();
int pos = valCountStr.lastIndexOf("\t");
String valStr = valCountStr;
String countStr = "1";
if (pos >= 0) {
valStr = valCountStr.substring(0, pos);
countStr = valCountStr.substring(pos + 1);
}
Long count = (Long) this.items.get(valStr);
long inc = Long.parseLong(countStr);
if (count == null) {
count = inc;
} else {
count = count.longValue() + inc;
}
items.put(valStr, count);
}
/**
* @return the string representation of this aggregator.
* It includes the following basic statistics of the histogram:
* the number of unique values
* the minimum value
   *   the median value
* the maximum value
* the average value
* the standard deviation
*/
public String getReport() {
long[] counts = new long[items.size()];
StringBuffer sb = new StringBuffer();
Iterator<Object> iter = items.values().iterator();
int i = 0;
while (iter.hasNext()) {
Long count = (Long) iter.next();
counts[i] = count.longValue();
i += 1;
}
Arrays.sort(counts);
sb.append(counts.length);
i = 0;
long acc = 0;
while (i < counts.length) {
long nextVal = counts[i];
int j = i + 1;
while (j < counts.length && counts[j] == nextVal) {
j++;
}
acc += nextVal * (j - i);
i = j;
}
double average = 0.0;
double sd = 0.0;
if (counts.length > 0) {
sb.append("\t").append(counts[0]);
sb.append("\t").append(counts[counts.length / 2]);
sb.append("\t").append(counts[counts.length - 1]);
average = acc * 1.0 / counts.length;
sb.append("\t").append(average);
i = 0;
while (i < counts.length) {
double nextDiff = counts[i] - average;
sd += nextDiff * nextDiff;
i += 1;
}
sd = Math.sqrt(sd / counts.length);
sb.append("\t").append(sd);
}
return sb.toString();
}
/**
*
   * @return a string representation of the list of value/frequency pairs of
* the histogram
*/
public String getReportDetails() {
StringBuffer sb = new StringBuffer();
Iterator<Entry<Object,Object>> iter = items.entrySet().iterator();
while (iter.hasNext()) {
Entry<Object,Object> en = iter.next();
Object val = en.getKey();
Long count = (Long) en.getValue();
sb.append("\t").append(val.toString()).append("\t").
append(count.longValue()).append("\n");
}
return sb.toString();
}
/**
   * @return a list of value/frequency pairs.
* The return value is expected to be used by the reducer.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>();
Iterator<Entry<Object,Object>> iter = items.entrySet().iterator();
while (iter.hasNext()) {
Entry<Object,Object> en = iter.next();
Object val = en.getKey();
Long count = (Long) en.getValue();
retv.add(val.toString() + "\t" + count.longValue());
}
return retv;
}
/**
*
* @return a TreeMap representation of the histogram
*/
public TreeMap<Object,Object> getReportItems() {
return items;
}
/**
* reset the aggregator
*/
public void reset() {
items = new TreeMap<Object, Object>();
}
}
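
// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original Hadoop file).
// It exercises ValueHistogram outside of a MapReduce job to show the accepted
// input forms and the two report flavours. The class name
// ValueHistogramUsageSketch is hypothetical.
class ValueHistogramUsageSketch {
  public static void main(String[] args) {
    ValueHistogram histogram = new ValueHistogram();
    // A bare string counts as one occurrence of that value...
    histogram.addNextValue("cat");
    // ...while "value\tcount" adds 'count' occurrences in one call.
    histogram.addNextValue("dog\t3");
    histogram.addNextValue("cat\t2");
    // Tab-separated: unique-value count, min, median, max, average, std dev.
    System.out.println(histogram.getReport());
    // One "\tvalue\tcount" line per distinct value.
    System.out.print(histogram.getReportDetails());
  }
}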
| 5,046 | 26.883978 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/DoubleValueSum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that sums up a sequence of double
* values.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DoubleValueSum implements ValueAggregator<String> {
double sum = 0;
/**
* The default constructor
*
*/
public DoubleValueSum() {
reset();
}
/**
* add a value to the aggregator
*
* @param val
* an object whose string representation represents a double value.
*
*/
public void addNextValue(Object val) {
this.sum += Double.parseDouble(val.toString());
}
/**
* add a value to the aggregator
*
* @param val
* a double value.
*
*/
public void addNextValue(double val) {
this.sum += val;
}
/**
* @return the string representation of the aggregated value
*/
public String getReport() {
return "" + sum;
}
/**
* @return the aggregated value
*/
public double getSum() {
return this.sum;
}
/**
* reset the aggregator
*/
public void reset() {
sum = 0;
}
/**
   * @return an array of one element. The element is a string
   *         representation of the aggregated value. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add("" + sum);
return retv;
}
}
| 2,434 | 23.108911 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
 * This class implements some common functionality of the generic mapper,
 * reducer and combiner classes of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorJobBase<K1 extends WritableComparable<?>,
V1 extends Writable>
{
public static final String DESCRIPTOR = "mapreduce.aggregate.descriptor";
public static final String DESCRIPTOR_NUM =
"mapreduce.aggregate.descriptor.num";
public static final String USER_JAR = "mapreduce.aggregate.user.jar.file";
protected static ArrayList<ValueAggregatorDescriptor> aggregatorDescriptorList = null;
public static void setup(Configuration job) {
initializeMySpec(job);
logSpec();
}
protected static ValueAggregatorDescriptor getValueAggregatorDescriptor(
String spec, Configuration conf) {
if (spec == null)
return null;
String[] segments = spec.split(",", -1);
String type = segments[0];
if (type.compareToIgnoreCase("UserDefined") == 0) {
String className = segments[1];
return new UserDefinedValueAggregatorDescriptor(className, conf);
}
return null;
}
protected static ArrayList<ValueAggregatorDescriptor> getAggregatorDescriptors(
Configuration conf) {
int num = conf.getInt(DESCRIPTOR_NUM, 0);
ArrayList<ValueAggregatorDescriptor> retv =
new ArrayList<ValueAggregatorDescriptor>(num);
for (int i = 0; i < num; i++) {
String spec = conf.get(DESCRIPTOR + "." + i);
ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, conf);
if (ad != null) {
retv.add(ad);
}
}
return retv;
}
private static void initializeMySpec(Configuration conf) {
aggregatorDescriptorList = getAggregatorDescriptors(conf);
if (aggregatorDescriptorList.size() == 0) {
aggregatorDescriptorList
.add(new UserDefinedValueAggregatorDescriptor(
ValueAggregatorBaseDescriptor.class.getCanonicalName(), conf));
}
}
protected static void logSpec() {
}
}
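
// ---------------------------------------------------------------------------
// Configuration sketch (added for illustration; not part of the original
// Hadoop file). It shows the descriptor spec format that setup(conf) and
// getAggregatorDescriptors(conf) expect: DESCRIPTOR_NUM holds the count and
// each "mapreduce.aggregate.descriptor.<i>" entry holds a
// "UserDefined,<className>" spec. The class names below
// (ValueAggregatorConfigSketch, org.example.MyDescriptor) are hypothetical.
class ValueAggregatorConfigSketch {
  static Configuration describeOneDescriptor() {
    Configuration conf = new Configuration();
    conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, 1);
    conf.set(ValueAggregatorJobBase.DESCRIPTOR + ".0",
        "UserDefined,org.example.MyDescriptor");
    return conf;
  }
}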
| 3,229 | 34.888889 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/UserDefinedValueAggregatorDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
/**
* This class implements a wrapper for a user defined value
* aggregator descriptor.
* It serves two functions: One is to create an object of
* ValueAggregatorDescriptor from the name of a user defined class
 * that may be dynamically loaded. The other is to delegate invocations of
 * the generateKeyValPairs function to the created object.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UserDefinedValueAggregatorDescriptor implements
ValueAggregatorDescriptor {
private String className;
protected ValueAggregatorDescriptor theAggregatorDescriptor = null;
private static final Class<?>[] argArray = new Class[] {};
/**
* Create an instance of the given class
* @param className the name of the class
* @return a dynamically created instance of the given class
*/
public static Object createInstance(String className) {
Object retv = null;
try {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
Class<?> theFilterClass = Class.forName(className, true, classLoader);
Constructor<?> meth = theFilterClass.getDeclaredConstructor(argArray);
meth.setAccessible(true);
retv = meth.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
return retv;
}
private void createAggregator(Configuration conf) {
if (theAggregatorDescriptor == null) {
theAggregatorDescriptor = (ValueAggregatorDescriptor)
createInstance(this.className);
theAggregatorDescriptor.configure(conf);
}
}
/**
*
* @param className the class name of the user defined descriptor class
   * @param conf a configuration object used for descriptor configuration
*/
public UserDefinedValueAggregatorDescriptor(String className,
Configuration conf) {
this.className = className;
this.createAggregator(conf);
}
/**
* Generate a list of aggregation-id/value pairs for the given
* key/value pairs by delegating the invocation to the real object.
*
* @param key
* input key
* @param val
* input value
* @return a list of aggregation id/value pairs. An aggregation id encodes an
* aggregation type which is used to guide the way to aggregate the
   *         value in the reduce/combiner phase of an Aggregate based job.
*/
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
if (this.theAggregatorDescriptor != null) {
retv = this.theAggregatorDescriptor.generateKeyValPairs(key, val);
}
return retv;
}
/**
* @return the string representation of this object.
*/
public String toString() {
return "UserDefinedValueAggregatorDescriptor with class name:" + "\t"
+ this.className;
}
/**
* Do nothing.
*/
public void configure(Configuration conf) {
}
}
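
// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original Hadoop file).
// createInstance() only needs a no-argument constructor on the named class;
// ValueHistogram from this package is used here purely as a convenient demo
// target. The class name ReflectionLoadSketch is hypothetical.
class ReflectionLoadSketch {
  public static void main(String[] args) {
    Object aggregator = UserDefinedValueAggregatorDescriptor.createInstance(
        "org.apache.hadoop.mapreduce.lib.aggregate.ValueHistogram");
    // Prints the dynamically loaded class name.
    System.out.println(aggregator.getClass().getName());
  }
}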
| 4,195 | 32.83871 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* This class implements the common functionalities of
* the subclasses of ValueAggregatorDescriptor class.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorBaseDescriptor
implements ValueAggregatorDescriptor {
static public final String UNIQ_VALUE_COUNT = "UniqValueCount";
static public final String LONG_VALUE_SUM = "LongValueSum";
static public final String DOUBLE_VALUE_SUM = "DoubleValueSum";
static public final String VALUE_HISTOGRAM = "ValueHistogram";
static public final String LONG_VALUE_MAX = "LongValueMax";
static public final String LONG_VALUE_MIN = "LongValueMin";
static public final String STRING_VALUE_MAX = "StringValueMax";
static public final String STRING_VALUE_MIN = "StringValueMin";
public String inputFile = null;
private static class MyEntry implements Entry<Text, Text> {
Text key;
Text val;
public Text getKey() {
return key;
}
public Text getValue() {
return val;
}
public Text setValue(Text val) {
this.val = val;
return val;
}
public MyEntry(Text key, Text val) {
this.key = key;
this.val = val;
}
}
/**
*
* @param type the aggregation type
* @param id the aggregation id
* @param val the val associated with the id to be aggregated
* @return an Entry whose key is the aggregation id prefixed with
* the aggregation type.
*/
public static Entry<Text, Text> generateEntry(String type,
String id, Text val) {
Text key = new Text(type + TYPE_SEPARATOR + id);
return new MyEntry(key, val);
}
/**
*
* @param type the aggregation type
* @param uniqCount the limit in the number of unique values to keep,
* if type is UNIQ_VALUE_COUNT
* @return a value aggregator of the given type.
*/
static public ValueAggregator generateValueAggregator(String type, long uniqCount) {
if (type.compareToIgnoreCase(LONG_VALUE_SUM) == 0) {
return new LongValueSum();
    } else if (type.compareToIgnoreCase(LONG_VALUE_MAX) == 0) {
return new LongValueMax();
} else if (type.compareToIgnoreCase(LONG_VALUE_MIN) == 0) {
return new LongValueMin();
} else if (type.compareToIgnoreCase(STRING_VALUE_MAX) == 0) {
return new StringValueMax();
} else if (type.compareToIgnoreCase(STRING_VALUE_MIN) == 0) {
return new StringValueMin();
} else if (type.compareToIgnoreCase(DOUBLE_VALUE_SUM) == 0) {
return new DoubleValueSum();
} else if (type.compareToIgnoreCase(UNIQ_VALUE_COUNT) == 0) {
return new UniqValueCount(uniqCount);
} else if (type.compareToIgnoreCase(VALUE_HISTOGRAM) == 0) {
return new ValueHistogram();
}
return null;
}
/**
* Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
* The first id will be of type LONG_VALUE_SUM, with "record_count" as
* its aggregation id. If the input is a file split,
* the second id of the same type will be generated too, with the file name
* as its aggregation id. This achieves the behavior of counting the total
* number of records in the input data, and the number of records
* in each input file.
*
* @param key
* input key
* @param val
* input value
* @return a list of aggregation id/value pairs. An aggregation id encodes an
* aggregation type which is used to guide the way to aggregate the
   *         value in the reduce/combiner phase of an Aggregate based job.
*/
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val) {
ArrayList<Entry<Text, Text>> retv = new ArrayList<Entry<Text, Text>>();
String countType = LONG_VALUE_SUM;
String id = "record_count";
Entry<Text, Text> e = generateEntry(countType, id, ONE);
if (e != null) {
retv.add(e);
}
if (this.inputFile != null) {
e = generateEntry(countType, this.inputFile, ONE);
if (e != null) {
retv.add(e);
}
}
return retv;
}
/**
* get the input file name.
*
* @param conf a configuration object
*/
public void configure(Configuration conf) {
this.inputFile = conf.get(MRJobConfig.MAP_INPUT_FILE);
}
}
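
// ---------------------------------------------------------------------------
// Plugin sketch (added for illustration; not part of the original Hadoop
// file). It extends this base class to emit one LongValueSum entry per
// whitespace-separated token of each input value, replacing the default
// record-counting behaviour, so the reduce side sums occurrences per token.
// The class name WordCountDescriptorSketch is hypothetical.
class WordCountDescriptorSketch extends ValueAggregatorBaseDescriptor {
  @Override
  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
    ArrayList<Entry<Text, Text>> pairs = new ArrayList<Entry<Text, Text>>();
    for (String word : val.toString().split("\\s+")) {
      if (word.length() > 0) {
        // The emitted key becomes "LongValueSum:<word>" and the value "1".
        pairs.add(generateEntry(LONG_VALUE_SUM, word, ONE));
      }
    }
    return pairs;
  }
}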
| 5,498 | 31.928144 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the maximum of
* a sequence of long values.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongValueMax implements ValueAggregator<String> {
long maxVal = Long.MIN_VALUE;
/**
* the default constructor
*
*/
public LongValueMax() {
reset();
}
/**
* add a value to the aggregator
*
* @param val
* an object whose string representation represents a long value.
*
*/
public void addNextValue(Object val) {
long newVal = Long.parseLong(val.toString());
if (this.maxVal < newVal) {
this.maxVal = newVal;
}
}
/**
* add a value to the aggregator
*
* @param newVal
* a long value.
*
*/
public void addNextValue(long newVal) {
if (this.maxVal < newVal) {
this.maxVal = newVal;
    }
}
/**
* @return the aggregated value
*/
public long getVal() {
return this.maxVal;
}
/**
* @return the string representation of the aggregated value
*/
public String getReport() {
return ""+maxVal;
}
/**
* reset the aggregator
*/
public void reset() {
maxVal = Long.MIN_VALUE;
}
/**
   * @return an array of one element. The element is a string
   *         representation of the aggregated value. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
    ArrayList<String> retv = new ArrayList<String>(1);
retv.add("" + maxVal);
return retv;
}
}
| 2,605 | 24.057692 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Reducer;
/**
* This class implements the generic reducer of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorReducer<K1 extends WritableComparable<?>,
V1 extends Writable>
extends Reducer<Text, Text, Text, Text> {
public void setup(Context context)
throws IOException, InterruptedException {
ValueAggregatorJobBase.setup(context.getConfiguration());
}
/**
* @param key
* the key is expected to be a Text object, whose prefix indicates
* the type of aggregation to aggregate the values. In effect, data
* driven computing is achieved. It is assumed that each aggregator's
* getReport method emits appropriate output for the aggregator. This
* may be further customized.
* @param values the values to be aggregated
* @param context
*/
public void reduce(Text key, Iterable<Text> values,
Context context) throws IOException, InterruptedException {
String keyStr = key.toString();
int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
String type = keyStr.substring(0, pos);
keyStr = keyStr.substring(pos +
ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
long uniqCount = context.getConfiguration().
getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
ValueAggregator aggregator = ValueAggregatorBaseDescriptor
.generateValueAggregator(type, uniqCount);
for (Text value : values) {
aggregator.addNextValue(value);
}
String val = aggregator.getReport();
key = new Text(keyStr);
context.write(key, new Text(val));
}
}
| 2,862 | 37.689189 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the smallest of
* a sequence of strings.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StringValueMin implements ValueAggregator<String> {
String minVal = null;
/**
* the default constructor
*
*/
public StringValueMin() {
reset();
}
/**
* add a value to the aggregator
*
* @param val
* a string.
*
*/
public void addNextValue(Object val) {
String newVal = val.toString();
if (this.minVal == null || this.minVal.compareTo(newVal) > 0) {
this.minVal = newVal;
}
}
/**
* @return the aggregated value
*/
public String getVal() {
return this.minVal;
}
/**
* @return the string representation of the aggregated value
*/
public String getReport() {
return minVal;
}
/**
* reset the aggregator
*/
public void reset() {
minVal = null;
}
/**
   * @return an array of one element. The element is a string
   *         representation of the aggregated value. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add(minVal);
return retv;
}
}
| 2,331 | 24.347826 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Mapper;
/**
* This class implements the generic mapper of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorMapper<K1 extends WritableComparable<?>,
V1 extends Writable>
extends Mapper<K1, V1, Text, Text> {
public void setup(Context context)
throws IOException, InterruptedException {
ValueAggregatorJobBase.setup(context.getConfiguration());
}
/**
* the map function. It iterates through the value aggregator descriptor
* list to generate aggregation id/value pairs and emit them.
*/
public void map(K1 key, V1 value,
Context context) throws IOException, InterruptedException {
Iterator<?> iter =
ValueAggregatorJobBase.aggregatorDescriptorList.iterator();
while (iter.hasNext()) {
ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next();
Iterator<Entry<Text, Text>> ens =
ad.generateKeyValPairs(key, value).iterator();
while (ens.hasNext()) {
Entry<Text, Text> en = ens.next();
context.write(en.getKey(), en.getValue());
}
}
}
}
| 2,361 | 34.787879 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Reducer;
/**
* This class implements the generic combiner of Aggregate.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorCombiner<K1 extends WritableComparable<?>,
V1 extends Writable>
extends Reducer<Text, Text, Text, Text> {
/** Combines values for a given key.
* @param key the key is expected to be a Text object, whose prefix indicates
* the type of aggregation to aggregate the values.
* @param values the values to combine
* @param context to collect combined values
*/
public void reduce(Text key, Iterable<Text> values, Context context)
throws IOException, InterruptedException {
String keyStr = key.toString();
int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
String type = keyStr.substring(0, pos);
long uniqCount = context.getConfiguration().
getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
ValueAggregator aggregator = ValueAggregatorBaseDescriptor
.generateValueAggregator(type, uniqCount);
for (Text val : values) {
aggregator.addNextValue(val);
}
Iterator<?> outputs = aggregator.getCombinerOutput().iterator();
while (outputs.hasNext()) {
Object v = outputs.next();
if (v instanceof Text) {
context.write(key, (Text)v);
} else {
context.write(key, new Text(v.toString()));
}
}
}
}
| 2,630 | 36.585714 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueSum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that sums up
* a sequence of long values.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongValueSum implements ValueAggregator<String> {
long sum = 0;
/**
* the default constructor
*
*/
public LongValueSum() {
reset();
}
/**
* add a value to the aggregator
*
* @param val
* an object whose string representation represents a long value.
*
*/
public void addNextValue(Object val) {
this.sum += Long.parseLong(val.toString());
}
/**
* add a value to the aggregator
*
* @param val
* a long value.
*
*/
public void addNextValue(long val) {
this.sum += val;
}
/**
* @return the aggregated value
*/
public long getSum() {
return this.sum;
}
/**
* @return the string representation of the aggregated value
*/
public String getReport() {
return ""+sum;
}
/**
* reset the aggregator
*/
public void reset() {
sum = 0;
}
/**
   * @return an array of one element. The element is a string
   *         representation of the aggregated value. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add(""+sum);
return retv;
}
}
| 2,427 | 23.039604 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
/**
 * This is the main class for creating a map/reduce job using the Aggregate
 * framework. Aggregate is a specialization of the map/reduce framework for
 * performing various simple aggregations.
 *
 * Generally speaking, in order to implement an application using the
 * Map/Reduce model, the developer implements Map and Reduce functions (and
 * possibly a combine function). However, many applications related to counting
 * and statistics computation have very similar characteristics. Aggregate
 * abstracts out the general patterns of these functions and implements those
 * patterns. In particular, the package provides generic mapper/reducer/combiner
 * classes, a set of built-in value aggregators, and a generic utility class
 * that helps users create map/reduce jobs using these generic classes.
* The built-in aggregators include:
*
 * sum over numeric values; count of the number of distinct values; histogram
 * of values; and the minimum, maximum, median, average, and standard
 * deviation of numeric values.
*
* The developer using Aggregate will need only to provide a plugin class
* conforming to the following interface:
*
 * public interface ValueAggregatorDescriptor {
 *   public ArrayList<Entry> generateKeyValPairs(Object key, Object value);
 *   public void configure(Configuration conf);
 * }
*
* The package also provides a base class, ValueAggregatorBaseDescriptor,
* implementing the above interface. The user can extend the base class and
* implement generateKeyValPairs accordingly.
*
* The primary work of generateKeyValPairs is to emit one or more key/value
* pairs based on the input key/value pair. The key in an output key/value pair
 * encodes two pieces of information: aggregation type and aggregation id. The
 * value will be aggregated onto the aggregation id according to the aggregation
* type.
*
* This class offers a function to generate a map/reduce job using Aggregate
 * framework. The function takes the following parameters: the input directory
 * spec, the input format (text or sequence file), the output directory, and a
 * file specifying the user plugin class.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ValueAggregatorJob {
public static JobControl createValueAggregatorJobs(String args[],
Class<? extends ValueAggregatorDescriptor>[] descriptors)
throws IOException {
JobControl theControl = new JobControl("ValueAggregatorJobs");
ArrayList<ControlledJob> dependingJobs = new ArrayList<ControlledJob>();
Configuration conf = new Configuration();
if (descriptors != null) {
conf = setAggregatorDescriptors(descriptors);
}
Job job = createValueAggregatorJob(conf, args);
ControlledJob cjob = new ControlledJob(job, dependingJobs);
theControl.addJob(cjob);
return theControl;
}
public static JobControl createValueAggregatorJobs(String args[])
throws IOException {
return createValueAggregatorJobs(args, null);
}
/**
* Create an Aggregate based map/reduce job.
*
* @param conf The configuration for job
* @param args the arguments used for job creation. Generic hadoop
* arguments are accepted.
* @return a Job object ready for submission.
*
* @throws IOException
* @see GenericOptionsParser
*/
public static Job createValueAggregatorJob(Configuration conf, String args[])
throws IOException {
GenericOptionsParser genericParser
= new GenericOptionsParser(conf, args);
args = genericParser.getRemainingArgs();
if (args.length < 2) {
System.out.println("usage: inputDirs outDir "
+ "[numOfReducer [textinputformat|seq [specfile [jobName]]]]");
GenericOptionsParser.printGenericCommandUsage(System.out);
System.exit(2);
}
String inputDir = args[0];
String outputDir = args[1];
int numOfReducers = 1;
if (args.length > 2) {
numOfReducers = Integer.parseInt(args[2]);
}
Class<? extends InputFormat> theInputFormat = null;
if (args.length > 3 &&
args[3].compareToIgnoreCase("textinputformat") == 0) {
theInputFormat = TextInputFormat.class;
} else {
theInputFormat = SequenceFileInputFormat.class;
}
Path specFile = null;
if (args.length > 4) {
specFile = new Path(args[4]);
}
String jobName = "";
if (args.length > 5) {
jobName = args[5];
}
if (specFile != null) {
conf.addResource(specFile);
}
String userJarFile = conf.get(ValueAggregatorJobBase.USER_JAR);
if (userJarFile != null) {
conf.set(MRJobConfig.JAR, userJarFile);
}
Job theJob = Job.getInstance(conf);
if (userJarFile == null) {
theJob.setJarByClass(ValueAggregator.class);
}
theJob.setJobName("ValueAggregatorJob: " + jobName);
FileInputFormat.addInputPaths(theJob, inputDir);
theJob.setInputFormatClass(theInputFormat);
theJob.setMapperClass(ValueAggregatorMapper.class);
FileOutputFormat.setOutputPath(theJob, new Path(outputDir));
theJob.setOutputFormatClass(TextOutputFormat.class);
theJob.setMapOutputKeyClass(Text.class);
theJob.setMapOutputValueClass(Text.class);
theJob.setOutputKeyClass(Text.class);
theJob.setOutputValueClass(Text.class);
theJob.setReducerClass(ValueAggregatorReducer.class);
theJob.setCombinerClass(ValueAggregatorCombiner.class);
theJob.setNumReduceTasks(numOfReducers);
return theJob;
}
public static Job createValueAggregatorJob(String args[],
Class<? extends ValueAggregatorDescriptor>[] descriptors)
throws IOException {
return createValueAggregatorJob(
setAggregatorDescriptors(descriptors), args);
}
public static Configuration setAggregatorDescriptors(
Class<? extends ValueAggregatorDescriptor>[] descriptors) {
Configuration conf = new Configuration();
conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, descriptors.length);
//specify the aggregator descriptors
for(int i=0; i< descriptors.length; i++) {
      conf.set(ValueAggregatorJobBase.DESCRIPTOR + "." + i,
"UserDefined," + descriptors[i].getName());
}
return conf;
}
/**
* create and run an Aggregate based map/reduce job.
*
* @param args the arguments used for job creation
* @throws IOException
*/
public static void main(String args[])
throws IOException, InterruptedException, ClassNotFoundException {
Job job = ValueAggregatorJob.createValueAggregatorJob(
new Configuration(), args);
int ret = job.waitForCompletion(true) ? 0 : 1;
System.exit(ret);
}
}
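
// ---------------------------------------------------------------------------
// Driver sketch (added for illustration; not part of the original Hadoop
// file). It follows the flow documented above: descriptor classes are written
// into the Configuration by setAggregatorDescriptors(), and
// createValueAggregatorJob() assembles the job around the generic
// mapper/combiner/reducer. The class name ValueAggregatorDriverSketch and the
// "in"/"out" paths are placeholders.
class ValueAggregatorDriverSketch {
  @SuppressWarnings("unchecked")
  static Job buildJob(Class<? extends ValueAggregatorDescriptor> descriptor)
      throws IOException {
    Class<? extends ValueAggregatorDescriptor>[] descriptors =
        new Class[] { descriptor };
    // args layout: inputDirs outDir [numOfReducer [textinputformat|seq ...]]
    String[] args = { "in", "out", "1", "textinputformat" };
    return ValueAggregatorJob.createValueAggregatorJob(args, descriptors);
  }
}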
| 8,454 | 36.914798 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
/**
* This interface defines the contract a value aggregator descriptor must
* support. Such a descriptor can be configured with a {@link Configuration}
* object. Its main function is to generate a list of aggregation-id/value
* pairs. An aggregation id encodes an aggregation type which is used to
 * guide the way to aggregate the value in the reduce/combiner phase of an
* Aggregate based job.
* The mapper in an Aggregate based map/reduce job may create one or more of
* ValueAggregatorDescriptor objects at configuration time. For each input
* key/value pair, the mapper will use those objects to create aggregation
* id/value pairs.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ValueAggregatorDescriptor {
public static final String TYPE_SEPARATOR = ":";
public static final Text ONE = new Text("1");
/**
* Generate a list of aggregation-id/value pairs for
* the given key/value pair.
* This function is usually called by the mapper of an Aggregate based job.
*
* @param key
* input key
* @param val
* input value
* @return a list of aggregation id/value pairs. An aggregation id encodes an
* aggregation type which is used to guide the way to aggregate the
   *         value in the reduce/combiner phase of an Aggregate based job.
*/
public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key,
Object val);
/**
* Configure the object
*
* @param conf
* a Configuration object that may contain the information
* that can be used to configure the object.
*/
public void configure(Configuration conf);
}
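
// ---------------------------------------------------------------------------
// Implementation sketch (added for illustration; not part of the original
// Hadoop file). A bare-bones descriptor honouring the contract above: every
// record contributes one UniqValueCount entry under a fixed aggregation id,
// so the job reports the number of distinct values seen. The class name
// DistinctValueDescriptorSketch and the id "distinct_values" are made up.
class DistinctValueDescriptorSketch implements ValueAggregatorDescriptor {
  public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
    ArrayList<Entry<Text, Text>> pairs = new ArrayList<Entry<Text, Text>>();
    // Key becomes "UniqValueCount:distinct_values"; the value itself is what
    // gets de-duplicated on the combiner/reducer side.
    pairs.add(ValueAggregatorBaseDescriptor.generateEntry(
        ValueAggregatorBaseDescriptor.UNIQ_VALUE_COUNT, "distinct_values",
        new Text(val.toString())));
    return pairs;
  }
  public void configure(Configuration conf) {
    // Nothing to configure for this sketch.
  }
}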
| 2,852 | 37.04 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This interface defines the minimal protocol for value aggregators.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ValueAggregator<E> {
/**
* add a value to the aggregator
*
* @param val the value to be added
*/
public void addNextValue(Object val);
/**
* reset the aggregator
*
*/
public void reset();
/**
   * @return the string representation of the aggregator
*/
public String getReport();
/**
*
* @return an array of values as the outputs of the combiner.
*/
public ArrayList<E> getCombinerOutput();
}
| 1,608 | 26.271186 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/StringValueMax.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the largest of
* a sequence of strings.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StringValueMax implements ValueAggregator<String> {
String maxVal = null;
/**
* the default constructor
*
*/
public StringValueMax() {
reset();
}
/**
* add a value to the aggregator
*
* @param val
* a string.
*
*/
public void addNextValue(Object val) {
String newVal = val.toString();
if (this.maxVal == null || this.maxVal.compareTo(newVal) < 0) {
this.maxVal = newVal;
}
}
/**
* @return the aggregated value
*/
public String getVal() {
return this.maxVal;
}
/**
* @return the string representation of the aggregated value
*/
public String getReport() {
return maxVal;
}
/**
* reset the aggregator
*/
public void reset() {
maxVal = null;
}
/**
   * @return an array of one element. The element is a string
   *         representation of the aggregated value. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add(maxVal);
return retv;
}
}
| 2,330 | 24.336957 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class implements a value aggregator that dedupes a sequence of objects.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UniqValueCount implements ValueAggregator<Object> {
public static final String MAX_NUM_UNIQUE_VALUES =
"mapreduce.aggregate.max.num.unique.values";
private TreeMap<Object, Object> uniqItems = null;
private long numItems = 0;
private long maxNumItems = Long.MAX_VALUE;
/**
* the default constructor
*
*/
public UniqValueCount() {
this(Long.MAX_VALUE);
}
/**
* constructor
* @param maxNum the limit in the number of unique values to keep.
*
*/
public UniqValueCount(long maxNum) {
uniqItems = new TreeMap<Object, Object>();
this.numItems = 0;
maxNumItems = Long.MAX_VALUE;
if (maxNum > 0 ) {
this.maxNumItems = maxNum;
}
}
/**
* Set the limit on the number of unique values
* @param n the desired limit on the number of unique values
* @return the new limit on the number of unique values
*/
public long setMaxItems(long n) {
if (n >= numItems) {
this.maxNumItems = n;
} else if (this.maxNumItems >= this.numItems) {
this.maxNumItems = this.numItems;
}
return this.maxNumItems;
}
/**
* add a value to the aggregator
*
* @param val
* an object.
*
*/
public void addNextValue(Object val) {
if (this.numItems <= this.maxNumItems) {
uniqItems.put(val.toString(), "1");
this.numItems = this.uniqItems.size();
}
}
/**
* @return return the number of unique objects aggregated
*/
public String getReport() {
return "" + uniqItems.size();
}
/**
*
* @return the set of the unique objects
*/
public Set<Object> getUniqueItems() {
return uniqItems.keySet();
}
/**
* reset the aggregator
*/
public void reset() {
uniqItems = new TreeMap<Object, Object>();
}
/**
   * @return an array of the unique objects. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<Object> getCombinerOutput() {
Object key = null;
Iterator<Object> iter = uniqItems.keySet().iterator();
ArrayList<Object> retv = new ArrayList<Object>();
while (iter.hasNext()) {
key = iter.next();
retv.add(key);
}
return retv;
}
}
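
// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original Hadoop
// file). Direct use outside a job: duplicates collapse and the report is the
// number of distinct values seen, subject to the constructor limit. The class
// name UniqValueCountUsageSketch is hypothetical.
class UniqValueCountUsageSketch {
  public static void main(String[] args) {
    UniqValueCount uniq = new UniqValueCount(100); // keep at most 100 uniques
    uniq.addNextValue("alpha");
    uniq.addNextValue("beta");
    uniq.addNextValue("alpha"); // duplicate; not counted twice
    System.out.println(uniq.getReport()); // prints 2
  }
}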
| 3,460 | 25.022556 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.lib.aggregate;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * This class implements a value aggregator that maintains the minimum of
* a sequence of long values.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongValueMin implements ValueAggregator<String> {
long minVal = Long.MAX_VALUE;
/**
* the default constructor
*
*/
public LongValueMin() {
reset();
}
/**
* add a value to the aggregator
*
* @param val
* an object whose string representation represents a long value.
*
*/
public void addNextValue(Object val) {
long newVal = Long.parseLong(val.toString());
if (this.minVal > newVal) {
this.minVal = newVal;
}
}
/**
* add a value to the aggregator
*
* @param newVal
* a long value.
*
*/
public void addNextValue(long newVal) {
if (this.minVal > newVal) {
this.minVal = newVal;
    }
}
/**
* @return the aggregated value
*/
public long getVal() {
return this.minVal;
}
/**
* @return the string representation of the aggregated value
*/
public String getReport() {
return ""+minVal;
}
/**
* reset the aggregator
*/
public void reset() {
minVal = Long.MAX_VALUE;
}
/**
   * @return an array of one element. The element is a string
   *         representation of the aggregated value. The return value is
   *         expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add(""+minVal);
return retv;
}
}
| 2,602 | 24.028846 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.server.tasktracker;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.MRConfig;
/**
 * Placeholder for TaskTracker server-level configuration.
 *
 * The keys should have "mapreduce.tasktracker." as the prefix.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface TTConfig extends MRConfig {
// Task-tracker configuration properties
public static final String TT_HEALTH_CHECKER_INTERVAL =
"mapreduce.tasktracker.healthchecker.interval";
public static final String TT_HEALTH_CHECKER_SCRIPT_ARGS =
"mapreduce.tasktracker.healthchecker.script.args";
public static final String TT_HEALTH_CHECKER_SCRIPT_PATH =
"mapreduce.tasktracker.healthchecker.script.path";
public static final String TT_HEALTH_CHECKER_SCRIPT_TIMEOUT =
"mapreduce.tasktracker.healthchecker.script.timeout";
public static final String TT_LOCAL_DIR_MINSPACE_KILL =
"mapreduce.tasktracker.local.dir.minspacekill";
public static final String TT_LOCAL_DIR_MINSPACE_START =
"mapreduce.tasktracker.local.dir.minspacestart";
public static final String TT_HTTP_ADDRESS =
"mapreduce.tasktracker.http.address";
public static final String TT_REPORT_ADDRESS =
"mapreduce.tasktracker.report.address";
public static final String TT_TASK_CONTROLLER =
"mapreduce.tasktracker.taskcontroller";
public static final String TT_CONTENTION_TRACKING =
"mapreduce.tasktracker.contention.tracking";
public static final String TT_STATIC_RESOLUTIONS =
"mapreduce.tasktracker.net.static.resolutions";
public static final String TT_HTTP_THREADS =
"mapreduce.tasktracker.http.threads";
public static final String TT_HOST_NAME = "mapreduce.tasktracker.host.name";
public static final String TT_SLEEP_TIME_BEFORE_SIG_KILL =
"mapreduce.tasktracker.tasks.sleeptimebeforesigkill";
public static final String TT_DNS_INTERFACE =
"mapreduce.tasktracker.dns.interface";
public static final String TT_DNS_NAMESERVER =
"mapreduce.tasktracker.dns.nameserver";
public static final String TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL =
"mapreduce.tasktracker.events.batchsize";
public static final String TT_INDEX_CACHE =
"mapreduce.tasktracker.indexcache.mb";
public static final String TT_INSTRUMENTATION =
"mapreduce.tasktracker.instrumentation";
public static final String TT_MAP_SLOTS =
"mapreduce.tasktracker.map.tasks.maximum";
/**
* @deprecated Use {@link #TT_RESOURCE_CALCULATOR_PLUGIN} instead
*/
@Deprecated
public static final String TT_MEMORY_CALCULATOR_PLUGIN =
"mapreduce.tasktracker.memorycalculatorplugin";
public static final String TT_RESOURCE_CALCULATOR_PLUGIN =
"mapreduce.tasktracker.resourcecalculatorplugin";
public static final String TT_REDUCE_SLOTS =
"mapreduce.tasktracker.reduce.tasks.maximum";
public static final String TT_LOCAL_CACHE_SIZE =
"mapreduce.tasktracker.cache.local.size";
public static final String TT_LOCAL_CACHE_SUBDIRS_LIMIT =
"mapreduce.tasktracker.cache.local.numberdirectories";
public static final String TT_OUTOFBAND_HEARBEAT =
"mapreduce.tasktracker.outofband.heartbeat";
public static final String TT_RESERVED_PHYSCIALMEMORY_MB =
"mapreduce.tasktracker.reserved.physicalmemory.mb";
public static final String TT_USER_NAME = "mapreduce.tasktracker.kerberos.principal";
public static final String TT_KEYTAB_FILE =
"mapreduce.tasktracker.keytab.file";
public static final String TT_GROUP =
"mapreduce.tasktracker.group";
public static final String TT_USERLOGCLEANUP_SLEEPTIME =
"mapreduce.tasktracker.userlogcleanup.sleeptime";
public static final String TT_DISTRIBUTED_CACHE_CHECK_PERIOD =
"mapreduce.tasktracker.distributedcache.checkperiod";
/**
* Percentage of the local distributed cache that should be kept in between
* garbage collection.
*/
public static final String TT_LOCAL_CACHE_KEEP_AROUND_PCT =
"mapreduce.tasktracker.cache.local.keep.pct";
}
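// Usage sketch, for illustration only: how consuming code typically reads
// these keys from a Configuration. The fallback values are placeholders
// chosen for the example, not the defaults shipped with Hadoop.
class TTConfigUsageSketch {
  static String describe(org.apache.hadoop.conf.Configuration conf) {
    int httpThreads = conf.getInt(TTConfig.TT_HTTP_THREADS, 40);
    int mapSlots = conf.getInt(TTConfig.TT_MAP_SLOTS, 2);
    String httpAddress = conf.get(TTConfig.TT_HTTP_ADDRESS, "0.0.0.0:50060");
    return httpAddress + " threads=" + httpThreads + " mapSlots=" + mapSlots;
  }
}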
| 4,931 | 44.666667 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.server.jobtracker;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.MRConfig;
/**
 * Placeholder for JobTracker server-level configuration.
 *
 * The keys should have "mapreduce.jobtracker." as the prefix.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface JTConfig extends MRConfig {
// JobTracker configuration parameters
public static final String JT_IPC_ADDRESS = "mapreduce.jobtracker.address";
public static final String JT_HTTP_ADDRESS =
"mapreduce.jobtracker.http.address";
public static final String JT_IPC_HANDLER_COUNT =
"mapreduce.jobtracker.handler.count";
public static final String JT_RESTART_ENABLED =
"mapreduce.jobtracker.restart.recover";
public static final String JT_TASK_SCHEDULER =
"mapreduce.jobtracker.taskscheduler";
public static final String JT_INSTRUMENTATION =
"mapreduce.jobtracker.instrumentation";
public static final String JT_TASKS_PER_JOB =
"mapreduce.jobtracker.maxtasks.perjob";
public static final String JT_HEARTBEATS_IN_SECOND =
"mapreduce.jobtracker.heartbeats.in.second";
public static final String JT_HEARTBEATS_SCALING_FACTOR =
"mapreduce.jobtracker.heartbeats.scaling.factor";
public static final String JT_HEARTBEAT_INTERVAL_MIN =
"mapreduce.jobtracker.heartbeat.interval.min";
public static final int JT_HEARTBEAT_INTERVAL_MIN_DEFAULT = 300;
public static final String JT_PERSIST_JOBSTATUS =
"mapreduce.jobtracker.persist.jobstatus.active";
public static final String JT_PERSIST_JOBSTATUS_HOURS =
"mapreduce.jobtracker.persist.jobstatus.hours";
public static final String JT_PERSIST_JOBSTATUS_DIR =
"mapreduce.jobtracker.persist.jobstatus.dir";
/**
* @deprecated Use MR_SUPERGROUP instead
*/
@Deprecated
public static final String JT_SUPERGROUP =
"mapreduce.jobtracker.permissions.supergroup";
public static final String JT_RETIREJOBS =
"mapreduce.jobtracker.retirejobs";
public static final String JT_RETIREJOB_CACHE_SIZE =
"mapreduce.jobtracker.retiredjobs.cache.size";
public static final String JT_TASKCACHE_LEVELS =
"mapreduce.jobtracker.taskcache.levels";
public static final String JT_TASK_ALLOC_PAD_FRACTION =
"mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad";
public static final String JT_JOBINIT_THREADS =
"mapreduce.jobtracker.jobinit.threads";
public static final String JT_TRACKER_EXPIRY_INTERVAL =
"mapreduce.jobtracker.expire.trackers.interval";
public static final String JT_RUNNINGTASKS_PER_JOB =
"mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob";
public static final String JT_HOSTS_FILENAME =
"mapreduce.jobtracker.hosts.filename";
public static final String JT_HOSTS_EXCLUDE_FILENAME =
"mapreduce.jobtracker.hosts.exclude.filename";
public static final String JT_JOBHISTORY_CACHE_SIZE =
"mapreduce.jobtracker.jobhistory.lru.cache.size";
public static final String JT_JOBHISTORY_BLOCK_SIZE =
"mapreduce.jobtracker.jobhistory.block.size";
public static final String JT_JOBHISTORY_COMPLETED_LOCATION =
"mapreduce.jobtracker.jobhistory.completed.location";
public static final String JT_JOBHISTORY_LOCATION =
"mapreduce.jobtracker.jobhistory.location";
// number of partial task progress reports we retain in job history
public static final String JT_JOBHISTORY_TASKPROGRESS_NUMBER_SPLITS =
"mapreduce.jobtracker.jobhistory.task.numberprogresssplits";
public static final String JT_AVG_BLACKLIST_THRESHOLD =
"mapreduce.jobtracker.blacklist.average.threshold";
public static final String JT_SYSTEM_DIR = "mapreduce.jobtracker.system.dir";
public static final String JT_STAGING_AREA_ROOT =
"mapreduce.jobtracker.staging.root.dir";
public static final String JT_MAX_TRACKER_BLACKLISTS =
"mapreduce.jobtracker.tasktracker.maxblacklists";
public static final String JT_JOBHISTORY_MAXAGE =
"mapreduce.jobtracker.jobhistory.maxage";
public static final String JT_MAX_MAPMEMORY_MB =
"mapreduce.jobtracker.maxmapmemory.mb";
public static final String JT_MAX_REDUCEMEMORY_MB =
"mapreduce.jobtracker.maxreducememory.mb";
public static final String JT_MAX_JOB_SPLIT_METAINFO_SIZE =
"mapreduce.jobtracker.split.metainfo.maxsize";
public static final String JT_USER_NAME = "mapreduce.jobtracker.kerberos.principal";
public static final String JT_KEYTAB_FILE =
"mapreduce.jobtracker.keytab.file";
public static final String PRIVATE_ACTIONS_KEY =
"mapreduce.jobtracker.webinterface.trusted";
public static final String JT_PLUGINS =
"mapreduce.jobtracker.plugins";
public static final String SHUFFLE_EXCEPTION_STACK_REGEX =
"mapreduce.reduce.shuffle.catch.exception.stack.regex";
public static final String SHUFFLE_EXCEPTION_MSG_REGEX =
"mapreduce.reduce.shuffle.catch.exception.message.regex";
}
| 5,833 | 46.430894 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInProgress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobCounter;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobInProgress {
/**
* @deprecated Provided for compatibility. Use {@link JobCounter} instead.
*/
@Deprecated
public static enum Counter {
NUM_FAILED_MAPS,
NUM_FAILED_REDUCES,
TOTAL_LAUNCHED_MAPS,
TOTAL_LAUNCHED_REDUCES,
OTHER_LOCAL_MAPS,
DATA_LOCAL_MAPS,
RACK_LOCAL_MAPS,
SLOTS_MILLIS_MAPS,
SLOTS_MILLIS_REDUCES,
FALLOW_SLOTS_MILLIS_MAPS,
FALLOW_SLOTS_MILLIS_REDUCES
}
}
| 1,514 | 30.5625 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Partitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Partitions the key space.
*
* <p><code>Partitioner</code> controls the partitioning of the keys of the
* intermediate map-outputs. The key (or a subset of the key) is used to derive
* the partition, typically by a hash function. The total number of partitions
* is the same as the number of reduce tasks for the job. Hence this controls
* which of the <code>m</code> reduce tasks the intermediate key (and hence the
* record) is sent for reduction.</p>
*
* @see Reducer
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Partitioner<K2, V2> extends JobConfigurable {
/**
   * Get the partition number for a given key (hence record) given the total
   * number of partitions, i.e. the number of reduce tasks for the job.
   *
   * <p>Typically a hash function on all or a subset of the key.</p>
   *
   * @param key the key to be partitioned.
* @param value the entry value.
* @param numPartitions the total number of partitions.
* @return the partition number for the <code>key</code>.
*/
int getPartition(K2 key, V2 value, int numPartitions);
}
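// Implementation sketch, for illustration only: a minimal hash-based
// Partitioner. Masking with Integer.MAX_VALUE keeps the result non-negative
// even when hashCode() is negative.
class HashPartitionerSketch<K2, V2> implements Partitioner<K2, V2> {
  public void configure(JobConf job) {
    // nothing to configure for this sketch
  }
  public int getPartition(K2 key, V2 value, int numPartitions) {
    return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
  }
}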
| 2,074 | 38.150943 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobPriority.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Used to describe the priority of the running job.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum JobPriority {
VERY_HIGH,
HIGH,
NORMAL,
LOW,
VERY_LOW;
}
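// Usage sketch, for illustration only: requesting a higher priority through
// the old-API JobConf setter; the job name is a placeholder.
class JobPrioritySketch {
  static JobConf highPriorityConf() {
    JobConf conf = new JobConf();
    conf.setJobName("example-job");
    conf.setJobPriority(JobPriority.HIGH);
    return conf;
  }
}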
| 1,159 | 30.351351 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/AuditLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.net.InetAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.Server;
/** Manages MapReduce audit logs. Audit logs provide information about
* authorization/authentication events (success/failure).
*
* Audit log format is written as key=value pairs.
*/
class AuditLogger {
private static final Log LOG = LogFactory.getLog(AuditLogger.class);
static enum Keys {USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS,
DESCRIPTION}
static class Constants {
static final String SUCCESS = "SUCCESS";
static final String FAILURE = "FAILURE";
static final String KEY_VAL_SEPARATOR = "=";
static final char PAIR_SEPARATOR = '\t';
// Some constants used by others using AuditLogger.
// Some commonly used targets
static final String JOBTRACKER = "JobTracker";
// Some commonly used operations
static final String REFRESH_QUEUE = "REFRESH_QUEUE";
static final String REFRESH_NODES = "REFRESH_NODES";
// Some commonly used descriptions
static final String UNAUTHORIZED_USER = "Unauthorized user";
}
/**
   * A helper API for creating an audit log for a successful event.
   * This is factored out for testing purposes.
*/
static String createSuccessLog(String user, String operation, String target) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
    add(Keys.TARGET, target, b);
add(Keys.RESULT, Constants.SUCCESS, b);
return b.toString();
}
/**
* Create a readable and parseable audit log string for a successful event.
*
* @param user User who made the service request to the JobTracker.
* @param operation Operation requested by the user
* @param target The target on which the operation is being performed. Most
* commonly operated targets are jobs, JobTracker, queues etc
*
* <br><br>
* Note that the {@link AuditLogger} uses tabs ('\t') as a key-val delimiter
   * and hence the value fields should not contain tabs ('\t').
*/
static void logSuccess(String user, String operation, String target) {
if (LOG.isInfoEnabled()) {
LOG.info(createSuccessLog(user, operation, target));
}
}
/**
   * A helper API for creating an audit log for a failure event.
   * This is factored out for testing purposes.
*/
static String createFailureLog(String user, String operation, String perm,
String target, String description) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
    add(Keys.TARGET, target, b);
add(Keys.RESULT, Constants.FAILURE, b);
add(Keys.DESCRIPTION, description, b);
add(Keys.PERMISSIONS, perm, b);
return b.toString();
}
/**
* Create a readable and parseable audit log string for a failed event.
*
* @param user User who made the service request to the JobTracker.
* @param operation Operation requested by the user
* @param perm Target permissions like JobACLs for jobs, QueueACLs for queues.
* @param target The target on which the operation is being performed. Most
* commonly operated targets are jobs, JobTracker, queues etc
* @param description Some additional information as to why the operation
* failed.
*
* <br><br>
* Note that the {@link AuditLogger} uses tabs ('\t') as a key-val delimiter
   * and hence the value fields should not contain tabs ('\t').
*/
static void logFailure(String user, String operation, String perm,
String target, String description) {
if (LOG.isWarnEnabled()) {
LOG.warn(createFailureLog(user, operation, perm, target, description));
}
}
/**
* A helper api to add remote IP address
*/
static void addRemoteIP(StringBuilder b) {
InetAddress ip = Server.getRemoteIp();
// ip address can be null for testcases
if (ip != null) {
add(Keys.IP, ip.getHostAddress(), b);
}
}
/**
* Adds the first key-val pair to the passed builder in the following format
* key=value
*/
static void start(Keys key, String value, StringBuilder b) {
b.append(key.name()).append(Constants.KEY_VAL_SEPARATOR).append(value);
}
/**
* Appends the key-val pair to the passed builder in the following format
* <pair-delim>key=value
*/
static void add(Keys key, String value, StringBuilder b) {
b.append(Constants.PAIR_SEPARATOR).append(key.name())
.append(Constants.KEY_VAL_SEPARATOR).append(value);
}
}
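// Usage sketch, for illustration only: what the helpers above emit. Outside an
// IPC handler Server.getRemoteIp() returns null, so the IP field is omitted and
// the call below produces a tab-separated line of the form
//   USER=alice<TAB>OPERATION=REFRESH_QUEUE<TAB>TARGET=JobTracker<TAB>RESULT=SUCCESS
// The user name is made up for the example.
class AuditLoggerSketch {
  public static void main(String[] args) {
    System.out.println(AuditLogger.createSuccessLog(
        "alice", AuditLogger.Constants.REFRESH_QUEUE,
        AuditLogger.Constants.JOBTRACKER));
  }
}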
| 5,530 | 34.683871 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputLogFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
/**
 * This class filters log files from the given directory.
 * It doesn't accept paths containing _logs.
 * It can be used to list the paths of an output directory as follows:
* Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
* new OutputLogFilter()));
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class OutputLogFilter implements PathFilter {
private static final PathFilter LOG_FILTER =
new Utils.OutputFileUtils.OutputLogFilter();
public boolean accept(Path path) {
return LOG_FILTER.accept(path);
}
}
| 1,613 | 37.428571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RawKeyValueIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.util.Progress;
/**
* <code>RawKeyValueIterator</code> is an iterator used to iterate over
* the raw keys and values during sort/merge of intermediate data.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface RawKeyValueIterator {
/**
* Gets the current raw key.
*
* @return Gets the current raw key as a DataInputBuffer
* @throws IOException
*/
DataInputBuffer getKey() throws IOException;
/**
* Gets the current raw value.
*
* @return Gets the current raw value as a DataInputBuffer
* @throws IOException
*/
DataInputBuffer getValue() throws IOException;
/**
* Sets up the current key and value (for getKey and getValue).
*
* @return <code>true</code> if there exists a key/value,
* <code>false</code> otherwise.
* @throws IOException
*/
boolean next() throws IOException;
/**
* Closes the iterator so that the underlying streams can be closed.
*
* @throws IOException
*/
void close() throws IOException;
/** Gets the Progress object; this has a float (0.0 - 1.0)
* indicating the bytes processed by the iterator so far
*/
Progress getProgress();
}
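// Usage sketch, for illustration only: the typical consumption loop for the
// interface above. The byte-counting body is arbitrary; getKey()/getValue()
// are only valid after next() has returned true.
class RawKeyValueIteratorSketch {
  static long countBytes(RawKeyValueIterator iter) throws IOException {
    long bytes = 0;
    try {
      while (iter.next()) {
        DataInputBuffer key = iter.getKey();
        DataInputBuffer value = iter.getValue();
        bytes += key.getLength() - key.getPosition();
        bytes += value.getLength() - value.getPosition();
      }
    } finally {
      iter.close();
    }
    return bytes;
  }
}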
| 2,257 | 30.802817 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.security.authorize.AccessControlList;
/**************************************************
* Describes the current status of a job. This is
* not intended to be a comprehensive piece of data.
* For that, look at JobProfile.
*************************************************
**/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobStatus extends org.apache.hadoop.mapreduce.JobStatus {
public static final int RUNNING =
org.apache.hadoop.mapreduce.JobStatus.State.RUNNING.getValue();
public static final int SUCCEEDED =
org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED.getValue();
public static final int FAILED =
org.apache.hadoop.mapreduce.JobStatus.State.FAILED.getValue();
public static final int PREP =
org.apache.hadoop.mapreduce.JobStatus.State.PREP.getValue();
public static final int KILLED =
org.apache.hadoop.mapreduce.JobStatus.State.KILLED.getValue();
private static final String UNKNOWN = "UNKNOWN";
private static final String[] runStates =
{UNKNOWN, "RUNNING", "SUCCEEDED", "FAILED", "PREP", "KILLED"};
/**
* Helper method to get human-readable state of the job.
* @param state job state
* @return human-readable state of the job
*/
public static String getJobRunState(int state) {
if (state < 1 || state >= runStates.length) {
return UNKNOWN;
}
return runStates[state];
}
static org.apache.hadoop.mapreduce.JobStatus.State getEnum(int state) {
switch (state) {
case 1: return org.apache.hadoop.mapreduce.JobStatus.State.RUNNING;
case 2: return org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED;
case 3: return org.apache.hadoop.mapreduce.JobStatus.State.FAILED;
case 4: return org.apache.hadoop.mapreduce.JobStatus.State.PREP;
case 5: return org.apache.hadoop.mapreduce.JobStatus.State.KILLED;
}
return null;
}
  /**
   * Default constructor.
   */
public JobStatus() {
}
@Deprecated
public JobStatus(JobID jobid, float mapProgress, float reduceProgress,
float cleanupProgress, int runState) {
this(jobid, mapProgress, reduceProgress, cleanupProgress, runState, null,
null, null, null);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param runState The current state of the job
*/
@Deprecated
public JobStatus(JobID jobid, float mapProgress, float reduceProgress,
int runState) {
this (jobid, mapProgress, reduceProgress, runState, null, null, null, null);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param runState The current state of the job
* @param jp Priority of the job.
*/
@Deprecated
public JobStatus(JobID jobid, float mapProgress, float reduceProgress,
float cleanupProgress, int runState, JobPriority jp) {
this(jobid, mapProgress, reduceProgress, cleanupProgress, runState, jp,
null, null, null, null);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
*/
@Deprecated
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
int runState, JobPriority jp) {
this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
runState, jp, null, null, null, null);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on cleanup
* @param runState The current state of the job
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float mapProgress, float reduceProgress,
float cleanupProgress, int runState,
String user, String jobName,
String jobFile, String trackingUrl) {
this(jobid, mapProgress, reduceProgress, cleanupProgress, runState,
JobPriority.NORMAL, user, jobName, jobFile, trackingUrl);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param runState The current state of the job
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float mapProgress, float reduceProgress,
int runState, String user, String jobName,
String jobFile, String trackingUrl) {
this(jobid, mapProgress, reduceProgress, 0.0f, runState, user, jobName,
jobFile, trackingUrl);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float mapProgress, float reduceProgress,
float cleanupProgress, int runState, JobPriority jp,
String user, String jobName, String jobFile,
String trackingUrl) {
this(jobid, 0.0f, mapProgress, reduceProgress,
cleanupProgress, runState, jp, user, jobName, jobFile,
trackingUrl);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
int runState, JobPriority jp, String user, String jobName,
String jobFile, String trackingUrl) {
this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
runState, jp, user, jobName, "default", jobFile, trackingUrl);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
* @param isUber Whether job running in uber mode
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
int runState, JobPriority jp, String user, String jobName,
String jobFile, String trackingUrl, boolean isUber) {
this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
runState, jp, user, jobName, "default", jobFile, trackingUrl, isUber);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param queue job queue name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
int runState, JobPriority jp,
String user, String jobName, String queue,
String jobFile, String trackingUrl) {
this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
runState, jp,
user, jobName, queue, jobFile, trackingUrl, false);
}
/**
* Create a job status object for a given jobid.
* @param jobid The jobid of the job
* @param setupProgress The progress made on the setup
* @param mapProgress The progress made on the maps
* @param reduceProgress The progress made on the reduces
* @param cleanupProgress The progress made on the cleanup
* @param runState The current state of the job
* @param jp Priority of the job.
* @param user userid of the person who submitted the job.
* @param jobName user-specified job name.
* @param queue job queue name.
* @param jobFile job configuration file.
* @param trackingUrl link to the web-ui for details of the job.
* @param isUber Whether job running in uber mode
*/
public JobStatus(JobID jobid, float setupProgress, float mapProgress,
float reduceProgress, float cleanupProgress,
int runState, JobPriority jp,
String user, String jobName, String queue,
String jobFile, String trackingUrl, boolean isUber) {
super(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
getEnum(runState), org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()),
user, jobName, queue, jobFile, trackingUrl, isUber);
}
public static JobStatus downgrade(org.apache.hadoop.mapreduce.JobStatus stat){
JobStatus old = new JobStatus(JobID.downgrade(stat.getJobID()),
stat.getSetupProgress(), stat.getMapProgress(), stat.getReduceProgress(),
stat.getCleanupProgress(), stat.getState().getValue(),
JobPriority.valueOf(stat.getPriority().name()),
stat.getUsername(), stat.getJobName(), stat.getQueue(), stat.getJobFile(),
stat.getTrackingUrl(), stat.isUber());
old.setStartTime(stat.getStartTime());
old.setFinishTime(stat.getFinishTime());
old.setSchedulingInfo(stat.getSchedulingInfo());
old.setHistoryFile(stat.getHistoryFile());
return old;
}
/**
* @deprecated use getJobID instead
*/
@Deprecated
public String getJobId() { return getJobID().toString(); }
/**
* @return The jobid of the Job
*/
public JobID getJobID() { return JobID.downgrade(super.getJobID()); }
/**
* Return the priority of the job
* @return job priority
*/
public synchronized JobPriority getJobPriority() {
return JobPriority.valueOf(super.getPriority().name());
}
/**
* Sets the map progress of this job
* @param p The value of map progress to set to
*/
protected synchronized void setMapProgress(float p) {
super.setMapProgress(p);
}
/**
* Sets the cleanup progress of this job
* @param p The value of cleanup progress to set to
*/
protected synchronized void setCleanupProgress(float p) {
super.setCleanupProgress(p);
}
/**
* Sets the setup progress of this job
* @param p The value of setup progress to set to
*/
protected synchronized void setSetupProgress(float p) {
super.setSetupProgress(p);
}
/**
* Sets the reduce progress of this Job
* @param p The value of reduce progress to set to
*/
protected synchronized void setReduceProgress(float p) {
super.setReduceProgress(p);
}
/**
* Set the finish time of the job
* @param finishTime The finishTime of the job
*/
protected synchronized void setFinishTime(long finishTime) {
super.setFinishTime(finishTime);
}
/**
* Set the job history file url for a completed job
*/
protected synchronized void setHistoryFile(String historyFile) {
super.setHistoryFile(historyFile);
}
/**
* Set the link to the web-ui for details of the job.
*/
protected synchronized void setTrackingUrl(String trackingUrl) {
super.setTrackingUrl(trackingUrl);
}
/**
* Set the job retire flag to true.
*/
protected synchronized void setRetired() {
super.setRetired();
}
/**
* Change the current run state of the job.
*
* The setter is public to be compatible with M/R 1.x, however, it should be
* used internally.
*
* @param state the state of the job
*/
@InterfaceAudience.Private
public synchronized void setRunState(int state) {
super.setState(getEnum(state));
}
/**
* @return running state of the job
*/
public synchronized int getRunState() { return super.getState().getValue(); }
/**
* Set the start time of the job
* @param startTime The startTime of the job
*/
protected synchronized void setStartTime(long startTime) {
super.setStartTime(startTime);
}
/**
* @param userName The username of the job
*/
protected synchronized void setUsername(String userName) {
super.setUsername(userName);
}
/**
* Used to set the scheduling information associated to a particular Job.
*
* The setter is public to be compatible with M/R 1.x, however, it should be
* used internally.
*
* @param schedulingInfo Scheduling information of the job
*/
@InterfaceAudience.Private
public synchronized void setSchedulingInfo(String schedulingInfo) {
super.setSchedulingInfo(schedulingInfo);
}
protected synchronized void setJobACLs(Map<JobACL, AccessControlList> acls) {
super.setJobACLs(acls);
}
public synchronized void setFailureInfo(String failureInfo) {
super.setFailureInfo(failureInfo);
}
/**
* Set the priority of the job, defaulting to NORMAL.
* @param jp new job priority
*/
public synchronized void setJobPriority(JobPriority jp) {
super.setPriority(
org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()));
}
/**
* @return Percentage of progress in maps
*/
public synchronized float mapProgress() { return super.getMapProgress(); }
/**
* @return Percentage of progress in cleanup
*/
public synchronized float cleanupProgress() {
return super.getCleanupProgress();
}
/**
* @return Percentage of progress in setup
*/
public synchronized float setupProgress() {
return super.getSetupProgress();
}
/**
* @return Percentage of progress in reduce
*/
public synchronized float reduceProgress() {
return super.getReduceProgress();
}
// A utility to convert new job runstates to the old ones.
static int getOldNewJobRunState(
org.apache.hadoop.mapreduce.JobStatus.State state) {
return state.getValue();
}
}
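// Usage sketch, for illustration only: the int constants above map to
// human-readable names via getJobRunState(); out-of-range values fall back to
// UNKNOWN.
class JobStatusRunStateSketch {
  public static void main(String[] args) {
    System.out.println(JobStatus.getJobRunState(JobStatus.RUNNING));    // RUNNING
    System.out.println(JobStatus.getJobRunState(JobStatus.SUCCEEDED));  // SUCCEEDED
    System.out.println(JobStatus.getJobRunState(42));                   // UNKNOWN
  }
}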
| 17,490 | 35.668763 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
import org.apache.hadoop.mapreduce.task.reduce.Shuffle;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
/** A Reduce task. */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ReduceTask extends Task {
static { // register a ctor
WritableFactories.setFactory
(ReduceTask.class,
new WritableFactory() {
public Writable newInstance() { return new ReduceTask(); }
});
}
private static final Log LOG = LogFactory.getLog(ReduceTask.class.getName());
private int numMaps;
private CompressionCodec codec;
// If this is a LocalJobRunner-based job, this will
// be a mapping from map task attempts to their output files.
// This will be null in other cases.
private Map<TaskAttemptID, MapOutputFile> localMapFiles;
{
getProgress().setStatus("reduce");
setPhase(TaskStatus.Phase.SHUFFLE); // phase to start with
}
private Progress copyPhase;
private Progress sortPhase;
private Progress reducePhase;
private Counters.Counter shuffledMapsCounter =
getCounters().findCounter(TaskCounter.SHUFFLED_MAPS);
private Counters.Counter reduceShuffleBytes =
getCounters().findCounter(TaskCounter.REDUCE_SHUFFLE_BYTES);
private Counters.Counter reduceInputKeyCounter =
getCounters().findCounter(TaskCounter.REDUCE_INPUT_GROUPS);
private Counters.Counter reduceInputValueCounter =
getCounters().findCounter(TaskCounter.REDUCE_INPUT_RECORDS);
private Counters.Counter reduceOutputCounter =
getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
private Counters.Counter reduceCombineInputCounter =
getCounters().findCounter(TaskCounter.COMBINE_INPUT_RECORDS);
private Counters.Counter reduceCombineOutputCounter =
getCounters().findCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
private Counters.Counter fileOutputByteCounter =
getCounters().findCounter(FileOutputFormatCounter.BYTES_WRITTEN);
  // A custom comparator for map output files. The ordering is determined
  // by the file's size and path. For files with the same size but different
  // paths, the first argument is considered smaller than the second one;
  // files with the same size and the same path are considered equal.
private Comparator<FileStatus> mapOutputFileComparator =
new Comparator<FileStatus>() {
public int compare(FileStatus a, FileStatus b) {
if (a.getLen() < b.getLen())
return -1;
else if (a.getLen() == b.getLen())
if (a.getPath().toString().equals(b.getPath().toString()))
return 0;
else
return -1;
else
return 1;
}
};
// A sorted set for keeping a set of map output files on disk
private final SortedSet<FileStatus> mapOutputFilesOnDisk =
new TreeSet<FileStatus>(mapOutputFileComparator);
public ReduceTask() {
super();
}
public ReduceTask(String jobFile, TaskAttemptID taskId,
int partition, int numMaps, int numSlotsRequired) {
super(jobFile, taskId, partition, numSlotsRequired);
this.numMaps = numMaps;
}
/**
* Register the set of mapper outputs created by a LocalJobRunner-based
* job with this ReduceTask so it knows where to fetch from.
*
* This should not be called in normal (networked) execution.
*/
public void setLocalMapFiles(Map<TaskAttemptID, MapOutputFile> mapFiles) {
this.localMapFiles = mapFiles;
}
private CompressionCodec initCodec() {
// check if map-outputs are to be compressed
if (conf.getCompressMapOutput()) {
Class<? extends CompressionCodec> codecClass =
conf.getMapOutputCompressorClass(DefaultCodec.class);
return ReflectionUtils.newInstance(codecClass, conf);
}
return null;
}
@Override
public boolean isMapTask() {
return false;
}
public int getNumMaps() { return numMaps; }
/**
* Localize the given JobConf to be specific for this task.
*/
@Override
public void localizeConfiguration(JobConf conf) throws IOException {
super.localizeConfiguration(conf);
conf.setNumMapTasks(numMaps);
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeInt(numMaps); // write the number of maps
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
numMaps = in.readInt();
}
// Get the input files for the reducer (for local jobs).
private Path[] getMapFiles(FileSystem fs) throws IOException {
List<Path> fileList = new ArrayList<Path>();
for(int i = 0; i < numMaps; ++i) {
fileList.add(mapOutputFile.getInputFile(i));
}
return fileList.toArray(new Path[0]);
}
private class ReduceValuesIterator<KEY,VALUE>
extends ValuesIterator<KEY,VALUE> {
public ReduceValuesIterator (RawKeyValueIterator in,
RawComparator<KEY> comparator,
Class<KEY> keyClass,
Class<VALUE> valClass,
Configuration conf, Progressable reporter)
throws IOException {
super(in, comparator, keyClass, valClass, conf, reporter);
}
@Override
public VALUE next() {
reduceInputValueCounter.increment(1);
return moveToNext();
}
protected VALUE moveToNext() {
return super.next();
}
public void informReduceProgress() {
reducePhase.set(super.in.getProgress().getProgress()); // update progress
reporter.progress();
}
}
private class SkippingReduceValuesIterator<KEY,VALUE>
extends ReduceValuesIterator<KEY,VALUE> {
private SkipRangeIterator skipIt;
private TaskUmbilicalProtocol umbilical;
private Counters.Counter skipGroupCounter;
private Counters.Counter skipRecCounter;
private long grpIndex = -1;
private Class<KEY> keyClass;
private Class<VALUE> valClass;
private SequenceFile.Writer skipWriter;
private boolean toWriteSkipRecs;
private boolean hasNext;
private TaskReporter reporter;
public SkippingReduceValuesIterator(RawKeyValueIterator in,
RawComparator<KEY> comparator, Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf, TaskReporter reporter,
TaskUmbilicalProtocol umbilical) throws IOException {
super(in, comparator, keyClass, valClass, conf, reporter);
this.umbilical = umbilical;
this.skipGroupCounter =
reporter.getCounter(TaskCounter.REDUCE_SKIPPED_GROUPS);
this.skipRecCounter =
reporter.getCounter(TaskCounter.REDUCE_SKIPPED_RECORDS);
this.toWriteSkipRecs = toWriteSkipRecs() &&
SkipBadRecords.getSkipOutputPath(conf)!=null;
this.keyClass = keyClass;
this.valClass = valClass;
this.reporter = reporter;
skipIt = getSkipRanges().skipRangeIterator();
mayBeSkip();
}
public void nextKey() throws IOException {
super.nextKey();
mayBeSkip();
}
public boolean more() {
return super.more() && hasNext;
}
private void mayBeSkip() throws IOException {
hasNext = skipIt.hasNext();
if(!hasNext) {
LOG.warn("Further groups got skipped.");
return;
}
grpIndex++;
long nextGrpIndex = skipIt.next();
long skip = 0;
long skipRec = 0;
while(grpIndex<nextGrpIndex && super.more()) {
while (hasNext()) {
VALUE value = moveToNext();
if(toWriteSkipRecs) {
writeSkippedRec(getKey(), value);
}
skipRec++;
}
super.nextKey();
grpIndex++;
skip++;
}
//close the skip writer once all the ranges are skipped
if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) {
skipWriter.close();
}
skipGroupCounter.increment(skip);
skipRecCounter.increment(skipRec);
reportNextRecordRange(umbilical, grpIndex);
}
@SuppressWarnings("unchecked")
private void writeSkippedRec(KEY key, VALUE value) throws IOException{
if(skipWriter==null) {
Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
Path skipFile = new Path(skipDir, getTaskID().toString());
skipWriter = SequenceFile.createWriter(
skipFile.getFileSystem(conf), conf, skipFile,
keyClass, valClass,
CompressionType.BLOCK, reporter);
}
skipWriter.append(key, value);
}
}
@Override
@SuppressWarnings("unchecked")
public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
throws IOException, InterruptedException, ClassNotFoundException {
job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
if (isMapOrReduce()) {
copyPhase = getProgress().addPhase("copy");
sortPhase = getProgress().addPhase("sort");
reducePhase = getProgress().addPhase("reduce");
}
// start thread that will handle communication with parent
TaskReporter reporter = startReporter(umbilical);
boolean useNewApi = job.getUseNewReducer();
initialize(job, getJobID(), reporter, useNewApi);
// check if it is a cleanupJobTask
if (jobCleanup) {
runJobCleanupTask(umbilical, reporter);
return;
}
if (jobSetup) {
runJobSetupTask(umbilical, reporter);
return;
}
if (taskCleanup) {
runTaskCleanupTask(umbilical, reporter);
return;
}
// Initialize the codec
codec = initCodec();
RawKeyValueIterator rIter = null;
ShuffleConsumerPlugin shuffleConsumerPlugin = null;
Class combinerClass = conf.getCombinerClass();
CombineOutputCollector combineCollector =
(null != combinerClass) ?
new CombineOutputCollector(reduceCombineOutputCounter, reporter, conf) : null;
Class<? extends ShuffleConsumerPlugin> clazz =
job.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN, Shuffle.class, ShuffleConsumerPlugin.class);
shuffleConsumerPlugin = ReflectionUtils.newInstance(clazz, job);
LOG.info("Using ShuffleConsumerPlugin: " + shuffleConsumerPlugin);
ShuffleConsumerPlugin.Context shuffleContext =
new ShuffleConsumerPlugin.Context(getTaskID(), job, FileSystem.getLocal(job), umbilical,
super.lDirAlloc, reporter, codec,
combinerClass, combineCollector,
spilledRecordsCounter, reduceCombineInputCounter,
shuffledMapsCounter,
reduceShuffleBytes, failedShuffleCounter,
mergedMapOutputsCounter,
taskStatus, copyPhase, sortPhase, this,
mapOutputFile, localMapFiles);
shuffleConsumerPlugin.init(shuffleContext);
rIter = shuffleConsumerPlugin.run();
// free up the data structures
mapOutputFilesOnDisk.clear();
sortPhase.complete(); // sort is complete
setPhase(TaskStatus.Phase.REDUCE);
statusUpdate(umbilical);
Class keyClass = job.getMapOutputKeyClass();
Class valueClass = job.getMapOutputValueClass();
RawComparator comparator = job.getOutputValueGroupingComparator();
if (useNewApi) {
runNewReducer(job, umbilical, reporter, rIter, comparator,
keyClass, valueClass);
} else {
runOldReducer(job, umbilical, reporter, rIter, comparator,
keyClass, valueClass);
}
shuffleConsumerPlugin.close();
done(umbilical, reporter);
}
@SuppressWarnings("unchecked")
private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void runOldReducer(JobConf job,
TaskUmbilicalProtocol umbilical,
final TaskReporter reporter,
RawKeyValueIterator rIter,
RawComparator<INKEY> comparator,
Class<INKEY> keyClass,
Class<INVALUE> valueClass) throws IOException {
Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
ReflectionUtils.newInstance(job.getReducerClass(), job);
// make output collector
String finalName = getOutputName(getPartition());
RecordWriter<OUTKEY, OUTVALUE> out = new OldTrackingRecordWriter<OUTKEY, OUTVALUE>(
this, job, reporter, finalName);
final RecordWriter<OUTKEY, OUTVALUE> finalOut = out;
OutputCollector<OUTKEY,OUTVALUE> collector =
new OutputCollector<OUTKEY,OUTVALUE>() {
public void collect(OUTKEY key, OUTVALUE value)
throws IOException {
finalOut.write(key, value);
// indicate that progress update needs to be sent
reporter.progress();
}
};
// apply reduce function
try {
//increment processed counter only if skipping feature is enabled
boolean incrProcCount = SkipBadRecords.getReducerMaxSkipGroups(job)>0 &&
SkipBadRecords.getAutoIncrReducerProcCount(job);
ReduceValuesIterator<INKEY,INVALUE> values = isSkipping() ?
new SkippingReduceValuesIterator<INKEY,INVALUE>(rIter,
comparator, keyClass, valueClass,
job, reporter, umbilical) :
new ReduceValuesIterator<INKEY,INVALUE>(rIter,
comparator, keyClass, valueClass,
job, reporter);
values.informReduceProgress();
while (values.more()) {
reduceInputKeyCounter.increment(1);
reducer.reduce(values.getKey(), values, collector, reporter);
if(incrProcCount) {
reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS, 1);
}
values.nextKey();
values.informReduceProgress();
}
reducer.close();
reducer = null;
out.close(reporter);
out = null;
} finally {
IOUtils.cleanup(LOG, reducer);
closeQuietly(out, reporter);
}
}
static class OldTrackingRecordWriter<K, V> implements RecordWriter<K, V> {
private final RecordWriter<K, V> real;
private final org.apache.hadoop.mapred.Counters.Counter reduceOutputCounter;
private final org.apache.hadoop.mapred.Counters.Counter fileOutputByteCounter;
private final List<Statistics> fsStats;
@SuppressWarnings({ "deprecation", "unchecked" })
public OldTrackingRecordWriter(ReduceTask reduce, JobConf job,
TaskReporter reporter, String finalName) throws IOException {
this.reduceOutputCounter = reduce.reduceOutputCounter;
this.fileOutputByteCounter = reduce.fileOutputByteCounter;
List<Statistics> matchedStats = null;
if (job.getOutputFormat() instanceof FileOutputFormat) {
matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
}
fsStats = matchedStats;
FileSystem fs = FileSystem.get(job);
long bytesOutPrev = getOutputBytes(fsStats);
this.real = job.getOutputFormat().getRecordWriter(fs, job, finalName,
reporter);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
@Override
public void write(K key, V value) throws IOException {
long bytesOutPrev = getOutputBytes(fsStats);
real.write(key, value);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
reduceOutputCounter.increment(1);
}
@Override
public void close(Reporter reporter) throws IOException {
long bytesOutPrev = getOutputBytes(fsStats);
real.close(reporter);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}
static class NewTrackingRecordWriter<K,V>
extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
private final org.apache.hadoop.mapreduce.RecordWriter<K,V> real;
private final org.apache.hadoop.mapreduce.Counter outputRecordCounter;
private final org.apache.hadoop.mapreduce.Counter fileOutputByteCounter;
private final List<Statistics> fsStats;
@SuppressWarnings("unchecked")
NewTrackingRecordWriter(ReduceTask reduce,
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
throws InterruptedException, IOException {
this.outputRecordCounter = reduce.reduceOutputCounter;
this.fileOutputByteCounter = reduce.fileOutputByteCounter;
List<Statistics> matchedStats = null;
if (reduce.outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
.getOutputPath(taskContext), taskContext.getConfiguration());
}
fsStats = matchedStats;
long bytesOutPrev = getOutputBytes(fsStats);
this.real = (org.apache.hadoop.mapreduce.RecordWriter<K, V>) reduce.outputFormat
.getRecordWriter(taskContext);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
@Override
public void close(TaskAttemptContext context) throws IOException,
InterruptedException {
long bytesOutPrev = getOutputBytes(fsStats);
real.close(context);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
@Override
public void write(K key, V value) throws IOException, InterruptedException {
long bytesOutPrev = getOutputBytes(fsStats);
real.write(key,value);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
outputRecordCounter.increment(1);
}
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}
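// Runs a new-API reducer: the raw key/value iterator is wrapped so progress
// is reported on every advance, a TaskAttemptContext is built to resolve the
// configured reducer class, and the tracked record writer is closed whether
// the reducer finishes normally or fails.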
@SuppressWarnings("unchecked")
private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void runNewReducer(JobConf job,
final TaskUmbilicalProtocol umbilical,
final TaskReporter reporter,
RawKeyValueIterator rIter,
RawComparator<INKEY> comparator,
Class<INKEY> keyClass,
Class<INVALUE> valueClass
) throws IOException,InterruptedException,
ClassNotFoundException {
// wrap value iterator to report progress.
final RawKeyValueIterator rawIter = rIter;
rIter = new RawKeyValueIterator() {
public void close() throws IOException {
rawIter.close();
}
public DataInputBuffer getKey() throws IOException {
return rawIter.getKey();
}
public Progress getProgress() {
return rawIter.getProgress();
}
public DataInputBuffer getValue() throws IOException {
return rawIter.getValue();
}
public boolean next() throws IOException {
boolean ret = rawIter.next();
reporter.setProgress(rawIter.getProgress().getProgress());
return ret;
}
};
// make a task context so we can get the classes
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
getTaskID(), reporter);
// make a reducer
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
(org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>)
ReflectionUtils.newInstance(taskContext.getReducerClass(), job);
org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW =
new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(this, taskContext);
job.setBoolean("mapred.skip.on", isSkipping());
job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
org.apache.hadoop.mapreduce.Reducer.Context
reducerContext = createReduceContext(reducer, job, getTaskID(),
rIter, reduceInputKeyCounter,
reduceInputValueCounter,
trackedRW,
committer,
reporter, comparator, keyClass,
valueClass);
try {
reducer.run(reducerContext);
} finally {
trackedRW.close(reducerContext);
}
}
private <OUTKEY, OUTVALUE>
void closeQuietly(RecordWriter<OUTKEY, OUTVALUE> c, Reporter r) {
if (c != null) {
try {
c.close(r);
} catch (Exception e) {
LOG.info("Exception in closing " + c, e);
}
}
}
}
| 23,588 | 35.628882 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* <code>InputSplit</code> represents the data to be processed by an
* individual {@link Mapper}.
*
* <p>Typically, it presents a byte-oriented view of the input, and it is the
* responsibility of the job's {@link RecordReader} to process this and
* present a record-oriented view.
*
* @see InputFormat
* @see RecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface InputSplit extends Writable {
/**
* Get the total number of bytes in the data of the <code>InputSplit</code>.
*
* @return the number of bytes in the input split.
* @throws IOException
*/
long getLength() throws IOException;
/**
* Get the list of hostnames where the input split is located.
*
* @return list of hostnames where data of the <code>InputSplit</code> is
* located as an array of <code>String</code>s.
* @throws IOException
*/
String[] getLocations() throws IOException;
}
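// Illustrative sketch (not part of Hadoop): a minimal InputSplit describing a
// fixed-length chunk of data available on a known set of hosts could look
// like the following; the class name and fields are hypothetical, and the
// DataInput/DataOutput/Text imports are assumed.
//
//   public class ByteRangeSplit implements InputSplit {
//     private long length;
//     private String[] hosts = new String[0];
//     public ByteRangeSplit() { }                  // no-arg ctor for Writable
//     public ByteRangeSplit(long length, String[] hosts) {
//       this.length = length;
//       this.hosts = hosts;
//     }
//     public long getLength() { return length; }
//     public String[] getLocations() { return hosts; }
//     public void write(DataOutput out) throws IOException {
//       out.writeLong(length);
//       out.writeInt(hosts.length);
//       for (String h : hosts) { Text.writeString(out, h); }
//     }
//     public void readFields(DataInput in) throws IOException {
//       length = in.readLong();
//       hosts = new String[in.readInt()];
//       for (int i = 0; i < hosts.length; i++) { hosts[i] = Text.readString(in); }
//     }
//   }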
| 1,978 | 32.542373 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapFileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
/** An {@link OutputFormat} that writes {@link MapFile}s.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapFileOutputFormat
extends FileOutputFormat<WritableComparable, Writable> {
public RecordWriter<WritableComparable, Writable> getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress)
throws IOException {
// get the path of the temporary output file
Path file = FileOutputFormat.getTaskOutputPath(job, name);
FileSystem fs = file.getFileSystem(job);
CompressionCodec codec = null;
CompressionType compressionType = CompressionType.NONE;
if (getCompressOutput(job)) {
// find the kind of compression to do
compressionType = SequenceFileOutputFormat.getOutputCompressionType(job);
// find the right codec
Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job,
DefaultCodec.class);
codec = ReflectionUtils.newInstance(codecClass, job);
}
// ignore the progress parameter, since MapFile is local
final MapFile.Writer out =
new MapFile.Writer(job, fs, file.toString(),
job.getOutputKeyClass().asSubclass(WritableComparable.class),
job.getOutputValueClass().asSubclass(Writable.class),
compressionType, codec,
progress);
return new RecordWriter<WritableComparable, Writable>() {
public void write(WritableComparable key, Writable value)
throws IOException {
out.append(key, value);
}
public void close(Reporter reporter) throws IOException { out.close();}
};
}
/** Open the output generated by this format. */
public static MapFile.Reader[] getReaders(FileSystem ignored, Path dir,
Configuration conf)
throws IOException {
return org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat.
getReaders(dir, conf);
}
/** Get an entry from output generated by this class. */
public static <K extends WritableComparable, V extends Writable>
Writable getEntry(MapFile.Reader[] readers,
Partitioner<K, V> partitioner,
K key,
V value) throws IOException {
int part = partitioner.getPartition(key, value, readers.length);
return readers[part].get(key, value);
}
}
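// Illustrative usage sketch (not part of the original class): reading back a
// single entry from a completed job's MapFile output. The output path, the
// partitioner, and the Text key/value types are assumptions; imports for
// Text and HashPartitioner (org.apache.hadoop.mapred.lib) are assumed.
//
//   FileSystem fs = FileSystem.get(conf);
//   MapFile.Reader[] readers =
//       MapFileOutputFormat.getReaders(fs, new Path("/out"), conf);
//   Partitioner<Text, Text> partitioner = new HashPartitioner<Text, Text>();
//   Text value = new Text();
//   Writable found =
//       MapFileOutputFormat.getEntry(readers, partitioner, new Text("k"), value);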
| 4,003 | 37.5 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.Flushable;
import java.util.LinkedList;
import java.util.Queue;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.log4j.FileAppender;
import org.apache.log4j.spi.LoggingEvent;
/**
* A simple log4j-appender for the task child's
* map-reduce system logs.
*
*/
@InterfaceStability.Unstable
public class TaskLogAppender extends FileAppender implements Flushable {
private String taskId; //taskId should be managed as String rather than TaskID object
//so that log4j can configure it from the configuration(log4j.properties).
private Integer maxEvents;
private Queue<LoggingEvent> tail = null;
private Boolean isCleanup;
// System properties passed in from JVM runner
static final String ISCLEANUP_PROPERTY = "hadoop.tasklog.iscleanup";
static final String LOGSIZE_PROPERTY = "hadoop.tasklog.totalLogFileSize";
static final String TASKID_PROPERTY = "hadoop.tasklog.taskid";
@Override
public void activateOptions() {
synchronized (this) {
setOptionsFromSystemProperties();
if (maxEvents > 0) {
tail = new LinkedList<LoggingEvent>();
}
setFile(TaskLog.getTaskLogFile(TaskAttemptID.forName(taskId),
isCleanup, TaskLog.LogName.SYSLOG).toString());
setAppend(true);
super.activateOptions();
}
}
/**
* The Task Runner passes in the options as system properties. Set
* the options if the setters haven't already been called.
*/
private synchronized void setOptionsFromSystemProperties() {
if (isCleanup == null) {
String propValue = System.getProperty(ISCLEANUP_PROPERTY, "false");
isCleanup = Boolean.valueOf(propValue);
}
if (taskId == null) {
taskId = System.getProperty(TASKID_PROPERTY);
}
if (maxEvents == null) {
String propValue = System.getProperty(LOGSIZE_PROPERTY, "0");
setTotalLogFileSize(Long.parseLong(propValue));
}
}
@Override
public void append(LoggingEvent event) {
synchronized (this) {
if (tail == null) {
super.append(event);
} else {
if (tail.size() >= maxEvents) {
tail.remove();
}
tail.add(event);
}
}
}
@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}
@Override
public synchronized void close() {
if (tail != null) {
for(LoggingEvent event: tail) {
super.append(event);
}
}
super.close();
}
/**
* Getter/Setter methods for log4j.
*/
public synchronized String getTaskId() {
return taskId;
}
public synchronized void setTaskId(String taskId) {
this.taskId = taskId;
}
private static final int EVENT_SIZE = 100;
public synchronized long getTotalLogFileSize() {
return maxEvents * EVENT_SIZE;
}
public synchronized void setTotalLogFileSize(long logSize) {
maxEvents = (int) logSize / EVENT_SIZE;
}
/**
* Set whether the task is a cleanup attempt or not.
*
* @param isCleanup
* true if the task is cleanup attempt, false otherwise.
*/
public synchronized void setIsCleanup(boolean isCleanup) {
this.isCleanup = isCleanup;
}
/**
* Get whether task is cleanup attempt or not.
*
* @return true if the task is cleanup attempt, false otherwise.
*/
public synchronized boolean getIsCleanup() {
return isCleanup;
}
}
| 4,243 | 26.558442 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.QueueState;
import org.apache.hadoop.security.authorize.AccessControlList;
import static org.apache.hadoop.mapred.QueueManager.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.ArrayList;
/**
* Class to build the queue hierarchy from the deprecated configuration (mapred-site.xml).
* Generates a single level of queue hierarchy.
*
*/
class DeprecatedQueueConfigurationParser extends QueueConfigurationParser {
private static final Log LOG =
LogFactory.getLog(DeprecatedQueueConfigurationParser.class);
static final String MAPRED_QUEUE_NAMES_KEY = "mapred.queue.names";
DeprecatedQueueConfigurationParser(Configuration conf) {
// If no queue configuration is present, return immediately.
if(!deprecatedConf(conf)) {
return;
}
List<Queue> listq = createQueues(conf);
this.setAclsEnabled(conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false));
root = new Queue();
root.setName("");
for (Queue q : listq) {
root.addChild(q);
}
}
private List<Queue> createQueues(Configuration conf) {
String[] queueNameValues = conf.getStrings(
MAPRED_QUEUE_NAMES_KEY);
List<Queue> list = new ArrayList<Queue>();
for (String name : queueNameValues) {
try {
Map<String, AccessControlList> acls = getQueueAcls(
name, conf);
QueueState state = getQueueState(name, conf);
Queue q = new Queue(name, acls, state);
list.add(q);
} catch (Throwable t) {
LOG.warn("Not able to initialize queue " + name);
}
}
return list;
}
/**
* Only applicable to leaf level queues.
* Parse the queue state from the configuration.
*/
private QueueState getQueueState(String name, Configuration conf) {
String stateVal = conf.get(
toFullPropertyName(name, "state"),
QueueState.RUNNING.getStateName());
return QueueState.getState(stateVal);
}
/**
* Check if queue properties are configured in the passed in
* configuration. If yes, print out deprecation warning messages.
*/
private boolean deprecatedConf(Configuration conf) {
String[] queues = null;
String queueNameValues = getQueueNames(conf);
if (queueNameValues == null) {
return false;
} else {
LOG.warn(
"Configuring \"" + MAPRED_QUEUE_NAMES_KEY
+ "\" in mapred-site.xml or "
+ "hadoop-site.xml is deprecated and will overshadow "
+ QUEUE_CONF_FILE_NAME + ". Remove this property and configure "
+ "queue hierarchy in " + QUEUE_CONF_FILE_NAME);
// store queues so we can check if ACLs are also configured
// in the deprecated files.
queues = conf.getStrings(MAPRED_QUEUE_NAMES_KEY);
}
// check if acls are defined
if (queues != null) {
for (String queue : queues) {
for (QueueACL qAcl : QueueACL.values()) {
String key = toFullPropertyName(queue, qAcl.getAclName());
String aclString = conf.get(key);
if (aclString != null) {
LOG.warn(
"Configuring queue ACLs in mapred-site.xml or " +
"hadoop-site.xml is deprecated. Configure queue ACLs in " +
QUEUE_CONF_FILE_NAME);
// Even one configured ACL string is enough to warrant the warning,
// so we can return from here.
return true;
}
}
}
}
return true;
}
private String getQueueNames(Configuration conf) {
String queueNameValues = conf.get(MAPRED_QUEUE_NAMES_KEY);
return queueNameValues;
}
/**
* Parse ACLs for the queue from the configuration.
*/
private Map<String, AccessControlList> getQueueAcls(
String name,
Configuration conf) {
HashMap<String, AccessControlList> map =
new HashMap<String, AccessControlList>();
for (QueueACL qAcl : QueueACL.values()) {
String aclKey = toFullPropertyName(name, qAcl.getAclName());
map.put(
aclKey, new AccessControlList(
conf.get(
aclKey, "*")));
}
return map;
}
}
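// Illustrative deprecated configuration sketch (the queue names are
// assumptions): this parser reads the comma-separated value of
// "mapred.queue.names" from mapred-site.xml, e.g.
//
//   <property>
//     <name>mapred.queue.names</name>
//     <value>default,research</value>
//   </property>
//
// Per-queue state and ACL keys are resolved through toFullPropertyName and
// are omitted here.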
| 5,142 | 32.835526 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.*;
/**
* An {@link OutputFormat} that writes plain text files.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {
protected static class LineRecordWriter<K, V>
implements RecordWriter<K, V> {
private static final String utf8 = "UTF-8";
private static final byte[] newline;
static {
try {
newline = "\n".getBytes(utf8);
} catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException("can't find " + utf8 + " encoding");
}
}
protected DataOutputStream out;
private final byte[] keyValueSeparator;
public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
this.out = out;
try {
this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
} catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException("can't find " + utf8 + " encoding");
}
}
public LineRecordWriter(DataOutputStream out) {
this(out, "\t");
}
/**
* Write the object to the byte stream, handling Text as a special
* case.
* @param o the object to print
* @throws IOException if the write throws, we pass it on
*/
private void writeObject(Object o) throws IOException {
if (o instanceof Text) {
Text to = (Text) o;
out.write(to.getBytes(), 0, to.getLength());
} else {
out.write(o.toString().getBytes(utf8));
}
}
public synchronized void write(K key, V value)
throws IOException {
boolean nullKey = key == null || key instanceof NullWritable;
boolean nullValue = value == null || value instanceof NullWritable;
if (nullKey && nullValue) {
return;
}
if (!nullKey) {
writeObject(key);
}
if (!(nullKey || nullValue)) {
out.write(keyValueSeparator);
}
if (!nullValue) {
writeObject(value);
}
out.write(newline);
}
public synchronized void close(Reporter reporter) throws IOException {
out.close();
}
}
public RecordWriter<K, V> getRecordWriter(FileSystem ignored,
JobConf job,
String name,
Progressable progress)
throws IOException {
boolean isCompressed = getCompressOutput(job);
String keyValueSeparator = job.get("mapreduce.output.textoutputformat.separator",
"\t");
if (!isCompressed) {
Path file = FileOutputFormat.getTaskOutputPath(job, name);
FileSystem fs = file.getFileSystem(job);
FSDataOutputStream fileOut = fs.create(file, progress);
return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
} else {
Class<? extends CompressionCodec> codecClass =
getOutputCompressorClass(job, GzipCodec.class);
// create the named codec
CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job);
// build the filename including the extension
Path file =
FileOutputFormat.getTaskOutputPath(job,
name + codec.getDefaultExtension());
FileSystem fs = file.getFileSystem(job);
FSDataOutputStream fileOut = fs.create(file, progress);
return new LineRecordWriter<K, V>(new DataOutputStream
(codec.createOutputStream(fileOut)),
keyValueSeparator);
}
}
}
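// Illustrative usage sketch (not part of the original class; "conf" and the
// separator value are assumptions): LineRecordWriter reads its key/value
// separator from the job configuration, so a job can override the default
// tab before submission.
//
//   JobConf job = new JobConf(conf);
//   job.set("mapreduce.output.textoutputformat.separator", ",");
//   job.setOutputFormat(TextOutputFormat.class);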
| 5,021 | 34.118881 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptContextImpl.java
|
/* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.util.Progressable;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class TaskAttemptContextImpl
extends org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
implements TaskAttemptContext {
private Reporter reporter;
public TaskAttemptContextImpl(JobConf conf, TaskAttemptID taskid) {
this(conf, taskid, Reporter.NULL);
}
TaskAttemptContextImpl(JobConf conf, TaskAttemptID taskid,
Reporter reporter) {
super(conf, taskid);
this.reporter = reporter;
}
/**
* Get the taskAttemptID.
*
* @return TaskAttemptID
*/
public TaskAttemptID getTaskAttemptID() {
return (TaskAttemptID) super.getTaskAttemptID();
}
public Progressable getProgressible() {
return reporter;
}
public JobConf getJobConf() {
return (JobConf) getConfiguration();
}
@Override
public float getProgress() {
return reporter.getProgress();
}
@Override
public Counter getCounter(Enum<?> counterName) {
return reporter.getCounter(counterName);
}
@Override
public Counter getCounter(String groupName, String counterName) {
return reporter.getCounter(groupName, counterName);
}
/**
* Report progress.
*/
@Override
public void progress() {
reporter.progress();
}
/**
* Set the current status of the task to the given string.
*/
@Override
public void setStatus(String status) {
setStatusString(status);
reporter.setStatus(status);
}
}
| 2,528 | 26.193548 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
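/**
 * An in-memory cache of map-output index information, keyed by map task ID.
 * Index files are read lazily on first request, concurrent readers wait for
 * an in-flight load to finish, and entries are evicted in FIFO order once the
 * memory limit configured through {@link TTConfig#TT_INDEX_CACHE} (in MB) is
 * exceeded.
 */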
class IndexCache {
private final JobConf conf;
private final int totalMemoryAllowed;
private AtomicInteger totalMemoryUsed = new AtomicInteger();
private static final Log LOG = LogFactory.getLog(IndexCache.class);
private final ConcurrentHashMap<String,IndexInformation> cache =
new ConcurrentHashMap<String,IndexInformation>();
private final LinkedBlockingQueue<String> queue =
new LinkedBlockingQueue<String>();
public IndexCache(JobConf conf) {
this.conf = conf;
totalMemoryAllowed =
conf.getInt(TTConfig.TT_INDEX_CACHE, 10) * 1024 * 1024;
LOG.info("IndexCache created with max memory = " + totalMemoryAllowed);
}
/**
* This method gets the index information for the given mapId and reduce.
* It reads the index file into cache if it is not already present.
* @param mapId
* @param reduce
* @param fileName The file to read the index information from if it is not
* already present in the cache
* @param expectedIndexOwner The expected owner of the index file
* @return The Index Information
* @throws IOException
*/
public IndexRecord getIndexInformation(String mapId, int reduce,
Path fileName, String expectedIndexOwner)
throws IOException {
IndexInformation info = cache.get(mapId);
if (info == null) {
info = readIndexFileToCache(fileName, mapId, expectedIndexOwner);
} else {
synchronized(info) {
while (isUnderConstruction(info)) {
try {
info.wait();
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for construction", e);
}
}
}
LOG.debug("IndexCache HIT: MapId " + mapId + " found");
}
if (info.mapSpillRecord.size() == 0 ||
info.mapSpillRecord.size() <= reduce) {
throw new IOException("Invalid request " +
" Map Id = " + mapId + " Reducer = " + reduce +
" Index Info Length = " + info.mapSpillRecord.size());
}
return info.mapSpillRecord.getIndex(reduce);
}
private boolean isUnderConstruction(IndexInformation info) {
synchronized(info) {
return (null == info.mapSpillRecord);
}
}
private IndexInformation readIndexFileToCache(Path indexFileName,
String mapId,
String expectedIndexOwner)
throws IOException {
IndexInformation info;
IndexInformation newInd = new IndexInformation();
if ((info = cache.putIfAbsent(mapId, newInd)) != null) {
synchronized(info) {
while (isUnderConstruction(info)) {
try {
info.wait();
} catch (InterruptedException e) {
throw new IOException("Interrupted waiting for construction", e);
}
}
}
LOG.debug("IndexCache HIT: MapId " + mapId + " found");
return info;
}
LOG.debug("IndexCache MISS: MapId " + mapId + " not found") ;
SpillRecord tmp = null;
try {
tmp = new SpillRecord(indexFileName, conf, expectedIndexOwner);
} catch (Throwable e) {
tmp = new SpillRecord(0);
cache.remove(mapId);
throw new IOException("Error Reading IndexFile", e);
} finally {
synchronized (newInd) {
newInd.mapSpillRecord = tmp;
newInd.notifyAll();
}
}
queue.add(mapId);
if (totalMemoryUsed.addAndGet(newInd.getSize()) > totalMemoryAllowed) {
freeIndexInformation();
}
return newInd;
}
/**
* This method removes the map from the cache if the index information for
* this map has been loaded (size > 0). The entry is not removed while it is
* still in the loading phase (size = 0); this prevents corruption of
* totalMemoryUsed. It should be called when a map output on this tracker is
* discarded.
* @param mapId The taskID of this map.
*/
public void removeMap(String mapId) {
IndexInformation info = cache.get(mapId);
if (info == null || isUnderConstruction(info)) {
return;
}
info = cache.remove(mapId);
if (info != null) {
totalMemoryUsed.addAndGet(-info.getSize());
if (!queue.remove(mapId)) {
LOG.warn("Map ID" + mapId + " not found in queue!!");
}
} else {
LOG.info("Map ID " + mapId + " not found in cache");
}
}
/**
* This method checks whether the cache and totalMemoryUsed are consistent.
* It is only used in unit tests.
* @return true if the cache and totalMemoryUsed are consistent
*/
boolean checkTotalMemoryUsed() {
int totalSize = 0;
for (IndexInformation info : cache.values()) {
totalSize += info.getSize();
}
return totalSize == totalMemoryUsed.get();
}
/**
* Bring memory usage below totalMemoryAllowed.
*/
private synchronized void freeIndexInformation() {
while (totalMemoryUsed.get() > totalMemoryAllowed) {
String s = queue.remove();
IndexInformation info = cache.remove(s);
if (info != null) {
totalMemoryUsed.addAndGet(-info.getSize());
}
}
}
private static class IndexInformation {
SpillRecord mapSpillRecord;
int getSize() {
return mapSpillRecord == null
? 0
: mapSpillRecord.size() * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
}
}
}
| 6,579 | 32.232323 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A report on the state of a task.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TaskReport extends org.apache.hadoop.mapreduce.TaskReport {
public TaskReport() {
super();
}
/**
* Creates a new TaskReport object
* @param taskid
* @param progress
* @param state
* @param diagnostics
* @param startTime
* @param finishTime
* @param counters
* @deprecated
*/
@Deprecated
TaskReport(TaskID taskid, float progress, String state,
String[] diagnostics, long startTime, long finishTime,
Counters counters) {
this(taskid, progress, state, diagnostics, null, startTime, finishTime,
counters);
}
/**
* Creates a new TaskReport object
* @param taskid
* @param progress
* @param state
* @param diagnostics
* @param currentStatus
* @param startTime
* @param finishTime
* @param counters
*/
TaskReport(TaskID taskid, float progress, String state,
String[] diagnostics, TIPStatus currentStatus,
long startTime, long finishTime,
Counters counters) {
super(taskid, progress, state, diagnostics, currentStatus, startTime,
finishTime, new org.apache.hadoop.mapreduce.Counters(counters));
}
static TaskReport downgrade(
org.apache.hadoop.mapreduce.TaskReport report) {
return new TaskReport(TaskID.downgrade(report.getTaskID()),
report.getProgress(), report.getState(), report.getDiagnostics(),
report.getCurrentStatus(), report.getStartTime(), report.getFinishTime(),
Counters.downgrade(report.getTaskCounters()));
}
static TaskReport[] downgradeArray(org.apache.hadoop.
mapreduce.TaskReport[] reports) {
List<TaskReport> ret = new ArrayList<TaskReport>();
for (org.apache.hadoop.mapreduce.TaskReport report : reports) {
ret.add(downgrade(report));
}
return ret.toArray(new TaskReport[0]);
}
/** The string of the task id. */
public String getTaskId() {
return TaskID.downgrade(super.getTaskID()).toString();
}
/** The id of the task. */
public TaskID getTaskID() {
return TaskID.downgrade(super.getTaskID());
}
public Counters getCounters() {
return Counters.downgrade(super.getTaskCounters());
}
/**
* set successful attempt ID of the task.
*/
public void setSuccessfulAttempt(TaskAttemptID t) {
super.setSuccessfulAttemptId(t);
}
/**
* Get the attempt ID that took this task to completion
*/
public TaskAttemptID getSuccessfulTaskAttempt() {
return TaskAttemptID.downgrade(super.getSuccessfulTaskAttemptId());
}
/**
* set running attempt(s) of the task.
*/
public void setRunningTaskAttempts(
Collection<TaskAttemptID> runningAttempts) {
Collection<org.apache.hadoop.mapreduce.TaskAttemptID> attempts =
new ArrayList<org.apache.hadoop.mapreduce.TaskAttemptID>();
for (TaskAttemptID id : runningAttempts) {
attempts.add(id);
}
super.setRunningTaskAttemptIds(attempts);
}
/**
* Get the running task attempt IDs for this task
*/
public Collection<TaskAttemptID> getRunningTaskAttempts() {
Collection<TaskAttemptID> attempts = new ArrayList<TaskAttemptID>();
for (org.apache.hadoop.mapreduce.TaskAttemptID id :
super.getRunningTaskAttemptIds()) {
attempts.add(TaskAttemptID.downgrade(id));
}
return attempts;
}
/**
* set finish time of task.
* @param finishTime finish time of task.
*/
protected void setFinishTime(long finishTime) {
super.setFinishTime(finishTime);
}
/**
* set start time of the task.
*/
protected void setStartTime(long startTime) {
super.setStartTime(startTime);
}
}
| 4,789 | 29.125786 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFileInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.HasFileDescriptor;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.util.DataChecksum;
/**
* A checksum input stream, used for IFiles.
* Used to validate the checksum of files created by {@link IFileOutputStream}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class IFileInputStream extends InputStream {
private final InputStream in; //The input stream to be verified for checksum.
private final FileDescriptor inFd; // the file descriptor, if it is known
private final long length; //The total length of the input file
private final long dataLength;
private DataChecksum sum;
private long currentOffset = 0;
private final byte b[] = new byte[1];
private byte csum[] = null;
private int checksumSize;
private ReadaheadRequest curReadahead = null;
private ReadaheadPool raPool = ReadaheadPool.getInstance();
private boolean readahead;
private int readaheadLength;
public static final Log LOG = LogFactory.getLog(IFileInputStream.class);
private boolean disableChecksumValidation = false;
/**
* Create a checksum input stream that reads
* @param in The input stream to be verified for checksum.
* @param len The length of the input stream including checksum bytes.
*/
public IFileInputStream(InputStream in, long len, Configuration conf) {
this.in = in;
this.inFd = getFileDescriptorIfAvail(in);
sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
Integer.MAX_VALUE);
checksumSize = sum.getChecksumSize();
length = len;
dataLength = length - checksumSize;
conf = (conf != null) ? conf : new Configuration();
readahead = conf.getBoolean(MRConfig.MAPRED_IFILE_READAHEAD,
MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD);
readaheadLength = conf.getInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES,
MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD_BYTES);
doReadahead();
}
private static FileDescriptor getFileDescriptorIfAvail(InputStream in) {
FileDescriptor fd = null;
try {
if (in instanceof HasFileDescriptor) {
fd = ((HasFileDescriptor)in).getFileDescriptor();
} else if (in instanceof FileInputStream) {
fd = ((FileInputStream)in).getFD();
}
} catch (IOException e) {
LOG.info("Unable to determine FileDescriptor", e);
}
return fd;
}
/**
* Close the input stream. Note that we need to read to the end of the
* stream to validate the checksum.
*/
@Override
public void close() throws IOException {
if (curReadahead != null) {
curReadahead.cancel();
}
if (currentOffset < dataLength) {
byte[] t = new byte[Math.min((int)
(Integer.MAX_VALUE & (dataLength - currentOffset)), 32 * 1024)];
while (currentOffset < dataLength) {
int n = read(t, 0, t.length);
if (0 == n) {
throw new EOFException("Could not validate checksum");
}
}
}
in.close();
}
@Override
public long skip(long n) throws IOException {
throw new IOException("Skip not supported for IFileInputStream");
}
public long getPosition() {
return (currentOffset >= dataLength) ? dataLength : currentOffset;
}
public long getSize() {
return checksumSize;
}
/**
* Read bytes from the stream.
* At EOF, checksum is validated, but the checksum
* bytes are not passed back in the buffer.
*/
public int read(byte[] b, int off, int len) throws IOException {
if (currentOffset >= dataLength) {
return -1;
}
doReadahead();
return doRead(b,off,len);
}
private void doReadahead() {
if (raPool != null && inFd != null && readahead) {
curReadahead = raPool.readaheadStream(
"ifile", inFd,
currentOffset, readaheadLength, dataLength,
curReadahead);
}
}
/**
* Read bytes from the stream.
* At EOF, checksum is validated and sent back
* as the last four bytes of the buffer. The caller should handle
* these bytes appropriately
*/
public int readWithChecksum(byte[] b, int off, int len) throws IOException {
if (currentOffset == length) {
return -1;
}
else if (currentOffset >= dataLength) {
// If the previous read drained off all the data, then just return
// the checksum now. Note that checksum validation would have
// happened in the earlier read
int lenToCopy = (int) (checksumSize - (currentOffset - dataLength));
if (len < lenToCopy) {
lenToCopy = len;
}
System.arraycopy(csum, (int) (currentOffset - dataLength), b, off,
lenToCopy);
currentOffset += lenToCopy;
return lenToCopy;
}
int bytesRead = doRead(b,off,len);
if (currentOffset == dataLength) {
if (len >= bytesRead + checksumSize) {
System.arraycopy(csum, 0, b, off + bytesRead, checksumSize);
bytesRead += checksumSize;
currentOffset += checksumSize;
}
}
return bytesRead;
}
private int doRead(byte[]b, int off, int len) throws IOException {
// If we are trying to read past the end of data, just read
// the left over data
if (currentOffset + len > dataLength) {
len = (int) dataLength - (int)currentOffset;
}
int bytesRead = in.read(b, off, len);
if (bytesRead < 0) {
throw new ChecksumException("Checksum Error", 0);
}
sum.update(b,off,bytesRead);
currentOffset += bytesRead;
if (disableChecksumValidation) {
return bytesRead;
}
if (currentOffset == dataLength) {
// The last four bytes are checksum. Strip them and verify
csum = new byte[checksumSize];
IOUtils.readFully(in, csum, 0, checksumSize);
if (!sum.compare(csum, 0)) {
throw new ChecksumException("Checksum Error", 0);
}
}
return bytesRead;
}
@Override
public int read() throws IOException {
b[0] = 0;
int l = read(b,0,1);
if (l < 0) return l;
// Upgrade the b[0] to an int so as not to misinterpret the
// first bit of the byte as a sign bit
int result = 0xFF & b[0];
return result;
}
public byte[] getChecksum() {
return csum;
}
void disableChecksumValidation() {
disableChecksumValidation = true;
}
}
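// Illustrative usage sketch (not part of the original class; "rawIn", "len"
// and "conf" are assumptions): wrap a stream written by IFileOutputStream and
// read it through to the end so the trailing CRC32 checksum gets verified.
//
//   IFileInputStream checked = new IFileInputStream(rawIn, len, conf);
//   byte[] buf = new byte[64 * 1024];
//   while (checked.read(buf, 0, buf.length) >= 0) {
//     // consume data; a ChecksumException is thrown if the data is corrupt
//   }
//   checked.close();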
| 7,762 | 29.443137 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ClusterStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.util.StringInterner;
/**
* Status information on the current state of the Map-Reduce cluster.
*
* <p><code>ClusterStatus</code> provides clients with information such as:
* <ol>
* <li>
* Size of the cluster.
* </li>
* <li>
* Name of the trackers.
* </li>
* <li>
* Task capacity of the cluster.
* </li>
* <li>
* The number of currently running map and reduce tasks.
* </li>
* <li>
* State of the <code>JobTracker</code>.
* </li>
* <li>
* Details regarding black listed trackers.
* </li>
* </ol>
*
* <p>Clients can query for the latest <code>ClusterStatus</code>, via
* {@link JobClient#getClusterStatus()}.</p>
*
* @see JobClient
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ClusterStatus implements Writable {
/**
* Class which encapsulates information about a blacklisted tasktracker.
*
* The information includes the tasktracker's name and reasons for
* getting blacklisted. The toString method of the class will print
* the information in a whitespace separated fashion to enable parsing.
*/
public static class BlackListInfo implements Writable {
private String trackerName;
private String reasonForBlackListing;
private String blackListReport;
BlackListInfo() {
}
/**
* Gets the blacklisted tasktracker's name.
*
* @return tracker's name.
*/
public String getTrackerName() {
return trackerName;
}
/**
* Gets the reason for which the tasktracker was blacklisted.
*
* @return reason which tracker was blacklisted
*/
public String getReasonForBlackListing() {
return reasonForBlackListing;
}
/**
* Sets the blacklisted tasktracker's name.
*
* @param trackerName of the tracker.
*/
void setTrackerName(String trackerName) {
this.trackerName = trackerName;
}
/**
* Sets the reason for which the tasktracker was blacklisted.
*
* @param reasonForBlackListing
*/
void setReasonForBlackListing(String reasonForBlackListing) {
this.reasonForBlackListing = reasonForBlackListing;
}
/**
* Gets a descriptive report about why the tasktracker was blacklisted.
*
* @return report describing why the tasktracker was blacklisted.
*/
public String getBlackListReport() {
return blackListReport;
}
/**
* Sets a descriptive report about why the tasktracker was blacklisted.
* @param blackListReport report describing why the tasktracker
* was blacklisted.
*/
void setBlackListReport(String blackListReport) {
this.blackListReport = blackListReport;
}
@Override
public void readFields(DataInput in) throws IOException {
trackerName = StringInterner.weakIntern(Text.readString(in));
reasonForBlackListing = StringInterner.weakIntern(Text.readString(in));
blackListReport = StringInterner.weakIntern(Text.readString(in));
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, trackerName);
Text.writeString(out, reasonForBlackListing);
Text.writeString(out, blackListReport);
}
@Override
/**
* Print information related to the blacklisted tasktracker in a
* whitespace separated fashion.
*
* The method changes any newlines in the report describing why
* the tasktracker was blacklisted to a ':' for enabling better
* parsing.
*/
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(trackerName);
sb.append("\t");
sb.append(reasonForBlackListing);
sb.append("\t");
sb.append(blackListReport.replace("\n", ":"));
return sb.toString();
}
}
public static final long UNINITIALIZED_MEMORY_VALUE = -1;
private int numActiveTrackers;
private Collection<String> activeTrackers = new ArrayList<String>();
private int numBlacklistedTrackers;
private int numExcludedNodes;
private long ttExpiryInterval;
private int map_tasks;
private int reduce_tasks;
private int max_map_tasks;
private int max_reduce_tasks;
private JobTrackerStatus status;
private Collection<BlackListInfo> blacklistedTrackersInfo =
new ArrayList<BlackListInfo>();
private int grayListedTrackers;
ClusterStatus() {}
/**
* Construct a new cluster status.
*
* @param trackers no. of tasktrackers in the cluster
* @param blacklists no of blacklisted task trackers in the cluster
* @param ttExpiryInterval the tasktracker expiry interval
* @param maps no. of currently running map-tasks in the cluster
* @param reduces no. of currently running reduce-tasks in the cluster
* @param maxMaps the maximum no. of map tasks in the cluster
* @param maxReduces the maximum no. of reduce tasks in the cluster
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
*/
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval,
int maps, int reduces,
int maxMaps, int maxReduces, JobTrackerStatus status) {
this(trackers, blacklists, ttExpiryInterval, maps, reduces, maxMaps,
maxReduces, status, 0);
}
/**
* Construct a new cluster status.
*
* @param trackers no. of tasktrackers in the cluster
* @param blacklists no of blacklisted task trackers in the cluster
* @param ttExpiryInterval the tasktracker expiry interval
* @param maps no. of currently running map-tasks in the cluster
* @param reduces no. of currently running reduce-tasks in the cluster
* @param maxMaps the maximum no. of map tasks in the cluster
* @param maxReduces the maximum no. of reduce tasks in the cluster
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
* @param numDecommissionedNodes number of decommission trackers
*/
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
int numDecommissionedNodes) {
this(trackers, blacklists, ttExpiryInterval, maps, reduces, maxMaps,
maxReduces, status, numDecommissionedNodes, 0);
}
/**
* Construct a new cluster status.
*
* @param trackers no. of tasktrackers in the cluster
* @param blacklists no of blacklisted task trackers in the cluster
* @param ttExpiryInterval the tasktracker expiry interval
* @param maps no. of currently running map-tasks in the cluster
* @param reduces no. of currently running reduce-tasks in the cluster
* @param maxMaps the maximum no. of map tasks in the cluster
* @param maxReduces the maximum no. of reduce tasks in the cluster
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
* @param numDecommissionedNodes number of decommission trackers
* @param numGrayListedTrackers number of graylisted trackers
*/
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
int numDecommissionedNodes, int numGrayListedTrackers) {
numActiveTrackers = trackers;
numBlacklistedTrackers = blacklists;
this.numExcludedNodes = numDecommissionedNodes;
this.ttExpiryInterval = ttExpiryInterval;
map_tasks = maps;
reduce_tasks = reduces;
max_map_tasks = maxMaps;
max_reduce_tasks = maxReduces;
this.status = status;
this.grayListedTrackers = numGrayListedTrackers;
}
/**
* Construct a new cluster status.
*
* @param activeTrackers active tasktrackers in the cluster
* @param blacklistedTrackers blacklisted tasktrackers in the cluster
* @param ttExpiryInterval the tasktracker expiry interval
* @param maps no. of currently running map-tasks in the cluster
* @param reduces no. of currently running reduce-tasks in the cluster
* @param maxMaps the maximum no. of map tasks in the cluster
* @param maxReduces the maximum no. of reduce tasks in the cluster
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
*/
ClusterStatus(Collection<String> activeTrackers,
Collection<BlackListInfo> blacklistedTrackers,
long ttExpiryInterval,
int maps, int reduces, int maxMaps, int maxReduces,
JobTrackerStatus status) {
this(activeTrackers, blacklistedTrackers, ttExpiryInterval, maps, reduces,
maxMaps, maxReduces, status, 0);
}
/**
* Construct a new cluster status.
*
* @param activeTrackers active tasktrackers in the cluster
* @param blackListedTrackerInfo blacklisted tasktrackers information
* in the cluster
* @param ttExpiryInterval the tasktracker expiry interval
* @param maps no. of currently running map-tasks in the cluster
* @param reduces no. of currently running reduce-tasks in the cluster
* @param maxMaps the maximum no. of map tasks in the cluster
* @param maxReduces the maximum no. of reduce tasks in the cluster
* @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
* @param numDecommissionNodes number of decommission trackers
*/
ClusterStatus(Collection<String> activeTrackers,
Collection<BlackListInfo> blackListedTrackerInfo, long ttExpiryInterval,
int maps, int reduces, int maxMaps, int maxReduces,
JobTrackerStatus status, int numDecommissionNodes) {
this(activeTrackers.size(), blackListedTrackerInfo.size(),
ttExpiryInterval, maps, reduces, maxMaps, maxReduces, status,
numDecommissionNodes);
this.activeTrackers = activeTrackers;
this.blacklistedTrackersInfo = blackListedTrackerInfo;
}
/**
* Get the number of task trackers in the cluster.
*
* @return the number of task trackers in the cluster.
*/
public int getTaskTrackers() {
return numActiveTrackers;
}
/**
* Get the names of task trackers in the cluster.
*
* @return the active task trackers in the cluster.
*/
public Collection<String> getActiveTrackerNames() {
return activeTrackers;
}
/**
* Get the names of task trackers in the cluster.
*
* @return the blacklisted task trackers in the cluster.
*/
public Collection<String> getBlacklistedTrackerNames() {
ArrayList<String> blacklistedTrackers = new ArrayList<String>();
for(BlackListInfo bi : blacklistedTrackersInfo) {
blacklistedTrackers.add(bi.getTrackerName());
}
return blacklistedTrackers;
}
/**
* Get the names of graylisted task trackers in the cluster.
*
* The gray list of trackers is no longer available on M/R 2.x. The function
* is kept to be compatible with M/R 1.x applications.
*
* @return an empty graylisted task trackers in the cluster.
*/
@Deprecated
public Collection<String> getGraylistedTrackerNames() {
return Collections.emptySet();
}
/**
* Get the number of graylisted task trackers in the cluster.
*
* The gray list of trackers is no longer available on M/R 2.x. The function
* is kept to be compatible with M/R 1.x applications.
*
* @return 0 graylisted task trackers in the cluster.
*/
@Deprecated
public int getGraylistedTrackers() {
return grayListedTrackers;
}
/**
* Get the number of blacklisted task trackers in the cluster.
*
* @return the number of blacklisted task trackers in the cluster.
*/
public int getBlacklistedTrackers() {
return numBlacklistedTrackers;
}
/**
* Get the number of excluded hosts in the cluster.
* @return the number of excluded hosts in the cluster.
*/
public int getNumExcludedNodes() {
return numExcludedNodes;
}
/**
* Get the tasktracker expiry interval for the cluster
* @return the expiry interval in msec
*/
public long getTTExpiryInterval() {
return ttExpiryInterval;
}
/**
* Get the number of currently running map tasks in the cluster.
*
* @return the number of currently running map tasks in the cluster.
*/
public int getMapTasks() {
return map_tasks;
}
/**
* Get the number of currently running reduce tasks in the cluster.
*
* @return the number of currently running reduce tasks in the cluster.
*/
public int getReduceTasks() {
return reduce_tasks;
}
/**
* Get the maximum capacity for running map tasks in the cluster.
*
* @return the maximum capacity for running map tasks in the cluster.
*/
public int getMaxMapTasks() {
return max_map_tasks;
}
/**
* Get the maximum capacity for running reduce tasks in the cluster.
*
* @return the maximum capacity for running reduce tasks in the cluster.
*/
public int getMaxReduceTasks() {
return max_reduce_tasks;
}
/**
* Get the JobTracker's status.
*
* @return {@link JobTrackerStatus} of the JobTracker
*/
public JobTrackerStatus getJobTrackerStatus() {
return status;
}
/**
* Returns UNINITIALIZED_MEMORY_VALUE (-1)
*/
@Deprecated
public long getMaxMemory() {
return UNINITIALIZED_MEMORY_VALUE;
}
/**
* Returns UNINITIALIZED_MEMORY_VALUE (-1)
*/
@Deprecated
public long getUsedMemory() {
return UNINITIALIZED_MEMORY_VALUE;
}
/**
* Gets the list of blacklisted trackers along with reasons for blacklisting.
*
* @return the collection of {@link BlackListInfo} objects.
*
*/
public Collection<BlackListInfo> getBlackListedTrackersInfo() {
return blacklistedTrackersInfo;
}
/**
* Get the current state of the <code>JobTracker</code>,
* as {@link JobTracker.State}
*
* {@link JobTracker.State} should no longer be used on M/R 2.x. The function
* is kept to be compatible with M/R 1.x applications.
*
* @return the invalid state of the <code>JobTracker</code>.
*/
@Deprecated
public JobTracker.State getJobTrackerState() {
return JobTracker.State.RUNNING;
}
public void write(DataOutput out) throws IOException {
if (activeTrackers.size() == 0) {
out.writeInt(numActiveTrackers);
out.writeInt(0);
} else {
out.writeInt(activeTrackers.size());
out.writeInt(activeTrackers.size());
for (String tracker : activeTrackers) {
Text.writeString(out, tracker);
}
}
if (blacklistedTrackersInfo.size() == 0) {
out.writeInt(numBlacklistedTrackers);
out.writeInt(blacklistedTrackersInfo.size());
} else {
out.writeInt(blacklistedTrackersInfo.size());
out.writeInt(blacklistedTrackersInfo.size());
for (BlackListInfo tracker : blacklistedTrackersInfo) {
tracker.write(out);
}
}
out.writeInt(numExcludedNodes);
out.writeLong(ttExpiryInterval);
out.writeInt(map_tasks);
out.writeInt(reduce_tasks);
out.writeInt(max_map_tasks);
out.writeInt(max_reduce_tasks);
WritableUtils.writeEnum(out, status);
out.writeInt(grayListedTrackers);
}
public void readFields(DataInput in) throws IOException {
numActiveTrackers = in.readInt();
int numTrackerNames = in.readInt();
if (numTrackerNames > 0) {
for (int i = 0; i < numTrackerNames; i++) {
String name = StringInterner.weakIntern(Text.readString(in));
activeTrackers.add(name);
}
}
numBlacklistedTrackers = in.readInt();
int blackListTrackerInfoSize = in.readInt();
if(blackListTrackerInfoSize > 0) {
for (int i = 0; i < blackListTrackerInfoSize; i++) {
BlackListInfo info = new BlackListInfo();
info.readFields(in);
blacklistedTrackersInfo.add(info);
}
}
numExcludedNodes = in.readInt();
ttExpiryInterval = in.readLong();
map_tasks = in.readInt();
reduce_tasks = in.readInt();
max_map_tasks = in.readInt();
max_reduce_tasks = in.readInt();
status = WritableUtils.readEnum(in, JobTrackerStatus.class);
grayListedTrackers = in.readInt();
}
}
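/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * copies the Writable state above between two already-constructed
 * ClusterStatus objects through in-memory buffers, assuming the standard
 * write(DataOutput)/readFields(DataInput) round-trip shown in this class.
 */
class ClusterStatusRoundTripSketch {
  static void copyInto(ClusterStatus src, ClusterStatus dst)
      throws java.io.IOException {
    // Serialize with write(DataOutput), then rebuild the fields with readFields(DataInput).
    org.apache.hadoop.io.DataOutputBuffer out = new org.apache.hadoop.io.DataOutputBuffer();
    src.write(out);
    org.apache.hadoop.io.DataInputBuffer in = new org.apache.hadoop.io.DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    dst.readFields(in);
  }
}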
| 17,373 | 31.353818 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
/**
* <code>RecordWriter</code> writes the output <key, value> pairs
* to an output file.
* <p><code>RecordWriter</code> implementations write the job outputs to the
* {@link FileSystem}.
*
* @see OutputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface RecordWriter<K, V> {
/**
* Writes a key/value pair.
*
* @param key the key to write.
* @param value the value to write.
* @throws IOException
*/
void write(K key, V value) throws IOException;
/**
* Close this <code>RecordWriter</code> to future operations.
*
* @param reporter facility to report progress.
* @throws IOException
*/
void close(Reporter reporter) throws IOException;
}
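/*
 * Illustrative sketch (hypothetical class, not part of the Hadoop API): a
 * trivial RecordWriter that prints one "key<TAB>value" line per record to a
 * java.io.PrintWriter supplied by the caller. A real OutputFormat would hand
 * out a writer backed by the FileSystem instead.
 */
class PrintingRecordWriterSketch<K, V> implements RecordWriter<K, V> {
  private final java.io.PrintWriter out;
  PrintingRecordWriterSketch(java.io.PrintWriter out) {
    this.out = out;
  }
  public void write(K key, V value) throws IOException {
    // One record per line, key and value separated by a tab.
    out.println(key + "\t" + value);
  }
  public void close(Reporter reporter) throws IOException {
    // Flush and release the underlying writer once the task is finished.
    out.close();
  }
}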
| 1,767 | 30.571429 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.*;
import com.google.common.base.Charsets;
/**
 * An {@link InputFormat} for plain text files. Files are broken into lines.
 * Either linefeed or carriage-return is used to signal end of line. Keys are
 * the position in the file, and values are the line of text.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TextInputFormat extends FileInputFormat<LongWritable, Text>
implements JobConfigurable {
private CompressionCodecFactory compressionCodecs = null;
public void configure(JobConf conf) {
compressionCodecs = new CompressionCodecFactory(conf);
}
protected boolean isSplitable(FileSystem fs, Path file) {
final CompressionCodec codec = compressionCodecs.getCodec(file);
if (null == codec) {
return true;
}
return codec instanceof SplittableCompressionCodec;
}
public RecordReader<LongWritable, Text> getRecordReader(
InputSplit genericSplit, JobConf job,
Reporter reporter)
throws IOException {
reporter.setStatus(genericSplit.toString());
String delimiter = job.get("textinputformat.record.delimiter");
byte[] recordDelimiterBytes = null;
if (null != delimiter) {
recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
}
return new LineRecordReader(job, (FileSplit) genericSplit,
recordDelimiterBytes);
}
}
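/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * wires TextInputFormat into an old-API JobConf and overrides the record
 * delimiter read by getRecordReader() above. The ";" delimiter and the input
 * path are placeholders.
 */
class TextInputFormatUsageSketch {
  static JobConf newConf() {
    JobConf conf = new JobConf();
    conf.setInputFormat(TextInputFormat.class);
    // Records end at this delimiter instead of the default line endings.
    conf.set("textinputformat.record.delimiter", ";");
    FileInputFormat.addInputPath(conf, new Path("/tmp/text-input"));
    return conf;
  }
}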
| 2,555 | 35 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/*******************************
* Some handy constants
*
*******************************/
@Private
@Unstable
public interface MRConstants {
//
// Timeouts, constants
//
public static final long COUNTER_UPDATE_INTERVAL = 60 * 1000;
//
// Result codes
//
public static int SUCCESS = 0;
public static int FILE_NOT_FOUND = -1;
/**
* The custom http header used for the map output length.
*/
public static final String MAP_OUTPUT_LENGTH = "Map-Output-Length";
/**
* The custom http header used for the "raw" map output length.
*/
public static final String RAW_MAP_OUTPUT_LENGTH = "Raw-Map-Output-Length";
/**
* The map task from which the map output data is being transferred
*/
public static final String FROM_MAP_TASK = "from-map-task";
/**
* The reduce task number for which this map output is being transferred
*/
public static final String FOR_REDUCE_TASK = "for-reduce-task";
/** Used in MRv1, mostly in TaskTracker code **/
public static final String WORKDIR = "work";
  /** Used by MRv2 */
public static final String APPLICATION_ATTEMPT_ID =
"mapreduce.job.application.attempt.id";
}
| 2,159 | 30.304348 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapReduceBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Closeable;
/**
* Base class for {@link Mapper} and {@link Reducer} implementations.
*
* <p>Provides default no-op implementations for a few methods, most non-trivial
* applications need to override some of them.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapReduceBase implements Closeable, JobConfigurable {
/** Default implementation that does nothing. */
public void close() throws IOException {
}
/** Default implementation that does nothing. */
public void configure(JobConf job) {
}
}
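/*
 * Illustrative sketch (hypothetical class, not part of the Hadoop API): a
 * mapper that extends MapReduceBase so it only has to override map(),
 * inheriting the no-op configure() and close() above. The upper-casing logic
 * is just an example.
 */
class UpperCaseMapperSketch extends MapReduceBase
    implements Mapper<org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text,
        org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable> {
  public void map(org.apache.hadoop.io.LongWritable offset,
      org.apache.hadoop.io.Text line,
      OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable> output,
      Reporter reporter) throws IOException {
    // Emit the upper-cased line as the key, with the byte offset as the value.
    output.collect(
        new org.apache.hadoop.io.Text(line.toString().toUpperCase()), offset);
  }
}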
| 1,563 | 33 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
/**
* This class treats a line in the input as a key/value pair separated by a
* separator character. The separator can be specified in config file
* under the attribute name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
* separator is the tab character ('\t').
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyValueLineRecordReader implements RecordReader<Text, Text> {
private final LineRecordReader lineRecordReader;
private byte separator = (byte) '\t';
private LongWritable dummyKey;
private Text innerValue;
public Class getKeyClass() { return Text.class; }
public Text createKey() {
return new Text();
}
public Text createValue() {
return new Text();
}
public KeyValueLineRecordReader(Configuration job, FileSplit split)
throws IOException {
lineRecordReader = new LineRecordReader(job, split);
dummyKey = lineRecordReader.createKey();
innerValue = lineRecordReader.createValue();
String sepStr = job.get("mapreduce.input.keyvaluelinerecordreader.key.value.separator", "\t");
this.separator = (byte) sepStr.charAt(0);
}
public static int findSeparator(byte[] utf, int start, int length,
byte sep) {
return org.apache.hadoop.mapreduce.lib.input.
KeyValueLineRecordReader.findSeparator(utf, start, length, sep);
}
/** Read key/value pair in a line. */
public synchronized boolean next(Text key, Text value)
throws IOException {
byte[] line = null;
int lineLen = -1;
if (lineRecordReader.next(dummyKey, innerValue)) {
line = innerValue.getBytes();
lineLen = innerValue.getLength();
} else {
return false;
}
if (line == null)
return false;
int pos = findSeparator(line, 0, lineLen, this.separator);
org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader.
setKeyValue(key, value, line, lineLen, pos);
return true;
}
public float getProgress() throws IOException {
return lineRecordReader.getProgress();
}
public synchronized long getPos() throws IOException {
return lineRecordReader.getPos();
}
public synchronized void close() throws IOException {
lineRecordReader.close();
}
}
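/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * configures the separator consumed by the constructor above and selects the
 * matching old-API input format. The "," separator is only an example.
 */
class KeyValueSeparatorConfigSketch {
  static JobConf newConf() {
    JobConf conf = new JobConf();
    // The first byte of this property becomes the key/value separator.
    conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", ",");
    conf.setInputFormat(KeyValueTextInputFormat.class);
    return conf;
  }
}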
| 3,365 | 31.365385 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.ReflectionUtils;
/** Default {@link MapRunnable} implementation.*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapRunner<K1, V1, K2, V2>
implements MapRunnable<K1, V1, K2, V2> {
private Mapper<K1, V1, K2, V2> mapper;
private boolean incrProcCount;
@SuppressWarnings("unchecked")
public void configure(JobConf job) {
this.mapper = ReflectionUtils.newInstance(job.getMapperClass(), job);
//increment processed counter only if skipping feature is enabled
this.incrProcCount = SkipBadRecords.getMapperMaxSkipRecords(job)>0 &&
SkipBadRecords.getAutoIncrMapperProcCount(job);
}
public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
Reporter reporter)
throws IOException {
try {
// allocate key & value instances that are re-used for all entries
K1 key = input.createKey();
V1 value = input.createValue();
while (input.next(key, value)) {
// map pair to output
mapper.map(key, value, output, reporter);
if(incrProcCount) {
reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1);
}
}
} finally {
mapper.close();
}
}
protected Mapper<K1, V1, K2, V2> getMapper() {
return mapper;
}
}
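/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * shows where the map runner for a job is selected. MapRunner is already the
 * default, so the call below only matters when substituting a custom
 * MapRunnable implementation.
 */
class MapRunnerConfigSketch {
  static void configureRunner(JobConf conf) {
    // Replace MapRunner.class with a custom MapRunnable if a different run loop is needed.
    conf.setMapRunnerClass(MapRunner.class);
  }
}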
| 2,370 | 33.362319 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MultiFileSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.lib.CombineFileSplit;
/**
* A sub-collection of input files. Unlike {@link FileSplit}, MultiFileSplit
* class does not represent a split of a file, but a split of input files
* into smaller sets. The atomic unit of split is a file. <br>
 * MultiFileSplit can be used to implement {@link RecordReader}s that
 * read one record per file.
* @see FileSplit
* @see MultiFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultiFileSplit extends CombineFileSplit {
MultiFileSplit() {}
public MultiFileSplit(JobConf job, Path[] files, long[] lengths) {
super(job, files, lengths);
}
public String[] getLocations() throws IOException {
HashSet<String> hostSet = new HashSet<String>();
for (Path file : getPaths()) {
FileSystem fs = file.getFileSystem(getJob());
FileStatus status = fs.getFileStatus(file);
BlockLocation[] blkLocations = fs.getFileBlockLocations(status,
0, status.getLen());
if (blkLocations != null && blkLocations.length > 0) {
addToSet(hostSet, blkLocations[0].getHosts());
}
}
return hostSet.toArray(new String[hostSet.size()]);
}
private void addToSet(Set<String> set, String[] array) {
for(String s:array)
set.add(s);
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
for(int i=0; i < getPaths().length; i++) {
sb.append(getPath(i).toUri().getPath() + ":0+" + getLength(i));
if (i < getPaths().length -1) {
sb.append("\n");
}
}
return sb.toString();
}
}
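/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * builds a split that covers two whole files. The paths and lengths are
 * placeholders.
 */
class MultiFileSplitConstructionSketch {
  static MultiFileSplit twoFileSplit(JobConf job) {
    Path[] files = { new Path("/tmp/a.txt"), new Path("/tmp/b.txt") };
    long[] lengths = { 100L, 200L };
    // A record reader over this split would typically read one record per file.
    return new MultiFileSplit(job, files, lengths);
  }
}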
| 2,855 | 32.6 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobProfile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.net.URL;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.util.StringInterner;
/**************************************************
* A JobProfile is a MapReduce primitive. Tracks a job,
* whether living or dead.
*
**************************************************/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class JobProfile implements Writable {
static { // register a ctor
WritableFactories.setFactory
(JobProfile.class,
new WritableFactory() {
public Writable newInstance() { return new JobProfile(); }
});
}
String user;
final JobID jobid;
String jobFile;
String url;
String name;
String queueName;
/**
* Construct an empty {@link JobProfile}.
*/
public JobProfile() {
jobid = new JobID();
}
/**
   * Construct a {@link JobProfile} with the userid, jobid,
* job config-file, job-details url and job name.
*
* @param user userid of the person who submitted the job.
* @param jobid id of the job.
* @param jobFile job configuration file.
* @param url link to the web-ui for details of the job.
* @param name user-specified job name.
*/
public JobProfile(String user, org.apache.hadoop.mapreduce.JobID jobid,
String jobFile, String url,
String name) {
this(user, jobid, jobFile, url, name, JobConf.DEFAULT_QUEUE_NAME);
}
/**
   * Construct a {@link JobProfile} with the userid, jobid,
* job config-file, job-details url and job name.
*
* @param user userid of the person who submitted the job.
* @param jobid id of the job.
* @param jobFile job configuration file.
* @param url link to the web-ui for details of the job.
* @param name user-specified job name.
* @param queueName name of the queue to which the job is submitted
*/
public JobProfile(String user, org.apache.hadoop.mapreduce.JobID jobid,
String jobFile, String url,
String name, String queueName) {
this.user = user;
this.jobid = JobID.downgrade(jobid);
this.jobFile = jobFile;
this.url = url;
this.name = name;
this.queueName = queueName;
}
/**
* @deprecated use JobProfile(String, JobID, String, String, String) instead
*/
@Deprecated
public JobProfile(String user, String jobid, String jobFile, String url,
String name) {
this(user, JobID.forName(jobid), jobFile, url, name);
}
/**
* Get the user id.
*/
public String getUser() {
return user;
}
/**
* Get the job id.
*/
public JobID getJobID() {
return jobid;
}
/**
* @deprecated use getJobID() instead
*/
@Deprecated
public String getJobId() {
return jobid.toString();
}
/**
* Get the configuration file for the job.
*/
public String getJobFile() {
return jobFile;
}
/**
* Get the link to the web-ui for details of the job.
*/
public URL getURL() {
try {
return new URL(url);
} catch (IOException ie) {
return null;
}
}
/**
* Get the user-specified job name.
*/
public String getJobName() {
return name;
}
/**
* Get the name of the queue to which the job is submitted.
* @return name of the queue.
*/
public String getQueueName() {
return queueName;
}
///////////////////////////////////////
// Writable
///////////////////////////////////////
public void write(DataOutput out) throws IOException {
jobid.write(out);
Text.writeString(out, jobFile);
Text.writeString(out, url);
Text.writeString(out, user);
Text.writeString(out, name);
Text.writeString(out, queueName);
}
public void readFields(DataInput in) throws IOException {
jobid.readFields(in);
this.jobFile = StringInterner.weakIntern(Text.readString(in));
this.url = StringInterner.weakIntern(Text.readString(in));
this.user = StringInterner.weakIntern(Text.readString(in));
this.name = StringInterner.weakIntern(Text.readString(in));
this.queueName = StringInterner.weakIntern(Text.readString(in));
}
}
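/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * builds a profile for an imaginary job. Every field value below is a
 * placeholder.
 */
class JobProfileConstructionSketch {
  static JobProfile exampleProfile() {
    JobID id = JobID.forName("job_200707121733_0001");
    return new JobProfile("alice", id, "/user/alice/job.xml",
        "http://jobtracker:50030/jobdetails.jsp?jobid=job_200707121733_0001",
        "word count");
  }
}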
| 5,354 | 27.333333 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FixedLengthRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
/**
* A reader to read fixed length records from a split. Record offset is
* returned as key and the record as bytes is returned in value.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FixedLengthRecordReader
implements RecordReader<LongWritable, BytesWritable> {
private int recordLength;
// Make use of the new API implementation to avoid code duplication.
private org.apache.hadoop.mapreduce.lib.input.FixedLengthRecordReader reader;
public FixedLengthRecordReader(Configuration job, FileSplit split,
int recordLength) throws IOException {
this.recordLength = recordLength;
reader = new org.apache.hadoop.mapreduce.lib.input.FixedLengthRecordReader(
recordLength);
reader.initialize(job, split.getStart(), split.getLength(),
split.getPath());
}
@Override
public LongWritable createKey() {
return new LongWritable();
}
@Override
public BytesWritable createValue() {
return new BytesWritable(new byte[recordLength]);
}
@Override
public synchronized boolean next(LongWritable key, BytesWritable value)
throws IOException {
boolean dataRead = reader.nextKeyValue();
if (dataRead) {
LongWritable newKey = reader.getCurrentKey();
BytesWritable newValue = reader.getCurrentValue();
key.set(newKey.get());
value.set(newValue);
}
return dataRead;
}
@Override
public float getProgress() throws IOException {
return reader.getProgress();
}
@Override
public synchronized long getPos() throws IOException {
return reader.getPos();
}
@Override
public void close() throws IOException {
reader.close();
}
}
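/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * a job configuration whose splits would be read by the reader above,
 * assuming records of exactly 32 bytes. The record length is a placeholder.
 */
class FixedLengthConfigSketch {
  static JobConf newConf() {
    JobConf conf = new JobConf();
    // Every record in the input is exactly this many bytes long.
    FixedLengthInputFormat.setRecordLength(conf, 32);
    conf.setInputFormat(FixedLengthInputFormat.class);
    return conf;
  }
}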
| 2,842 | 30.588889 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFileOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.OutputStream;
import java.io.FilterOutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.DataChecksum;
/**
* A Checksum output stream.
* Checksum for the contents of the file is calculated and
* appended to the end of the file on close of the stream.
* Used for IFiles
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class IFileOutputStream extends FilterOutputStream {
/**
   * The checksum computed over the bytes written to this stream.
*/
private final DataChecksum sum;
private byte[] barray;
private boolean closed = false;
private boolean finished = false;
/**
* Create a checksum output stream that writes
* the bytes to the given stream.
* @param out
*/
public IFileOutputStream(OutputStream out) {
super(out);
sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
Integer.MAX_VALUE);
barray = new byte[sum.getChecksumSize()];
}
@Override
public void close() throws IOException {
if (closed) {
return;
}
closed = true;
finish();
out.close();
}
/**
* Finishes writing data to the output stream, by writing
* the checksum bytes to the end. The underlying stream is not closed.
* @throws IOException
*/
public void finish() throws IOException {
if (finished) {
return;
}
finished = true;
sum.writeValue(barray, 0, false);
out.write (barray, 0, sum.getChecksumSize());
out.flush();
}
/**
* Write bytes to the stream.
*/
@Override
public void write(byte[] b, int off, int len) throws IOException {
sum.update(b, off,len);
out.write(b,off,len);
}
@Override
public void write(int b) throws IOException {
barray[0] = (byte) (b & 0xFF);
write(barray,0,1);
}
}
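/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * pushes a payload through the checksummed stream; close() appends the CRC32
 * checksum to the underlying buffer as described above.
 */
class IFileOutputStreamUsageSketch {
  static byte[] checksummed(byte[] payload) throws IOException {
    java.io.ByteArrayOutputStream buffer = new java.io.ByteArrayOutputStream();
    IFileOutputStream out = new IFileOutputStream(buffer);
    out.write(payload, 0, payload.length);
    out.close();                  // writes the trailing checksum bytes
    return buffer.toByteArray();  // payload followed by its checksum
  }
}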
| 2,739 | 26.959184 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This is used to track task completion events on
* job tracker.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TaskCompletionEvent
extends org.apache.hadoop.mapreduce.TaskCompletionEvent {
@InterfaceAudience.Public
@InterfaceStability.Stable
/**
* Task Completion Statuses
*/
static public enum Status {
/**
* Task Event Attempt failed but there are attempts remaining.
*/
FAILED,
/**
* Task Event was killed.
*/
KILLED,
/**
* Task Event was successful.
*/
SUCCEEDED,
/**
* Used to Override a previously successful event status.
* Example: Map attempt runs and a SUCCEEDED event is sent. Later a task
* is retroactively failed due to excessive fetch failure during shuffle
* phase. When the retroactive attempt failure occurs, an OBSOLETE event is
* sent for the map attempt indicating the prior event is no longer valid.
*/
OBSOLETE,
/**
     * Task Event attempt failed and no further attempts exist because the
     * maximum number of attempts was reached. When a reducer receives a
     * TIPFAILED event it gives up trying to shuffle data from that map task.
*/
TIPFAILED
}
public static final TaskCompletionEvent[] EMPTY_ARRAY =
new TaskCompletionEvent[0];
/**
* Default constructor for Writable.
*
*/
public TaskCompletionEvent() {
super();
}
/**
* Constructor. eventId should be created externally and incremented
* per event for each job.
   * @param eventId event id; event ids should be unique and assigned
   * incrementally, starting from 0.
* @param taskId task id
* @param status task's status
* @param taskTrackerHttp task tracker's host:port for http.
*/
public TaskCompletionEvent(int eventId,
TaskAttemptID taskId,
int idWithinJob,
boolean isMap,
Status status,
String taskTrackerHttp){
super(eventId, taskId, idWithinJob, isMap, org.apache.hadoop.mapreduce.
TaskCompletionEvent.Status.valueOf(status.name()), taskTrackerHttp);
}
@Private
public static TaskCompletionEvent downgrade(
org.apache.hadoop.mapreduce.TaskCompletionEvent event) {
return new TaskCompletionEvent(event.getEventId(),
TaskAttemptID.downgrade(event.getTaskAttemptId()),event.idWithinJob(),
event.isMapTask(), Status.valueOf(event.getStatus().name()),
event.getTaskTrackerHttp());
}
/**
* Returns task id.
* @return task id
* @deprecated use {@link #getTaskAttemptId()} instead.
*/
@Deprecated
public String getTaskId() {
return getTaskAttemptId().toString();
}
/**
* Returns task id.
* @return task id
*/
public TaskAttemptID getTaskAttemptId() {
return TaskAttemptID.downgrade(super.getTaskAttemptId());
}
/**
* Returns {@link Status}
* @return task completion status
*/
public Status getTaskStatus() {
return Status.valueOf(super.getStatus().name());
}
/**
* Sets task id.
* @param taskId
* @deprecated use {@link #setTaskAttemptId(TaskAttemptID)} instead.
*/
@Deprecated
public void setTaskId(String taskId) {
this.setTaskAttemptId(TaskAttemptID.forName(taskId));
}
/**
* Sets task id.
* @param taskId
* @deprecated use {@link #setTaskAttemptId(TaskAttemptID)} instead.
*/
@Deprecated
public void setTaskID(TaskAttemptID taskId) {
this.setTaskAttemptId(taskId);
}
/**
* Sets task id.
* @param taskId
*/
protected void setTaskAttemptId(TaskAttemptID taskId) {
super.setTaskAttemptId(taskId);
}
/**
* Set task status.
* @param status
*/
@Private
public void setTaskStatus(Status status) {
super.setTaskStatus(org.apache.hadoop.mapreduce.
TaskCompletionEvent.Status.valueOf(status.name()));
}
/**
* Set the task completion time
* @param taskCompletionTime time (in millisec) the task took to complete
*/
@Private
public void setTaskRunTime(int taskCompletionTime) {
super.setTaskRunTime(taskCompletionTime);
}
/**
   * Set event id. It should be assigned incrementally starting from 0.
* @param eventId
*/
@Private
public void setEventId(int eventId) {
super.setEventId(eventId);
}
/**
* Set task tracker http location.
* @param taskTrackerHttp
*/
@Private
public void setTaskTrackerHttp(String taskTrackerHttp) {
super.setTaskTrackerHttp(taskTrackerHttp);
}
}
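/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * constructs a completion event for an imaginary successful map attempt. The
 * attempt id and tracker URL are placeholders.
 */
class TaskCompletionEventSketch {
  static TaskCompletionEvent successfulMapEvent() {
    TaskAttemptID attempt =
        TaskAttemptID.forName("attempt_200707121733_0001_m_000000_0");
    return new TaskCompletionEvent(0, attempt, 0, true,
        TaskCompletionEvent.Status.SUCCEEDED, "http://tracker-host:50060");
  }
}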
| 5,610 | 27.774359 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileInputFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.regex.PatternSyntaxException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A class that allows a map/red job to work on a sample of sequence files.
* The sample is decided by the filter class set by the job.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileInputFilter<K, V>
extends SequenceFileInputFormat<K, V> {
final private static String FILTER_CLASS = org.apache.hadoop.mapreduce.lib.
input.SequenceFileInputFilter.FILTER_CLASS;
public SequenceFileInputFilter() {
}
/** Create a record reader for the given split
* @param split file split
* @param job job configuration
* @param reporter reporter who sends report to task tracker
* @return RecordReader
*/
public RecordReader<K, V> getRecordReader(InputSplit split,
JobConf job, Reporter reporter)
throws IOException {
reporter.setStatus(split.toString());
return new FilterRecordReader<K, V>(job, (FileSplit) split);
}
/** set the filter class
*
* @param conf application configuration
* @param filterClass filter class
*/
public static void setFilterClass(Configuration conf, Class filterClass) {
conf.set(FILTER_CLASS, filterClass.getName());
}
/**
* filter interface
*/
public interface Filter extends
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.Filter {
}
/**
* base class for Filters
*/
public static abstract class FilterBase extends org.apache.hadoop.mapreduce.
lib.input.SequenceFileInputFilter.FilterBase
implements Filter {
}
/** Records filter by matching key to regex
*/
public static class RegexFilter extends FilterBase {
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.
RegexFilter rf;
public static void setPattern(Configuration conf, String regex)
throws PatternSyntaxException {
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.
RegexFilter.setPattern(conf, regex);
}
public RegexFilter() {
rf = new org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.
RegexFilter();
}
/** configure the Filter by checking the configuration
*/
public void setConf(Configuration conf) {
rf.setConf(conf);
}
/** Filtering method
* If key matches the regex, return true; otherwise return false
* @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)
*/
public boolean accept(Object key) {
return rf.accept(key);
}
}
  /** This class returns a percentage of records.
   * The percentage is determined by a filtering frequency <i>f</i> using
   * the criterion record# % f == 0.
* For example, if the frequency is 10, one out of 10 records is returned.
*/
public static class PercentFilter extends FilterBase {
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.
PercentFilter pf;
    /** Set the frequency and store it in conf
     * @param conf configuration
     * @param frequency filtering frequency
*/
public static void setFrequency(Configuration conf, int frequency) {
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.
PercentFilter.setFrequency(conf, frequency);
}
public PercentFilter() {
pf = new org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.
PercentFilter();
}
/** configure the filter by checking the configuration
*
* @param conf configuration
*/
public void setConf(Configuration conf) {
pf.setConf(conf);
}
/** Filtering method
* If record# % frequency==0, return true; otherwise return false
* @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)
*/
public boolean accept(Object key) {
return pf.accept(key);
}
}
  /** This class returns a set of records by examining the MD5 digest of its
   * key against a filtering frequency <i>f</i>. The filtering criterion is
   * MD5(key) % f == 0.
*/
public static class MD5Filter extends FilterBase {
public static final int MD5_LEN = org.apache.hadoop.mapreduce.lib.
input.SequenceFileInputFilter.MD5Filter.MD5_LEN;
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.MD5Filter mf;
/** set the filtering frequency in configuration
*
* @param conf configuration
* @param frequency filtering frequency
*/
public static void setFrequency(Configuration conf, int frequency) {
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.MD5Filter.
setFrequency(conf, frequency);
}
public MD5Filter() {
mf = new org.apache.hadoop.mapreduce.lib.input.
SequenceFileInputFilter.MD5Filter();
}
/** configure the filter according to configuration
*
* @param conf configuration
*/
public void setConf(Configuration conf) {
mf.setConf(conf);
}
/** Filtering method
* If MD5(key) % frequency==0, return true; otherwise return false
* @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)
*/
public boolean accept(Object key) {
return mf.accept(key);
}
}
private static class FilterRecordReader<K, V>
extends SequenceFileRecordReader<K, V> {
private Filter filter;
public FilterRecordReader(Configuration conf, FileSplit split)
throws IOException {
super(conf, split);
// instantiate filter
filter = (Filter)ReflectionUtils.newInstance(
conf.getClass(FILTER_CLASS, PercentFilter.class),
conf);
}
public synchronized boolean next(K key, V value) throws IOException {
while (next(key)) {
if (filter.accept(key)) {
getCurrentValue(value);
return true;
}
}
return false;
}
}
}
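/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * samples roughly one record in ten with the PercentFilter defined above. The
 * frequency value is a placeholder.
 */
class SequenceFileFilterConfigSketch {
  static JobConf newConf() {
    JobConf conf = new JobConf();
    conf.setInputFormat(SequenceFileInputFilter.class);
    SequenceFileInputFilter.setFilterClass(conf,
        SequenceFileInputFilter.PercentFilter.class);
    // Keep records whose position satisfies record# % 10 == 0.
    SequenceFileInputFilter.PercentFilter.setFrequency(conf, 10);
    return conf;
  }
}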
| 7,185 | 31.369369 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
/**************************************************
* Describes the current status of a task. This is
* not intended to be a comprehensive piece of data.
*
**************************************************/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class TaskStatus implements Writable, Cloneable {
static final Log LOG =
LogFactory.getLog(TaskStatus.class.getName());
//enumeration for reporting current phase of a task.
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static enum Phase{STARTING, MAP, SHUFFLE, SORT, REDUCE, CLEANUP}
// what state is the task in?
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static enum State {RUNNING, SUCCEEDED, FAILED, UNASSIGNED, KILLED,
COMMIT_PENDING, FAILED_UNCLEAN, KILLED_UNCLEAN}
private final TaskAttemptID taskid;
private float progress;
private volatile State runState;
private String diagnosticInfo;
private String stateString;
private String taskTracker;
private int numSlots;
private long startTime; //in ms
private long finishTime;
private long outputSize = -1L;
private volatile Phase phase = Phase.STARTING;
private Counters counters;
private boolean includeAllCounters;
private SortedRanges.Range nextRecordRange = new SortedRanges.Range();
// max task-status string size
static final int MAX_STRING_SIZE = 1024;
/**
* Testcases can override {@link #getMaxStringSize()} to control the max-size
* of strings in {@link TaskStatus}. Note that the {@link TaskStatus} is never
* exposed to clients or users (i.e Map or Reduce) and hence users cannot
* override this api to pass large strings in {@link TaskStatus}.
*/
protected int getMaxStringSize() {
return MAX_STRING_SIZE;
}
public TaskStatus() {
taskid = new TaskAttemptID();
numSlots = 0;
}
public TaskStatus(TaskAttemptID taskid, float progress, int numSlots,
State runState, String diagnosticInfo,
String stateString, String taskTracker,
Phase phase, Counters counters) {
this.taskid = taskid;
this.progress = progress;
this.numSlots = numSlots;
this.runState = runState;
setDiagnosticInfo(diagnosticInfo);
setStateString(stateString);
this.taskTracker = taskTracker;
this.phase = phase;
this.counters = counters;
this.includeAllCounters = true;
}
public TaskAttemptID getTaskID() { return taskid; }
public abstract boolean getIsMap();
public int getNumSlots() {
return numSlots;
}
public float getProgress() { return progress; }
public void setProgress(float progress) {
this.progress = progress;
}
public State getRunState() { return runState; }
public String getTaskTracker() {return taskTracker;}
public void setTaskTracker(String tracker) { this.taskTracker = tracker;}
public void setRunState(State runState) { this.runState = runState; }
public String getDiagnosticInfo() { return diagnosticInfo; }
public void setDiagnosticInfo(String info) {
// if the diag-info has already reached its max then log and return
if (diagnosticInfo != null
&& diagnosticInfo.length() == getMaxStringSize()) {
LOG.info("task-diagnostic-info for task " + taskid + " : " + info);
return;
}
diagnosticInfo =
((diagnosticInfo == null) ? info : diagnosticInfo.concat(info));
// trim the string to MAX_STRING_SIZE if needed
if (diagnosticInfo != null
&& diagnosticInfo.length() > getMaxStringSize()) {
LOG.info("task-diagnostic-info for task " + taskid + " : "
+ diagnosticInfo);
diagnosticInfo = diagnosticInfo.substring(0, getMaxStringSize());
}
}
public String getStateString() { return stateString; }
/**
* Set the state of the {@link TaskStatus}.
*/
public void setStateString(String stateString) {
if (stateString != null) {
if (stateString.length() <= getMaxStringSize()) {
this.stateString = stateString;
} else {
// log it
LOG.info("state-string for task " + taskid + " : " + stateString);
// trim the state string
this.stateString = stateString.substring(0, getMaxStringSize());
}
}
}
/**
* Get the next record range which is going to be processed by Task.
* @return nextRecordRange
*/
public SortedRanges.Range getNextRecordRange() {
return nextRecordRange;
}
/**
* Set the next record range which is going to be processed by Task.
* @param nextRecordRange
*/
public void setNextRecordRange(SortedRanges.Range nextRecordRange) {
this.nextRecordRange = nextRecordRange;
}
/**
   * Get task finish time. If shuffleFinishTime and sortFinishTime
   * are not set before, these are set to finishTime. It takes care of
   * the case when shuffle, sort and finish are completed within the
   * heartbeat interval and are not reported separately. If task state is
   * TaskStatus.FAILED then finish time represents when the task failed.
* @return finish time of the task.
*/
public long getFinishTime() {
return finishTime;
}
/**
* Sets finishTime for the task status if and only if the
* start time is set and passed finish time is greater than
* zero.
*
* @param finishTime finish time of task.
*/
void setFinishTime(long finishTime) {
if(this.getStartTime() > 0 && finishTime > 0) {
this.finishTime = finishTime;
} else {
//Using String utils to get the stack trace.
LOG.error("Trying to set finish time for task " + taskid +
" when no start time is set, stackTrace is : " +
StringUtils.stringifyException(new Exception()));
}
}
/**
* Get shuffle finish time for the task. If shuffle finish time was
* not set due to shuffle/sort/finish phases ending within same
* heartbeat interval, it is set to finish time of next phase i.e. sort
* or task finish when these are set.
* @return 0 if shuffleFinishTime, sortFinishTime and finish time are not set. else
* it returns approximate shuffle finish time.
*/
public long getShuffleFinishTime() {
return 0;
}
/**
* Set shuffle finish time.
* @param shuffleFinishTime
*/
void setShuffleFinishTime(long shuffleFinishTime) {}
/**
   * Get map phase finish time for the task. If map finish time was
* not set due to sort phase ending within same heartbeat interval,
* it is set to finish time of next phase i.e. sort phase
* when it is set.
* @return 0 if mapFinishTime, sortFinishTime are not set. else
* it returns approximate map finish time.
*/
public long getMapFinishTime() {
return 0;
}
/**
* Set map phase finish time.
* @param mapFinishTime
*/
void setMapFinishTime(long mapFinishTime) {}
/**
   * Get sort finish time for the task. If sort finish time was not set
   * due to sort and reduce phase finishing in same heartbeat interval, it is
* set to finish time, when finish time is set.
* @return 0 if sort finish time and finish time are not set, else returns sort
* finish time if that is set, else it returns finish time.
*/
public long getSortFinishTime() {
return 0;
}
/**
   * Sets sortFinishTime; if shuffleFinishTime is not set before,
   * then it is set to sortFinishTime.
* @param sortFinishTime
*/
void setSortFinishTime(long sortFinishTime) {}
/**
* Get start time of the task.
   * @return 0 if start time is not set, else returns start time.
*/
public long getStartTime() {
return startTime;
}
/**
* Set startTime of the task if start time is greater than zero.
* @param startTime start time
*/
void setStartTime(long startTime) {
//Making the assumption of passed startTime to be a positive
//long value explicit.
if (startTime > 0) {
this.startTime = startTime;
} else {
//Using String utils to get the stack trace.
LOG.error("Trying to set illegal startTime for task : " + taskid +
".Stack trace is : " +
StringUtils.stringifyException(new Exception()));
}
}
/**
   * Get current phase of this task. Phase.MAP in case of map tasks,
   * for reduce one of Phase.SHUFFLE, Phase.SORT or Phase.REDUCE.
   * @return the current phase of this task.
*/
public Phase getPhase(){
return this.phase;
}
/**
* Set current phase of this task.
* @param phase phase of this task
*/
public void setPhase(Phase phase){
TaskStatus.Phase oldPhase = getPhase();
if (oldPhase != phase){
// sort phase started
if (phase == TaskStatus.Phase.SORT){
if (oldPhase == TaskStatus.Phase.MAP) {
setMapFinishTime(System.currentTimeMillis());
}
else {
setShuffleFinishTime(System.currentTimeMillis());
}
}else if (phase == TaskStatus.Phase.REDUCE){
setSortFinishTime(System.currentTimeMillis());
}
this.phase = phase;
}
}
boolean inTaskCleanupPhase() {
return (this.phase == TaskStatus.Phase.CLEANUP &&
(this.runState == TaskStatus.State.FAILED_UNCLEAN ||
this.runState == TaskStatus.State.KILLED_UNCLEAN));
}
public boolean getIncludeAllCounters() {
return includeAllCounters;
}
public void setIncludeAllCounters(boolean send) {
includeAllCounters = send;
counters.setWriteAllCounters(send);
}
/**
* Get task's counters.
*/
public Counters getCounters() {
return counters;
}
/**
* Set the task's counters.
* @param counters
*/
public void setCounters(Counters counters) {
this.counters = counters;
}
/**
* Returns the number of bytes of output from this map.
*/
public long getOutputSize() {
return outputSize;
}
/**
* Set the size on disk of this task's output.
* @param l the number of map output bytes
*/
void setOutputSize(long l) {
outputSize = l;
}
/**
* Get the list of maps from which output-fetches failed.
*
* @return the list of maps from which output-fetches failed.
*/
public List<TaskAttemptID> getFetchFailedMaps() {
return null;
}
/**
* Add to the list of maps from which output-fetches failed.
*
* @param mapTaskId map from which fetch failed
*/
public abstract void addFetchFailedMap(TaskAttemptID mapTaskId);
/**
* Update the status of the task.
*
* This update is done by ping thread before sending the status.
*
* @param progress
* @param state
* @param counters
*/
synchronized void statusUpdate(float progress,
String state,
Counters counters) {
setProgress(progress);
setStateString(state);
setCounters(counters);
}
/**
* Update the status of the task.
*
* @param status updated status
*/
synchronized void statusUpdate(TaskStatus status) {
setProgress (status.getProgress());
this.runState = status.getRunState();
setStateString(status.getStateString());
this.nextRecordRange = status.getNextRecordRange();
setDiagnosticInfo(status.getDiagnosticInfo());
if (status.getStartTime() > 0) {
this.setStartTime(status.getStartTime());
}
if (status.getFinishTime() > 0) {
this.setFinishTime(status.getFinishTime());
}
this.phase = status.getPhase();
this.counters = status.getCounters();
this.outputSize = status.outputSize;
}
/**
* Update specific fields of task status
*
* This update is done in JobTracker when a cleanup attempt of task
* reports its status. Then update only specific fields, not all.
*
* @param runState
* @param progress
* @param state
* @param phase
* @param finishTime
*/
synchronized void statusUpdate(State runState,
float progress,
String state,
Phase phase,
long finishTime) {
setRunState(runState);
setProgress(progress);
setStateString(state);
setPhase(phase);
if (finishTime > 0) {
setFinishTime(finishTime);
}
}
/**
* Clear out transient information after sending out a status-update
* from either the {@link Task} to the {@link TaskTracker} or from the
* {@link TaskTracker} to the {@link JobTracker}.
*/
synchronized void clearStatus() {
// Clear diagnosticInfo
diagnosticInfo = "";
}
@Override
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException cnse) {
      // Shouldn't happen since we do implement Cloneable
throw new InternalError(cnse.toString());
}
}
//////////////////////////////////////////////
// Writable
//////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
taskid.write(out);
out.writeFloat(progress);
out.writeInt(numSlots);
WritableUtils.writeEnum(out, runState);
Text.writeString(out, diagnosticInfo);
Text.writeString(out, stateString);
WritableUtils.writeEnum(out, phase);
out.writeLong(startTime);
out.writeLong(finishTime);
out.writeBoolean(includeAllCounters);
out.writeLong(outputSize);
counters.write(out);
nextRecordRange.write(out);
}
public void readFields(DataInput in) throws IOException {
this.taskid.readFields(in);
setProgress(in.readFloat());
this.numSlots = in.readInt();
this.runState = WritableUtils.readEnum(in, State.class);
setDiagnosticInfo(StringInterner.weakIntern(Text.readString(in)));
setStateString(StringInterner.weakIntern(Text.readString(in)));
this.phase = WritableUtils.readEnum(in, Phase.class);
this.startTime = in.readLong();
this.finishTime = in.readLong();
counters = new Counters();
this.includeAllCounters = in.readBoolean();
this.outputSize = in.readLong();
counters.readFields(in);
nextRecordRange.readFields(in);
}
//////////////////////////////////////////////////////////////////////////////
// Factory-like methods to create/read/write appropriate TaskStatus objects
//////////////////////////////////////////////////////////////////////////////
static TaskStatus createTaskStatus(DataInput in, TaskAttemptID taskId,
float progress, int numSlots,
State runState, String diagnosticInfo,
String stateString, String taskTracker,
Phase phase, Counters counters)
throws IOException {
boolean isMap = in.readBoolean();
return createTaskStatus(isMap, taskId, progress, numSlots, runState,
diagnosticInfo, stateString, taskTracker, phase,
counters);
}
static TaskStatus createTaskStatus(boolean isMap, TaskAttemptID taskId,
float progress, int numSlots,
State runState, String diagnosticInfo,
String stateString, String taskTracker,
Phase phase, Counters counters) {
return (isMap) ? new MapTaskStatus(taskId, progress, numSlots, runState,
diagnosticInfo, stateString, taskTracker,
phase, counters) :
new ReduceTaskStatus(taskId, progress, numSlots, runState,
diagnosticInfo, stateString,
taskTracker, phase, counters);
}
static TaskStatus createTaskStatus(boolean isMap) {
return (isMap) ? new MapTaskStatus() : new ReduceTaskStatus();
}
}
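/*
 * Illustrative usage sketch (hypothetical helper, not part of the Hadoop API):
 * obtains a concrete status object through the package-private factory above.
 * All field values are placeholders.
 */
class TaskStatusFactorySketch {
  static TaskStatus runningMapStatus(TaskAttemptID attempt) {
    return TaskStatus.createTaskStatus(true, attempt, 0.0f, 1,
        TaskStatus.State.RUNNING, "", "map > map", "tracker_host:50060",
        TaskStatus.Phase.MAP, new Counters());
  }
}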
| 17,315 | 31.795455 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueRefresher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.List;
/**
 * Abstract QueueRefresher class. Schedulers can extend this and return an
* instance of this in the {@link #getQueueRefresher()} method. The
* {@link #refreshQueues(List)} method of this instance will be invoked by the
* {@link QueueManager} whenever it gets a request from an administrator to
* refresh its own queue-configuration. This method has a documented contract
* between the {@link QueueManager} and the {@link TaskScheduler}.
*
* Before calling QueueRefresher, the caller must hold the lock to the
* corresponding {@link TaskScheduler} (generally in the {@link JobTracker}).
*/
abstract class QueueRefresher {
/**
* Refresh the queue-configuration in the scheduler. This method has the
* following contract.
* <ol>
* <li>Before this method, {@link QueueManager} does a validation of the new
* queue-configuration. For e.g, currently addition of new queues, or
* removal of queues at any level in the hierarchy is not supported by
* {@link QueueManager} and so are not supported for schedulers too.</li>
* <li>Schedulers will be passed a list of {@link JobQueueInfo}s of the root
* queues i.e. the queues at the top level. All the descendants are properly
* linked from these top-level queues.</li>
* <li>Schedulers should use the scheduler specific queue properties from
* the newRootQueues, validate the properties themselves and apply them
* internally.</li>
* <li>
* Once this method returns successfully from the scheduler, the refresh of
* queue properties is assumed to be successful throughout and is then
* 'committed' internally to {@link QueueManager} too. It is guaranteed that,
* after a successful return from the scheduler, the queue refresh in
* QueueManager never fails. If such abnormalities ever happen, the queue
* framework will be inconsistent and will need a JT restart.</li>
* <li>If the scheduler throws an exception during {@link #refreshQueues(List)},
* {@link QueueManager} throws away the newly read configuration, retains
* the old (consistent) configuration and informs the request issuer about
* the error appropriately.</li>
* </ol>
*
* @param newRootQueues
*/
abstract void refreshQueues(List<JobQueueInfo> newRootQueues)
throws Throwable;
}
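// Implementation sketch (illustrative, not part of the original source): a
// scheduler could plug in a refresher along these lines. The logging body is
// hypothetical; a real scheduler would validate the scheduler-specific
// properties on the passed root queues and apply them internally.
//
//   class LoggingQueueRefresher extends QueueRefresher {
//     @Override
//     void refreshQueues(List<JobQueueInfo> newRootQueues) throws Throwable {
//       for (JobQueueInfo root : newRootQueues) {
//         System.out.println("Refreshing root queue " + root.getQueueName());
//       }
//     }
//   }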
| 3,170 | 46.328358 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JvmContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
class JvmContext implements Writable {
public static final Log LOG =
LogFactory.getLog(JvmContext.class);
JVMId jvmId;
String pid;
JvmContext() {
jvmId = new JVMId();
pid = "";
}
JvmContext(JVMId id, String pid) {
jvmId = id;
this.pid = pid;
}
public void readFields(DataInput in) throws IOException {
jvmId.readFields(in);
this.pid = Text.readString(in);
}
public void write(DataOutput out) throws IOException {
jvmId.write(out);
Text.writeString(out, pid);
}
}
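// Round-trip sketch (illustrative, not part of the original source): since
// JvmContext is a plain Writable, it can be serialized and rebuilt with the
// buffer classes from org.apache.hadoop.io.
//
//   JvmContext sent = new JvmContext();           // empty JVMId, empty pid
//   DataOutputBuffer out = new DataOutputBuffer();
//   sent.write(out);
//
//   DataInputBuffer in = new DataInputBuffer();
//   in.reset(out.getData(), out.getLength());
//   JvmContext received = new JvmContext();
//   received.readFields(in);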
| 1,620 | 26.948276 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidJobConfException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown when the jobconf misses some mandatory attributes
* or the value of some attributes is invalid.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InvalidJobConfException
extends IOException {
private static final long serialVersionUID = 1L;
public InvalidJobConfException() {
super();
}
public InvalidJobConfException(String msg) {
super(msg);
}
public InvalidJobConfException(String msg, Throwable t) {
super(msg, t);
}
public InvalidJobConfException(Throwable t) {
super(t);
}
}
| 1,565 | 28 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Used when the target file already exists for any operation and
* is not configured to be overwritten.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileAlreadyExistsException
extends IOException {
private static final long serialVersionUID = 1L;
public FileAlreadyExistsException() {
super();
}
public FileAlreadyExistsException(String msg) {
super(msg);
}
}
| 1,407 | 30.288889 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Utils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
/**
* A utility class that provides a path filter utility to filter out
* output/part files in the output dir.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Utils {
public static class OutputFileUtils {
/**
     * This class filters output (part) files from the given directory.
     * It does not accept files with the filenames _logs and _SUCCESS.
     * It can be used to list paths of the output directory as follows:
* Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
* new OutputFilesFilter()));
*/
public static class OutputFilesFilter extends OutputLogFilter {
public boolean accept(Path path) {
return super.accept(path)
&& !FileOutputCommitter.SUCCEEDED_FILE_NAME
.equals(path.getName());
}
}
/**
     * This class filters log files from the given directory.
     * It doesn't accept paths having _logs.
     * It can be used to list paths of the output directory as follows:
* Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
* new OutputLogFilter()));
*/
public static class OutputLogFilter implements PathFilter {
public boolean accept(Path path) {
return !"_logs".equals(path.getName());
}
}
}
}
| 2,395 | 37.031746 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.mapred.JvmTask;
import org.apache.hadoop.mapreduce.security.token.JobTokenSelector;
import org.apache.hadoop.security.token.TokenInfo;
/** Protocol that the task child process uses to contact its parent process. The
 * parent is a daemon which polls the central master for a new map or
* reduce task and runs it as a child process. All communication between child
* and parent is via this protocol. */
@TokenInfo(JobTokenSelector.class)
@InterfaceAudience.Private
@InterfaceStability.Stable
public interface TaskUmbilicalProtocol extends VersionedProtocol {
/**
* Changed the version to 2, since we have a new method getMapOutputs
* Changed version to 3 to have progress() return a boolean
* Changed the version to 4, since we have replaced
* TaskUmbilicalProtocol.progress(String, float, String,
* org.apache.hadoop.mapred.TaskStatus.Phase, Counters)
* with statusUpdate(String, TaskStatus)
*
* Version 5 changed counters representation for HADOOP-2248
* Version 6 changes the TaskStatus representation for HADOOP-2208
* Version 7 changes the done api (via HADOOP-3140). It now expects whether
* or not the task's output needs to be promoted.
* Version 8 changes {job|tip|task}id's to use their corresponding
* objects rather than strings.
* Version 9 changes the counter representation for HADOOP-1915
* Version 10 changed the TaskStatus format and added reportNextRecordRange
* for HADOOP-153
* Version 11 Adds RPCs for task commit as part of HADOOP-3150
* Version 12 getMapCompletionEvents() now also indicates if the events are
* stale or not. Hence the return type is a class that
* encapsulates the events and whether to reset events index.
* Version 13 changed the getTask method signature for HADOOP-249
* Version 14 changed the getTask method signature for HADOOP-4232
* Version 15 Adds FAILED_UNCLEAN and KILLED_UNCLEAN states for HADOOP-4759
* Version 16 Change in signature of getTask() for HADOOP-5488
* Version 17 Modified TaskID to be aware of the new TaskTypes
* Version 18 Added numRequiredSlots to TaskStatus for MAPREDUCE-516
* Version 19 Added fatalError for child to communicate fatal errors to TT
* */
public static final long versionID = 19L;
/**
* Called when a child task process starts, to get its task.
* @param context the JvmContext of the JVM w.r.t the TaskTracker that
* launched it
* @return Task object
* @throws IOException
*/
JvmTask getTask(JvmContext context) throws IOException;
/**
* Report child's progress to parent.
*
* @param taskId task-id of the child
* @param taskStatus status of the child
* @throws IOException
* @throws InterruptedException
* @return True if the task is known
*/
boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException;
/** Report error messages back to parent. Calls should be sparing, since all
* such messages are held in the job tracker.
* @param taskid the id of the task involved
* @param trace the text to report
*/
void reportDiagnosticInfo(TaskAttemptID taskid, String trace) throws IOException;
/**
 * Report the record range which is going to be processed next by the Task.
* @param taskid the id of the task involved
* @param range the range of record sequence nos
* @throws IOException
*/
void reportNextRecordRange(TaskAttemptID taskid, SortedRanges.Range range)
throws IOException;
/** Periodically called by child to check if parent is still alive.
* @return True if the task is known
*/
boolean ping(TaskAttemptID taskid) throws IOException;
/** Report that the task is successfully completed. Failure is assumed if
* the task process exits without calling this.
* @param taskid task's id
*/
void done(TaskAttemptID taskid) throws IOException;
/**
* Report that the task is complete, but its commit is pending.
*
* @param taskId task's id
* @param taskStatus status of the child
* @throws IOException
*/
void commitPending(TaskAttemptID taskId, TaskStatus taskStatus)
throws IOException, InterruptedException;
/**
 * Polling to know whether the task can go ahead with its commit.
* @param taskid
* @return true/false
* @throws IOException
*/
boolean canCommit(TaskAttemptID taskid) throws IOException;
/** Report that a reduce-task couldn't shuffle map-outputs.*/
void shuffleError(TaskAttemptID taskId, String message) throws IOException;
  /** Report that the task encountered a local filesystem error. */
void fsError(TaskAttemptID taskId, String message) throws IOException;
  /** Report that the task encountered a fatal error. */
void fatalError(TaskAttemptID taskId, String message) throws IOException;
/** Called by a reduce task to get the map output locations for finished maps.
* Returns an update centered around the map-task-completion-events.
* The update also piggybacks the information whether the events copy at the
* task-tracker has changed or not. This will trigger some action at the
* child-process.
*
* @param fromIndex the index starting from which the locations should be
* fetched
* @param maxLocs the max number of locations to fetch
* @param id The attempt id of the task that is trying to communicate
* @return A {@link MapTaskCompletionEventsUpdate}
*/
MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId,
int fromIndex,
int maxLocs,
TaskAttemptID id)
throws IOException;
}
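// Child-side usage sketch (illustrative, not part of the original source):
// the rough shape of how a task child drives this protocol. Obtaining the
// "umbilical" RPC proxy, the JvmContext and the TaskStatus is elided.
//
//   JvmTask jvmTask = umbilical.getTask(jvmContext);
//   ...
//   while (stillRunning) {
//     if (!umbilical.statusUpdate(taskId, status)) {
//       break;                                   // parent no longer knows this task
//     }
//   }
//   umbilical.commitPending(taskId, status);     // if the task has output to commit
//   while (!umbilical.canCommit(taskId)) {
//     Thread.sleep(1000);                        // poll until commit is allowed
//   }
//   umbilical.done(taskId);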
| 6,894 | 40.787879 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Operation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.mapreduce.JobACL;
/**
* Generic operation that maps to the dependent set of ACLs that drive the
* authorization of the operation.
*/
@InterfaceAudience.Private
public enum Operation {
VIEW_JOB_COUNTERS(QueueACL.ADMINISTER_JOBS, JobACL.VIEW_JOB),
VIEW_JOB_DETAILS(QueueACL.ADMINISTER_JOBS, JobACL.VIEW_JOB),
VIEW_TASK_LOGS(QueueACL.ADMINISTER_JOBS, JobACL.VIEW_JOB),
KILL_JOB(QueueACL.ADMINISTER_JOBS, JobACL.MODIFY_JOB),
FAIL_TASK(QueueACL.ADMINISTER_JOBS, JobACL.MODIFY_JOB),
KILL_TASK(QueueACL.ADMINISTER_JOBS, JobACL.MODIFY_JOB),
SET_JOB_PRIORITY(QueueACL.ADMINISTER_JOBS, JobACL.MODIFY_JOB),
SUBMIT_JOB(QueueACL.SUBMIT_JOB, null);
public QueueACL qACLNeeded;
public JobACL jobACLNeeded;
Operation(QueueACL qACL, JobACL jobACL) {
this.qACLNeeded = qACL;
this.jobACLNeeded = jobACL;
}
}
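// Usage sketch (illustrative, not part of the original source): authorization
// code can read the required ACLs straight off the enum constant.
//
//   Operation op = Operation.KILL_JOB;
//   QueueACL requiredQueueAcl = op.qACLNeeded;   // QueueACL.ADMINISTER_JOBS
//   JobACL requiredJobAcl = op.jobACLNeeded;     // JobACL.MODIFY_JOB (null for SUBMIT_JOB)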
| 1,756 | 38.044444 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.StringUtils;
import com.google.common.collect.Iterables;
/**
* A base class for file-based {@link InputFormat}.
*
* <p><code>FileInputFormat</code> is the base class for all file-based
* <code>InputFormat</code>s. This provides a generic implementation of
* {@link #getSplits(JobConf, int)}.
*
* Implementations of <code>FileInputFormat</code> can also override the
* {@link #isSplitable(FileSystem, Path)} method to prevent input files
* from being split-up in certain situations. Implementations that may
* deal with non-splittable files <i>must</i> override this method, since
* the default implementation assumes splitting is always possible.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {
public static final Log LOG =
LogFactory.getLog(FileInputFormat.class);
@Deprecated
public static enum Counter {
BYTES_READ
}
public static final String NUM_INPUT_FILES =
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.NUM_INPUT_FILES;
public static final String INPUT_DIR_RECURSIVE =
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE;
private static final double SPLIT_SLOP = 1.1; // 10% slop
private long minSplitSize = 1;
private static final PathFilter hiddenFileFilter = new PathFilter(){
public boolean accept(Path p){
String name = p.getName();
return !name.startsWith("_") && !name.startsWith(".");
}
};
protected void setMinSplitSize(long minSplitSize) {
this.minSplitSize = minSplitSize;
}
/**
* Proxy PathFilter that accepts a path only if all filters given in the
   * constructor do. Used by listStatus() to apply the built-in
   * hiddenFileFilter together with a user-provided one (if any).
*/
private static class MultiPathFilter implements PathFilter {
private List<PathFilter> filters;
public MultiPathFilter(List<PathFilter> filters) {
this.filters = filters;
}
public boolean accept(Path path) {
for (PathFilter filter : filters) {
if (!filter.accept(path)) {
return false;
}
}
return true;
}
}
/**
   * Is the given filename splittable? Usually true, but if the file is
* stream compressed, it will not be.
*
* The default implementation in <code>FileInputFormat</code> always returns
* true. Implementations that may deal with non-splittable files <i>must</i>
* override this method.
*
* <code>FileInputFormat</code> implementations can override this and return
* <code>false</code> to ensure that individual input files are never split-up
* so that {@link Mapper}s process entire files.
*
* @param fs the file system that the file is on
* @param filename the file name to check
   * @return is this file splittable?
*/
protected boolean isSplitable(FileSystem fs, Path filename) {
return true;
}
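  // Override sketch (illustrative, not part of the original source): a format
  // that reads stream-compressed files whole might disable splitting like
  // this. The ".gz" suffix check is a simplification; production code would
  // typically consult a CompressionCodecFactory instead.
  //
  //   @Override
  //   protected boolean isSplitable(FileSystem fs, Path filename) {
  //     return !filename.getName().endsWith(".gz");
  //   }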
public abstract RecordReader<K, V> getRecordReader(InputSplit split,
JobConf job,
Reporter reporter)
throws IOException;
/**
* Set a PathFilter to be applied to the input paths for the map-reduce job.
*
* @param filter the PathFilter class use for filtering the input paths.
*/
public static void setInputPathFilter(JobConf conf,
Class<? extends PathFilter> filter) {
conf.setClass(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.PATHFILTER_CLASS, filter, PathFilter.class);
}
/**
* Get a PathFilter instance of the filter set for the input paths.
*
* @return the PathFilter instance set for the job, NULL if none has been set.
*/
public static PathFilter getInputPathFilter(JobConf conf) {
Class<? extends PathFilter> filterClass = conf.getClass(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.PATHFILTER_CLASS,
null, PathFilter.class);
return (filterClass != null) ?
ReflectionUtils.newInstance(filterClass, conf) : null;
}
/**
* Add files in the input path recursively into the results.
* @param result
* The List to store all files.
* @param fs
* The FileSystem.
* @param path
* The input path.
* @param inputFilter
* The input filter that can be used to filter files/dirs.
* @throws IOException
*/
protected void addInputPathRecursively(List<FileStatus> result,
FileSystem fs, Path path, PathFilter inputFilter)
throws IOException {
RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
} else {
result.add(stat);
}
}
}
}
/** List input directories.
* Subclasses may override to, e.g., select only files matching a regular
* expression.
*
* @param job the job to list input paths for
* @return array of FileStatus objects
   * @throws IOException if no input paths are specified.
*/
protected FileStatus[] listStatus(JobConf job) throws IOException {
Path[] dirs = getInputPaths(job);
if (dirs.length == 0) {
throw new IOException("No input paths specified in job");
}
// get tokens for all the required FileSystems..
TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job);
// Whether we need to recursive look into the directory structure
boolean recursive = job.getBoolean(INPUT_DIR_RECURSIVE, false);
// creates a MultiPathFilter with the hiddenFileFilter and the
// user provided one (if any).
List<PathFilter> filters = new ArrayList<PathFilter>();
filters.add(hiddenFileFilter);
PathFilter jobFilter = getInputPathFilter(job);
if (jobFilter != null) {
filters.add(jobFilter);
}
PathFilter inputFilter = new MultiPathFilter(filters);
FileStatus[] result;
int numThreads = job
.getInt(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
StopWatch sw = new StopWatch().start();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
} else {
Iterable<FileStatus> locatedFiles = null;
try {
LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
job, dirs, recursive, inputFilter, false);
locatedFiles = locatedFileStatusFetcher.getFileStatuses();
} catch (InterruptedException e) {
throw new IOException("Interrupted while getting file statuses");
}
result = Iterables.toArray(locatedFiles, FileStatus.class);
}
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Time taken to get FileStatuses: "
+ sw.now(TimeUnit.MILLISECONDS));
}
LOG.info("Total input files to process : " + result.length);
return result;
}
private List<FileStatus> singleThreadedListStatus(JobConf job, Path[] dirs,
PathFilter inputFilter, boolean recursive) throws IOException {
List<FileStatus> result = new ArrayList<FileStatus>();
List<IOException> errors = new ArrayList<IOException>();
for (Path p: dirs) {
FileSystem fs = p.getFileSystem(job);
FileStatus[] matches = fs.globStatus(p, inputFilter);
if (matches == null) {
errors.add(new IOException("Input path does not exist: " + p));
} else if (matches.length == 0) {
errors.add(new IOException("Input Pattern " + p + " matches 0 files"));
} else {
for (FileStatus globStat: matches) {
if (globStat.isDirectory()) {
RemoteIterator<LocatedFileStatus> iter =
fs.listLocatedStatus(globStat.getPath());
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (recursive && stat.isDirectory()) {
addInputPathRecursively(result, fs, stat.getPath(),
inputFilter);
} else {
result.add(stat);
}
}
}
} else {
result.add(globStat);
}
}
}
}
if (!errors.isEmpty()) {
throw new InvalidInputException(errors);
}
return result;
}
/**
* A factory that makes the split for this class. It can be overridden
* by sub-classes to make sub-types
*/
protected FileSplit makeSplit(Path file, long start, long length,
String[] hosts) {
return new FileSplit(file, start, length, hosts);
}
/**
* A factory that makes the split for this class. It can be overridden
* by sub-classes to make sub-types
*/
protected FileSplit makeSplit(Path file, long start, long length,
String[] hosts, String[] inMemoryHosts) {
return new FileSplit(file, start, length, hosts, inMemoryHosts);
}
/** Splits files returned by {@link #listStatus(JobConf)} when
* they're too big.*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
StopWatch sw = new StopWatch().start();
FileStatus[] files = listStatus(job);
// Save the number of input files for metrics/loadgen
job.setLong(NUM_INPUT_FILES, files.length);
long totalSize = 0; // compute total size
for (FileStatus file: files) { // check we have valid files
if (file.isDirectory()) {
throw new IOException("Not a file: "+ file.getPath());
}
totalSize += file.getLen();
}
long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);
// generate splits
ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
NetworkTopology clusterMap = new NetworkTopology();
for (FileStatus file: files) {
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
FileSystem fs = path.getFileSystem(job);
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
if (isSplitable(fs, path)) {
long blockSize = file.getBlockSize();
long splitSize = computeSplitSize(goalSize, minSize, blockSize);
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations,
length-bytesRemaining, splitSize, clusterMap);
splits.add(makeSplit(path, length-bytesRemaining, splitSize,
splitHosts[0], splitHosts[1]));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations, length
- bytesRemaining, bytesRemaining, clusterMap);
splits.add(makeSplit(path, length - bytesRemaining, bytesRemaining,
splitHosts[0], splitHosts[1]));
}
} else {
String[][] splitHosts = getSplitHostsAndCachedHosts(blkLocations,0,length,clusterMap);
splits.add(makeSplit(path, 0, length, splitHosts[0], splitHosts[1]));
}
} else {
//Create empty hosts array for zero length files
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits.toArray(new FileSplit[splits.size()]);
}
protected long computeSplitSize(long goalSize, long minSize,
long blockSize) {
return Math.max(minSize, Math.min(goalSize, blockSize));
}
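  // Worked example (illustrative): with totalSize = 750MB and numSplits = 1,
  // goalSize = 750MB; with minSize = 1 and blockSize = 128MB the result is
  // max(1, min(750MB, 128MB)) = 128MB, i.e. splits default to one HDFS block
  // unless the configured split minimum size pushes them larger.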
protected int getBlockIndex(BlockLocation[] blkLocations,
long offset) {
for (int i = 0 ; i < blkLocations.length; i++) {
// is the offset inside this block?
if ((blkLocations[i].getOffset() <= offset) &&
(offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){
return i;
}
}
BlockLocation last = blkLocations[blkLocations.length -1];
long fileLength = last.getOffset() + last.getLength() -1;
throw new IllegalArgumentException("Offset " + offset +
" is outside of file (0.." +
fileLength + ")");
}
/**
* Sets the given comma separated paths as the list of inputs
* for the map-reduce job.
*
* @param conf Configuration of the job
* @param commaSeparatedPaths Comma separated paths to be set as
* the list of inputs for the map-reduce job.
*/
public static void setInputPaths(JobConf conf, String commaSeparatedPaths) {
setInputPaths(conf, StringUtils.stringToPath(
getPathStrings(commaSeparatedPaths)));
}
/**
* Add the given comma separated paths to the list of inputs for
* the map-reduce job.
*
* @param conf The configuration of the job
* @param commaSeparatedPaths Comma separated paths to be added to
* the list of inputs for the map-reduce job.
*/
public static void addInputPaths(JobConf conf, String commaSeparatedPaths) {
for (String str : getPathStrings(commaSeparatedPaths)) {
addInputPath(conf, new Path(str));
}
}
/**
* Set the array of {@link Path}s as the list of inputs
* for the map-reduce job.
*
* @param conf Configuration of the job.
* @param inputPaths the {@link Path}s of the input directories/files
* for the map-reduce job.
*/
public static void setInputPaths(JobConf conf, Path... inputPaths) {
Path path = new Path(conf.getWorkingDirectory(), inputPaths[0]);
StringBuffer str = new StringBuffer(StringUtils.escapeString(path.toString()));
for(int i = 1; i < inputPaths.length;i++) {
str.append(StringUtils.COMMA_STR);
path = new Path(conf.getWorkingDirectory(), inputPaths[i]);
str.append(StringUtils.escapeString(path.toString()));
}
conf.set(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.INPUT_DIR, str.toString());
}
/**
* Add a {@link Path} to the list of inputs for the map-reduce job.
*
* @param conf The configuration of the job
* @param path {@link Path} to be added to the list of inputs for
* the map-reduce job.
*/
public static void addInputPath(JobConf conf, Path path ) {
path = new Path(conf.getWorkingDirectory(), path);
String dirStr = StringUtils.escapeString(path.toString());
String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.INPUT_DIR);
conf.set(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.INPUT_DIR, dirs == null ? dirStr :
dirs + StringUtils.COMMA_STR + dirStr);
}
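  // Usage sketch (illustrative, not part of the original source): configuring
  // job inputs. Paths are resolved against the job's working directory,
  // comma-escaped and stored under the INPUT_DIR configuration key.
  //
  //   JobConf job = new JobConf();
  //   FileInputFormat.setInputPaths(job, new Path("/data/in1"), new Path("/data/in2"));
  //   FileInputFormat.addInputPath(job, new Path("/data/in3"));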
// This method escapes commas in the glob pattern of the given paths.
private static String[] getPathStrings(String commaSeparatedPaths) {
int length = commaSeparatedPaths.length();
int curlyOpen = 0;
int pathStart = 0;
boolean globPattern = false;
List<String> pathStrings = new ArrayList<String>();
for (int i=0; i<length; i++) {
char ch = commaSeparatedPaths.charAt(i);
switch(ch) {
case '{' : {
curlyOpen++;
if (!globPattern) {
globPattern = true;
}
break;
}
case '}' : {
curlyOpen--;
if (curlyOpen == 0 && globPattern) {
globPattern = false;
}
break;
}
case ',' : {
if (!globPattern) {
pathStrings.add(commaSeparatedPaths.substring(pathStart, i));
pathStart = i + 1 ;
}
break;
}
default:
continue; // nothing special to do for this character
}
}
pathStrings.add(commaSeparatedPaths.substring(pathStart, length));
return pathStrings.toArray(new String[0]);
}
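  // Example (illustrative): getPathStrings("/a,/b/{x,y},/c") yields
  // { "/a", "/b/{x,y}", "/c" } -- the comma inside the curly-brace glob is
  // kept as part of the pattern rather than treated as a path separator.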
/**
* Get the list of input {@link Path}s for the map-reduce job.
*
* @param conf The configuration of the job
* @return the list of input {@link Path}s for the map-reduce job.
*/
public static Path[] getInputPaths(JobConf conf) {
String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
FileInputFormat.INPUT_DIR, "");
String [] list = StringUtils.split(dirs);
Path[] result = new Path[list.length];
for (int i = 0; i < list.length; i++) {
result[i] = new Path(StringUtils.unEscapeString(list[i]));
}
return result;
}
private void sortInDescendingOrder(List<NodeInfo> mylist) {
Collections.sort(mylist, new Comparator<NodeInfo> () {
public int compare(NodeInfo obj1, NodeInfo obj2) {
if (obj1 == null || obj2 == null)
return -1;
if (obj1.getValue() == obj2.getValue()) {
return 0;
}
else {
return ((obj1.getValue() < obj2.getValue()) ? 1 : -1);
}
}
}
);
}
/**
* This function identifies and returns the hosts that contribute
* most for a given split. For calculating the contribution, rack
* locality is treated on par with host locality, so hosts from racks
* that contribute the most are preferred over hosts on racks that
* contribute less
* @param blkLocations The list of block locations
* @param offset
* @param splitSize
* @return an array of hosts that contribute most to this split
* @throws IOException
*/
protected String[] getSplitHosts(BlockLocation[] blkLocations,
long offset, long splitSize, NetworkTopology clusterMap) throws IOException {
return getSplitHostsAndCachedHosts(blkLocations, offset, splitSize,
clusterMap)[0];
}
/**
* This function identifies and returns the hosts that contribute
* most for a given split. For calculating the contribution, rack
* locality is treated on par with host locality, so hosts from racks
* that contribute the most are preferred over hosts on racks that
* contribute less
* @param blkLocations The list of block locations
* @param offset
* @param splitSize
* @return two arrays - one of hosts that contribute most to this split, and
* one of hosts that contribute most to this split that have the data
* cached on them
* @throws IOException
*/
private String[][] getSplitHostsAndCachedHosts(BlockLocation[] blkLocations,
long offset, long splitSize, NetworkTopology clusterMap)
throws IOException {
int startIndex = getBlockIndex(blkLocations, offset);
long bytesInThisBlock = blkLocations[startIndex].getOffset() +
blkLocations[startIndex].getLength() - offset;
//If this is the only block, just return
if (bytesInThisBlock >= splitSize) {
return new String[][] { blkLocations[startIndex].getHosts(),
blkLocations[startIndex].getCachedHosts() };
}
long bytesInFirstBlock = bytesInThisBlock;
int index = startIndex + 1;
splitSize -= bytesInThisBlock;
while (splitSize > 0) {
bytesInThisBlock =
Math.min(splitSize, blkLocations[index++].getLength());
splitSize -= bytesInThisBlock;
}
long bytesInLastBlock = bytesInThisBlock;
int endIndex = index - 1;
Map <Node,NodeInfo> hostsMap = new IdentityHashMap<Node,NodeInfo>();
Map <Node,NodeInfo> racksMap = new IdentityHashMap<Node,NodeInfo>();
String [] allTopos = new String[0];
// Build the hierarchy and aggregate the contribution of
// bytes at each level. See TestGetSplitHosts.java
for (index = startIndex; index <= endIndex; index++) {
// Establish the bytes in this block
if (index == startIndex) {
bytesInThisBlock = bytesInFirstBlock;
}
else if (index == endIndex) {
bytesInThisBlock = bytesInLastBlock;
}
else {
bytesInThisBlock = blkLocations[index].getLength();
}
allTopos = blkLocations[index].getTopologyPaths();
// If no topology information is available, just
// prefix a fakeRack
if (allTopos.length == 0) {
allTopos = fakeRacks(blkLocations, index);
}
// NOTE: This code currently works only for one level of
// hierarchy (rack/host). However, it is relatively easy
// to extend this to support aggregation at different
// levels
for (String topo: allTopos) {
Node node, parentNode;
NodeInfo nodeInfo, parentNodeInfo;
node = clusterMap.getNode(topo);
if (node == null) {
node = new NodeBase(topo);
clusterMap.add(node);
}
nodeInfo = hostsMap.get(node);
if (nodeInfo == null) {
nodeInfo = new NodeInfo(node);
hostsMap.put(node,nodeInfo);
parentNode = node.getParent();
parentNodeInfo = racksMap.get(parentNode);
if (parentNodeInfo == null) {
parentNodeInfo = new NodeInfo(parentNode);
racksMap.put(parentNode,parentNodeInfo);
}
parentNodeInfo.addLeaf(nodeInfo);
}
else {
nodeInfo = hostsMap.get(node);
parentNode = node.getParent();
parentNodeInfo = racksMap.get(parentNode);
}
nodeInfo.addValue(index, bytesInThisBlock);
parentNodeInfo.addValue(index, bytesInThisBlock);
} // for all topos
} // for all indices
// We don't yet support cached hosts when bytesInThisBlock > splitSize
return new String[][] { identifyHosts(allTopos.length, racksMap),
new String[0]};
}
private String[] identifyHosts(int replicationFactor,
Map<Node,NodeInfo> racksMap) {
String [] retVal = new String[replicationFactor];
List <NodeInfo> rackList = new LinkedList<NodeInfo>();
rackList.addAll(racksMap.values());
// Sort the racks based on their contribution to this split
sortInDescendingOrder(rackList);
boolean done = false;
int index = 0;
// Get the host list for all our aggregated items, sort
// them and return the top entries
for (NodeInfo ni: rackList) {
Set<NodeInfo> hostSet = ni.getLeaves();
List<NodeInfo>hostList = new LinkedList<NodeInfo>();
hostList.addAll(hostSet);
// Sort the hosts in this rack based on their contribution
sortInDescendingOrder(hostList);
for (NodeInfo host: hostList) {
// Strip out the port number from the host name
retVal[index++] = host.node.getName().split(":")[0];
if (index == replicationFactor) {
done = true;
break;
}
}
if (done == true) {
break;
}
}
return retVal;
}
private String[] fakeRacks(BlockLocation[] blkLocations, int index)
throws IOException {
String[] allHosts = blkLocations[index].getHosts();
String[] allTopos = new String[allHosts.length];
for (int i = 0; i < allHosts.length; i++) {
allTopos[i] = NetworkTopology.DEFAULT_RACK + "/" + allHosts[i];
}
return allTopos;
}
private static class NodeInfo {
final Node node;
final Set<Integer> blockIds;
final Set<NodeInfo> leaves;
private long value;
NodeInfo(Node node) {
this.node = node;
blockIds = new HashSet<Integer>();
leaves = new HashSet<NodeInfo>();
}
long getValue() {return value;}
void addValue(int blockIndex, long value) {
if (blockIds.add(blockIndex) == true) {
this.value += value;
}
}
Set<NodeInfo> getLeaves() { return leaves;}
void addLeaf(NodeInfo nodeInfo) {
leaves.add(nodeInfo);
}
}
}
| 26,631 | 33.677083 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* Manipulate the working area for the transient store for maps and reduces.
*
* This class is used by map and reduce tasks to identify the directories that
 * they need to write to/read from for intermediate files. These methods are
 * called from the Child process running the Task.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MROutputFiles extends MapOutputFile {
private LocalDirAllocator lDirAlloc =
new LocalDirAllocator(MRConfig.LOCAL_DIR);
public MROutputFiles() {
}
/**
* Return the path to local map output file created earlier
*
* @return path
* @throws IOException
*/
@Override
public Path getOutputFile()
throws IOException {
return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING, getConf());
}
/**
* Create a local map output file name.
*
* @param size the size of the file
* @return path
* @throws IOException
*/
@Override
public Path getOutputFileForWrite(long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING, size, getConf());
}
/**
* Create a local map output file name on the same volume.
*/
@Override
public Path getOutputFileForWriteInVolume(Path existing) {
return new Path(existing.getParent(), MAP_OUTPUT_FILENAME_STRING);
}
/**
* Return the path to a local map output index file created earlier
*
* @return path
* @throws IOException
*/
@Override
public Path getOutputIndexFile()
throws IOException {
return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
getConf());
}
/**
* Create a local map output index file name.
*
* @param size the size of the file
* @return path
* @throws IOException
*/
@Override
public Path getOutputIndexFileForWrite(long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
+ MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
size, getConf());
}
/**
* Create a local map output index file name on the same volume.
*/
@Override
public Path getOutputIndexFileForWriteInVolume(Path existing) {
return new Path(existing.getParent(),
MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING);
}
/**
* Return a local map spill file created earlier.
*
* @param spillNumber the number
* @return path
* @throws IOException
*/
@Override
public Path getSpillFile(int spillNumber)
throws IOException {
return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out", getConf());
}
/**
* Create a local map spill file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return path
* @throws IOException
*/
@Override
public Path getSpillFileForWrite(int spillNumber, long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out", size, getConf());
}
/**
* Return a local map spill index file created earlier
*
* @param spillNumber the number
* @return path
* @throws IOException
*/
@Override
public Path getSpillIndexFile(int spillNumber)
throws IOException {
return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out.index", getConf());
}
/**
* Create a local map spill index file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return path
* @throws IOException
*/
@Override
public Path getSpillIndexFileForWrite(int spillNumber, long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
+ spillNumber + ".out.index", size, getConf());
}
/**
* Return a local reduce input file created earlier
*
* @param mapId a map task id
* @return path
* @throws IOException
*/
@Override
public Path getInputFile(int mapId)
throws IOException {
return lDirAlloc.getLocalPathToRead(String.format(
REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, Integer
.valueOf(mapId)), getConf());
}
/**
* Create a local reduce input file name.
*
* @param mapId a map task id
* @param size the size of the file
* @return path
* @throws IOException
*/
@Override
public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(String.format(
REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, mapId.getId()),
size, getConf());
}
/** Removes all of the files related to a task. */
@Override
public void removeAll()
throws IOException {
((JobConf)getConf()).deleteLocalFiles(MRJobConfig.OUTPUT);
}
@Override
public void setConf(Configuration conf) {
if (!(conf instanceof JobConf)) {
conf = new JobConf(conf);
}
super.setConf(conf);
}
}
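// Layout sketch (illustrative, not part of the original source): relative to
// each local dir configured for MRConfig.LOCAL_DIR, the methods above resolve
// names roughly along these lines (the exact constants live in MapOutputFile):
//
//   output/file.out             map output               (getOutputFile*)
//   output/file.out.index       map output index         (getOutputIndexFile*)
//   output/spill3.out           spill number 3           (getSpillFile*)
//   output/spill3.out.index     spill 3 index            (getSpillIndexFile*)
//   output/map_7.out            reduce input from map 7  (getInputFile*)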
| 6,492 | 27.603524 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.util.ProcessTree;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.log4j.Appender;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import com.google.common.base.Charsets;
/**
* A simple logger to handle the task-specific user logs.
* This class uses the system property <code>hadoop.log.dir</code>.
*
*/
@InterfaceAudience.Private
public class TaskLog {
private static final Log LOG =
LogFactory.getLog(TaskLog.class);
static final String USERLOGS_DIR_NAME = "userlogs";
private static final File LOG_DIR =
new File(getBaseLogDir(), USERLOGS_DIR_NAME).getAbsoluteFile();
// localFS is set in (and used by) writeToIndexFile()
static LocalFileSystem localFS = null;
public static String getMRv2LogDir() {
return System.getProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
}
public static File getTaskLogFile(TaskAttemptID taskid, boolean isCleanup,
LogName filter) {
if (getMRv2LogDir() != null) {
return new File(getMRv2LogDir(), filter.toString());
} else {
return new File(getAttemptDir(taskid, isCleanup), filter.toString());
}
}
static File getRealTaskLogFileLocation(TaskAttemptID taskid,
boolean isCleanup, LogName filter) {
LogFileDetail l;
try {
l = getLogFileDetail(taskid, filter, isCleanup);
} catch (IOException ie) {
LOG.error("getTaskLogFileDetail threw an exception " + ie);
return null;
}
return new File(l.location, filter.toString());
}
private static class LogFileDetail {
final static String LOCATION = "LOG_DIR:";
String location;
long start;
long length;
}
private static LogFileDetail getLogFileDetail(TaskAttemptID taskid,
LogName filter,
boolean isCleanup)
throws IOException {
File indexFile = getIndexFile(taskid, isCleanup);
BufferedReader fis = new BufferedReader(new InputStreamReader(
SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null),
Charsets.UTF_8));
//the format of the index file is
//LOG_DIR: <the dir where the task logs are really stored>
//stdout:<start-offset in the stdout file> <length>
//stderr:<start-offset in the stderr file> <length>
//syslog:<start-offset in the syslog file> <length>
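    // e.g. a typical index file might read:
    //   LOG_DIR:/path/to/userlogs/job_xxx/attempt_xxx
    //   stdout:0 1024
    //   stderr:0 230
    //   syslog:0 8192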
LogFileDetail l = new LogFileDetail();
String str = null;
try {
str = fis.readLine();
if (str == null) { // the file doesn't have anything
throw new IOException("Index file for the log of " + taskid
+ " doesn't exist.");
}
l.location = str.substring(str.indexOf(LogFileDetail.LOCATION)
+ LogFileDetail.LOCATION.length());
      // Special cases are the debugout and profile.out files. They are
      // guaranteed to be associated with each task attempt since jvm reuse
      // is disabled when profiling/debugging is enabled.
if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) {
l.length = new File(l.location, filter.toString()).length();
l.start = 0;
fis.close();
return l;
}
str = fis.readLine();
while (str != null) {
// look for the exact line containing the logname
if (str.contains(filter.toString())) {
str = str.substring(filter.toString().length() + 1);
String[] startAndLen = str.split(" ");
l.start = Long.parseLong(startAndLen[0]);
l.length = Long.parseLong(startAndLen[1]);
break;
}
str = fis.readLine();
}
fis.close();
fis = null;
} finally {
IOUtils.cleanup(LOG, fis);
}
return l;
}
private static File getTmpIndexFile(TaskAttemptID taskid, boolean isCleanup) {
return new File(getAttemptDir(taskid, isCleanup), "log.tmp");
}
static File getIndexFile(TaskAttemptID taskid, boolean isCleanup) {
return new File(getAttemptDir(taskid, isCleanup), "log.index");
}
/**
* Obtain the owner of the log dir. This is
* determined by checking the job's log directory.
*/
static String obtainLogDirOwner(TaskAttemptID taskid) throws IOException {
Configuration conf = new Configuration();
FileSystem raw = FileSystem.getLocal(conf).getRaw();
Path jobLogDir = new Path(getJobDir(taskid.getJobID()).getAbsolutePath());
FileStatus jobStat = raw.getFileStatus(jobLogDir);
return jobStat.getOwner();
}
static String getBaseLogDir() {
return System.getProperty("hadoop.log.dir");
}
static File getAttemptDir(TaskAttemptID taskid, boolean isCleanup) {
String cleanupSuffix = isCleanup ? ".cleanup" : "";
return new File(getJobDir(taskid.getJobID()), taskid + cleanupSuffix);
}
private static long prevOutLength;
private static long prevErrLength;
private static long prevLogLength;
private static synchronized
void writeToIndexFile(String logLocation,
boolean isCleanup) throws IOException {
// To ensure atomicity of updates to index file, write to temporary index
// file first and then rename.
File tmpIndexFile = getTmpIndexFile(currentTaskid, isCleanup);
BufferedOutputStream bos = null;
DataOutputStream dos = null;
try{
bos = new BufferedOutputStream(
SecureIOUtils.createForWrite(tmpIndexFile, 0644));
dos = new DataOutputStream(bos);
//the format of the index file is
//LOG_DIR: <the dir where the task logs are really stored>
//STDOUT: <start-offset in the stdout file> <length>
//STDERR: <start-offset in the stderr file> <length>
//SYSLOG: <start-offset in the syslog file> <length>
dos.writeBytes(LogFileDetail.LOCATION + logLocation + "\n"
+ LogName.STDOUT.toString() + ":");
dos.writeBytes(Long.toString(prevOutLength) + " ");
dos.writeBytes(Long.toString(new File(logLocation, LogName.STDOUT
.toString()).length() - prevOutLength)
+ "\n" + LogName.STDERR + ":");
dos.writeBytes(Long.toString(prevErrLength) + " ");
dos.writeBytes(Long.toString(new File(logLocation, LogName.STDERR
.toString()).length() - prevErrLength)
+ "\n" + LogName.SYSLOG.toString() + ":");
dos.writeBytes(Long.toString(prevLogLength) + " ");
dos.writeBytes(Long.toString(new File(logLocation, LogName.SYSLOG
.toString()).length() - prevLogLength)
+ "\n");
dos.close();
dos = null;
bos.close();
bos = null;
} finally {
IOUtils.cleanup(LOG, dos, bos);
}
File indexFile = getIndexFile(currentTaskid, isCleanup);
Path indexFilePath = new Path(indexFile.getAbsolutePath());
Path tmpIndexFilePath = new Path(tmpIndexFile.getAbsolutePath());
if (localFS == null) {// set localFS once
localFS = FileSystem.getLocal(new Configuration());
}
localFS.rename (tmpIndexFilePath, indexFilePath);
}
private static void resetPrevLengths(String logLocation) {
prevOutLength = new File(logLocation, LogName.STDOUT.toString()).length();
prevErrLength = new File(logLocation, LogName.STDERR.toString()).length();
prevLogLength = new File(logLocation, LogName.SYSLOG.toString()).length();
}
private volatile static TaskAttemptID currentTaskid = null;
@SuppressWarnings("unchecked")
public synchronized static void syncLogs(String logLocation,
TaskAttemptID taskid,
boolean isCleanup)
throws IOException {
System.out.flush();
System.err.flush();
Enumeration<Logger> allLoggers = LogManager.getCurrentLoggers();
while (allLoggers.hasMoreElements()) {
Logger l = allLoggers.nextElement();
Enumeration<Appender> allAppenders = l.getAllAppenders();
while (allAppenders.hasMoreElements()) {
Appender a = allAppenders.nextElement();
if (a instanceof TaskLogAppender) {
((TaskLogAppender)a).flush();
}
}
}
if (currentTaskid != taskid) {
currentTaskid = taskid;
resetPrevLengths(logLocation);
}
writeToIndexFile(logLocation, isCleanup);
}
public static synchronized void syncLogsShutdown(
ScheduledExecutorService scheduler)
{
// flush standard streams
//
System.out.flush();
System.err.flush();
if (scheduler != null) {
scheduler.shutdownNow();
}
// flush & close all appenders
LogManager.shutdown();
}
@SuppressWarnings("unchecked")
public static synchronized void syncLogs() {
// flush standard streams
//
System.out.flush();
System.err.flush();
// flush flushable appenders
//
final Logger rootLogger = Logger.getRootLogger();
flushAppenders(rootLogger);
final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
getCurrentLoggers();
while (allLoggers.hasMoreElements()) {
final Logger l = allLoggers.nextElement();
flushAppenders(l);
}
}
@SuppressWarnings("unchecked")
private static void flushAppenders(Logger l) {
final Enumeration<Appender> allAppenders = l.getAllAppenders();
while (allAppenders.hasMoreElements()) {
final Appender a = allAppenders.nextElement();
if (a instanceof Flushable) {
try {
((Flushable) a).flush();
} catch (IOException ioe) {
System.err.println(a + ": Failed to flush!"
+ StringUtils.stringifyException(ioe));
}
}
}
}
public static ScheduledExecutorService createLogSyncer() {
final ScheduledExecutorService scheduler =
Executors.newSingleThreadScheduledExecutor(
new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
final Thread t = Executors.defaultThreadFactory().newThread(r);
t.setDaemon(true);
t.setName("Thread for syncLogs");
return t;
}
});
ShutdownHookManager.get().addShutdownHook(new Runnable() {
@Override
public void run() {
TaskLog.syncLogsShutdown(scheduler);
}
}, 50);
scheduler.scheduleWithFixedDelay(
new Runnable() {
@Override
public void run() {
TaskLog.syncLogs();
}
}, 0L, 5L, TimeUnit.SECONDS);
return scheduler;
}
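  // Usage sketch (illustrative, not part of the original source): a task JVM
  // keeps a single syncer alive for its lifetime; it flushes appenders every
  // five seconds and is shut down via the shutdown hook registered above.
  //
  //   private static final ScheduledExecutorService LOG_SYNCER =
  //       TaskLog.createLogSyncer();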
/**
* The filter for userlogs.
*/
@InterfaceAudience.Private
public static enum LogName {
/** Log on the stdout of the task. */
STDOUT ("stdout"),
/** Log on the stderr of the task. */
STDERR ("stderr"),
/** Log on the map-reduce system logs of the task. */
SYSLOG ("syslog"),
/** The java profiler information. */
PROFILE ("profile.out"),
/** Log the debug script's stdout */
DEBUGOUT ("debugout");
private String prefix;
private LogName(String prefix) {
this.prefix = prefix;
}
@Override
public String toString() {
return prefix;
}
}
public static class Reader extends InputStream {
private long bytesRemaining;
private FileInputStream file;
/**
* Read a log file from start to end positions. The offsets may be negative,
* in which case they are relative to the end of the file. For example,
* Reader(taskid, kind, 0, -1) is the entire file and
* Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
* @param taskid the id of the task to read the log file for
* @param kind the kind of log to read
* @param start the offset to read from (negative is relative to tail)
* @param end the offset to read upto (negative is relative to tail)
* @param isCleanup whether the attempt is cleanup attempt or not
* @throws IOException
*/
public Reader(TaskAttemptID taskid, LogName kind,
long start, long end, boolean isCleanup) throws IOException {
// find the right log file
LogFileDetail fileDetail = getLogFileDetail(taskid, kind, isCleanup);
// calculate the start and stop
long size = fileDetail.length;
if (start < 0) {
start += size + 1;
}
if (end < 0) {
end += size + 1;
}
start = Math.max(0, Math.min(start, size));
end = Math.max(0, Math.min(end, size));
start += fileDetail.start;
end += fileDetail.start;
bytesRemaining = end - start;
String owner = obtainLogDirOwner(taskid);
file = SecureIOUtils.openForRead(new File(fileDetail.location, kind.toString()),
owner, null);
// skip upto start
long pos = 0;
while (pos < start) {
long result = file.skip(start - pos);
if (result < 0) {
bytesRemaining = 0;
break;
}
pos += result;
}
}
@Override
public int read() throws IOException {
int result = -1;
if (bytesRemaining > 0) {
bytesRemaining -= 1;
result = file.read();
}
return result;
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException {
length = (int) Math.min(length, bytesRemaining);
int bytes = file.read(buffer, offset, length);
if (bytes > 0) {
bytesRemaining -= bytes;
}
return bytes;
}
@Override
public int available() throws IOException {
return (int) Math.min(bytesRemaining, file.available());
}
@Override
public void close() throws IOException {
file.close();
}
}
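  // Illustrative usage sketch (not part of the original source): reading the
  // last 4 KB of a task attempt's stdout with the Reader above; the attempt
  // id string is hypothetical and negative offsets are relative to the end
  // of the file.
  //
  //   TaskAttemptID id =
  //       TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
  //   InputStream in =
  //       new TaskLog.Reader(id, TaskLog.LogName.STDOUT, -4096, -1, false);
  //   try {
  //     byte[] buf = new byte[4096];
  //     for (int n; (n = in.read(buf)) > 0; ) {
  //       System.out.write(buf, 0, n);
  //     }
  //   } finally {
  //     in.close();
  //   }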
private static final String bashCommand = "bash";
private static final String tailCommand = "tail";
/**
* Get the desired maximum length of task's logs.
* @param conf the job to look in
* @return the number of bytes to cap the log files at
*/
public static long getTaskLogLength(JobConf conf) {
return getTaskLogLimitBytes(conf);
}
public static long getTaskLogLimitBytes(Configuration conf) {
return conf.getLong(JobContext.TASK_USERLOG_LIMIT, 0) * 1024;
}
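  // For example (hypothetical value): a user-log limit configured as 512
  // (the property is expressed in kilobytes) makes the methods above return
  // 512 * 1024 = 524288 bytes.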
/**
* Wrap a command in a shell to capture stdout and stderr to files.
* Setup commands such as setting memory limit can be passed which
* will be executed before exec.
* If the tailLength is 0, the entire output will be saved.
* @param setup The setup commands for the execed process.
* @param cmd The command and the arguments that should be run
* @param stdoutFilename The filename that stdout should be saved to
* @param stderrFilename The filename that stderr should be saved to
* @param tailLength The length of the tail to be saved.
* @param useSetsid Should setsid be used in the command or not.
* @return the modified command that should be run
*/
public static List<String> captureOutAndError(List<String> setup,
List<String> cmd,
File stdoutFilename,
File stderrFilename,
long tailLength,
boolean useSetsid
) throws IOException {
List<String> result = new ArrayList<String>(3);
result.add(bashCommand);
result.add("-c");
String mergedCmd = buildCommandLine(setup, cmd, stdoutFilename,
stderrFilename, tailLength,
useSetsid);
result.add(mergedCmd);
return result;
}
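  // Illustrative sketch (not part of the original source) of the list
  // returned above for a simple command; the paths are hypothetical. With
  // tailLength == 0 and setsid unavailable, the result is roughly:
  //
  //   [ "bash", "-c",
  //     " export JVM_PID=`echo $$` ; exec 'java' '-Xmx200m' 'MyTask'
  //      < /dev/null  1>> /logs/stdout 2>> /logs/stderr" ]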
/**
* Construct the command line for running the task JVM
* @param setup The setup commands for the execed process.
* @param cmd The command and the arguments that should be run
* @param stdoutFilename The filename that stdout should be saved to
* @param stderrFilename The filename that stderr should be saved to
* @param tailLength The length of the tail to be saved.
* @return the command line as a String
* @throws IOException
*/
static String buildCommandLine(List<String> setup, List<String> cmd,
File stdoutFilename,
File stderrFilename,
long tailLength,
boolean useSetsid)
throws IOException {
String stdout = FileUtil.makeShellPath(stdoutFilename);
String stderr = FileUtil.makeShellPath(stderrFilename);
StringBuffer mergedCmd = new StringBuffer();
// Export the pid of taskJvm to env variable JVM_PID.
// Currently pid is not used on Windows
if (!Shell.WINDOWS) {
mergedCmd.append(" export JVM_PID=`echo $$` ; ");
}
if (setup != null && setup.size() > 0) {
mergedCmd.append(addCommand(setup, false));
mergedCmd.append(";");
}
if (tailLength > 0) {
mergedCmd.append("(");
} else if(ProcessTree.isSetsidAvailable && useSetsid &&
!Shell.WINDOWS) {
mergedCmd.append("exec setsid ");
} else {
mergedCmd.append("exec ");
}
mergedCmd.append(addCommand(cmd, true));
mergedCmd.append(" < /dev/null ");
if (tailLength > 0) {
mergedCmd.append(" | ");
mergedCmd.append(tailCommand);
mergedCmd.append(" -c ");
mergedCmd.append(tailLength);
mergedCmd.append(" >> ");
mergedCmd.append(stdout);
mergedCmd.append(" ; exit $PIPESTATUS ) 2>&1 | ");
mergedCmd.append(tailCommand);
mergedCmd.append(" -c ");
mergedCmd.append(tailLength);
mergedCmd.append(" >> ");
mergedCmd.append(stderr);
mergedCmd.append(" ; exit $PIPESTATUS");
} else {
mergedCmd.append(" 1>> ");
mergedCmd.append(stdout);
mergedCmd.append(" 2>> ");
mergedCmd.append(stderr);
}
return mergedCmd.toString();
}
  /**
   * Construct the command line for running the debug script
   * @param cmd The command and the arguments that should be run
   * @param debugout The filename that the debug script's stdout and stderr
   *                 should be saved to
   * @return the command line as a String
   * @throws IOException
   */
static String buildDebugScriptCommandLine(List<String> cmd, String debugout)
throws IOException {
StringBuilder mergedCmd = new StringBuilder();
mergedCmd.append("exec ");
boolean isExecutable = true;
for(String s: cmd) {
if (isExecutable) {
// the executable name needs to be expressed as a shell path for the
// shell to find it.
mergedCmd.append(FileUtil.makeShellPath(new File(s)));
isExecutable = false;
} else {
mergedCmd.append(s);
}
mergedCmd.append(" ");
}
mergedCmd.append(" < /dev/null ");
mergedCmd.append(" >");
mergedCmd.append(debugout);
mergedCmd.append(" 2>&1 ");
return mergedCmd.toString();
}
/**
* Add quotes to each of the command strings and
* return as a single string
* @param cmd The command to be quoted
   * @param isExecutable if true, the first element is treated as an
   * executable and expressed as a shell path
   * @return the quoted string.
* @throws IOException
*/
public static String addCommand(List<String> cmd, boolean isExecutable)
throws IOException {
StringBuffer command = new StringBuffer();
for(String s: cmd) {
command.append('\'');
if (isExecutable) {
// the executable name needs to be expressed as a shell path for the
// shell to find it.
command.append(FileUtil.makeShellPath(new File(s)));
isExecutable = false;
} else {
command.append(s);
}
command.append('\'');
command.append(" ");
}
return command.toString();
}
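  // For example (hypothetical input):
  //   addCommand(Arrays.asList("java", "-Xmx200m", "MyTask"), true)
  // yields "'<shell path to java>' '-Xmx200m' 'MyTask' ", with only the
  // first element expressed as a shell path.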
/**
* Method to return the location of user log directory.
*
* @return base log directory
*/
static File getUserLogDir() {
if (!LOG_DIR.exists()) {
boolean b = LOG_DIR.mkdirs();
if (!b) {
LOG.debug("mkdirs failed. Ignoring.");
}
}
return LOG_DIR;
}
/**
* Get the user log directory for the job jobid.
*
* @param jobid
* @return user log directory for the job
*/
public static File getJobDir(JobID jobid) {
return new File(getUserLogDir(), jobid.toString());
}
} // TaskLog
| 22,615 | 33.060241 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.OutputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.SequenceFile.ValueBytes;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator;
import org.apache.hadoop.util.Progressable;
/** This class implements the sort interface using primitive int arrays as
* the data structures (that is why this class is called 'BasicType'SorterBase)
*/
abstract class BasicTypeSorterBase implements BufferSorter {
protected OutputBuffer keyValBuffer; //the buffer used for storing
//key/values
protected int[] startOffsets; //the array used to store the start offsets of
//keys in keyValBuffer
protected int[] keyLengths; //the array used to store the lengths of
//keys
protected int[] valueLengths; //the array used to store the value lengths
protected int[] pointers; //the array of startOffsets's indices. This will
//be sorted at the end to contain a sorted array of
//indices to offsets
protected RawComparator comparator; //the comparator for the map output
protected int count; //the number of key/values
//the overhead of the arrays in memory
  //16 => 4 for keyoffsets, 4 for keylengths, 4 for valueLengths, and
//4 for indices into startOffsets array in the
//pointers array (ignored the partpointers list itself)
static private final int BUFFERED_KEY_VAL_OVERHEAD = 16;
static private final int INITIAL_ARRAY_SIZE = 5;
//we maintain the max lengths of the key/val that we encounter. During
//iteration of the sorted results, we will create a DataOutputBuffer to
//return the keys. The max size of the DataOutputBuffer will be the max
//keylength that we encounter. Expose this value to model memory more
//accurately.
private int maxKeyLength = 0;
private int maxValLength = 0;
//Reference to the Progressable object for sending KeepAlive
protected Progressable reporter;
//Implementation of methods of the SorterBase interface
//
public void configure(JobConf conf) {
comparator = conf.getOutputKeyComparator();
}
public void setProgressable(Progressable reporter) {
this.reporter = reporter;
}
public void addKeyValue(int recordOffset, int keyLength, int valLength) {
//Add the start offset of the key in the startOffsets array and the
//length in the keyLengths array.
if (startOffsets == null || count == startOffsets.length)
grow();
startOffsets[count] = recordOffset;
keyLengths[count] = keyLength;
if (keyLength > maxKeyLength) {
maxKeyLength = keyLength;
}
if (valLength > maxValLength) {
maxValLength = valLength;
}
valueLengths[count] = valLength;
pointers[count] = count;
count++;
}
public void setInputBuffer(OutputBuffer buffer) {
//store a reference to the keyValBuffer that we need to read during sort
this.keyValBuffer = buffer;
}
public long getMemoryUtilized() {
//the total length of the arrays + the max{Key,Val}Length (this will be the
//max size of the DataOutputBuffers during the iteration of the sorted
//keys).
if (startOffsets != null) {
return (startOffsets.length) * BUFFERED_KEY_VAL_OVERHEAD +
maxKeyLength + maxValLength;
}
else { //nothing from this yet
return 0;
}
}
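  // Worked example (hypothetical numbers): with startOffsets.length == 100,
  // maxKeyLength == 64 and maxValLength == 256, the estimate above is
  // 100 * 16 + 64 + 256 = 1920 bytes of bookkeeping plus the buffers needed
  // while iterating the sorted output.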
public abstract RawKeyValueIterator sort();
public void close() {
//set count to 0; also, we don't reuse the arrays since we want to maintain
//consistency in the memory model
count = 0;
startOffsets = null;
keyLengths = null;
valueLengths = null;
pointers = null;
maxKeyLength = 0;
maxValLength = 0;
//release the large key-value buffer so that the GC, if necessary,
//can collect it away
keyValBuffer = null;
}
private void grow() {
int currLength = 0;
if (startOffsets != null) {
currLength = startOffsets.length;
}
int newLength = (int)(currLength * 1.1) + 1;
startOffsets = grow(startOffsets, newLength);
keyLengths = grow(keyLengths, newLength);
valueLengths = grow(valueLengths, newLength);
pointers = grow(pointers, newLength);
}
private int[] grow(int[] old, int newLength) {
int[] result = new int[newLength];
if(old != null) {
System.arraycopy(old, 0, result, 0, old.length);
}
return result;
}
} //BasicTypeSorterBase
//Implementation of methods of the RawKeyValueIterator interface. These
//methods must be invoked to iterate over key/vals after sort is done.
//
class MRSortResultIterator implements RawKeyValueIterator {
private int count;
private int[] pointers;
private int[] startOffsets;
private int[] keyLengths;
private int[] valLengths;
private int currStartOffsetIndex;
private int currIndexInPointers;
private OutputBuffer keyValBuffer;
private DataOutputBuffer key = new DataOutputBuffer();
private InMemUncompressedBytes value = new InMemUncompressedBytes();
public MRSortResultIterator(OutputBuffer keyValBuffer,
int []pointers, int []startOffsets,
int []keyLengths, int []valLengths) {
this.count = pointers.length;
this.pointers = pointers;
this.startOffsets = startOffsets;
this.keyLengths = keyLengths;
this.valLengths = valLengths;
this.keyValBuffer = keyValBuffer;
}
public Progress getProgress() {
return null;
}
public DataOutputBuffer getKey() throws IOException {
int currKeyOffset = startOffsets[currStartOffsetIndex];
int currKeyLength = keyLengths[currStartOffsetIndex];
//reuse the same key
key.reset();
key.write(keyValBuffer.getData(), currKeyOffset, currKeyLength);
return key;
}
public ValueBytes getValue() throws IOException {
    //value[i] is stored starting at byte offset
    //startOffsets[i] + keyLengths[i] and is valLengths[i] bytes long
value.reset(keyValBuffer,
startOffsets[currStartOffsetIndex] + keyLengths[currStartOffsetIndex],
valLengths[currStartOffsetIndex]);
return value;
}
public boolean next() throws IOException {
if (count == currIndexInPointers)
return false;
currStartOffsetIndex = pointers[currIndexInPointers];
currIndexInPointers++;
return true;
}
public void close() {
return;
}
//An implementation of the ValueBytes interface for the in-memory value
//buffers.
private static class InMemUncompressedBytes implements ValueBytes {
private byte[] data;
int start;
int dataSize;
private void reset(OutputBuffer d, int start, int length)
throws IOException {
data = d.getData();
this.start = start;
dataSize = length;
}
public int getSize() {
return dataSize;
}
public void writeUncompressedBytes(DataOutputStream outStream)
throws IOException {
outStream.write(data, start, dataSize);
}
public void writeCompressedBytes(DataOutputStream outStream)
throws IllegalArgumentException, IOException {
throw
new IllegalArgumentException("UncompressedBytes cannot be compressed!");
}
} // InMemUncompressedBytes
} //MRSortResultIterator
| 8,411 | 33.617284 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An {@link RecordReader} for {@link SequenceFile}s.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileRecordReader<K, V> implements RecordReader<K, V> {
private SequenceFile.Reader in;
private long start;
private long end;
private boolean more = true;
protected Configuration conf;
public SequenceFileRecordReader(Configuration conf, FileSplit split)
throws IOException {
Path path = split.getPath();
FileSystem fs = path.getFileSystem(conf);
this.in = new SequenceFile.Reader(fs, path, conf);
this.end = split.getStart() + split.getLength();
this.conf = conf;
if (split.getStart() > in.getPosition())
in.sync(split.getStart()); // sync to start
this.start = in.getPosition();
more = start < end;
}
/** The class of key that must be passed to {@link
   * #next(Object, Object)}. */
public Class getKeyClass() { return in.getKeyClass(); }
/** The class of value that must be passed to {@link
   * #next(Object, Object)}. */
public Class getValueClass() { return in.getValueClass(); }
@SuppressWarnings("unchecked")
public K createKey() {
return (K) ReflectionUtils.newInstance(getKeyClass(), conf);
}
@SuppressWarnings("unchecked")
public V createValue() {
return (V) ReflectionUtils.newInstance(getValueClass(), conf);
}
public synchronized boolean next(K key, V value) throws IOException {
if (!more) return false;
long pos = in.getPosition();
boolean remaining = (in.next(key) != null);
if (remaining) {
getCurrentValue(value);
}
if (pos >= end && in.syncSeen()) {
more = false;
} else {
more = remaining;
}
return more;
}
protected synchronized boolean next(K key)
throws IOException {
if (!more) return false;
long pos = in.getPosition();
boolean remaining = (in.next(key) != null);
if (pos >= end && in.syncSeen()) {
more = false;
} else {
more = remaining;
}
return more;
}
protected synchronized void getCurrentValue(V value)
throws IOException {
in.getCurrentValue(value);
}
/**
* Return the progress within the input split
* @return 0.0 to 1.0 of the input byte range
*/
public float getProgress() throws IOException {
if (end == start) {
return 0.0f;
} else {
return Math.min(1.0f, (in.getPosition() - start) / (float)(end - start));
}
}
public synchronized long getPos() throws IOException {
return in.getPosition();
}
protected synchronized void seek(long pos) throws IOException {
in.seek(pos);
}
public synchronized void close() throws IOException { in.close(); }
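  // Illustrative usage sketch (not part of the original source): the typical
  // old-API read loop over a split; the job/split variables and the
  // key/value types are assumptions made for the example.
  //
  //   SequenceFileRecordReader<LongWritable, Text> reader =
  //       new SequenceFileRecordReader<LongWritable, Text>(job, split);
  //   LongWritable key = reader.createKey();
  //   Text value = reader.createValue();
  //   try {
  //     while (reader.next(key, value)) {
  //       // consume key/value
  //     }
  //   } finally {
  //     reader.close();
  //   }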
}
| 3,927 | 28.096296 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobTracker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
* <code>JobTracker</code> is no longer used since M/R 2.x. This is a dummy
* JobTracker class, which is used to be compatible with M/R 1.x applications.
*/
public class JobTracker {
/**
* <code>State</code> is no longer used since M/R 2.x. It is kept in case
* that M/R 1.x applications may still use it.
*/
public static enum State {
INITIALIZING, RUNNING
}
}
| 1,232 | 33.25 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputSplitWithLocationInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
public interface InputSplitWithLocationInfo extends InputSplit {
/**
* Gets info about which nodes the input split is stored on and how it is
* stored at each location.
*
* @return list of <code>SplitLocationInfo</code>s describing how the split
* data is stored at each location. A null value indicates that all the
* locations have the data stored on disk.
* @throws IOException
*/
SplitLocationInfo[] getLocationInfo() throws IOException;
}
| 1,505 | 36.65 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.params.ClientPNames;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.CoreConnectionPNames;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobEndNotifier {
private static final Log LOG =
LogFactory.getLog(JobEndNotifier.class.getName());
private static JobEndStatusInfo createNotification(JobConf conf,
JobStatus status) {
JobEndStatusInfo notification = null;
String uri = conf.getJobEndNotificationURI();
if (uri != null) {
int retryAttempts = conf.getInt(JobContext.MR_JOB_END_RETRY_ATTEMPTS, 0);
long retryInterval = conf.getInt(JobContext.MR_JOB_END_RETRY_INTERVAL, 30000);
int timeout = conf.getInt(JobContext.MR_JOB_END_NOTIFICATION_TIMEOUT,
JobContext.DEFAULT_MR_JOB_END_NOTIFICATION_TIMEOUT);
if (uri.contains("$jobId")) {
uri = uri.replace("$jobId", status.getJobID().toString());
}
if (uri.contains("$jobStatus")) {
String statusStr =
(status.getRunState() == JobStatus.SUCCEEDED) ? "SUCCEEDED" :
(status.getRunState() == JobStatus.FAILED) ? "FAILED" : "KILLED";
uri = uri.replace("$jobStatus", statusStr);
}
notification = new JobEndStatusInfo(
uri, retryAttempts, retryInterval, timeout);
}
return notification;
}
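  // For example (hypothetical values): if the configured notification URI is
  //   http://host:8080/notify?id=$jobId&status=$jobStatus
  // then for a successful job with id job_200707121733_0003 the substitution
  // above produces a GET to
  //   http://host:8080/notify?id=job_200707121733_0003&status=SUCCEEDED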
private static int httpNotification(String uri, int timeout)
throws IOException, URISyntaxException {
DefaultHttpClient client = new DefaultHttpClient();
client.getParams()
.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, timeout)
.setLongParameter(ClientPNames.CONN_MANAGER_TIMEOUT, (long) timeout);
HttpGet httpGet = new HttpGet(new URI(uri));
httpGet.setHeader("Accept", "*/*");
return client.execute(httpGet).getStatusLine().getStatusCode();
}
// for use by the LocalJobRunner, without using a thread&queue,
// simple synchronous way
public static void localRunnerNotification(JobConf conf, JobStatus status) {
JobEndStatusInfo notification = createNotification(conf, status);
if (notification != null) {
do {
try {
int code = httpNotification(notification.getUri(),
notification.getTimeout());
if (code != 200) {
throw new IOException("Invalid response status code: " + code);
}
else {
break;
}
}
catch (IOException ioex) {
LOG.error("Notification error [" + notification.getUri() + "]", ioex);
}
catch (Exception ex) {
LOG.error("Notification error [" + notification.getUri() + "]", ex);
}
try {
Thread.sleep(notification.getRetryInterval());
}
catch (InterruptedException iex) {
LOG.error("Notification retry error [" + notification + "]", iex);
}
} while (notification.configureForRetry());
}
}
private static class JobEndStatusInfo implements Delayed {
private String uri;
private int retryAttempts;
private long retryInterval;
private long delayTime;
private int timeout;
JobEndStatusInfo(String uri, int retryAttempts, long retryInterval,
int timeout) {
this.uri = uri;
this.retryAttempts = retryAttempts;
this.retryInterval = retryInterval;
this.delayTime = System.currentTimeMillis();
this.timeout = timeout;
}
public String getUri() {
return uri;
}
public int getRetryAttempts() {
return retryAttempts;
}
public long getRetryInterval() {
return retryInterval;
}
public int getTimeout() {
return timeout;
}
public boolean configureForRetry() {
boolean retry = false;
if (getRetryAttempts() > 0) {
retry = true;
delayTime = System.currentTimeMillis() + retryInterval;
}
retryAttempts--;
return retry;
}
public long getDelay(TimeUnit unit) {
long n = this.delayTime - System.currentTimeMillis();
return unit.convert(n, TimeUnit.MILLISECONDS);
}
public int compareTo(Delayed d) {
return (int)(delayTime - ((JobEndStatusInfo)d).delayTime);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof JobEndStatusInfo)) {
return false;
}
if (delayTime == ((JobEndStatusInfo)o).delayTime) {
return true;
}
return false;
}
@Override
public int hashCode() {
return 37 * 17 + (int) (delayTime^(delayTime>>>32));
}
@Override
public String toString() {
return "URL: " + uri + " remaining retries: " + retryAttempts +
" interval: " + retryInterval;
}
}
}
| 6,058 | 31.40107 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/StatisticsCollector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import org.apache.hadoop.mapred.StatisticsCollector.Stat.TimeStat;
/**
* Collects the statistics in time windows.
*/
class StatisticsCollector {
private static final int DEFAULT_PERIOD = 5;
static final TimeWindow
SINCE_START = new TimeWindow("Since Start", -1, -1);
static final TimeWindow
LAST_WEEK = new TimeWindow("Last Week", 7 * 24 * 60 * 60, 60 * 60);
static final TimeWindow
LAST_DAY = new TimeWindow("Last Day", 24 * 60 * 60, 60 * 60);
static final TimeWindow
LAST_HOUR = new TimeWindow("Last Hour", 60 * 60, 60);
static final TimeWindow
LAST_MINUTE = new TimeWindow("Last Minute", 60, 10);
static final TimeWindow[] DEFAULT_COLLECT_WINDOWS = {
StatisticsCollector.SINCE_START,
StatisticsCollector.LAST_DAY,
StatisticsCollector.LAST_HOUR
};
private final int period;
private boolean started;
private final Map<TimeWindow, StatUpdater> updaters =
new LinkedHashMap<TimeWindow, StatUpdater>();
private final Map<String, Stat> statistics = new HashMap<String, Stat>();
StatisticsCollector() {
this(DEFAULT_PERIOD);
}
StatisticsCollector(int period) {
this.period = period;
}
synchronized void start() {
if (started) {
return;
}
Timer timer = new Timer("Timer thread for monitoring ", true);
TimerTask task = new TimerTask() {
public void run() {
update();
}
};
long millis = period * 1000;
timer.scheduleAtFixedRate(task, millis, millis);
started = true;
}
protected synchronized void update() {
for (StatUpdater c : updaters.values()) {
c.update();
}
}
Map<TimeWindow, StatUpdater> getUpdaters() {
return Collections.unmodifiableMap(updaters);
}
Map<String, Stat> getStatistics() {
return Collections.unmodifiableMap(statistics);
}
synchronized Stat createStat(String name) {
return createStat(name, DEFAULT_COLLECT_WINDOWS);
}
synchronized Stat createStat(String name, TimeWindow[] windows) {
if (statistics.get(name) != null) {
throw new RuntimeException("Stat with name "+ name +
" is already defined");
}
Map<TimeWindow, TimeStat> timeStats =
new LinkedHashMap<TimeWindow, TimeStat>();
for (TimeWindow window : windows) {
StatUpdater collector = updaters.get(window);
if (collector == null) {
if(SINCE_START.equals(window)) {
collector = new StatUpdater();
} else {
collector = new TimeWindowStatUpdater(window, period);
}
updaters.put(window, collector);
}
TimeStat timeStat = new TimeStat();
collector.addTimeStat(name, timeStat);
timeStats.put(window, timeStat);
}
Stat stat = new Stat(name, timeStats);
statistics.put(name, stat);
return stat;
}
synchronized Stat removeStat(String name) {
Stat stat = statistics.remove(name);
if (stat != null) {
for (StatUpdater collector : updaters.values()) {
collector.removeTimeStat(name);
}
}
return stat;
}
static class TimeWindow {
final String name;
final int windowSize;
final int updateGranularity;
TimeWindow(String name, int windowSize, int updateGranularity) {
if (updateGranularity > windowSize) {
throw new RuntimeException(
"Invalid TimeWindow: updateGranularity > windowSize");
}
this.name = name;
this.windowSize = windowSize;
this.updateGranularity = updateGranularity;
}
public int hashCode() {
return name.hashCode() + updateGranularity + windowSize;
}
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final TimeWindow other = (TimeWindow) obj;
if (name == null) {
if (other.name != null)
return false;
} else if (!name.equals(other.name))
return false;
if (updateGranularity != other.updateGranularity)
return false;
if (windowSize != other.windowSize)
return false;
return true;
}
}
static class Stat {
final String name;
private Map<TimeWindow, TimeStat> timeStats;
private Stat(String name, Map<TimeWindow, TimeStat> timeStats) {
this.name = name;
this.timeStats = timeStats;
}
public synchronized void inc(int incr) {
for (TimeStat ts : timeStats.values()) {
ts.inc(incr);
}
}
public synchronized void inc() {
inc(1);
}
public synchronized Map<TimeWindow, TimeStat> getValues() {
return Collections.unmodifiableMap(timeStats);
}
static class TimeStat {
private final LinkedList<Integer> buckets = new LinkedList<Integer>();
private int value;
private int currentValue;
public synchronized int getValue() {
return value;
}
private synchronized void inc(int i) {
currentValue += i;
}
private synchronized void addBucket() {
buckets.addLast(currentValue);
setValueToCurrent();
}
private synchronized void setValueToCurrent() {
value += currentValue;
currentValue = 0;
}
private synchronized void removeBucket() {
int removed = buckets.removeFirst();
value -= removed;
}
}
}
private static class StatUpdater {
protected final Map<String, TimeStat> statToCollect =
new HashMap<String, TimeStat>();
synchronized void addTimeStat(String name, TimeStat s) {
statToCollect.put(name, s);
}
synchronized TimeStat removeTimeStat(String name) {
return statToCollect.remove(name);
}
synchronized void update() {
for (TimeStat stat : statToCollect.values()) {
stat.setValueToCurrent();
}
}
}
/**
* Updates TimeWindow statistics in buckets.
*
*/
private static class TimeWindowStatUpdater extends StatUpdater{
final int collectBuckets;
final int updatesPerBucket;
private int updates;
private int buckets;
TimeWindowStatUpdater(TimeWindow w, int updatePeriod) {
if (updatePeriod > w.updateGranularity) {
throw new RuntimeException(
"Invalid conf: updatePeriod > updateGranularity");
}
collectBuckets = w.windowSize / w.updateGranularity;
updatesPerBucket = w.updateGranularity / updatePeriod;
}
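    // Worked example (hypothetical pairing): for LAST_HOUR (windowSize 3600s,
    // updateGranularity 60s) with the default 5s update period,
    // collectBuckets = 3600 / 60 = 60 and updatesPerBucket = 60 / 5 = 12, so
    // a bucket is rolled every 12 updates and at most 60 buckets are kept.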
synchronized void update() {
updates++;
if (updates == updatesPerBucket) {
for(TimeStat stat : statToCollect.values()) {
stat.addBucket();
}
updates = 0;
buckets++;
if (buckets > collectBuckets) {
for (TimeStat stat : statToCollect.values()) {
stat.removeBucket();
}
buckets--;
}
}
}
}
}
| 7,944 | 25.932203 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
/**
* This class is similar to SequenceFileInputFormat,
* except it generates SequenceFileAsTextRecordReader
* which converts the input keys and values to their
* String forms by calling toString() method.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsTextInputFormat
extends SequenceFileInputFormat<Text, Text> {
public SequenceFileAsTextInputFormat() {
super();
}
public RecordReader<Text, Text> getRecordReader(InputSplit split,
JobConf job,
Reporter reporter)
throws IOException {
reporter.setStatus(split.toString());
return new SequenceFileAsTextRecordReader(job, (FileSplit) split);
}
}
| 1,795 | 33.538462 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
/**
* An {@link InputFormat} for plain text files. Files are broken into lines.
* Either linefeed or carriage-return are used to signal end of line. Each line
* is divided into key and value parts by a separator byte. If no such a byte
* exists, the key will be the entire line and value will be empty.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class KeyValueTextInputFormat extends FileInputFormat<Text, Text>
implements JobConfigurable {
private CompressionCodecFactory compressionCodecs = null;
public void configure(JobConf conf) {
compressionCodecs = new CompressionCodecFactory(conf);
}
protected boolean isSplitable(FileSystem fs, Path file) {
final CompressionCodec codec = compressionCodecs.getCodec(file);
if (null == codec) {
return true;
}
return codec instanceof SplittableCompressionCodec;
}
public RecordReader<Text, Text> getRecordReader(InputSplit genericSplit,
JobConf job,
Reporter reporter)
throws IOException {
reporter.setStatus(genericSplit.toString());
return new KeyValueLineRecordReader(job, (FileSplit) genericSplit);
}
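  // Illustrative example (not part of the original source): with the default
  // tab separator, an input line of the form
  //   "key1\tsome value text"
  // is returned as key "key1" and value "some value text"; a line containing
  // no separator byte becomes the key, with an empty value.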
}
| 2,527 | 36.731343 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Mapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Closeable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.CompressionCodec;
/**
* Maps input key/value pairs to a set of intermediate key/value pairs.
*
 * <p>Maps are the individual tasks which transform input records into
 * intermediate records. The transformed intermediate records need not be of
* the same type as the input records. A given input pair may map to zero or
* many output pairs.</p>
*
* <p>The Hadoop Map-Reduce framework spawns one map task for each
* {@link InputSplit} generated by the {@link InputFormat} for the job.
* <code>Mapper</code> implementations can access the {@link JobConf} for the
* job via the {@link JobConfigurable#configure(JobConf)} and initialize
* themselves. Similarly they can use the {@link Closeable#close()} method for
* de-initialization.</p>
*
* <p>The framework then calls
* {@link #map(Object, Object, OutputCollector, Reporter)}
* for each key/value pair in the <code>InputSplit</code> for that task.</p>
*
* <p>All intermediate values associated with a given output key are
* subsequently grouped by the framework, and passed to a {@link Reducer} to
* determine the final output. Users can control the grouping by specifying
* a <code>Comparator</code> via
* {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
*
* <p>The grouped <code>Mapper</code> outputs are partitioned per
* <code>Reducer</code>. Users can control which keys (and hence records) go to
* which <code>Reducer</code> by implementing a custom {@link Partitioner}.
*
* <p>Users can optionally specify a <code>combiner</code>, via
* {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
* intermediate outputs, which helps to cut down the amount of data transferred
* from the <code>Mapper</code> to the <code>Reducer</code>.
*
* <p>The intermediate, grouped outputs are always stored in
* {@link SequenceFile}s. Applications can specify if and how the intermediate
* outputs are to be compressed and which {@link CompressionCodec}s are to be
* used via the <code>JobConf</code>.</p>
*
* <p>If the job has
* <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
* reduces</a> then the output of the <code>Mapper</code> is directly written
* to the {@link FileSystem} without grouping by keys.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class MyMapper<K extends WritableComparable, V extends Writable>
* extends MapReduceBase implements Mapper<K, V, K, V> {
*
* static enum MyCounters { NUM_RECORDS }
*
* private String mapTaskId;
* private String inputFile;
* private int noRecords = 0;
*
* public void configure(JobConf job) {
* mapTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
* inputFile = job.get(JobContext.MAP_INPUT_FILE);
* }
*
* public void map(K key, V val,
* OutputCollector<K, V> output, Reporter reporter)
* throws IOException {
* // Process the <key, value> pair (assume this takes a while)
* // ...
* // ...
*
* // Let the framework know that we are alive, and kicking!
* // reporter.progress();
*
* // Process some more
* // ...
* // ...
*
* // Increment the no. of <key, value> pairs processed
* ++noRecords;
*
* // Increment counters
* reporter.incrCounter(NUM_RECORDS, 1);
*
* // Every 100 records update application-level status
* if ((noRecords%100) == 0) {
* reporter.setStatus(mapTaskId + " processed " + noRecords +
* " from input-file: " + inputFile);
* }
*
* // Output the result
* output.collect(key, val);
* }
* }
* </pre></blockquote>
*
* <p>Applications may write a custom {@link MapRunnable} to exert greater
* control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
*
* @see JobConf
* @see InputFormat
* @see Partitioner
* @see Reducer
* @see MapReduceBase
* @see MapRunnable
* @see SequenceFile
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Mapper<K1, V1, K2, V2> extends JobConfigurable, Closeable {
/**
* Maps a single input key/value pair into an intermediate key/value pair.
*
* <p>Output pairs need not be of the same types as input pairs. A given
* input pair may map to zero or many output pairs. Output pairs are
* collected with calls to
* {@link OutputCollector#collect(Object,Object)}.</p>
*
* <p>Applications can use the {@link Reporter} provided to report progress
* or just indicate that they are alive. In scenarios where the application
* takes significant amount of time to process individual key/value
* pairs, this is crucial since the framework might assume that the task has
* timed-out and kill that task. The other way of avoiding this is to set
* <a href="{@docRoot}/../mapred-default.html#mapreduce.task.timeout">
* mapreduce.task.timeout</a> to a high-enough value (or even zero for no
* time-outs).</p>
*
* @param key the input key.
* @param value the input value.
* @param output collects mapped keys and values.
* @param reporter facility to report progress.
*/
void map(K1 key, V1 value, OutputCollector<K2, V2> output, Reporter reporter)
throws IOException;
}
| 6,648 | 40.04321 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reporter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.util.Progressable;
/**
* A facility for Map-Reduce applications to report progress and update
* counters, status information etc.
*
* <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
* provided to report progress or just indicate that they are alive. In
* scenarios where the application takes significant amount of time to
* process individual key/value pairs, this is crucial since the framework
* might assume that the task has timed-out and kill that task.
*
* <p>Applications can also update {@link Counters} via the provided
* <code>Reporter</code> .</p>
*
* @see Progressable
* @see Counters
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Reporter extends Progressable {
/**
* A constant of Reporter type that does nothing.
*/
public static final Reporter NULL = new Reporter() {
public void setStatus(String s) {
}
public void progress() {
}
public Counter getCounter(Enum<?> name) {
return null;
}
public Counter getCounter(String group, String name) {
return null;
}
public void incrCounter(Enum<?> key, long amount) {
}
public void incrCounter(String group, String counter, long amount) {
}
public InputSplit getInputSplit() throws UnsupportedOperationException {
throw new UnsupportedOperationException("NULL reporter has no input");
}
@Override
public float getProgress() {
return 0;
}
};
/**
* Set the status description for the task.
*
* @param status brief description of the current status.
*/
public abstract void setStatus(String status);
/**
* Get the {@link Counter} of the given group with the given name.
*
* @param name counter name
* @return the <code>Counter</code> of the given group/name.
*/
public abstract Counter getCounter(Enum<?> name);
/**
* Get the {@link Counter} of the given group with the given name.
*
* @param group counter group
* @param name counter name
* @return the <code>Counter</code> of the given group/name.
*/
public abstract Counter getCounter(String group, String name);
/**
* Increments the counter identified by the key, which can be of
* any {@link Enum} type, by the specified amount.
*
   * @param key key to identify the counter to be incremented. The key can
   *            be any <code>Enum</code>.
* @param amount A non-negative amount by which the counter is to
* be incremented.
*/
public abstract void incrCounter(Enum<?> key, long amount);
/**
* Increments the counter identified by the group and counter name
* by the specified amount.
*
* @param group name to identify the group of the counter to be incremented.
* @param counter name to identify the counter within the group.
* @param amount A non-negative amount by which the counter is to
* be incremented.
*/
public abstract void incrCounter(String group, String counter, long amount);
/**
* Get the {@link InputSplit} object for a map.
*
* @return the <code>InputSplit</code> that the map is reading from.
* @throws UnsupportedOperationException if called outside a mapper
*/
public abstract InputSplit getInputSplit()
throws UnsupportedOperationException;
/**
* Get the progress of the task. Progress is represented as a number between
* 0 and 1 (inclusive).
*/
public float getProgress();
}
| 4,578 | 33.171642 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MultiFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
/**
* An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
* in {@link #getSplits(JobConf, int)} method. Splits are constructed from
* the files under the input paths. Each split returned contains <i>nearly</i>
* equal content length. <br>
* Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
* to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
* @see MultiFileSplit
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class MultiFileInputFormat<K, V>
extends FileInputFormat<K, V> {
@Override
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
Path[] paths = FileUtil.stat2Paths(listStatus(job));
List<MultiFileSplit> splits = new ArrayList<MultiFileSplit>(Math.min(numSplits, paths.length));
if (paths.length != 0) {
// HADOOP-1818: Manage splits only if there are paths
long[] lengths = new long[paths.length];
long totLength = 0;
for(int i=0; i<paths.length; i++) {
FileSystem fs = paths[i].getFileSystem(job);
lengths[i] = fs.getContentSummary(paths[i]).getLength();
totLength += lengths[i];
}
double avgLengthPerSplit = ((double)totLength) / numSplits;
long cumulativeLength = 0;
int startIndex = 0;
for(int i=0; i<numSplits; i++) {
int splitSize = findSize(i, avgLengthPerSplit, cumulativeLength
, startIndex, lengths);
if (splitSize != 0) {
// HADOOP-1818: Manage split only if split size is not equals to 0
Path[] splitPaths = new Path[splitSize];
long[] splitLengths = new long[splitSize];
System.arraycopy(paths, startIndex, splitPaths , 0, splitSize);
System.arraycopy(lengths, startIndex, splitLengths , 0, splitSize);
splits.add(new MultiFileSplit(job, splitPaths, splitLengths));
startIndex += splitSize;
for(long l: splitLengths) {
cumulativeLength += l;
}
}
}
}
return splits.toArray(new MultiFileSplit[splits.size()]);
}
private int findSize(int splitIndex, double avgLengthPerSplit
, long cumulativeLength , int startIndex, long[] lengths) {
if(splitIndex == lengths.length - 1)
return lengths.length - startIndex;
long goalLength = (long)((splitIndex + 1) * avgLengthPerSplit);
long partialLength = 0;
    // accumulate until just above the goal length
for(int i = startIndex; i < lengths.length; i++) {
partialLength += lengths[i];
if(partialLength + cumulativeLength >= goalLength) {
return i - startIndex + 1;
}
}
return lengths.length - startIndex;
}
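  // Worked example (hypothetical numbers): for file lengths {10, 20, 30, 40}
  // and numSplits == 2, totLength == 100 and avgLengthPerSplit == 50, so the
  // first split takes {10, 20, 30} (partial length 60 >= goal 50) and the
  // last split takes the remaining {40}.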
@Override
public abstract RecordReader<K, V> getRecordReader(InputSplit split,
JobConf job, Reporter reporter)
throws IOException;
}
| 4,054 | 36.546296 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SpillRecord.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.apache.hadoop.mapred.MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.LongBuffer;
import java.util.zip.CheckedInputStream;
import java.util.zip.CheckedOutputStream;
import java.util.zip.Checksum;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.util.PureJavaCrc32;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class SpillRecord {
/** Backing store */
private final ByteBuffer buf;
/** View of backing storage as longs */
private final LongBuffer entries;
public SpillRecord(int numPartitions) {
buf = ByteBuffer.allocate(
numPartitions * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH);
entries = buf.asLongBuffer();
}
public SpillRecord(Path indexFileName, JobConf job) throws IOException {
this(indexFileName, job, null);
}
public SpillRecord(Path indexFileName, JobConf job, String expectedIndexOwner)
throws IOException {
this(indexFileName, job, new PureJavaCrc32(), expectedIndexOwner);
}
public SpillRecord(Path indexFileName, JobConf job, Checksum crc,
String expectedIndexOwner)
throws IOException {
final FileSystem rfs = FileSystem.getLocal(job).getRaw();
final FSDataInputStream in =
SecureIOUtils.openFSDataInputStream(new File(indexFileName.toUri()
.getRawPath()), expectedIndexOwner, null);
try {
final long length = rfs.getFileStatus(indexFileName).getLen();
final int partitions = (int) length / MAP_OUTPUT_INDEX_RECORD_LENGTH;
final int size = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
buf = ByteBuffer.allocate(size);
if (crc != null) {
crc.reset();
CheckedInputStream chk = new CheckedInputStream(in, crc);
IOUtils.readFully(chk, buf.array(), 0, size);
if (chk.getChecksum().getValue() != in.readLong()) {
throw new ChecksumException("Checksum error reading spill index: " +
indexFileName, -1);
}
} else {
IOUtils.readFully(in, buf.array(), 0, size);
}
entries = buf.asLongBuffer();
} finally {
in.close();
}
}
/**
* Return number of IndexRecord entries in this spill.
*/
public int size() {
return entries.capacity() / (MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8);
}
/**
* Get spill offsets for given partition.
*/
public IndexRecord getIndex(int partition) {
final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
return new IndexRecord(entries.get(pos), entries.get(pos + 1),
entries.get(pos + 2));
}
/**
* Set spill offsets for given partition.
*/
public void putIndex(IndexRecord rec, int partition) {
final int pos = partition * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8;
entries.put(pos, rec.startOffset);
entries.put(pos + 1, rec.rawLength);
entries.put(pos + 2, rec.partLength);
}
/**
* Write this spill record to the location provided.
*/
public void writeToFile(Path loc, JobConf job)
throws IOException {
writeToFile(loc, job, new PureJavaCrc32());
}
public void writeToFile(Path loc, JobConf job, Checksum crc)
throws IOException {
final FileSystem rfs = FileSystem.getLocal(job).getRaw();
CheckedOutputStream chk = null;
final FSDataOutputStream out = rfs.create(loc);
try {
if (crc != null) {
crc.reset();
chk = new CheckedOutputStream(out, crc);
chk.write(buf.array());
out.writeLong(chk.getChecksum().getValue());
} else {
out.write(buf.array());
}
} finally {
if (chk != null) {
chk.close();
} else {
out.close();
}
}
}
}
| 5,074 | 31.954545 | 80 |
java
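The SpillRecord above is easiest to follow as a round trip. A minimal usage sketch, assuming a writable local path and the companion public org.apache.hadoop.mapred.IndexRecord class with its (startOffset, rawLength, partLength) constructor; the partition count and offsets are made up.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.IndexRecord;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SpillRecord;
public class SpillRecordSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf();
    Path index = new Path("/tmp/spill0.out.index");  // hypothetical local path
    // Record offsets for four partitions; the lengths are illustrative only.
    SpillRecord rec = new SpillRecord(4);
    long offset = 0;
    for (int p = 0; p < 4; p++) {
      rec.putIndex(new IndexRecord(offset, 1024, 512), p);
      offset += 512;
    }
    rec.writeToFile(index, job);                     // appends a CRC of the index data
    // Re-read the index; the constructor verifies the checksum.
    SpillRecord readBack = new SpillRecord(index, job);
    IndexRecord first = readBack.getIndex(0);
    System.out.println(first.startOffset + " " + first.rawLength + " " + first.partLength);
  }
}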
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SplitLocationInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
public class SplitLocationInfo {
private boolean inMemory;
private String location;
public SplitLocationInfo(String location, boolean inMemory) {
this.location = location;
this.inMemory = inMemory;
}
  // Reported locations always have the split data on disk; isInMemory()
  // additionally marks replicas that are also cached in memory.
  public boolean isOnDisk() {
    return true;
  }
public boolean isInMemory() {
return inMemory;
}
public String getLocation() {
return location;
}
}
| 1,390 | 28.595745 | 75 |
java
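A small sketch of how a caller might inspect SplitLocationInfo values; the host names are hypothetical.
import org.apache.hadoop.mapred.SplitLocationInfo;
public class SplitLocationSketch {
  public static void main(String[] args) {
    // One replica that is also cached in memory, one that is on disk only.
    SplitLocationInfo[] locations = {
        new SplitLocationInfo("node-a.example.com", true),
        new SplitLocationInfo("node-b.example.com", false)
    };
    for (SplitLocationInfo info : locations) {
      System.out.println(info.getLocation()
          + " inMemory=" + info.isInMemory()
          + " onDisk=" + info.isOnDisk());
    }
  }
}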
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCollector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Collects the <code><key, value></code> pairs output by {@link Mapper}s
* and {@link Reducer}s.
*
* <p><code>OutputCollector</code> is the generalization of the facility
* provided by the Map-Reduce framework to collect data output by either the
* <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
* or the output of the job.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface OutputCollector<K, V> {
/** Adds a key/value pair to the output.
*
* @param key the key to collect.
   * @param value the value to collect.
* @throws IOException
*/
void collect(K key, V value) throws IOException;
}
| 1,679 | 34.744681 | 79 |
java
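A hedged sketch of a classic old-API word-count mapper showing how pairs are emitted through OutputCollector.collect; the class and its tokenizing logic are illustrative, not taken from this repository.
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
// Emits (word, 1) for every token; the framework supplies the OutputCollector.
public class WordCountMapper extends MapReduceBase
    implements Mapper<LongWritable, Text, Text, IntWritable> {
  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();
  @Override
  public void map(LongWritable key, Text value,
      OutputCollector<Text, IntWritable> output, Reporter reporter)
      throws IOException {
    StringTokenizer itr = new StringTokenizer(value.toString());
    while (itr.hasMoreTokens()) {
      word.set(itr.nextToken());
      output.collect(word, ONE);  // hand the pair to the framework
    }
  }
}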
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
@InterfaceAudience.Private
public class JobACLsManager {
static final Log LOG = LogFactory.getLog(JobACLsManager.class);
Configuration conf;
private final AccessControlList adminAcl;
public JobACLsManager(Configuration conf) {
adminAcl = new AccessControlList(conf.get(MRConfig.MR_ADMINS, " "));
this.conf = conf;
}
public boolean areACLsEnabled() {
return conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
}
/**
   * Construct the jobACLs from the configuration so that they can be kept in
   * memory. If authorization is disabled on the JobTracker, nothing is
   * constructed and an empty map is returned.
*
* @return JobACL to AccessControlList map.
*/
public Map<JobACL, AccessControlList> constructJobACLs(Configuration conf) {
Map<JobACL, AccessControlList> acls =
new HashMap<JobACL, AccessControlList>();
// Don't construct anything if authorization is disabled.
if (!areACLsEnabled()) {
return acls;
}
for (JobACL aclName : JobACL.values()) {
String aclConfigName = aclName.getAclName();
String aclConfigured = conf.get(aclConfigName);
if (aclConfigured == null) {
        // If the ACL is not configured at all, grant access to no one, so that
        // only the job owner and cluster administrators can act on the job.
aclConfigured = " ";
}
acls.put(aclName, new AccessControlList(aclConfigured));
}
return acls;
}
/**
   * Checks whether the calling user is an admin for the MapReduce cluster,
   * i.e. a member of mapreduce.cluster.administrators.
   * @return true if the user is an admin
*/
boolean isMRAdmin(UserGroupInformation callerUGI) {
if (adminAcl.isUserAllowed(callerUGI)) {
return true;
}
return false;
}
/**
* If authorization is enabled, checks whether the user (in the callerUGI)
* is authorized to perform the operation specified by 'jobOperation' on
* the job by checking if the user is jobOwner or part of job ACL for the
* specific job operation.
* <ul>
* <li>The owner of the job can do any operation on the job</li>
* <li>For all other users/groups job-acls are checked</li>
* </ul>
   * @param callerUGI the user attempting the operation
   * @param jobOperation the job operation being checked
   * @param jobOwner the user that owns the job
   * @param jobACL the access control list configured for this operation
   * @return true if the operation is allowed, false otherwise
*/
public boolean checkAccess(UserGroupInformation callerUGI,
JobACL jobOperation, String jobOwner, AccessControlList jobACL) {
if (LOG.isDebugEnabled()) {
LOG.debug("checkAccess job acls, jobOwner: " + jobOwner + " jobacl: "
+ jobOperation.toString() + " user: " + callerUGI.getShortUserName());
}
String user = callerUGI.getShortUserName();
if (!areACLsEnabled()) {
return true;
}
    // Cluster admins, the job owner, and users in the job ACL are all allowed
if (isMRAdmin(callerUGI)
|| user.equals(jobOwner)
|| jobACL.isUserAllowed(callerUGI)) {
return true;
}
return false;
}
}
| 4,261 | 32.559055 | 80 |
java
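A minimal sketch of wiring the helpers above together: enable ACLs, build the per-operation map, then check a caller. The ACL string and user names are assumptions for illustration.
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
public class JobAclSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);        // turn ACL checks on
    conf.set(JobACL.VIEW_JOB.getAclName(), "alice,bob");    // hypothetical view ACL (users only)
    JobACLsManager aclsManager = new JobACLsManager(conf);
    Map<JobACL, AccessControlList> jobAcls = aclsManager.constructJobACLs(conf);
    UserGroupInformation caller =
        UserGroupInformation.createRemoteUser("alice");     // hypothetical caller
    boolean allowed = aclsManager.checkAccess(caller, JobACL.VIEW_JOB,
        "job-owner", jobAcls.get(JobACL.VIEW_JOB));
    System.out.println("alice may view the job: " + allowed);
  }
}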
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* <code>IFile</code> is the simple <key-len, value-len, key, value> format
* for the intermediate map-outputs in Map-Reduce.
*
* There is a <code>Writer</code> to write out map-outputs in this format and
* a <code>Reader</code> to read files of this format.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class IFile {
private static final Log LOG = LogFactory.getLog(IFile.class);
public static final int EOF_MARKER = -1; // End of File Marker
/**
* <code>IFile.Writer</code> to write out intermediate map-outputs.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class Writer<K extends Object, V extends Object> {
FSDataOutputStream out;
boolean ownOutputStream = false;
long start = 0;
FSDataOutputStream rawOut;
CompressionOutputStream compressedOut;
Compressor compressor;
boolean compressOutput = false;
long decompressedBytesWritten = 0;
long compressedBytesWritten = 0;
// Count records written to disk
private long numRecordsWritten = 0;
private final Counters.Counter writtenRecordsCounter;
IFileOutputStream checksumOut;
Class<K> keyClass;
Class<V> valueClass;
Serializer<K> keySerializer;
Serializer<V> valueSerializer;
DataOutputBuffer buffer = new DataOutputBuffer();
public Writer(Configuration conf, FSDataOutputStream out,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec, Counters.Counter writesCounter)
throws IOException {
this(conf, out, keyClass, valueClass, codec, writesCounter, false);
}
protected Writer(Counters.Counter writesCounter) {
writtenRecordsCounter = writesCounter;
}
public Writer(Configuration conf, FSDataOutputStream out,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec, Counters.Counter writesCounter,
boolean ownOutputStream)
throws IOException {
this.writtenRecordsCounter = writesCounter;
this.checksumOut = new IFileOutputStream(out);
this.rawOut = out;
this.start = this.rawOut.getPos();
if (codec != null) {
this.compressor = CodecPool.getCompressor(codec);
if (this.compressor != null) {
this.compressor.reset();
this.compressedOut = codec.createOutputStream(checksumOut, compressor);
this.out = new FSDataOutputStream(this.compressedOut, null);
this.compressOutput = true;
} else {
LOG.warn("Could not obtain compressor from CodecPool");
this.out = new FSDataOutputStream(checksumOut,null);
}
} else {
this.out = new FSDataOutputStream(checksumOut,null);
}
this.keyClass = keyClass;
this.valueClass = valueClass;
if (keyClass != null) {
SerializationFactory serializationFactory =
new SerializationFactory(conf);
this.keySerializer = serializationFactory.getSerializer(keyClass);
this.keySerializer.open(buffer);
this.valueSerializer = serializationFactory.getSerializer(valueClass);
this.valueSerializer.open(buffer);
}
this.ownOutputStream = ownOutputStream;
}
public void close() throws IOException {
// When IFile writer is created by BackupStore, we do not have
// Key and Value classes set. So, check before closing the
// serializers
if (keyClass != null) {
keySerializer.close();
valueSerializer.close();
}
// Write EOF_MARKER for key/value length
WritableUtils.writeVInt(out, EOF_MARKER);
WritableUtils.writeVInt(out, EOF_MARKER);
decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
//Flush the stream
out.flush();
if (compressOutput) {
// Flush
compressedOut.finish();
compressedOut.resetState();
}
// Close the underlying stream iff we own it...
if (ownOutputStream) {
out.close();
}
else {
// Write the checksum
checksumOut.finish();
}
compressedBytesWritten = rawOut.getPos() - start;
if (compressOutput) {
// Return back the compressor
CodecPool.returnCompressor(compressor);
compressor = null;
}
out = null;
if(writtenRecordsCounter != null) {
writtenRecordsCounter.increment(numRecordsWritten);
}
}
public void append(K key, V value) throws IOException {
if (key.getClass() != keyClass)
throw new IOException("wrong key class: "+ key.getClass()
+" is not "+ keyClass);
if (value.getClass() != valueClass)
throw new IOException("wrong value class: "+ value.getClass()
+" is not "+ valueClass);
// Append the 'key'
keySerializer.serialize(key);
int keyLength = buffer.getLength();
if (keyLength < 0) {
throw new IOException("Negative key-length not allowed: " + keyLength +
" for " + key);
}
// Append the 'value'
valueSerializer.serialize(value);
int valueLength = buffer.getLength() - keyLength;
if (valueLength < 0) {
throw new IOException("Negative value-length not allowed: " +
valueLength + " for " + value);
}
// Write the record out
WritableUtils.writeVInt(out, keyLength); // key length
WritableUtils.writeVInt(out, valueLength); // value length
out.write(buffer.getData(), 0, buffer.getLength()); // data
// Reset
buffer.reset();
// Update bytes written
decompressedBytesWritten += keyLength + valueLength +
WritableUtils.getVIntSize(keyLength) +
WritableUtils.getVIntSize(valueLength);
++numRecordsWritten;
}
public void append(DataInputBuffer key, DataInputBuffer value)
throws IOException {
int keyLength = key.getLength() - key.getPosition();
if (keyLength < 0) {
throw new IOException("Negative key-length not allowed: " + keyLength +
" for " + key);
}
int valueLength = value.getLength() - value.getPosition();
if (valueLength < 0) {
throw new IOException("Negative value-length not allowed: " +
valueLength + " for " + value);
}
WritableUtils.writeVInt(out, keyLength);
WritableUtils.writeVInt(out, valueLength);
out.write(key.getData(), key.getPosition(), keyLength);
out.write(value.getData(), value.getPosition(), valueLength);
// Update bytes written
decompressedBytesWritten += keyLength + valueLength +
WritableUtils.getVIntSize(keyLength) +
WritableUtils.getVIntSize(valueLength);
++numRecordsWritten;
}
// Required for mark/reset
public DataOutputStream getOutputStream () {
return out;
}
// Required for mark/reset
public void updateCountersForExternalAppend(long length) {
++numRecordsWritten;
decompressedBytesWritten += length;
}
public long getRawLength() {
return decompressedBytesWritten;
}
public long getCompressedLength() {
return compressedBytesWritten;
}
}
/**
* <code>IFile.Reader</code> to read intermediate map-outputs.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class Reader<K extends Object, V extends Object> {
private static final int DEFAULT_BUFFER_SIZE = 128*1024;
private static final int MAX_VINT_SIZE = 9;
// Count records read from disk
private long numRecordsRead = 0;
private final Counters.Counter readRecordsCounter;
final InputStream in; // Possibly decompressed stream that we read
Decompressor decompressor;
public long bytesRead = 0;
protected final long fileLength;
protected boolean eof = false;
final IFileInputStream checksumIn;
protected byte[] buffer = null;
protected int bufferSize = DEFAULT_BUFFER_SIZE;
protected DataInputStream dataIn;
protected int recNo = 1;
protected int currentKeyLength;
protected int currentValueLength;
byte keyBytes[] = new byte[0];
/**
* Construct an IFile Reader.
*
     * @param conf job configuration
* @param fs FileSystem
* @param file Path of the file to be opened. This file should have
* checksum bytes for the data at the end of the file.
* @param codec codec
* @param readsCounter Counter for records read from disk
* @throws IOException
*/
public Reader(Configuration conf, FileSystem fs, Path file,
CompressionCodec codec,
Counters.Counter readsCounter) throws IOException {
this(conf, fs.open(file),
fs.getFileStatus(file).getLen(),
codec, readsCounter);
}
/**
* Construct an IFile Reader.
*
     * @param conf job configuration
* @param in The input stream
* @param length Length of the data in the stream, including the checksum
* bytes.
* @param codec codec
* @param readsCounter Counter for records read from disk
* @throws IOException
*/
public Reader(Configuration conf, FSDataInputStream in, long length,
CompressionCodec codec,
Counters.Counter readsCounter) throws IOException {
readRecordsCounter = readsCounter;
checksumIn = new IFileInputStream(in,length, conf);
if (codec != null) {
decompressor = CodecPool.getDecompressor(codec);
if (decompressor != null) {
this.in = codec.createInputStream(checksumIn, decompressor);
} else {
LOG.warn("Could not obtain decompressor from CodecPool");
this.in = checksumIn;
}
} else {
this.in = checksumIn;
}
this.dataIn = new DataInputStream(this.in);
this.fileLength = length;
if (conf != null) {
bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
}
}
public long getLength() {
return fileLength - checksumIn.getSize();
}
public long getPosition() throws IOException {
return checksumIn.getPosition();
}
/**
     * Read up to len bytes into buf starting at offset off.
*
* @param buf buffer
* @param off offset
* @param len length of buffer
* @return the no. of bytes read
* @throws IOException
*/
private int readData(byte[] buf, int off, int len) throws IOException {
int bytesRead = 0;
while (bytesRead < len) {
int n = IOUtils.wrappedReadForCompressedData(in, buf, off + bytesRead,
len - bytesRead);
if (n < 0) {
return bytesRead;
}
bytesRead += n;
}
return len;
}
protected boolean positionToNextRecord(DataInput dIn) throws IOException {
// Sanity check
if (eof) {
throw new EOFException("Completed reading " + bytesRead);
}
// Read key and value lengths
currentKeyLength = WritableUtils.readVInt(dIn);
currentValueLength = WritableUtils.readVInt(dIn);
bytesRead += WritableUtils.getVIntSize(currentKeyLength) +
WritableUtils.getVIntSize(currentValueLength);
// Check for EOF
if (currentKeyLength == EOF_MARKER && currentValueLength == EOF_MARKER) {
eof = true;
return false;
}
// Sanity check
if (currentKeyLength < 0) {
throw new IOException("Rec# " + recNo + ": Negative key-length: " +
currentKeyLength);
}
if (currentValueLength < 0) {
throw new IOException("Rec# " + recNo + ": Negative value-length: " +
currentValueLength);
}
return true;
}
public boolean nextRawKey(DataInputBuffer key) throws IOException {
if (!positionToNextRecord(dataIn)) {
return false;
}
if (keyBytes.length < currentKeyLength) {
keyBytes = new byte[currentKeyLength << 1];
}
int i = readData(keyBytes, 0, currentKeyLength);
if (i != currentKeyLength) {
throw new IOException ("Asked for " + currentKeyLength + " Got: " + i);
}
key.reset(keyBytes, currentKeyLength);
bytesRead += currentKeyLength;
return true;
}
public void nextRawValue(DataInputBuffer value) throws IOException {
final byte[] valBytes = (value.getData().length < currentValueLength)
? new byte[currentValueLength << 1]
: value.getData();
int i = readData(valBytes, 0, currentValueLength);
if (i != currentValueLength) {
throw new IOException ("Asked for " + currentValueLength + " Got: " + i);
}
value.reset(valBytes, currentValueLength);
// Record the bytes read
bytesRead += currentValueLength;
++recNo;
++numRecordsRead;
}
public void close() throws IOException {
// Close the underlying stream
in.close();
// Release the buffer
dataIn = null;
buffer = null;
if(readRecordsCounter != null) {
readRecordsCounter.increment(numRecordsRead);
}
// Return the decompressor
if (decompressor != null) {
decompressor.reset();
CodecPool.returnDecompressor(decompressor);
decompressor = null;
}
}
public void reset(int offset) {
return;
}
public void disableChecksumValidation() {
checksumIn.disableChecksumValidation();
}
}
}
| 15,987 | 32.239085 | 81 |
java
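A hedged round-trip sketch of the Writer/Reader pair above: two Text records are written to a local file without compression and read back; the path is made up and null is passed for the optional codec and counters.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.IFile;
public class IFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/ifile-sketch.out");  // hypothetical location
    // Write two <Text, Text> records; null codec and null counter keep it simple.
    FSDataOutputStream out = fs.create(file);
    IFile.Writer<Text, Text> writer =
        new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class, null, null);
    writer.append(new Text("k1"), new Text("v1"));
    writer.append(new Text("k2"), new Text("v2"));
    writer.close();                                 // writes the EOF marker and checksum
    out.close();
    // Read the records back; nextRawKey/nextRawValue fill the raw buffers,
    // which Text can then deserialize.
    IFile.Reader<Text, Text> reader =
        new IFile.Reader<Text, Text>(conf, fs, file, null, null);
    DataInputBuffer keyBuf = new DataInputBuffer();
    DataInputBuffer valBuf = new DataInputBuffer();
    Text k = new Text();
    Text v = new Text();
    while (reader.nextRawKey(keyBuf)) {
      reader.nextRawValue(valBuf);
      k.readFields(keyBuf);
      v.readFields(valBuf);
      System.out.println(k + " -> " + v);
    }
    reader.close();
  }
}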
|