| repo (string, 1–191 chars, nullable) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.QueueState;
/**
* Class that contains information about the job queues maintained by the
* Hadoop Map/Reduce framework.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobQueueInfo extends QueueInfo {
/**
* Default constructor for Job Queue Info.
*
*/
public JobQueueInfo() {
super();
}
/**
* Construct a new JobQueueInfo object using the queue name and the
* scheduling information passed.
*
* @param queueName Name of the job queue
* @param schedulingInfo Scheduling Information associated with the job
* queue
*/
public JobQueueInfo(String queueName, String schedulingInfo) {
super(queueName, schedulingInfo);
}
JobQueueInfo(QueueInfo queue) {
this(queue.getQueueName(), queue.getSchedulingInfo());
setQueueState(queue.getState().getStateName());
setQueueChildren(queue.getQueueChildren());
setProperties(queue.getProperties());
setJobStatuses(queue.getJobStatuses());
}
/**
* Set the queue name of the JobQueueInfo
*
* @param queueName Name of the job queue.
*/
@InterfaceAudience.Private
public void setQueueName(String queueName) {
super.setQueueName(queueName);
}
/**
* Set the scheduling information associated with a particular job queue.
*
* @param schedulingInfo the scheduling information for the queue
*/
@InterfaceAudience.Private
public void setSchedulingInfo(String schedulingInfo) {
super.setSchedulingInfo(schedulingInfo);
}
/**
* Set the state of the queue
* @param state state of the queue.
*/
@InterfaceAudience.Private
public void setQueueState(String state) {
super.setState(QueueState.getState(state));
}
/**
* Use getState() instead
*/
@Deprecated
public String getQueueState() {
return super.getState().toString();
}
@InterfaceAudience.Private
public void setChildren(List<JobQueueInfo> children) {
List<QueueInfo> list = new ArrayList<QueueInfo>();
for (JobQueueInfo q : children) {
list.add(q);
}
super.setQueueChildren(list);
}
public List<JobQueueInfo> getChildren() {
List<JobQueueInfo> list = new ArrayList<JobQueueInfo>();
for (QueueInfo q : super.getQueueChildren()) {
list.add((JobQueueInfo)q);
}
return list;
}
@InterfaceAudience.Private
public void setProperties(Properties props) {
super.setProperties(props);
}
/**
* Add a child {@link JobQueueInfo} to this {@link JobQueueInfo}. Modify the
* fully-qualified name of the child {@link JobQueueInfo} to reflect the
* hierarchy.
*
* Only for testing.
*
* @param child
*/
void addChild(JobQueueInfo child) {
List<JobQueueInfo> children = getChildren();
children.add(child);
setChildren(children);
}
/**
* Remove the child from this {@link JobQueueInfo}. This also resets the
* queue-name of the child from a fully-qualified name to a simple queue name.
*
* Only for testing.
*
* @param child
*/
void removeChild(JobQueueInfo child) {
List<JobQueueInfo> children = getChildren();
children.remove(child);
setChildren(children);
}
@InterfaceAudience.Private
public void setJobStatuses(org.apache.hadoop.mapreduce.JobStatus[] stats) {
super.setJobStatuses(stats);
}
}
| 4,418 | 26.792453 | 80 |
java
|
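A minimal sketch, not part of the dataset row above: building and walking a small queue hierarchy with the public JobQueueInfo API shown in this file. The class name, queue names, and scheduling strings are made up for illustration.

import java.util.Arrays;
import org.apache.hadoop.mapred.JobQueueInfo;

public class QueueInfoSketch {
  public static void main(String[] args) {
    // Parent and child queues; names and scheduling strings are illustrative only.
    JobQueueInfo root = new JobQueueInfo("root", "capacity: 100%");
    JobQueueInfo analytics = new JobQueueInfo("root.analytics", "capacity: 40%");
    // setChildren copies the JobQueueInfo list into the underlying QueueInfo children.
    root.setChildren(Arrays.asList(analytics));
    for (JobQueueInfo q : root.getChildren()) {
      System.out.println(q.getQueueName() + " -> " + q.getSchedulingInfo());
    }
  }
}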
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.Progressable;
/**
* <code>OutputFormat</code> describes the output-specification for a
* Map-Reduce job.
*
* <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
* job to:</p>
* <ol>
*   <li>
*   Validate the output-specification of the job. For example, check that the
*   output directory doesn't already exist.
*   </li>
*   <li>
*   Provide the {@link RecordWriter} implementation to be used to write out
*   the output files of the job. Output files are stored in a
*   {@link FileSystem}.
*   </li>
* </ol>
*
* @see RecordWriter
* @see JobConf
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface OutputFormat<K, V> {
/**
* Get the {@link RecordWriter} for the given job.
*
* @param ignored
* @param job configuration for the job whose output is being written.
* @param name the unique name for this part of the output.
* @param progress mechanism for reporting progress while writing to file.
* @return a {@link RecordWriter} to write the output for the job.
* @throws IOException
*/
RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress)
throws IOException;
/**
* Check for validity of the output-specification for the job.
*
* <p>This is to validate the output specification for the job when it
* is submitted. Typically checks that it does not already exist,
* throwing an exception when it already exists, so that output is not
* overwritten.</p>
*
* @param ignored
* @param job job configuration.
* @throws IOException when output should not be attempted
*/
void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException;
}
| 2,834 | 34 | 77 |
java
|
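As a hedged illustration (not from the dataset), here is one way the old-API OutputFormat contract above could be implemented. The hypothetical TabSeparatedOutputFormat writes key/value pairs as tab-separated lines under the job's output directory, and checkOutputSpecs rejects an output directory that already exists so results are not overwritten.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;

public class TabSeparatedOutputFormat<K, V> implements OutputFormat<K, V> {

  @Override
  public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
      String name, Progressable progress) throws IOException {
    Path dir = FileOutputFormat.getOutputPath(job);
    if (dir == null) {
      throw new IOException("Output directory is not set");
    }
    Path file = new Path(dir, name);
    FileSystem fs = file.getFileSystem(job);
    final FSDataOutputStream out = fs.create(file, progress);
    return new RecordWriter<K, V>() {
      @Override
      public void write(K key, V value) throws IOException {
        // One "key<TAB>value" line per record.
        out.writeBytes(key + "\t" + value + "\n");
      }
      @Override
      public void close(Reporter reporter) throws IOException {
        out.close();
      }
    };
  }

  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf job)
      throws IOException {
    // Fail early if the output directory already exists.
    Path dir = FileOutputFormat.getOutputPath(job);
    if (dir != null && dir.getFileSystem(job).exists(dir)) {
      throw new IOException("Output directory " + dir + " already exists");
    }
  }
}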
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidInputException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.List;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class wraps a list of problems with the input, so that the user
* can get a list of problems together instead of finding and fixing them one
* by one.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InvalidInputException extends IOException {
private static final long serialVersionUID = 1L;
private List<IOException> problems;
/**
* Create the exception with the given list.
* @param probs the list of problems to report. This list is not copied.
*/
public InvalidInputException(List<IOException> probs) {
problems = probs;
}
/**
* Get the complete list of the problems reported.
* @return the list of problems, which must not be modified
*/
public List<IOException> getProblems() {
return problems;
}
/**
* Get a summary message of the problems found.
* @return the concatenated messages from all of the problems.
*/
public String getMessage() {
StringBuffer result = new StringBuffer();
Iterator<IOException> itr = problems.iterator();
while(itr.hasNext()) {
result.append(itr.next().getMessage());
if (itr.hasNext()) {
result.append("\n");
}
}
return result.toString();
}
}
| 2,270 | 30.985915 | 78 |
java
|
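A minimal sketch, assuming a hypothetical InputChecker helper: this shows how an input format might gather all missing-path problems and surface them at once with InvalidInputException, so the user sees every problem together rather than one at a time.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.InvalidInputException;
import org.apache.hadoop.mapred.JobConf;

public class InputChecker {
  static void validateInputs(JobConf job, Path[] inputs) throws IOException {
    List<IOException> problems = new ArrayList<IOException>();
    for (Path p : inputs) {
      FileSystem fs = p.getFileSystem(job);
      if (!fs.exists(p)) {
        problems.add(new IOException("Input path does not exist: " + p));
      }
    }
    if (!problems.isEmpty()) {
      // getMessage() on the thrown exception concatenates all problem messages.
      throw new InvalidInputException(problems);
    }
  }
}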
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTaskStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
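/**
 * TaskStatus specialization for map tasks. It tracks the map finish time and
 * rejects the shuffle-specific accessors, which only make sense for reduce
 * tasks.
 */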
class MapTaskStatus extends TaskStatus {
private long mapFinishTime = 0;
public MapTaskStatus() {}
public MapTaskStatus(TaskAttemptID taskid, float progress, int numSlots,
State runState, String diagnosticInfo, String stateString,
String taskTracker, Phase phase, Counters counters) {
super(taskid, progress, numSlots, runState, diagnosticInfo, stateString,
taskTracker, phase, counters);
}
@Override
public boolean getIsMap() {
return true;
}
/**
* Sets finishTime.
* @param finishTime finish time of task.
*/
@Override
void setFinishTime(long finishTime) {
super.setFinishTime(finishTime);
// set mapFinishTime if it hasn't been set before
if (getMapFinishTime() == 0) {
setMapFinishTime(finishTime);
}
}
@Override
public long getShuffleFinishTime() {
throw new UnsupportedOperationException("getShuffleFinishTime() not supported for MapTask");
}
@Override
void setShuffleFinishTime(long shuffleFinishTime) {
throw new UnsupportedOperationException("setShuffleFinishTime() not supported for MapTask");
}
@Override
public long getMapFinishTime() {
return mapFinishTime;
}
@Override
void setMapFinishTime(long mapFinishTime) {
this.mapFinishTime = mapFinishTime;
}
@Override
synchronized void statusUpdate(TaskStatus status) {
super.statusUpdate(status);
if (status.getMapFinishTime() != 0) {
this.mapFinishTime = status.getMapFinishTime();
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
mapFinishTime = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeLong(mapFinishTime);
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
throw new UnsupportedOperationException
("addFetchFailedMap() not supported for MapTask");
}
}
| 2,912 | 26.742857 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;
import org.apache.hadoop.mapreduce.task.ReduceContextImpl;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* Base class for tasks.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
abstract public class Task implements Writable, Configurable {
private static final Log LOG =
LogFactory.getLog(Task.class);
public static String MERGED_OUTPUT_PREFIX = ".merged";
public static final long DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS = 10000;
/**
* @deprecated Provided for compatibility. Use {@link TaskCounter} instead.
*/
@Deprecated
public static enum Counter {
MAP_INPUT_RECORDS,
MAP_OUTPUT_RECORDS,
MAP_SKIPPED_RECORDS,
MAP_INPUT_BYTES,
MAP_OUTPUT_BYTES,
MAP_OUTPUT_MATERIALIZED_BYTES,
COMBINE_INPUT_RECORDS,
COMBINE_OUTPUT_RECORDS,
REDUCE_INPUT_GROUPS,
REDUCE_SHUFFLE_BYTES,
REDUCE_INPUT_RECORDS,
REDUCE_OUTPUT_RECORDS,
REDUCE_SKIPPED_GROUPS,
REDUCE_SKIPPED_RECORDS,
SPILLED_RECORDS,
SPLIT_RAW_BYTES,
CPU_MILLISECONDS,
PHYSICAL_MEMORY_BYTES,
VIRTUAL_MEMORY_BYTES,
COMMITTED_HEAP_BYTES
}
/**
* Counters to measure the usage of the different file systems.
* Always returns a String array with two elements: the first is the name of
* the BYTES_READ counter and the second is the name of the BYTES_WRITTEN counter.
*/
protected static String[] getFileSystemCounterNames(String uriScheme) {
String scheme = StringUtils.toUpperCase(uriScheme);
return new String[]{scheme+"_BYTES_READ", scheme+"_BYTES_WRITTEN"};
}
/**
* Name of the FileSystem counters' group
*/
protected static final String FILESYSTEM_COUNTER_GROUP = "FileSystemCounters";
///////////////////////////////////////////////////////////
// Helper methods to construct task-output paths
///////////////////////////////////////////////////////////
/** Construct output file names so that, when an output directory listing is
* sorted lexicographically, positions correspond to output partitions.*/
private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
static {
NUMBER_FORMAT.setMinimumIntegerDigits(5);
NUMBER_FORMAT.setGroupingUsed(false);
}
static synchronized String getOutputName(int partition) {
return "part-" + NUMBER_FORMAT.format(partition);
}
////////////////////////////////////////////
// Fields
////////////////////////////////////////////
private String jobFile; // job configuration file
private String user; // user running the job
private TaskAttemptID taskId; // unique, includes job id
private int partition; // id within job
private byte[] encryptedSpillKey = new byte[] {0}; // Key Used to encrypt
// intermediate spills
TaskStatus taskStatus; // current status of the task
protected JobStatus.State jobRunStateForCleanup;
protected boolean jobCleanup = false;
protected boolean jobSetup = false;
protected boolean taskCleanup = false;
// An opaque data field used to attach extra data to each task. This is used
// by the Hadoop scheduler for Mesos to associate a Mesos task ID with each
// task and recover these IDs on the TaskTracker.
protected BytesWritable extraData = new BytesWritable();
//skip ranges based on failed ranges from previous attempts
private SortedRanges skipRanges = new SortedRanges();
private boolean skipping = false;
private boolean writeSkipRecs = true;
//currently processing record start index
private volatile long currentRecStartIndex;
private Iterator<Long> currentRecIndexIterator =
skipRanges.skipRangeIterator();
private ResourceCalculatorProcessTree pTree;
private long initCpuCumulativeTime = ResourceCalculatorProcessTree.UNAVAILABLE;
protected JobConf conf;
protected MapOutputFile mapOutputFile;
protected LocalDirAllocator lDirAlloc;
private final static int MAX_RETRIES = 10;
protected JobContext jobContext;
protected TaskAttemptContext taskContext;
protected org.apache.hadoop.mapreduce.OutputFormat<?,?> outputFormat;
protected org.apache.hadoop.mapreduce.OutputCommitter committer;
protected final Counters.Counter spilledRecordsCounter;
protected final Counters.Counter failedShuffleCounter;
protected final Counters.Counter mergedMapOutputsCounter;
private int numSlotsRequired;
protected TaskUmbilicalProtocol umbilical;
protected SecretKey tokenSecret;
protected SecretKey shuffleSecret;
protected GcTimeUpdater gcUpdater;
////////////////////////////////////////////
// Constructors
////////////////////////////////////////////
public Task() {
taskStatus = TaskStatus.createTaskStatus(isMapTask());
taskId = new TaskAttemptID();
spilledRecordsCounter =
counters.findCounter(TaskCounter.SPILLED_RECORDS);
failedShuffleCounter =
counters.findCounter(TaskCounter.FAILED_SHUFFLE);
mergedMapOutputsCounter =
counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
gcUpdater = new GcTimeUpdater();
}
public Task(String jobFile, TaskAttemptID taskId, int partition,
int numSlotsRequired) {
this.jobFile = jobFile;
this.taskId = taskId;
this.partition = partition;
this.numSlotsRequired = numSlotsRequired;
this.taskStatus = TaskStatus.createTaskStatus(isMapTask(), this.taskId,
0.0f, numSlotsRequired,
TaskStatus.State.UNASSIGNED,
"", "", "",
isMapTask() ?
TaskStatus.Phase.MAP :
TaskStatus.Phase.SHUFFLE,
counters);
spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
failedShuffleCounter = counters.findCounter(TaskCounter.FAILED_SHUFFLE);
mergedMapOutputsCounter =
counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
gcUpdater = new GcTimeUpdater();
}
@VisibleForTesting
void setTaskDone() {
taskDone.set(true);
}
////////////////////////////////////////////
// Accessors
////////////////////////////////////////////
public void setJobFile(String jobFile) { this.jobFile = jobFile; }
public String getJobFile() { return jobFile; }
public TaskAttemptID getTaskID() { return taskId; }
public int getNumSlotsRequired() {
return numSlotsRequired;
}
Counters getCounters() { return counters; }
/**
* Get the job name for this task.
* @return the job name
*/
public JobID getJobID() {
return taskId.getJobID();
}
/**
* Set the job token secret
* @param tokenSecret the secret
*/
public void setJobTokenSecret(SecretKey tokenSecret) {
this.tokenSecret = tokenSecret;
}
/**
* Get Encrypted spill key
* @return encrypted spill key
*/
public byte[] getEncryptedSpillKey() {
return encryptedSpillKey;
}
/**
* Set Encrypted spill key
* @param encryptedSpillKey key
*/
public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
if (encryptedSpillKey != null) {
this.encryptedSpillKey = encryptedSpillKey;
}
}
/**
* Get the job token secret
* @return the token secret
*/
public SecretKey getJobTokenSecret() {
return this.tokenSecret;
}
/**
* Set the secret key used to authenticate the shuffle
* @param shuffleSecret the secret
*/
public void setShuffleSecret(SecretKey shuffleSecret) {
this.shuffleSecret = shuffleSecret;
}
/**
* Get the secret key used to authenticate the shuffle
* @return the shuffle secret
*/
public SecretKey getShuffleSecret() {
return this.shuffleSecret;
}
/**
* Get the index of this task within the job.
* @return the integer part of the task id
*/
public int getPartition() {
return partition;
}
/**
* Return the current phase of the task.
* Needs to be synchronized as the communication thread sends the phase every second.
* @return the current phase of the task
*/
public synchronized TaskStatus.Phase getPhase(){
return this.taskStatus.getPhase();
}
/**
* Set current phase of the task.
* @param phase task phase
*/
protected synchronized void setPhase(TaskStatus.Phase phase){
this.taskStatus.setPhase(phase);
}
/**
* Get whether to write skip records.
*/
protected boolean toWriteSkipRecs() {
return writeSkipRecs;
}
/**
* Set whether to write skip records.
*/
protected void setWriteSkipRecs(boolean writeSkipRecs) {
this.writeSkipRecs = writeSkipRecs;
}
/**
* Report a fatal error to the parent (task) tracker.
*/
protected void reportFatalError(TaskAttemptID id, Throwable throwable,
String logMsg) {
LOG.fatal(logMsg);
if (ShutdownHookManager.get().isShutdownInProgress()) {
return;
}
Throwable tCause = throwable.getCause();
String cause = tCause == null
? StringUtils.stringifyException(throwable)
: StringUtils.stringifyException(tCause);
try {
umbilical.fatalError(id, cause);
} catch (IOException ioe) {
LOG.fatal("Failed to contact the tasktracker", ioe);
System.exit(-1);
}
}
/**
* Gets a handle to the Statistics instance based on the scheme associated
* with path.
*
* @param path the path.
* @param conf the configuration to extract the scheme from if not part of
* the path.
* @return a Statistics instance, or null if none is found for the scheme.
*/
protected static List<Statistics> getFsStatistics(Path path, Configuration conf) throws IOException {
List<Statistics> matchedStats = new ArrayList<FileSystem.Statistics>();
path = path.getFileSystem(conf).makeQualified(path);
String scheme = path.toUri().getScheme();
for (Statistics stats : FileSystem.getAllStatistics()) {
if (stats.getScheme().equals(scheme)) {
matchedStats.add(stats);
}
}
return matchedStats;
}
/**
* Get skipRanges.
*/
public SortedRanges getSkipRanges() {
return skipRanges;
}
/**
* Set skipRanges.
*/
public void setSkipRanges(SortedRanges skipRanges) {
this.skipRanges = skipRanges;
}
/**
* Is Task in skipping mode.
*/
public boolean isSkipping() {
return skipping;
}
/**
* Sets whether to run Task in skipping mode.
* @param skipping
*/
public void setSkipping(boolean skipping) {
this.skipping = skipping;
}
/**
* Return the current state of the task.
* Needs to be synchronized as the communication thread
* sends the state every second.
* @return task state
*/
synchronized TaskStatus.State getState(){
return this.taskStatus.getRunState();
}
/**
* Set current state of the task.
* @param state
*/
synchronized void setState(TaskStatus.State state){
this.taskStatus.setRunState(state);
}
void setTaskCleanupTask() {
taskCleanup = true;
}
boolean isTaskCleanupTask() {
return taskCleanup;
}
boolean isJobCleanupTask() {
return jobCleanup;
}
boolean isJobAbortTask() {
// the task is an abort task if it's marked for cleanup and the final
// expected state is either failed or killed.
return isJobCleanupTask()
&& (jobRunStateForCleanup == JobStatus.State.KILLED
|| jobRunStateForCleanup == JobStatus.State.FAILED);
}
boolean isJobSetupTask() {
return jobSetup;
}
void setJobSetupTask() {
jobSetup = true;
}
void setJobCleanupTask() {
jobCleanup = true;
}
/**
* Sets the task to do job abort in the cleanup.
* @param status the final runstate of the job.
*/
void setJobCleanupTaskState(JobStatus.State status) {
jobRunStateForCleanup = status;
}
boolean isMapOrReduce() {
return !jobSetup && !jobCleanup && !taskCleanup;
}
/**
* Get the name of the user running the job/task. TaskTracker needs task's
* user name even before its JobConf is localized. So we explicitly serialize
* the user name.
*
* @return user
*/
String getUser() {
return user;
}
void setUser(String user) {
this.user = user;
}
////////////////////////////////////////////
// Writable methods
////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
Text.writeString(out, jobFile);
taskId.write(out);
out.writeInt(partition);
out.writeInt(numSlotsRequired);
taskStatus.write(out);
skipRanges.write(out);
out.writeBoolean(skipping);
out.writeBoolean(jobCleanup);
if (jobCleanup) {
WritableUtils.writeEnum(out, jobRunStateForCleanup);
}
out.writeBoolean(jobSetup);
out.writeBoolean(writeSkipRecs);
out.writeBoolean(taskCleanup);
Text.writeString(out, user);
out.writeInt(encryptedSpillKey.length);
extraData.write(out);
out.write(encryptedSpillKey);
}
public void readFields(DataInput in) throws IOException {
jobFile = StringInterner.weakIntern(Text.readString(in));
taskId = TaskAttemptID.read(in);
partition = in.readInt();
numSlotsRequired = in.readInt();
taskStatus.readFields(in);
skipRanges.readFields(in);
currentRecIndexIterator = skipRanges.skipRangeIterator();
currentRecStartIndex = currentRecIndexIterator.next();
skipping = in.readBoolean();
jobCleanup = in.readBoolean();
if (jobCleanup) {
jobRunStateForCleanup =
WritableUtils.readEnum(in, JobStatus.State.class);
}
jobSetup = in.readBoolean();
writeSkipRecs = in.readBoolean();
taskCleanup = in.readBoolean();
if (taskCleanup) {
setPhase(TaskStatus.Phase.CLEANUP);
}
user = StringInterner.weakIntern(Text.readString(in));
int len = in.readInt();
encryptedSpillKey = new byte[len];
extraData.readFields(in);
in.readFully(encryptedSpillKey);
}
@Override
public String toString() { return taskId.toString(); }
/**
* Localize the given JobConf to be specific for this task.
*/
public void localizeConfiguration(JobConf conf) throws IOException {
conf.set(JobContext.TASK_ID, taskId.getTaskID().toString());
conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
conf.setInt(JobContext.TASK_PARTITION, partition);
conf.set(JobContext.ID, taskId.getJobID().toString());
}
/** Run this task as a part of the named job. This method is executed in the
* child process and is what invokes user-supplied map, reduce, etc. methods.
* @param umbilical for progress reports
*/
public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical)
throws IOException, ClassNotFoundException, InterruptedException;
private transient Progress taskProgress = new Progress();
// Current counters
private transient Counters counters = new Counters();
/* flag to track whether task is done */
private AtomicBoolean taskDone = new AtomicBoolean(false);
public abstract boolean isMapTask();
public Progress getProgress() { return taskProgress; }
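/**
 * Prepare the task for execution in the child JVM: build the job and task
 * attempt contexts, obtain the OutputCommitter (new or old API), point the
 * work output path at the committer's task attempt path when a
 * FileOutputCommitter is in use, run the committer's task setup, and start
 * the resource calculator process tree used for CPU/memory counters.
 */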
public void initialize(JobConf job, JobID id,
Reporter reporter,
boolean useNewApi) throws IOException,
ClassNotFoundException,
InterruptedException {
jobContext = new JobContextImpl(job, id, reporter);
taskContext = new TaskAttemptContextImpl(job, taskId, reporter);
if (getState() == TaskStatus.State.UNASSIGNED) {
setState(TaskStatus.State.RUNNING);
}
if (useNewApi) {
if (LOG.isDebugEnabled()) {
LOG.debug("using new api for output committer");
}
outputFormat =
ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
committer = outputFormat.getOutputCommitter(taskContext);
} else {
committer = conf.getOutputCommitter();
}
Path outputPath = FileOutputFormat.getOutputPath(conf);
if (outputPath != null) {
if ((committer instanceof FileOutputCommitter)) {
FileOutputFormat.setWorkOutputPath(conf,
((FileOutputCommitter)committer).getTaskAttemptPath(taskContext));
} else {
FileOutputFormat.setWorkOutputPath(conf, outputPath);
}
}
committer.setupTask(taskContext);
Class<? extends ResourceCalculatorProcessTree> clazz =
conf.getClass(MRConfig.RESOURCE_CALCULATOR_PROCESS_TREE,
null, ResourceCalculatorProcessTree.class);
pTree = ResourceCalculatorProcessTree
.getResourceCalculatorProcessTree(System.getenv().get("JVM_PID"), clazz, conf);
LOG.info(" Using ResourceCalculatorProcessTree : " + pTree);
if (pTree != null) {
pTree.updateProcessTree();
initCpuCumulativeTime = pTree.getCumulativeCpuTime();
}
}
public static String normalizeStatus(String status, Configuration conf) {
// Check to see if the status string is too long
// and truncate it if needed.
int progressStatusLength = conf.getInt(
MRConfig.PROGRESS_STATUS_LEN_LIMIT_KEY,
MRConfig.PROGRESS_STATUS_LEN_LIMIT_DEFAULT);
if (status.length() > progressStatusLength) {
LOG.warn("Task status: \"" + status + "\" truncated to max limit ("
+ progressStatusLength + " characters)");
status = status.substring(0, progressStatusLength);
}
return status;
}
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class TaskReporter
extends org.apache.hadoop.mapreduce.StatusReporter
implements Runnable, Reporter {
private TaskUmbilicalProtocol umbilical;
private InputSplit split = null;
private Progress taskProgress;
private Thread pingThread = null;
private boolean done = true;
private Object lock = new Object();
/**
* Flag that indicates whether a progress update needs to be sent to the parent.
* If true, it has been set. If false, it has been reset.
* Using AtomicBoolean since we need an atomic read & reset method.
*/
private AtomicBoolean progressFlag = new AtomicBoolean(false);
TaskReporter(Progress taskProgress,
TaskUmbilicalProtocol umbilical) {
this.umbilical = umbilical;
this.taskProgress = taskProgress;
}
// getters and setters for flag
void setProgressFlag() {
progressFlag.set(true);
}
boolean resetProgressFlag() {
return progressFlag.getAndSet(false);
}
public void setStatus(String status) {
taskProgress.setStatus(normalizeStatus(status, conf));
// indicate that progress update needs to be sent
setProgressFlag();
}
public void setProgress(float progress) {
// set current phase progress.
// This method assumes that task has phases.
taskProgress.phase().set(progress);
// indicate that progress update needs to be sent
setProgressFlag();
}
public float getProgress() {
return taskProgress.getProgress();
};
public void progress() {
// indicate that progress update needs to be sent
setProgressFlag();
}
public Counters.Counter getCounter(String group, String name) {
Counters.Counter counter = null;
if (counters != null) {
counter = counters.findCounter(group, name);
}
return counter;
}
public Counters.Counter getCounter(Enum<?> name) {
return counters == null ? null : counters.findCounter(name);
}
public void incrCounter(Enum key, long amount) {
if (counters != null) {
counters.incrCounter(key, amount);
}
setProgressFlag();
}
public void incrCounter(String group, String counter, long amount) {
if (counters != null) {
counters.incrCounter(group, counter, amount);
}
if(skipping && SkipBadRecords.COUNTER_GROUP.equals(group) && (
SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS.equals(counter) ||
SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS.equals(counter))) {
//if application reports the processed records, move the
//currentRecStartIndex to the next.
//currentRecStartIndex is the start index which has not yet been
//finished and is still in task's stomach.
for(int i=0;i<amount;i++) {
currentRecStartIndex = currentRecIndexIterator.next();
}
}
setProgressFlag();
}
public void setInputSplit(InputSplit split) {
this.split = split;
}
public InputSplit getInputSplit() throws UnsupportedOperationException {
if (split == null) {
throw new UnsupportedOperationException("Input only available on map");
} else {
return split;
}
}
/**
* The communication thread handles communication with the parent (Task Tracker).
* It sends progress updates if progress has been made or if the task needs to
* let the parent know that it's alive. It also pings the parent to see if it's alive.
*/
public void run() {
final int MAX_RETRIES = 3;
int remainingRetries = MAX_RETRIES;
// get current flag value and reset it as well
boolean sendProgress = resetProgressFlag();
long taskProgressInterval =
conf.getLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL,
MRJobConfig.DEFAULT_TASK_PROGRESS_REPORT_INTERVAL);
while (!taskDone.get()) {
synchronized (lock) {
done = false;
}
try {
boolean taskFound = true; // whether TT knows about this task
// sleep for a bit
synchronized(lock) {
if (taskDone.get()) {
break;
}
lock.wait(taskProgressInterval);
}
if (taskDone.get()) {
break;
}
if (sendProgress) {
// we need to send progress update
updateCounters();
taskStatus.statusUpdate(taskProgress.get(),
taskProgress.toString(),
counters);
taskFound = umbilical.statusUpdate(taskId, taskStatus);
taskStatus.clearStatus();
}
else {
// send ping
taskFound = umbilical.ping(taskId);
}
// if Task Tracker is not aware of our task ID (probably because it died and
// came back up), kill ourselves
if (!taskFound) {
LOG.warn("Parent died. Exiting "+taskId);
resetDoneFlag();
System.exit(66);
}
sendProgress = resetProgressFlag();
remainingRetries = MAX_RETRIES;
}
catch (Throwable t) {
LOG.info("Communication exception: " + StringUtils.stringifyException(t));
remainingRetries -=1;
if (remainingRetries == 0) {
ReflectionUtils.logThreadInfo(LOG, "Communication exception", 0);
LOG.warn("Last retry, killing "+taskId);
resetDoneFlag();
System.exit(65);
}
}
}
//Notify that we are done with the work
resetDoneFlag();
}
void resetDoneFlag() {
synchronized (lock) {
done = true;
lock.notify();
}
}
public void startCommunicationThread() {
if (pingThread == null) {
pingThread = new Thread(this, "communication thread");
pingThread.setDaemon(true);
pingThread.start();
}
}
public void stopCommunicationThread() throws InterruptedException {
if (pingThread != null) {
// Intent of the lock is to not send an interrupt in the middle of an
// umbilical.ping or umbilical.statusUpdate
synchronized(lock) {
//Interrupt if sleeping. Otherwise wait for the RPC call to return.
lock.notify();
}
synchronized (lock) {
while (!done) {
lock.wait();
}
}
pingThread.interrupt();
pingThread.join();
}
}
}
/**
* Reports the next executing record range to TaskTracker.
*
* @param umbilical
* @param nextRecIndex the record index which would be fed next.
* @throws IOException
*/
protected void reportNextRecordRange(final TaskUmbilicalProtocol umbilical,
long nextRecIndex) throws IOException{
//currentRecStartIndex is the start index which has not yet been finished
//and is still in task's stomach.
long len = nextRecIndex - currentRecStartIndex +1;
SortedRanges.Range range =
new SortedRanges.Range(currentRecStartIndex, len);
taskStatus.setNextRecordRange(range);
if (LOG.isDebugEnabled()) {
LOG.debug("sending reportNextRecordRange " + range);
}
umbilical.reportNextRecordRange(taskId, range);
}
/**
* Create a TaskReporter and start communication thread
*/
TaskReporter startReporter(final TaskUmbilicalProtocol umbilical) {
// start thread that will handle communication with parent
TaskReporter reporter = new TaskReporter(getProgress(), umbilical);
reporter.startCommunicationThread();
return reporter;
}
/**
* Update resource information counters
*/
void updateResourceCounters() {
// Update generic resource counters
updateHeapUsageCounter();
// Updating resources specified in ResourceCalculatorProcessTree
if (pTree == null) {
return;
}
pTree.updateProcessTree();
long cpuTime = pTree.getCumulativeCpuTime();
long pMem = pTree.getRssMemorySize();
long vMem = pTree.getVirtualMemorySize();
// Remove the CPU time consumed previously by JVM reuse
if (cpuTime != ResourceCalculatorProcessTree.UNAVAILABLE &&
initCpuCumulativeTime != ResourceCalculatorProcessTree.UNAVAILABLE) {
cpuTime -= initCpuCumulativeTime;
}
if (cpuTime != ResourceCalculatorProcessTree.UNAVAILABLE) {
counters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(cpuTime);
}
if (pMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
}
if (vMem != ResourceCalculatorProcessTree.UNAVAILABLE) {
counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
}
}
/**
* An updater that tracks the amount of time this task has spent in GC.
*/
class GcTimeUpdater {
private long lastGcMillis = 0;
private List<GarbageCollectorMXBean> gcBeans = null;
public GcTimeUpdater() {
this.gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
getElapsedGc(); // Initialize 'lastGcMillis' with the current time spent.
}
/**
* @return the number of milliseconds that the gc has used for CPU
* since the last time this method was called.
*/
protected long getElapsedGc() {
long thisGcMillis = 0;
for (GarbageCollectorMXBean gcBean : gcBeans) {
thisGcMillis += gcBean.getCollectionTime();
}
long delta = thisGcMillis - lastGcMillis;
this.lastGcMillis = thisGcMillis;
return delta;
}
/**
* Increment the gc-elapsed-time counter.
*/
public void incrementGcCounter() {
if (null == counters) {
return; // nothing to do.
}
org.apache.hadoop.mapred.Counters.Counter gcCounter =
counters.findCounter(TaskCounter.GC_TIME_MILLIS);
if (null != gcCounter) {
gcCounter.increment(getElapsedGc());
}
}
}
/**
* An updater that tracks the last number reported for a given file
* system and only creates the counters when they are needed.
*/
class FileSystemStatisticUpdater {
private List<FileSystem.Statistics> stats;
private Counters.Counter readBytesCounter, writeBytesCounter,
readOpsCounter, largeReadOpsCounter, writeOpsCounter;
private String scheme;
FileSystemStatisticUpdater(List<FileSystem.Statistics> stats, String scheme) {
this.stats = stats;
this.scheme = scheme;
}
void updateCounters() {
if (readBytesCounter == null) {
readBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_READ);
}
if (writeBytesCounter == null) {
writeBytesCounter = counters.findCounter(scheme,
FileSystemCounter.BYTES_WRITTEN);
}
if (readOpsCounter == null) {
readOpsCounter = counters.findCounter(scheme,
FileSystemCounter.READ_OPS);
}
if (largeReadOpsCounter == null) {
largeReadOpsCounter = counters.findCounter(scheme,
FileSystemCounter.LARGE_READ_OPS);
}
if (writeOpsCounter == null) {
writeOpsCounter = counters.findCounter(scheme,
FileSystemCounter.WRITE_OPS);
}
long readBytes = 0;
long writeBytes = 0;
long readOps = 0;
long largeReadOps = 0;
long writeOps = 0;
for (FileSystem.Statistics stat: stats) {
readBytes = readBytes + stat.getBytesRead();
writeBytes = writeBytes + stat.getBytesWritten();
readOps = readOps + stat.getReadOps();
largeReadOps = largeReadOps + stat.getLargeReadOps();
writeOps = writeOps + stat.getWriteOps();
}
readBytesCounter.setValue(readBytes);
writeBytesCounter.setValue(writeBytes);
readOpsCounter.setValue(readOps);
largeReadOpsCounter.setValue(largeReadOps);
writeOpsCounter.setValue(writeOps);
}
}
/**
* A map from URI scheme to the FileSystemStatisticUpdater for that scheme.
*/
private Map<String, FileSystemStatisticUpdater> statisticUpdaters =
new HashMap<String, FileSystemStatisticUpdater>();
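/**
 * Refresh all counter values: group the live FileSystem statistics by URI
 * scheme, create or reuse a FileSystemStatisticUpdater per scheme, and then
 * update the GC-time and resource-usage counters.
 */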
private synchronized void updateCounters() {
Map<String, List<FileSystem.Statistics>> map = new
HashMap<String, List<FileSystem.Statistics>>();
for(Statistics stat: FileSystem.getAllStatistics()) {
String uriScheme = stat.getScheme();
if (map.containsKey(uriScheme)) {
List<FileSystem.Statistics> list = map.get(uriScheme);
list.add(stat);
} else {
List<FileSystem.Statistics> list = new ArrayList<FileSystem.Statistics>();
list.add(stat);
map.put(uriScheme, list);
}
}
for (Map.Entry<String, List<FileSystem.Statistics>> entry: map.entrySet()) {
FileSystemStatisticUpdater updater = statisticUpdaters.get(entry.getKey());
if(updater==null) {//new FileSystem has been found in the cache
updater = new FileSystemStatisticUpdater(entry.getValue(), entry.getKey());
statisticUpdaters.put(entry.getKey(), updater);
}
updater.updateCounters();
}
gcUpdater.incrementGcCounter();
updateResourceCounters();
}
/**
* Updates the {@link TaskCounter#COMMITTED_HEAP_BYTES} counter to reflect the
* current total committed heap space usage of this JVM.
*/
@SuppressWarnings("deprecation")
private void updateHeapUsageCounter() {
long currentHeapUsage = Runtime.getRuntime().totalMemory();
counters.findCounter(TaskCounter.COMMITTED_HEAP_BYTES)
.setValue(currentHeapUsage);
}
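/**
 * Report task completion to the task tracker: update counters, move to
 * COMMIT_PENDING and wait for commit approval when the committer has output
 * to commit, then send the final status update and the done() signal over
 * the umbilical.
 */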
public void done(TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException {
LOG.info("Task:" + taskId + " is done."
+ " And is in the process of committing");
updateCounters();
boolean commitRequired = isCommitRequired();
if (commitRequired) {
int retries = MAX_RETRIES;
setState(TaskStatus.State.COMMIT_PENDING);
// tell the task tracker that the task is commit pending
while (true) {
try {
umbilical.commitPending(taskId, taskStatus);
break;
} catch (InterruptedException ie) {
// ignore
} catch (IOException ie) {
LOG.warn("Failure sending commit pending: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
System.exit(67);
}
}
}
//wait for commit approval and commit
commit(umbilical, reporter, committer);
}
taskDone.set(true);
reporter.stopCommunicationThread();
// Make sure we send at least one set of counter increments. It's
// ok to call updateCounters() in this thread after comm thread stopped.
updateCounters();
sendLastUpdate(umbilical);
//signal the tasktracker that we are done
sendDone(umbilical);
}
/**
* Checks if this task has anything to commit, depending on the
* type of task, as well as on whether the {@link OutputCommitter}
* has anything to commit.
*
* @return true if the task has to commit
* @throws IOException
*/
boolean isCommitRequired() throws IOException {
boolean commitRequired = false;
if (isMapOrReduce()) {
commitRequired = committer.needsTaskCommit(taskContext);
}
return commitRequired;
}
/**
* Send a status update to the task tracker
* @param umbilical
* @throws IOException
*/
public void statusUpdate(TaskUmbilicalProtocol umbilical)
throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
if (!umbilical.statusUpdate(getTaskID(), taskStatus)) {
LOG.warn("Parent died. Exiting "+taskId);
System.exit(66);
}
taskStatus.clearStatus();
return;
} catch (InterruptedException ie) {
Thread.currentThread().interrupt(); // interrupt ourself
} catch (IOException ie) {
LOG.warn("Failure sending status update: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
throw ie;
}
}
}
}
/**
* Sends last status update before sending umbilical.done();
*/
private void sendLastUpdate(TaskUmbilicalProtocol umbilical)
throws IOException {
taskStatus.setOutputSize(calculateOutputSize());
// send a final status report
taskStatus.statusUpdate(taskProgress.get(),
taskProgress.toString(),
counters);
statusUpdate(umbilical);
}
/**
* Calculates the size of output for this task.
*
* @return -1 if it can't be found.
*/
private long calculateOutputSize() throws IOException {
if (!isMapOrReduce()) {
return -1;
}
if (isMapTask() && conf.getNumReduceTasks() > 0) {
try {
Path mapOutput = mapOutputFile.getOutputFile();
FileSystem localFS = FileSystem.getLocal(conf);
return localFS.getFileStatus(mapOutput).getLen();
} catch (IOException e) {
LOG.warn ("Could not find output size " , e);
}
}
return -1;
}
private void sendDone(TaskUmbilicalProtocol umbilical) throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
umbilical.done(getTaskID());
LOG.info("Task '" + taskId + "' done.");
return;
} catch (IOException ie) {
LOG.warn("Failure signalling completion: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
throw ie;
}
}
}
}
private void commit(TaskUmbilicalProtocol umbilical,
TaskReporter reporter,
org.apache.hadoop.mapreduce.OutputCommitter committer
) throws IOException {
int retries = MAX_RETRIES;
while (true) {
try {
while (!umbilical.canCommit(taskId)) {
try {
Thread.sleep(1000);
} catch(InterruptedException ie) {
//ignore
}
reporter.setProgressFlag();
}
break;
} catch (IOException ie) {
LOG.warn("Failure asking whether task can commit: " +
StringUtils.stringifyException(ie));
if (--retries == 0) {
//if it couldn't query successfully then delete the output
discardOutput(taskContext);
System.exit(68);
}
}
}
// task can Commit now
try {
LOG.info("Task " + taskId + " is allowed to commit now");
committer.commitTask(taskContext);
return;
} catch (IOException iee) {
LOG.warn("Failure committing: " +
StringUtils.stringifyException(iee));
//if it couldn't commit successfully then delete the output
discardOutput(taskContext);
throw iee;
}
}
private
void discardOutput(TaskAttemptContext taskContext) {
try {
committer.abortTask(taskContext);
} catch (IOException ioe) {
LOG.warn("Failure cleaning up: " +
StringUtils.stringifyException(ioe));
}
}
protected void runTaskCleanupTask(TaskUmbilicalProtocol umbilical,
TaskReporter reporter)
throws IOException, InterruptedException {
taskCleanup(umbilical);
done(umbilical, reporter);
}
void taskCleanup(TaskUmbilicalProtocol umbilical)
throws IOException {
// set phase for this task
setPhase(TaskStatus.Phase.CLEANUP);
getProgress().setStatus("cleanup");
statusUpdate(umbilical);
LOG.info("Runnning cleanup for the task");
// do the cleanup
committer.abortTask(taskContext);
}
protected void runJobCleanupTask(TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException {
// set phase for this task
setPhase(TaskStatus.Phase.CLEANUP);
getProgress().setStatus("cleanup");
statusUpdate(umbilical);
// do the cleanup
LOG.info("Cleaning up job");
if (jobRunStateForCleanup == JobStatus.State.FAILED
|| jobRunStateForCleanup == JobStatus.State.KILLED) {
LOG.info("Aborting job with runstate : " + jobRunStateForCleanup.name());
if (conf.getUseNewMapper()) {
committer.abortJob(jobContext, jobRunStateForCleanup);
} else {
org.apache.hadoop.mapred.OutputCommitter oldCommitter =
(org.apache.hadoop.mapred.OutputCommitter)committer;
oldCommitter.abortJob(jobContext, jobRunStateForCleanup);
}
} else if (jobRunStateForCleanup == JobStatus.State.SUCCEEDED){
LOG.info("Committing job");
committer.commitJob(jobContext);
} else {
throw new IOException("Invalid state of the job for cleanup. State found "
+ jobRunStateForCleanup + " expecting "
+ JobStatus.State.SUCCEEDED + ", "
+ JobStatus.State.FAILED + " or "
+ JobStatus.State.KILLED);
}
// delete the staging area for the job
JobConf conf = new JobConf(jobContext.getConfiguration());
if (!keepTaskFiles(conf)) {
String jobTempDir = conf.get(MRJobConfig.MAPREDUCE_JOB_DIR);
Path jobTempDirPath = new Path(jobTempDir);
FileSystem fs = jobTempDirPath.getFileSystem(conf);
fs.delete(jobTempDirPath, true);
}
done(umbilical, reporter);
}
protected boolean keepTaskFiles(JobConf conf) {
return (conf.getKeepTaskFilesPattern() != null || conf
.getKeepFailedTaskFiles());
}
protected void runJobSetupTask(TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException {
// do the setup
getProgress().setStatus("setup");
committer.setupJob(jobContext);
done(umbilical, reporter);
}
public void setConf(Configuration conf) {
if (conf instanceof JobConf) {
this.conf = (JobConf) conf;
} else {
this.conf = new JobConf(conf);
}
this.mapOutputFile = ReflectionUtils.newInstance(
conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
MROutputFiles.class, MapOutputFile.class), conf);
this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
// add the static resolutions (this is required for the junit to
// work on testcases that simulate multiple nodes on a single physical
// node).
String hostToResolved[] = conf.getStrings(MRConfig.STATIC_RESOLUTIONS);
if (hostToResolved != null) {
for (String str : hostToResolved) {
String name = str.substring(0, str.indexOf('='));
String resolvedName = str.substring(str.indexOf('=') + 1);
NetUtils.addStaticResolution(name, resolvedName);
}
}
}
public Configuration getConf() {
return this.conf;
}
public MapOutputFile getMapOutputFile() {
return mapOutputFile;
}
/**
* OutputCollector for the combiner.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class CombineOutputCollector<K extends Object, V extends Object>
implements OutputCollector<K, V> {
private Writer<K, V> writer;
private Counters.Counter outCounter;
private Progressable progressable;
private long progressBar;
public CombineOutputCollector(Counters.Counter outCounter, Progressable progressable, Configuration conf) {
this.outCounter = outCounter;
this.progressable=progressable;
progressBar = conf.getLong(MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS, DEFAULT_COMBINE_RECORDS_BEFORE_PROGRESS);
}
public synchronized void setWriter(Writer<K, V> writer) {
this.writer = writer;
}
public synchronized void collect(K key, V value)
throws IOException {
outCounter.increment(1);
writer.append(key, value);
if ((outCounter.getValue() % progressBar) == 0) {
progressable.progress();
}
}
}
/** Iterates values while keys match in sorted input. */
static class ValuesIterator<KEY,VALUE> implements Iterator<VALUE> {
protected RawKeyValueIterator in; //input iterator
private KEY key; // current key
private KEY nextKey;
private VALUE value; // current value
private boolean hasNext; // more w/ this key
private boolean more; // more in file
private RawComparator<KEY> comparator;
protected Progressable reporter;
private Deserializer<KEY> keyDeserializer;
private Deserializer<VALUE> valDeserializer;
private DataInputBuffer keyIn = new DataInputBuffer();
private DataInputBuffer valueIn = new DataInputBuffer();
public ValuesIterator (RawKeyValueIterator in,
RawComparator<KEY> comparator,
Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf,
Progressable reporter)
throws IOException {
this.in = in;
this.comparator = comparator;
this.reporter = reporter;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
this.keyDeserializer.open(keyIn);
this.valDeserializer = serializationFactory.getDeserializer(valClass);
this.valDeserializer.open(this.valueIn);
readNextKey();
key = nextKey;
nextKey = null; // force new instance creation
hasNext = more;
}
RawKeyValueIterator getRawIterator() { return in; }
/// Iterator methods
public boolean hasNext() { return hasNext; }
private int ctr = 0;
public VALUE next() {
if (!hasNext) {
throw new NoSuchElementException("iterate past last value");
}
try {
readNextValue();
readNextKey();
} catch (IOException ie) {
throw new RuntimeException("problem advancing post rec#"+ctr, ie);
}
reporter.progress();
return value;
}
public void remove() { throw new RuntimeException("not implemented"); }
/// Auxiliary methods
/** Start processing next unique key. */
public void nextKey() throws IOException {
// read until we find a new key
while (hasNext) {
readNextKey();
}
++ctr;
// move the next key to the current one
KEY tmpKey = key;
key = nextKey;
nextKey = tmpKey;
hasNext = more;
}
/** True iff more keys remain. */
public boolean more() {
return more;
}
/** The current key. */
public KEY getKey() {
return key;
}
/**
* read the next key
*/
private void readNextKey() throws IOException {
more = in.next();
if (more) {
DataInputBuffer nextKeyBytes = in.getKey();
keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(), nextKeyBytes.getLength());
nextKey = keyDeserializer.deserialize(nextKey);
hasNext = key != null && (comparator.compare(key, nextKey) == 0);
} else {
hasNext = false;
}
}
/**
* Read the next value
* @throws IOException
*/
private void readNextValue() throws IOException {
DataInputBuffer nextValueBytes = in.getValue();
valueIn.reset(nextValueBytes.getData(), nextValueBytes.getPosition(), nextValueBytes.getLength());
value = valDeserializer.deserialize(value);
}
}
/** Iterator to return Combined values */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class CombineValuesIterator<KEY,VALUE>
extends ValuesIterator<KEY,VALUE> {
private final Counters.Counter combineInputCounter;
public CombineValuesIterator(RawKeyValueIterator in,
RawComparator<KEY> comparator, Class<KEY> keyClass,
Class<VALUE> valClass, Configuration conf, Reporter reporter,
Counters.Counter combineInputCounter) throws IOException {
super(in, comparator, keyClass, valClass, conf, reporter);
this.combineInputCounter = combineInputCounter;
}
public VALUE next() {
combineInputCounter.increment(1);
return super.next();
}
}
@SuppressWarnings("unchecked")
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
Configuration job,
org.apache.hadoop.mapreduce.TaskAttemptID taskId,
RawKeyValueIterator rIter,
org.apache.hadoop.mapreduce.Counter inputKeyCounter,
org.apache.hadoop.mapreduce.Counter inputValueCounter,
org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
org.apache.hadoop.mapreduce.OutputCommitter committer,
org.apache.hadoop.mapreduce.StatusReporter reporter,
RawComparator<INKEY> comparator,
Class<INKEY> keyClass, Class<INVALUE> valueClass
) throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
reduceContext =
new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, taskId,
rIter,
inputKeyCounter,
inputValueCounter,
output,
committer,
reporter,
comparator,
keyClass,
valueClass);
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
reducerContext =
new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
reduceContext);
return reducerContext;
}
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public static abstract class CombinerRunner<K,V> {
protected final Counters.Counter inputCounter;
protected final JobConf job;
protected final TaskReporter reporter;
CombinerRunner(Counters.Counter inputCounter,
JobConf job,
TaskReporter reporter) {
this.inputCounter = inputCounter;
this.job = job;
this.reporter = reporter;
}
/**
* Run the combiner over a set of inputs.
* @param iterator the key/value pairs to use as input
* @param collector the output collector
*/
public abstract void combine(RawKeyValueIterator iterator,
OutputCollector<K,V> collector
) throws IOException, InterruptedException,
ClassNotFoundException;
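/**
 * Choose the appropriate combiner wrapper for the job: the old-API combiner
 * class if one is configured, otherwise the new-API combiner from the task
 * context, or null when no combiner is set at all.
 */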
@SuppressWarnings("unchecked")
public static <K,V>
CombinerRunner<K,V> create(JobConf job,
TaskAttemptID taskId,
Counters.Counter inputCounter,
TaskReporter reporter,
org.apache.hadoop.mapreduce.OutputCommitter committer
) throws ClassNotFoundException {
Class<? extends Reducer<K,V,K,V>> cls =
(Class<? extends Reducer<K,V,K,V>>) job.getCombinerClass();
if (cls != null) {
return new OldCombinerRunner(cls, job, inputCounter, reporter);
}
// make a task context so we can get the classes
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job, taskId,
reporter);
Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>> newcls =
(Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>)
taskContext.getCombinerClass();
if (newcls != null) {
return new NewCombinerRunner<K,V>(newcls, job, taskId, taskContext,
inputCounter, reporter, committer);
}
return null;
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
protected static class OldCombinerRunner<K,V> extends CombinerRunner<K,V> {
private final Class<? extends Reducer<K,V,K,V>> combinerClass;
private final Class<K> keyClass;
private final Class<V> valueClass;
private final RawComparator<K> comparator;
@SuppressWarnings("unchecked")
protected OldCombinerRunner(Class<? extends Reducer<K,V,K,V>> cls,
JobConf conf,
Counters.Counter inputCounter,
TaskReporter reporter) {
super(inputCounter, conf, reporter);
combinerClass = cls;
keyClass = (Class<K>) job.getMapOutputKeyClass();
valueClass = (Class<V>) job.getMapOutputValueClass();
comparator = (RawComparator<K>)
job.getCombinerKeyGroupingComparator();
}
@SuppressWarnings("unchecked")
public void combine(RawKeyValueIterator kvIter,
OutputCollector<K,V> combineCollector
) throws IOException {
Reducer<K,V,K,V> combiner =
ReflectionUtils.newInstance(combinerClass, job);
try {
CombineValuesIterator<K,V> values =
new CombineValuesIterator<K,V>(kvIter, comparator, keyClass,
valueClass, job, reporter,
inputCounter);
while (values.more()) {
combiner.reduce(values.getKey(), values, combineCollector,
reporter);
values.nextKey();
}
} finally {
combiner.close();
}
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
protected static class NewCombinerRunner<K, V> extends CombinerRunner<K,V> {
private final Class<? extends org.apache.hadoop.mapreduce.Reducer<K,V,K,V>>
reducerClass;
private final org.apache.hadoop.mapreduce.TaskAttemptID taskId;
private final RawComparator<K> comparator;
private final Class<K> keyClass;
private final Class<V> valueClass;
private final org.apache.hadoop.mapreduce.OutputCommitter committer;
@SuppressWarnings("unchecked")
NewCombinerRunner(Class reducerClass,
JobConf job,
org.apache.hadoop.mapreduce.TaskAttemptID taskId,
org.apache.hadoop.mapreduce.TaskAttemptContext context,
Counters.Counter inputCounter,
TaskReporter reporter,
org.apache.hadoop.mapreduce.OutputCommitter committer) {
super(inputCounter, job, reporter);
this.reducerClass = reducerClass;
this.taskId = taskId;
keyClass = (Class<K>) context.getMapOutputKeyClass();
valueClass = (Class<V>) context.getMapOutputValueClass();
comparator = (RawComparator<K>) context.getCombinerKeyGroupingComparator();
this.committer = committer;
}
private static class OutputConverter<K,V>
extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
OutputCollector<K,V> output;
OutputConverter(OutputCollector<K,V> output) {
this.output = output;
}
@Override
public void close(org.apache.hadoop.mapreduce.TaskAttemptContext context){
}
@Override
public void write(K key, V value
) throws IOException, InterruptedException {
output.collect(key,value);
}
}
@SuppressWarnings("unchecked")
@Override
public void combine(RawKeyValueIterator iterator,
OutputCollector<K,V> collector
) throws IOException, InterruptedException,
ClassNotFoundException {
// make a reducer
org.apache.hadoop.mapreduce.Reducer<K,V,K,V> reducer =
(org.apache.hadoop.mapreduce.Reducer<K,V,K,V>)
ReflectionUtils.newInstance(reducerClass, job);
org.apache.hadoop.mapreduce.Reducer.Context
reducerContext = createReduceContext(reducer, job, taskId,
iterator, null, inputCounter,
new OutputConverter(collector),
committer,
reporter, comparator, keyClass,
valueClass);
reducer.run(reducerContext);
}
}
BytesWritable getExtraData() {
return extraData;
}
void setExtraData(BytesWritable extraData) {
this.extraData = extraData;
}
}
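// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): how the two combiner
// paths that CombinerRunner.create() distinguishes are normally wired from
// job-setup code. Because the combiner below is registered through the old
// mapred API, create() would hand back an OldCombinerRunner for this job;
// registering it via org.apache.hadoop.mapreduce.Job.setCombinerClass() would
// select NewCombinerRunner instead. Class names here are hypothetical.
// ---------------------------------------------------------------------------
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class CombinerWiringSketch {
  /** Old-API combiner that OldCombinerRunner would drive via CombineValuesIterator. */
  public static class SumCombiner extends MapReduceBase
      implements Reducer<Text, IntWritable, Text, IntWritable> {
    public void reduce(Text key, Iterator<IntWritable> values,
                       OutputCollector<Text, IntWritable> output,
                       Reporter reporter) throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();       // partial aggregation on the map side
      }
      output.collect(key, new IntWritable(sum));
    }
  }

  public static void main(String[] args) {
    JobConf conf = new JobConf(CombinerWiringSketch.class);
    conf.setCombinerClass(SumCombiner.class);   // old-API registration
  }
}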
| 58,301 | 33.154657 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsTextRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
 * This class converts the input keys and values to their String form by
 * calling the toString() method. This class is to SequenceFileAsTextInputFormat
 * what LineRecordReader is to TextInputFormat.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsTextRecordReader
implements RecordReader<Text, Text> {
private final SequenceFileRecordReader<WritableComparable, Writable>
sequenceFileRecordReader;
private WritableComparable innerKey;
private Writable innerValue;
public SequenceFileAsTextRecordReader(Configuration conf, FileSplit split)
throws IOException {
sequenceFileRecordReader =
new SequenceFileRecordReader<WritableComparable, Writable>(conf, split);
innerKey = sequenceFileRecordReader.createKey();
innerValue = sequenceFileRecordReader.createValue();
}
public Text createKey() {
return new Text();
}
public Text createValue() {
return new Text();
}
  /** Read the next key/value pair, converting both to their String form. */
public synchronized boolean next(Text key, Text value) throws IOException {
Text tKey = key;
Text tValue = value;
if (!sequenceFileRecordReader.next(innerKey, innerValue)) {
return false;
}
tKey.set(innerKey.toString());
tValue.set(innerValue.toString());
return true;
}
public float getProgress() throws IOException {
return sequenceFileRecordReader.getProgress();
}
public synchronized long getPos() throws IOException {
return sequenceFileRecordReader.getPos();
}
public synchronized void close() throws IOException {
sequenceFileRecordReader.close();
}
}
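// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): driving the reader
// directly over a whole SequenceFile and printing every record in its text
// form. The input path is hypothetical and the single-split handling is
// deliberately simplified.
// ---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.SequenceFileAsTextRecordReader;

public class SequenceFileAsTextSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/example.seq");              // hypothetical input
    long length = FileSystem.get(conf).getFileStatus(file).getLen();
    FileSplit split = new FileSplit(file, 0, length, new String[0]);
    SequenceFileAsTextRecordReader reader =
        new SequenceFileAsTextRecordReader(conf, split);
    try {
      Text key = reader.createKey();
      Text value = reader.createValue();
      while (reader.next(key, value)) {    // keys/values arrive as toString() text
        System.out.println(key + "\t" + value);
      }
    } finally {
      reader.close();
    }
  }
}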
| 2,804 | 31.241379 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Expert: Generic interface for {@link Mapper}s.
*
* <p>Custom implementations of <code>MapRunnable</code> can exert greater
* control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
*
* @see Mapper
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface MapRunnable<K1, V1, K2, V2>
extends JobConfigurable {
/**
* Start mapping input <tt><key, value></tt> pairs.
*
* <p>Mapping of input records to output records is complete when this method
* returns.</p>
*
* @param input the {@link RecordReader} to read the input records.
   * @param output the {@link OutputCollector} to collect the output records.
* @param reporter {@link Reporter} to report progress, status-updates etc.
* @throws IOException
*/
void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
Reporter reporter)
throws IOException;
}
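// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the interface above): a minimal identity
// MapRunnable that pulls every record itself. Custom implementations (e.g.
// multi-threaded runners) build on exactly this kind of loop; the class name
// is hypothetical.
// ---------------------------------------------------------------------------
import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapRunnable;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

public class IdentityMapRunnable<K, V> implements MapRunnable<K, V, K, V> {
  public void configure(JobConf job) {
    // no per-job state needed for this sketch
  }
  public void run(RecordReader<K, V> input, OutputCollector<K, V> output,
                  Reporter reporter) throws IOException {
    K key = input.createKey();
    V value = input.createValue();
    while (input.next(key, value)) {   // read until the reader reports EOF
      output.collect(key, value);      // emit the record unchanged
      reporter.progress();             // tell the framework we are still alive
    }
  }
}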
| 1,922 | 34.611111 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <code>RecordReader</code> reads <key, value> pairs from an
* {@link InputSplit}.
*
* <p><code>RecordReader</code>, typically, converts the byte-oriented view of
* the input, provided by the <code>InputSplit</code>, and presents a
* record-oriented view for the {@link Mapper} and {@link Reducer} tasks for
* processing. It thus assumes the responsibility of processing record
* boundaries and presenting the tasks with keys and values.</p>
*
* @see InputSplit
* @see InputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface RecordReader<K, V> {
/**
* Reads the next key/value pair from the input for processing.
*
* @param key the key to read data into
* @param value the value to read data into
* @return true iff a key/value was read, false if at EOF
*/
boolean next(K key, V value) throws IOException;
/**
* Create an object of the appropriate type to be used as a key.
*
* @return a new key object.
*/
K createKey();
/**
* Create an object of the appropriate type to be used as a value.
*
* @return a new value object.
*/
V createValue();
/**
* Returns the current position in the input.
*
* @return the current position in the input.
* @throws IOException
*/
long getPos() throws IOException;
/**
   * Close this {@link RecordReader} to future operations.
*
* @throws IOException
*/
public void close() throws IOException;
/**
   * How much of the input has the {@link RecordReader} consumed, i.e.
   * how much of it has been processed?
*
* @return progress from <code>0.0</code> to <code>1.0</code>.
* @throws IOException
*/
float getProgress() throws IOException;
}
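// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the interface above): a tiny in-memory
// RecordReader that serves a fixed number of synthetic records, mainly to
// show how next(), getPos() and getProgress() relate. The class name and the
// record contents are hypothetical.
// ---------------------------------------------------------------------------
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.RecordReader;

public class SyntheticRecordReader implements RecordReader<LongWritable, Text> {
  private final long total;     // number of records to serve
  private long served = 0;      // records handed out so far

  public SyntheticRecordReader(long total) {
    this.total = total;
  }
  public boolean next(LongWritable key, Text value) throws IOException {
    if (served >= total) {
      return false;                          // EOF
    }
    key.set(served);                         // fill the caller-supplied objects
    value.set("record-" + served);
    served++;
    return true;
  }
  public LongWritable createKey() { return new LongWritable(); }
  public Text createValue() { return new Text(); }
  public long getPos() { return served; }
  public void close() { }
  public float getProgress() {
    return total == 0 ? 1.0f : (float) served / total;
  }
}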
| 2,737 | 29.764045 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.QueueState;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.JsonGenerator;
import java.io.BufferedInputStream;
import java.io.InputStream;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.net.URL;
/**
* Class that exposes information about queues maintained by the Hadoop
* Map/Reduce framework.
* <p>
* The Map/Reduce framework can be configured with one or more queues,
* depending on the scheduler it is configured with. While some
* schedulers work only with one queue, some schedulers support multiple
* queues. Some schedulers also support the notion of queues within
* queues - a feature called hierarchical queues.
* <p>
* Queue names are unique, and used as a key to lookup queues. Hierarchical
* queues are named by a 'fully qualified name' such as q1:q2:q3, where
* q2 is a child queue of q1 and q3 is a child queue of q2.
* <p>
* Leaf level queues are queues that contain no queues within them. Jobs
* can be submitted only to leaf level queues.
* <p>
* Queues can be configured with various properties. Some of these
* properties are common to all schedulers, and those are handled by this
* class. Schedulers might also associate several custom properties with
* queues. These properties are parsed and maintained per queue by the
* framework. If schedulers need more complicated structure to maintain
* configuration per queue, they are free to not use the facilities
* provided by the framework, but define their own mechanisms. In such cases,
* it is likely that the name of the queue will be used to relate the
* common properties of a queue with scheduler specific properties.
* <p>
* Information related to a queue, such as its name, properties, scheduling
* information and children are exposed by this class via a serializable
* class called {@link JobQueueInfo}.
* <p>
* Queues are configured in the configuration file mapred-queues.xml.
* To support backwards compatibility, queues can also be configured
* in mapred-site.xml. However, when configured in the latter, there is
* no support for hierarchical queues.
*/
@InterfaceAudience.Private
public class QueueManager {
private static final Log LOG = LogFactory.getLog(QueueManager.class);
// Map of a queue name and Queue object
private Map<String, Queue> leafQueues = new HashMap<String,Queue>();
private Map<String, Queue> allQueues = new HashMap<String, Queue>();
public static final String QUEUE_CONF_FILE_NAME = "mapred-queues.xml";
static final String QUEUE_CONF_DEFAULT_FILE_NAME = "mapred-queues-default.xml";
//Prefix in configuration for queue related keys
static final String QUEUE_CONF_PROPERTY_NAME_PREFIX = "mapred.queue.";
//Resource in which queue acls are configured.
private Queue root = null;
// represents if job and queue acls are enabled on the mapreduce cluster
private boolean areAclsEnabled = false;
/**
* Factory method to create an appropriate instance of a queue
* configuration parser.
* <p>
* Returns a parser that can parse either the deprecated property
* style queue configuration in mapred-site.xml, or one that can
* parse hierarchical queues in mapred-queues.xml. First preference
* is given to configuration in mapred-site.xml. If no queue
* configuration is found there, then a parser that can parse
* configuration in mapred-queues.xml is created.
*
* @param conf Configuration instance that determines which parser
* to use.
* @return Queue configuration parser
*/
static QueueConfigurationParser getQueueConfigurationParser(
Configuration conf, boolean reloadConf, boolean areAclsEnabled) {
if (conf != null && conf.get(
DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY) != null) {
if (reloadConf) {
conf.reloadConfiguration();
}
return new DeprecatedQueueConfigurationParser(conf);
} else {
URL xmlInUrl =
Thread.currentThread().getContextClassLoader()
.getResource(QUEUE_CONF_FILE_NAME);
if (xmlInUrl == null) {
xmlInUrl = Thread.currentThread().getContextClassLoader()
.getResource(QUEUE_CONF_DEFAULT_FILE_NAME);
assert xmlInUrl != null; // this should be in our jar
}
InputStream stream = null;
try {
stream = xmlInUrl.openStream();
return new QueueConfigurationParser(new BufferedInputStream(stream),
areAclsEnabled);
} catch (IOException ioe) {
throw new RuntimeException("Couldn't open queue configuration at " +
xmlInUrl, ioe);
} finally {
IOUtils.closeStream(stream);
}
}
}
QueueManager() {// acls are disabled
this(false);
}
QueueManager(boolean areAclsEnabled) {
this.areAclsEnabled = areAclsEnabled;
initialize(getQueueConfigurationParser(null, false, areAclsEnabled));
}
/**
* Construct a new QueueManager using configuration specified in the passed
* in {@link org.apache.hadoop.conf.Configuration} object.
* <p>
* This instance supports queue configuration specified in mapred-site.xml,
* but without support for hierarchical queues. If no queue configuration
* is found in mapred-site.xml, it will then look for site configuration
* in mapred-queues.xml supporting hierarchical queues.
*
* @param clusterConf mapreduce cluster configuration
*/
public QueueManager(Configuration clusterConf) {
areAclsEnabled = clusterConf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
initialize(getQueueConfigurationParser(clusterConf, false, areAclsEnabled));
}
/**
* Create an instance that supports hierarchical queues, defined in
* the passed in configuration file.
* <p>
   * This is mainly used for testing purposes and should not be called from
* production code.
*
* @param confFile File where the queue configuration is found.
*/
QueueManager(String confFile, boolean areAclsEnabled) {
this.areAclsEnabled = areAclsEnabled;
QueueConfigurationParser cp =
new QueueConfigurationParser(confFile, areAclsEnabled);
initialize(cp);
}
/**
* Initialize the queue-manager with the queue hierarchy specified by the
* given {@link QueueConfigurationParser}.
*
* @param cp
*/
private void initialize(QueueConfigurationParser cp) {
this.root = cp.getRoot();
leafQueues.clear();
allQueues.clear();
//At this point we have root populated
//update data structures leafNodes.
leafQueues = getRoot().getLeafQueues();
allQueues.putAll(getRoot().getInnerQueues());
allQueues.putAll(leafQueues);
LOG.info("AllQueues : " + allQueues + "; LeafQueues : " + leafQueues);
}
/**
* Return the set of leaf level queues configured in the system to
* which jobs are submitted.
* <p>
* The number of queues configured should be dependent on the Scheduler
* configured. Note that some schedulers work with only one queue, whereas
* others can support multiple queues.
*
* @return Set of queue names.
*/
public synchronized Set<String> getLeafQueueNames() {
return leafQueues.keySet();
}
/**
* Return true if the given user is part of the ACL for the given
* {@link QueueACL} name for the given queue.
* <p>
* An operation is allowed if all users are provided access for this
* operation, or if either the user or any of the groups specified is
* provided access.
*
* @param queueName Queue on which the operation needs to be performed.
* @param qACL The queue ACL name to be checked
* @param ugi The user and groups who wish to perform the operation.
* @return true if the operation is allowed, false otherwise.
*/
public synchronized boolean hasAccess(
String queueName, QueueACL qACL, UserGroupInformation ugi) {
Queue q = leafQueues.get(queueName);
if (q == null) {
LOG.info("Queue " + queueName + " is not present");
return false;
}
if(q.getChildren() != null && !q.getChildren().isEmpty()) {
LOG.info("Cannot submit job to parent queue " + q.getName());
return false;
}
if (!areAclsEnabled()) {
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Checking access for the acl " + toFullPropertyName(queueName,
qACL.getAclName()) + " for user " + ugi.getShortUserName());
}
AccessControlList acl = q.getAcls().get(
toFullPropertyName(queueName, qACL.getAclName()));
if (acl == null) {
return false;
}
// Check if user is part of the ACL
return acl.isUserAllowed(ugi);
}
/**
* Checks whether the given queue is running or not.
*
* @param queueName name of the queue
* @return true, if the queue is running.
*/
synchronized boolean isRunning(String queueName) {
Queue q = leafQueues.get(queueName);
if (q != null) {
return q.getState().equals(QueueState.RUNNING);
}
return false;
}
/**
* Set a generic Object that represents scheduling information relevant
* to a queue.
* <p>
* A string representation of this Object will be used by the framework
* to display in user facing applications like the JobTracker web UI and
* the hadoop CLI.
*
* @param queueName queue for which the scheduling information is to be set.
* @param queueInfo scheduling information for this queue.
*/
public synchronized void setSchedulerInfo(
String queueName,
Object queueInfo) {
if (allQueues.get(queueName) != null) {
allQueues.get(queueName).setSchedulingInfo(queueInfo);
}
}
/**
* Return the scheduler information configured for this queue.
*
* @param queueName queue for which the scheduling information is required.
* @return The scheduling information for this queue.
*/
public synchronized Object getSchedulerInfo(String queueName) {
if (allQueues.get(queueName) != null) {
return allQueues.get(queueName).getSchedulingInfo();
}
return null;
}
static final String MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY =
"Unable to refresh queues because queue-hierarchy changed. "
+ "Retaining existing configuration. ";
static final String MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE =
"Scheduler couldn't refresh it's queues with the new"
+ " configuration properties. "
+ "Retaining existing configuration throughout the system.";
/**
* Refresh acls, state and scheduler properties for the configured queues.
* <p>
* This method reloads configuration related to queues, but does not
* support changes to the list of queues or hierarchy. The expected usage
* is that an administrator can modify the queue configuration file and
* fire an admin command to reload queue configuration. If there is a
* problem in reloading configuration, then this method guarantees that
* existing queue configuration is untouched and in a consistent state.
*
* @param schedulerRefresher
* @throws IOException when queue configuration file is invalid.
*/
synchronized void refreshQueues(Configuration conf,
QueueRefresher schedulerRefresher)
throws IOException {
// Create a new configuration parser using the passed conf object.
QueueConfigurationParser cp =
getQueueConfigurationParser(conf, true, areAclsEnabled);
/*
* (1) Validate the refresh of properties owned by QueueManager. As of now,
* while refreshing queue properties, we only check that the hierarchy is
* the same w.r.t queue names, ACLs and state for each queue and don't
* support adding new queues or removing old queues
*/
if (!root.isHierarchySameAs(cp.getRoot())) {
LOG.warn(MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY);
throw new IOException(MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY);
}
/*
* (2) QueueManager owned properties are validated. Now validate and
* refresh the properties of scheduler in a single step.
*/
if (schedulerRefresher != null) {
try {
schedulerRefresher.refreshQueues(cp.getRoot().getJobQueueInfo().getChildren());
} catch (Throwable e) {
StringBuilder msg =
new StringBuilder(
"Scheduler's refresh-queues failed with the exception : "
+ StringUtils.stringifyException(e));
msg.append("\n");
msg.append(MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE);
LOG.error(msg.toString());
throw new IOException(msg.toString());
}
}
/*
* (3) Scheduler has validated and refreshed its queues successfully, now
* refresh the properties owned by QueueManager
*/
// First copy the scheduling information recursively into the new
// queue-hierarchy. This is done to retain old scheduling information. This
// is done after scheduler refresh and not before it because during refresh,
// schedulers may wish to change their scheduling info objects too.
cp.getRoot().copySchedulingInfo(this.root);
// Now switch roots.
initialize(cp);
LOG.info("Queue configuration is refreshed successfully.");
}
// this method is for internal use only
public static final String toFullPropertyName(
String queue,
String property) {
return QUEUE_CONF_PROPERTY_NAME_PREFIX + queue + "." + property;
}
/**
* Return an array of {@link JobQueueInfo} objects for all the
 * queues configured in the system.
*
* @return array of JobQueueInfo objects.
*/
synchronized JobQueueInfo[] getJobQueueInfos() {
ArrayList<JobQueueInfo> queueInfoList = new ArrayList<JobQueueInfo>();
for (String queue : allQueues.keySet()) {
JobQueueInfo queueInfo = getJobQueueInfo(queue);
if (queueInfo != null) {
queueInfoList.add(queueInfo);
}
}
return queueInfoList.toArray(
new JobQueueInfo[queueInfoList.size()]);
}
/**
* Return {@link JobQueueInfo} for a given queue.
*
* @param queue name of the queue
* @return JobQueueInfo for the queue, null if the queue is not found.
*/
synchronized JobQueueInfo getJobQueueInfo(String queue) {
if (allQueues.containsKey(queue)) {
return allQueues.get(queue).getJobQueueInfo();
}
return null;
}
/**
* JobQueueInfo for all the queues.
* <p>
* Contribs can use this data structure to either create a hierarchy or for
* traversing.
* They can also use this to refresh properties in case of refreshQueues
*
* @return a map for easy navigation.
*/
synchronized Map<String, JobQueueInfo> getJobQueueInfoMapping() {
Map<String, JobQueueInfo> m = new HashMap<String, JobQueueInfo>();
for (Map.Entry<String,Queue> entry : allQueues.entrySet()) {
m.put(entry.getKey(), entry.getValue().getJobQueueInfo());
}
return m;
}
/**
* Generates the array of QueueAclsInfo object.
* <p>
* The array consists of only those queues for which user has acls.
*
* @return QueueAclsInfo[]
* @throws java.io.IOException
*/
synchronized QueueAclsInfo[] getQueueAcls(UserGroupInformation ugi)
throws IOException {
//List of all QueueAclsInfo objects , this list is returned
ArrayList<QueueAclsInfo> queueAclsInfolist =
new ArrayList<QueueAclsInfo>();
QueueACL[] qAcls = QueueACL.values();
for (String queueName : leafQueues.keySet()) {
QueueAclsInfo queueAclsInfo = null;
ArrayList<String> operationsAllowed = null;
for (QueueACL qAcl : qAcls) {
if (hasAccess(queueName, qAcl, ugi)) {
if (operationsAllowed == null) {
operationsAllowed = new ArrayList<String>();
}
operationsAllowed.add(qAcl.getAclName());
}
}
if (operationsAllowed != null) {
        // There is at least 1 operation supported for queue <queueName>,
        // hence initialize queueAclsInfo
queueAclsInfo = new QueueAclsInfo(
queueName, operationsAllowed.toArray
(new String[operationsAllowed.size()]));
queueAclsInfolist.add(queueAclsInfo);
}
}
return queueAclsInfolist.toArray(
new QueueAclsInfo[queueAclsInfolist.size()]);
}
/**
* Return if ACLs are enabled for the Map/Reduce system
*
* @return true if ACLs are enabled.
*/
boolean areAclsEnabled() {
return areAclsEnabled;
}
/**
* Used only for test.
*
* @return
*/
Queue getRoot() {
return root;
}
/**
* Dumps the configuration of hierarchy of queues
* @param out the writer object to which dump is written
* @throws IOException
*/
static void dumpConfiguration(Writer out,Configuration conf) throws IOException {
dumpConfiguration(out, null,conf);
}
/***
* Dumps the configuration of hierarchy of queues with
* the xml file path given. It is to be used directly ONLY FOR TESTING.
* @param out the writer object to which dump is written to.
* @param configFile the filename of xml file
* @throws IOException
*/
static void dumpConfiguration(Writer out, String configFile,
Configuration conf) throws IOException {
if (conf != null && conf.get(DeprecatedQueueConfigurationParser.
MAPRED_QUEUE_NAMES_KEY) != null) {
return;
}
JsonFactory dumpFactory = new JsonFactory();
JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
QueueConfigurationParser parser;
boolean aclsEnabled = false;
if (conf != null) {
aclsEnabled = conf.getBoolean(MRConfig.MR_ACLS_ENABLED, false);
}
if (configFile != null && !"".equals(configFile)) {
parser = new QueueConfigurationParser(configFile, aclsEnabled);
}
else {
parser = getQueueConfigurationParser(null, false, aclsEnabled);
}
dumpGenerator.writeStartObject();
dumpGenerator.writeFieldName("queues");
dumpGenerator.writeStartArray();
dumpConfiguration(dumpGenerator,parser.getRoot().getChildren());
dumpGenerator.writeEndArray();
dumpGenerator.writeEndObject();
dumpGenerator.flush();
}
/**
* method to perform depth-first search and write the parameters of every
* queue in JSON format.
* @param dumpGenerator JsonGenerator object which takes the dump and flushes
* to a writer object
* @param rootQueues the top-level queues
* @throws JsonGenerationException
* @throws IOException
*/
private static void dumpConfiguration(JsonGenerator dumpGenerator,
Set<Queue> rootQueues) throws JsonGenerationException, IOException {
for (Queue queue : rootQueues) {
dumpGenerator.writeStartObject();
dumpGenerator.writeStringField("name", queue.getName());
dumpGenerator.writeStringField("state", queue.getState().toString());
AccessControlList submitJobList = null;
AccessControlList administerJobsList = null;
if (queue.getAcls() != null) {
submitJobList =
queue.getAcls().get(toFullPropertyName(queue.getName(),
QueueACL.SUBMIT_JOB.getAclName()));
administerJobsList =
queue.getAcls().get(toFullPropertyName(queue.getName(),
QueueACL.ADMINISTER_JOBS.getAclName()));
}
String aclsSubmitJobValue = " ";
if (submitJobList != null ) {
aclsSubmitJobValue = submitJobList.getAclString();
}
dumpGenerator.writeStringField("acl_submit_job", aclsSubmitJobValue);
String aclsAdministerValue = " ";
if (administerJobsList != null) {
aclsAdministerValue = administerJobsList.getAclString();
}
dumpGenerator.writeStringField("acl_administer_jobs",
aclsAdministerValue);
dumpGenerator.writeFieldName("properties");
dumpGenerator.writeStartArray();
if (queue.getProperties() != null) {
for (Map.Entry<Object, Object>property :
queue.getProperties().entrySet()) {
dumpGenerator.writeStartObject();
dumpGenerator.writeStringField("key", (String)property.getKey());
dumpGenerator.writeStringField("value", (String)property.getValue());
dumpGenerator.writeEndObject();
}
}
dumpGenerator.writeEndArray();
Set<Queue> childQueues = queue.getChildren();
dumpGenerator.writeFieldName("children");
dumpGenerator.writeStartArray();
if (childQueues != null && childQueues.size() > 0) {
dumpConfiguration(dumpGenerator, childQueues);
}
dumpGenerator.writeEndArray();
dumpGenerator.writeEndObject();
}
}
}
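// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the class above): listing the leaf queues
// and checking the submit-job ACL for the current user. QueueManager is a
// private, framework-internal class, so treat this purely as an illustration;
// the sketch is placed in the same package to keep QueueACL and the
// package-visible pieces reachable.
// ---------------------------------------------------------------------------
package org.apache.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class QueueManagerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();         // reads mapred-site.xml etc.
    QueueManager qm = new QueueManager(conf);          // falls back to mapred-queues.xml
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    for (String queue : qm.getLeafQueueNames()) {      // only leaf queues accept jobs
      boolean canSubmit = qm.hasAccess(queue, QueueACL.SUBMIT_JOB, ugi);
      System.out.println(queue + " submit-job allowed: " + canSubmit);
    }
  }
}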
| 22,087 | 34.857143 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** The states of a Task.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public enum TIPStatus {
PENDING, RUNNING, COMPLETE, KILLED, FAILED;
}
| 1,120 | 37.655172 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
/**
* <code>InputFormat</code> describes the input-specification for a
* Map-Reduce job.
*
* <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
* job to:<p>
* <ol>
* <li>
* Validate the input-specification of the job.
* <li>
* Split-up the input file(s) into logical {@link InputSplit}s, each of
* which is then assigned to an individual {@link Mapper}.
* </li>
* <li>
* Provide the {@link RecordReader} implementation to be used to glean
* input records from the logical <code>InputSplit</code> for processing by
* the {@link Mapper}.
* </li>
* </ol>
*
* <p>The default behavior of file-based {@link InputFormat}s, typically
* sub-classes of {@link FileInputFormat}, is to split the
* input into <i>logical</i> {@link InputSplit}s based on the total size, in
* bytes, of the input files. However, the {@link FileSystem} blocksize of
* the input files is treated as an upper bound for input splits. A lower bound
* on the split size can be set via
* <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
* mapreduce.input.fileinputformat.split.minsize</a>.</p>
*
 * <p>Clearly, logical splits based on input-size are insufficient for many
* applications since record boundaries are to be respected. In such cases, the
* application has to also implement a {@link RecordReader} on whom lies the
 * responsibility to respect record-boundaries and present a record-oriented
* view of the logical <code>InputSplit</code> to the individual task.
*
* @see InputSplit
* @see RecordReader
* @see JobClient
* @see FileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface InputFormat<K, V> {
/**
* Logically split the set of input files for the job.
*
* <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
* for processing.</p>
*
* <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
* input files are not physically split into chunks. For e.g. a split could
* be <i><input-file-path, start, offset></i> tuple.
*
* @param job job configuration.
* @param numSplits the desired number of splits, a hint.
* @return an array of {@link InputSplit}s for the job.
*/
InputSplit[] getSplits(JobConf job, int numSplits) throws IOException;
/**
* Get the {@link RecordReader} for the given {@link InputSplit}.
*
* <p>It is the responsibility of the <code>RecordReader</code> to respect
* record boundaries while processing the logical split to present a
* record-oriented view to the individual task.</p>
*
* @param split the {@link InputSplit}
* @param job the job that this split belongs to
* @return a {@link RecordReader}
*/
RecordReader<K, V> getRecordReader(InputSplit split,
JobConf job,
Reporter reporter) throws IOException;
}
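// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the interface above): wiring a file-based
// InputFormat into an old-API job and raising the lower bound on split sizes
// via the property named in the Javadoc. The input path and the 128 MB figure
// are hypothetical.
// ---------------------------------------------------------------------------
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;

public class InputFormatWiringSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf(InputFormatWiringSketch.class);
    job.setInputFormat(TextInputFormat.class);                    // supplies getSplits()/getRecordReader()
    FileInputFormat.addInputPath(job, new Path("/data/input"));   // hypothetical path
    // Lower bound for split sizes, in bytes.
    job.setLong("mapreduce.input.fileinputformat.split.minsize", 128L * 1024 * 1024);
  }
}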
| 4,081 | 38.631068 | 145 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
class CleanupQueue {
public static final Log LOG =
LogFactory.getLog(CleanupQueue.class);
private static PathCleanupThread cleanupThread;
/**
* Create a singleton path-clean-up queue. It can be used to delete
* paths(directories/files) in a separate thread. This constructor creates a
* clean-up thread and also starts it as a daemon. Callers can instantiate one
* CleanupQueue per JVM and can use it for deleting paths. Use
* {@link CleanupQueue#addToQueue(PathDeletionContext...)} to add paths for
* deletion.
*/
public CleanupQueue() {
synchronized (PathCleanupThread.class) {
if (cleanupThread == null) {
cleanupThread = new PathCleanupThread();
}
}
}
/**
* Contains info related to the path of the file/dir to be deleted
*/
static class PathDeletionContext {
String fullPath;// full path of file or dir
FileSystem fs;
public PathDeletionContext(FileSystem fs, String fullPath) {
this.fs = fs;
this.fullPath = fullPath;
}
protected String getPathForCleanup() {
return fullPath;
}
/**
* Makes the path(and its subdirectories recursively) fully deletable
*/
protected void enablePathForCleanup() throws IOException {
// Do nothing by default.
// Subclasses can override to provide enabling for deletion.
}
}
/**
* Adds the paths to the queue of paths to be deleted by cleanupThread.
*/
void addToQueue(PathDeletionContext... contexts) {
cleanupThread.addToQueue(contexts);
}
protected static boolean deletePath(PathDeletionContext context)
throws IOException {
context.enablePathForCleanup();
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to delete " + context.fullPath);
}
if (context.fs.exists(new Path(context.fullPath))) {
return context.fs.delete(new Path(context.fullPath), true);
}
return true;
}
// currently used by tests only
protected boolean isQueueEmpty() {
return (cleanupThread.queue.size() == 0);
}
private static class PathCleanupThread extends Thread {
// cleanup queue which deletes files/directories of the paths queued up.
private LinkedBlockingQueue<PathDeletionContext> queue =
new LinkedBlockingQueue<PathDeletionContext>();
public PathCleanupThread() {
setName("Directory/File cleanup thread");
setDaemon(true);
start();
}
void addToQueue(PathDeletionContext[] contexts) {
for (PathDeletionContext context : contexts) {
try {
queue.put(context);
} catch(InterruptedException ie) {}
}
}
public void run() {
if (LOG.isDebugEnabled()) {
LOG.debug(getName() + " started.");
}
PathDeletionContext context = null;
while (true) {
try {
context = queue.take();
// delete the path.
if (!deletePath(context)) {
LOG.warn("CleanupThread:Unable to delete path " + context.fullPath);
}
else if (LOG.isDebugEnabled()) {
LOG.debug("DELETED " + context.fullPath);
}
} catch (InterruptedException t) {
LOG.warn("Interrupted deletion of " + context.fullPath);
return;
} catch (Exception e) {
LOG.warn("Error deleting path " + context.fullPath + ": " + e);
}
}
}
}
}
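// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the class above): queueing a scratch
// directory for asynchronous deletion. CleanupQueue and PathDeletionContext
// are package-private, so the sketch assumes it lives in the same package;
// the path is hypothetical and the actual delete happens on the daemon
// cleanup thread, not on the caller.
// ---------------------------------------------------------------------------
package org.apache.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class CleanupQueueSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    CleanupQueue queue = new CleanupQueue();   // reuses the shared daemon thread
    queue.addToQueue(
        new CleanupQueue.PathDeletionContext(fs, "/tmp/job-scratch-dir"));
  }
}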
| 4,490 | 29.344595 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.MapFile;
/**
* An {@link InputFormat} for {@link SequenceFile}s.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileInputFormat<K, V> extends FileInputFormat<K, V> {
public SequenceFileInputFormat() {
setMinSplitSize(SequenceFile.SYNC_INTERVAL);
}
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
FileStatus[] files = super.listStatus(job);
for (int i = 0; i < files.length; i++) {
FileStatus file = files[i];
if (file.isDirectory()) { // it's a MapFile
Path dataFile = new Path(file.getPath(), MapFile.DATA_FILE_NAME);
FileSystem fs = file.getPath().getFileSystem(job);
// use the data file
files[i] = fs.getFileStatus(dataFile);
}
}
return files;
}
public RecordReader<K, V> getRecordReader(InputSplit split,
JobConf job, Reporter reporter)
throws IOException {
reporter.setStatus(split.toString());
return new SequenceFileRecordReader<K, V>(job, (FileSplit) split);
}
}
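// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the class above): selecting this format for
// an old-API job. Because listStatus() above swaps each MapFile directory for
// its data file, the input directory may contain SequenceFiles or MapFiles;
// the path is hypothetical.
// ---------------------------------------------------------------------------
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.SequenceFileInputFormat;

public class SequenceFileJobWiringSketch {
  public static void main(String[] args) {
    JobConf job = new JobConf(SequenceFileJobWiringSketch.class);
    job.setInputFormat(SequenceFileInputFormat.class);
    FileInputFormat.addInputPath(job, new Path("/data/seqfiles"));   // hypothetical
    // Map input key/value classes must match the types the SequenceFiles
    // were written with (not shown here).
  }
}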
| 2,260 | 31.768116 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Reducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Closeable;
/**
* Reduces a set of intermediate values which share a key to a smaller set of
* values.
*
* <p>The number of <code>Reducer</code>s for the job is set by the user via
* {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
* can access the {@link JobConf} for the job via the
* {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
* Similarly they can use the {@link Closeable#close()} method for
* de-initialization.</p>
* <p><code>Reducer</code> has 3 primary phases:</p>
* <ol>
* <li>
*
* <b id="Shuffle">Shuffle</b>
*
 * <p><code>Reducer</code> is given as input the grouped output of a
 * {@link Mapper}. In this phase the framework, for each <code>Reducer</code>,
 * fetches the relevant partition of the output of all the
 * <code>Mapper</code>s, via HTTP.
* </p>
* </li>
*
* <li>
* <b id="Sort">Sort</b>
*
* <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
* (since different <code>Mapper</code>s may have output the same key) in this
* stage.</p>
*
* <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
* being fetched they are merged.</p>
*
* <b id="SecondarySort">SecondarySort</b>
*
* <p>If equivalence rules for keys while grouping the intermediates are
* different from those for grouping keys before reduction, then one may
* specify a <code>Comparator</code> via
 * {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
* {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
* control how intermediate keys are grouped, these can be used in conjunction
* to simulate <i>secondary sort on values</i>.</p>
*
*
* For example, say that you want to find duplicate web pages and tag them
* all with the url of the "best" known example. You would set up the job
* like:
* <ul>
* <li>Map Input Key: url</li>
* <li>Map Input Value: document</li>
* <li>Map Output Key: document checksum, url pagerank</li>
* <li>Map Output Value: url</li>
* <li>Partitioner: by checksum</li>
* <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
* <li>OutputValueGroupingComparator: by checksum</li>
* </ul>
* </li>
*
* <li>
* <b id="Reduce">Reduce</b>
*
* <p>In this phase the
* {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
* method is called for each <code><key, (list of values)></code> pair in
* the grouped inputs.</p>
* <p>The output of the reduce task is typically written to the
* {@link FileSystem} via
* {@link OutputCollector#collect(Object, Object)}.</p>
* </li>
* </ol>
*
* <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class MyReducer<K extends WritableComparable, V extends Writable>
* extends MapReduceBase implements Reducer<K, V, K, V> {
*
* static enum MyCounters { NUM_RECORDS }
*
* private String reduceTaskId;
* private int noKeys = 0;
*
* public void configure(JobConf job) {
* reduceTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
* }
*
* public void reduce(K key, Iterator<V> values,
* OutputCollector<K, V> output,
* Reporter reporter)
* throws IOException {
*
* // Process
* int noValues = 0;
* while (values.hasNext()) {
* V value = values.next();
*
* // Increment the no. of values for this key
* ++noValues;
*
* // Process the <key, value> pair (assume this takes a while)
* // ...
* // ...
*
* // Let the framework know that we are alive, and kicking!
* if ((noValues%10) == 0) {
* reporter.progress();
* }
*
* // Process some more
* // ...
* // ...
*
* // Output the <key, value>
* output.collect(key, value);
* }
*
* // Increment the no. of <key, list of values> pairs processed
* ++noKeys;
*
* // Increment counters
* reporter.incrCounter(NUM_RECORDS, 1);
*
* // Every 100 keys update application-level status
* if ((noKeys%100) == 0) {
* reporter.setStatus(reduceTaskId + " processed " + noKeys);
* }
* }
* }
* </pre></blockquote>
*
* @see Mapper
* @see Partitioner
* @see Reporter
* @see MapReduceBase
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Reducer<K2, V2, K3, V3> extends JobConfigurable, Closeable {
/**
* <i>Reduces</i> values for a given key.
*
* <p>The framework calls this method for each
* <code><key, (list of values)></code> pair in the grouped inputs.
* Output values must be of the same type as input values. Input keys must
* not be altered. The framework will <b>reuse</b> the key and value objects
* that are passed into the reduce, therefore the application should clone
* the objects they want to keep a copy of. In many cases, all values are
* combined into zero or one value.
* </p>
*
* <p>Output pairs are collected with calls to
* {@link OutputCollector#collect(Object,Object)}.</p>
*
* <p>Applications can use the {@link Reporter} provided to report progress
* or just indicate that they are alive. In scenarios where the application
* takes a significant amount of time to process individual key/value
* pairs, this is crucial since the framework might assume that the task has
* timed-out and kill that task. The other way of avoiding this is to set
* <a href="{@docRoot}/../mapred-default.html#mapreduce.task.timeout">
* mapreduce.task.timeout</a> to a high-enough value (or even zero for no
* time-outs).</p>
*
* @param key the key.
* @param values the list of values to reduce.
* @param output to collect keys and combined values.
* @param reporter facility to report progress.
*/
void reduce(K2 key, Iterator<V2> values,
OutputCollector<K3, V3> output, Reporter reporter)
throws IOException;
}
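// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the interface above): the JobConf calls
// behind the "SecondarySort" notes in the Javadoc. The composite key layout
// ("checksum\tpagerank" packed into a Text) and both comparator classes are
// hypothetical; real jobs would normally use a custom WritableComparable and
// byte-level comparators instead of toString().
// ---------------------------------------------------------------------------
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;

public class SecondarySortWiringSketch {
  /** Sort order for intermediate keys: compares the whole composite key. */
  public static class FullKeyComparator extends WritableComparator {
    public FullKeyComparator() { super(Text.class, true); }
    public int compare(WritableComparable a, WritableComparable b) {
      return a.toString().compareTo(b.toString());
    }
  }

  /** Grouping for reduce(): keys that share the checksum prefix form one group. */
  public static class ChecksumGroupingComparator extends WritableComparator {
    public ChecksumGroupingComparator() { super(Text.class, true); }
    public int compare(WritableComparable a, WritableComparable b) {
      String ka = a.toString(), kb = b.toString();
      String ca = ka.indexOf('\t') >= 0 ? ka.substring(0, ka.indexOf('\t')) : ka;
      String cb = kb.indexOf('\t') >= 0 ? kb.substring(0, kb.indexOf('\t')) : kb;
      return ca.compareTo(cb);
    }
  }

  public static void main(String[] args) {
    JobConf job = new JobConf(SecondarySortWiringSketch.class);
    job.setMapOutputKeyClass(Text.class);
    job.setOutputKeyComparatorClass(FullKeyComparator.class);
    job.setOutputValueGroupingComparator(ChecksumGroupingComparator.class);
  }
}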
| 7,561 | 36.068627 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ProgressSplitsBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/*
 * This object gathers the [currently four] PeriodicStatsAccumulators that we
* are gathering for a particular task attempt for packaging and
* handling as a single object.
*/
@Private
@Unstable
public class ProgressSplitsBlock {
final PeriodicStatsAccumulator progressWallclockTime;
final PeriodicStatsAccumulator progressCPUTime;
final PeriodicStatsAccumulator progressVirtualMemoryKbytes;
final PeriodicStatsAccumulator progressPhysicalMemoryKbytes;
static final int[] NULL_ARRAY = new int[0];
static final int WALLCLOCK_TIME_INDEX = 0;
static final int CPU_TIME_INDEX = 1;
static final int VIRTUAL_MEMORY_KBYTES_INDEX = 2;
static final int PHYSICAL_MEMORY_KBYTES_INDEX = 3;
static final int DEFAULT_NUMBER_PROGRESS_SPLITS = 12;
ProgressSplitsBlock(int numberSplits) {
progressWallclockTime
= new CumulativePeriodicStats(numberSplits);
progressCPUTime
= new CumulativePeriodicStats(numberSplits);
progressVirtualMemoryKbytes
= new StatePeriodicStats(numberSplits);
progressPhysicalMemoryKbytes
= new StatePeriodicStats(numberSplits);
}
// this coordinates with LoggedTaskAttempt.SplitVectorKind
int[][] burst() {
int[][] result = new int[4][];
result[WALLCLOCK_TIME_INDEX] = progressWallclockTime.getValues();
result[CPU_TIME_INDEX] = progressCPUTime.getValues();
result[VIRTUAL_MEMORY_KBYTES_INDEX] = progressVirtualMemoryKbytes.getValues();
result[PHYSICAL_MEMORY_KBYTES_INDEX] = progressPhysicalMemoryKbytes.getValues();
return result;
}
static public int[] arrayGet(int[][] burstedBlock, int index) {
return burstedBlock == null ? NULL_ARRAY : burstedBlock[index];
}
static public int[] arrayGetWallclockTime(int[][] burstedBlock) {
return arrayGet(burstedBlock, WALLCLOCK_TIME_INDEX);
}
static public int[] arrayGetCPUTime(int[][] burstedBlock) {
return arrayGet(burstedBlock, CPU_TIME_INDEX);
}
static public int[] arrayGetVMemKbytes(int[][] burstedBlock) {
return arrayGet(burstedBlock, VIRTUAL_MEMORY_KBYTES_INDEX);
}
static public int[] arrayGetPhysMemKbytes(int[][] burstedBlock) {
return arrayGet(burstedBlock, PHYSICAL_MEMORY_KBYTES_INDEX);
}
}
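// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the class above): packaging the four
// progress series and unpacking one of them again with the public array
// helpers. The constructor and burst() are package-private, so the sketch
// assumes it lives in org.apache.hadoop.mapred; with nothing recorded the
// arrays just contain their initial values.
// ---------------------------------------------------------------------------
package org.apache.hadoop.mapred;

import java.util.Arrays;

public class ProgressSplitsSketch {
  public static void main(String[] args) {
    ProgressSplitsBlock block =
        new ProgressSplitsBlock(ProgressSplitsBlock.DEFAULT_NUMBER_PROGRESS_SPLITS);
    int[][] bursted = block.burst();   // one int[] per tracked quantity
    System.out.println("wallclock: "
        + Arrays.toString(ProgressSplitsBlock.arrayGetWallclockTime(bursted)));
    System.out.println("cpu: "
        + Arrays.toString(ProgressSplitsBlock.arrayGetCPUTime(bursted)));
  }
}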
| 3,187 | 34.422222 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.CryptoUtils;
/**
 * <code>BackupStore</code> is a utility class that is used to support
 * the mark-reset functionality of the values iterator.
*
* <p>It has two caches - a memory cache and a file cache where values are
* stored as they are iterated, after a mark. On reset, values are retrieved
 * from these caches. The framework moves from the memory cache to the
* file cache when the memory cache becomes full.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class BackupStore<K,V> {
private static final Log LOG = LogFactory.getLog(BackupStore.class.getName());
private static final int MAX_VINT_SIZE = 9;
private static final int EOF_MARKER_SIZE = 2 * MAX_VINT_SIZE;
private final TaskAttemptID tid;
private MemoryCache memCache;
private FileCache fileCache;
List<Segment<K,V>> segmentList = new LinkedList<Segment<K,V>>();
private int readSegmentIndex = 0;
private int firstSegmentOffset = 0;
private int currentKVOffset = 0;
private int nextKVOffset = -1;
private DataInputBuffer currentKey = null;
private DataInputBuffer currentValue = new DataInputBuffer();
private DataInputBuffer currentDiskValue = new DataInputBuffer();
private boolean hasMore = false;
private boolean inReset = false;
private boolean clearMarkFlag = false;
private boolean lastSegmentEOF = false;
private Configuration conf;
public BackupStore(Configuration conf, TaskAttemptID taskid)
throws IOException {
final float bufferPercent =
conf.getFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0f);
if (bufferPercent > 1.0 || bufferPercent < 0.0) {
throw new IOException(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT +
bufferPercent);
}
int maxSize = (int)Math.min(
Runtime.getRuntime().maxMemory() * bufferPercent, Integer.MAX_VALUE);
// Support an absolute size also.
int tmp = conf.getInt(JobContext.REDUCE_MARKRESET_BUFFER_SIZE, 0);
if (tmp > 0) {
maxSize = tmp;
}
memCache = new MemoryCache(maxSize);
fileCache = new FileCache(conf);
tid = taskid;
this.conf = conf;
LOG.info("Created a new BackupStore with a memory of " + maxSize);
}
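  // Configuration sketch (values are illustrative only): size the mark-reset
  // buffer as a fraction of the task heap, or pin it to an absolute size.
  //
  //   conf.setFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0.10f);
  //   conf.setInt(JobContext.REDUCE_MARKRESET_BUFFER_SIZE, 32 * 1024 * 1024);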
/**
* Write the given K,V to the cache.
* Write to memcache if space is available, else write to the filecache
* @param key
* @param value
* @throws IOException
*/
public void write(DataInputBuffer key, DataInputBuffer value)
throws IOException {
assert (key != null && value != null);
if (fileCache.isActive()) {
fileCache.write(key, value);
return;
}
if (memCache.reserveSpace(key, value)) {
memCache.write(key, value);
} else {
fileCache.activate();
fileCache.write(key, value);
}
}
public void mark() throws IOException {
// We read one KV pair in advance in hasNext.
// If hasNext has read the next KV pair from a new segment, but the
// user has not called next() for that KV, then reset the readSegmentIndex
// to the previous segment
if (nextKVOffset == 0) {
assert (readSegmentIndex != 0);
assert (currentKVOffset != 0);
readSegmentIndex --;
}
// just drop segments before the current active segment
int i = 0;
Iterator<Segment<K,V>> itr = segmentList.iterator();
while (itr.hasNext()) {
Segment<K,V> s = itr.next();
if (i == readSegmentIndex) {
break;
}
s.close();
itr.remove();
i++;
LOG.debug("Dropping a segment");
}
// FirstSegmentOffset is the offset in the current segment from where we
// need to start reading on the next reset
firstSegmentOffset = currentKVOffset;
readSegmentIndex = 0;
LOG.debug("Setting the FirsSegmentOffset to " + currentKVOffset);
}
public void reset() throws IOException {
// Create a new segment for the previously written records only if we
// are not already in the reset mode
if (!inReset) {
if (fileCache.isActive) {
fileCache.createInDiskSegment();
} else {
memCache.createInMemorySegment();
}
}
inReset = true;
// Reset the segments to the correct position from where the next read
// should begin.
for (int i = 0; i < segmentList.size(); i++) {
Segment<K,V> s = segmentList.get(i);
if (s.inMemory()) {
int offset = (i == 0) ? firstSegmentOffset : 0;
s.getReader().reset(offset);
} else {
s.closeReader();
if (i == 0) {
s.reinitReader(firstSegmentOffset);
s.getReader().disableChecksumValidation();
}
}
}
currentKVOffset = firstSegmentOffset;
nextKVOffset = -1;
readSegmentIndex = 0;
hasMore = false;
lastSegmentEOF = false;
LOG.debug("Reset - First segment offset is " + firstSegmentOffset +
" Segment List Size is " + segmentList.size());
}
public boolean hasNext() throws IOException {
if (lastSegmentEOF) {
return false;
}
// We read the next KV from the cache to decide if there is any left.
// Since hasNext can be called several times before the actual call to
// next(), we use hasMore to avoid extra reads. hasMore is set to false
// when the user actually consumes this record in next()
if (hasMore) {
return true;
}
Segment<K,V> seg = segmentList.get(readSegmentIndex);
// Mark the current position. This would be set to currentKVOffset
// when the user consumes this record in next().
nextKVOffset = (int) seg.getActualPosition();
if (seg.nextRawKey()) {
currentKey = seg.getKey();
seg.getValue(currentValue);
hasMore = true;
return true;
} else {
if (!seg.inMemory()) {
seg.closeReader();
}
}
// If this is the last segment, mark the lastSegmentEOF flag and return
if (readSegmentIndex == segmentList.size() - 1) {
nextKVOffset = -1;
lastSegmentEOF = true;
return false;
}
nextKVOffset = 0;
readSegmentIndex ++;
Segment<K,V> nextSegment = segmentList.get(readSegmentIndex);
// We possibly are moving from a memory segment to a disk segment.
// Reset so that we do not corrupt the in-memory segment buffer.
// See HADOOP-5494
if (!nextSegment.inMemory()) {
currentValue.reset(currentDiskValue.getData(),
currentDiskValue.getLength());
nextSegment.init(null);
}
if (nextSegment.nextRawKey()) {
currentKey = nextSegment.getKey();
nextSegment.getValue(currentValue);
hasMore = true;
return true;
} else {
throw new IOException("New segment did not have even one K/V");
}
}
public void next() throws IOException {
if (!hasNext()) {
throw new NoSuchElementException("iterate past last value");
}
// Reset hasMore. See comment in hasNext()
hasMore = false;
currentKVOffset = nextKVOffset;
nextKVOffset = -1;
}
public DataInputBuffer nextValue() {
return currentValue;
}
public DataInputBuffer nextKey() {
return currentKey;
}
public void reinitialize() throws IOException {
if (segmentList.size() != 0) {
clearSegmentList();
}
memCache.reinitialize(true);
fileCache.reinitialize();
readSegmentIndex = firstSegmentOffset = 0;
currentKVOffset = 0;
nextKVOffset = -1;
hasMore = inReset = clearMarkFlag = false;
}
/**
   * This function is called by the ValuesIterator when a mark is called
* outside of a reset zone.
*/
public void exitResetMode() throws IOException {
inReset = false;
if (clearMarkFlag ) {
// If a flag was set to clear mark, do the reinit now.
// See clearMark()
reinitialize();
return;
}
if (!fileCache.isActive) {
memCache.reinitialize(false);
}
}
  /** Return the current underlying output stream so that the first key and
   * value bytes can be written directly from the value iterators.
   * @param length The length of the impending write
*/
public DataOutputStream getOutputStream(int length) throws IOException {
if (memCache.reserveSpace(length)) {
return memCache.dataOut;
} else {
fileCache.activate();
return fileCache.writer.getOutputStream();
}
}
/** This method is called by the valueIterators after writing the first
* key and value bytes to the BackupStore
   * @param length the number of bytes that were written
*/
public void updateCounters(int length) {
if (fileCache.isActive) {
fileCache.writer.updateCountersForExternalAppend(length);
} else {
memCache.usedSize += length;
}
}
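  // A sketch of the caller-side protocol for the two methods above: obtain
  // the stream, write exactly 'length' bytes of IFile-formatted data, then
  // account for them.
  //
  //   DataOutputStream out = backupStore.getOutputStream(length);
  //   // ... write 'length' bytes of key/value data to out ...
  //   backupStore.updateCounters(length);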
public void clearMark() throws IOException {
if (inReset) {
// If we are in the reset mode, we just mark a flag and come out
// The actual re initialization would be done when we exit the reset
// mode
clearMarkFlag = true;
} else {
reinitialize();
}
}
private void clearSegmentList() throws IOException {
for (Segment<K,V> segment: segmentList) {
long len = segment.getLength();
segment.close();
if (segment.inMemory()) {
memCache.unreserve(len);
}
}
segmentList.clear();
}
class MemoryCache {
private DataOutputBuffer dataOut;
private int blockSize;
private int usedSize;
private final BackupRamManager ramManager;
// Memory cache is made up of blocks.
private int defaultBlockSize = 1024 * 1024;
public MemoryCache(int maxSize) {
ramManager = new BackupRamManager(maxSize);
if (maxSize < defaultBlockSize) {
defaultBlockSize = maxSize;
}
}
public void unreserve(long len) {
ramManager.unreserve((int)len);
}
/**
* Re-initialize the memory cache.
*
* @param clearAll If true, re-initialize the ramManager also.
*/
void reinitialize(boolean clearAll) {
if (clearAll) {
ramManager.reinitialize();
}
int allocatedSize = createNewMemoryBlock(defaultBlockSize,
defaultBlockSize);
assert(allocatedSize == defaultBlockSize || allocatedSize == 0);
LOG.debug("Created a new mem block of " + allocatedSize);
}
private int createNewMemoryBlock(int requestedSize, int minSize) {
int allocatedSize = ramManager.reserve(requestedSize, minSize);
usedSize = 0;
if (allocatedSize == 0) {
dataOut = null;
blockSize = 0;
} else {
dataOut = new DataOutputBuffer(allocatedSize);
blockSize = allocatedSize;
}
return allocatedSize;
}
/**
     * This method determines if there is enough space left in the
     * memory cache to write the requested length plus space for
     * subsequent EOF markers.
* @param length
* @return true if enough space is available
*/
boolean reserveSpace(int length) throws IOException {
int availableSize = blockSize - usedSize;
if (availableSize >= length + EOF_MARKER_SIZE) {
return true;
}
// Not enough available. Close this block
assert (!inReset);
createInMemorySegment();
// Create a new block
int tmp = Math.max(length + EOF_MARKER_SIZE, defaultBlockSize);
availableSize = createNewMemoryBlock(tmp,
(length + EOF_MARKER_SIZE));
return (availableSize == 0) ? false : true;
}
boolean reserveSpace(DataInputBuffer key, DataInputBuffer value)
throws IOException {
int keyLength = key.getLength() - key.getPosition();
int valueLength = value.getLength() - value.getPosition();
int requestedSize = keyLength + valueLength +
WritableUtils.getVIntSize(keyLength) +
WritableUtils.getVIntSize(valueLength);
return reserveSpace(requestedSize);
}
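    // Worked example (illustrative): a 10 byte key and a 100 byte value need
    // 10 + 100 + 1 + 1 = 112 bytes, since each length encodes as a one-byte
    // vint; reserveSpace(int) additionally keeps EOF_MARKER_SIZE (18 bytes)
    // free for the closing EOF markers of the block.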
/**
* Write the key and value to the cache in the IFile format
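     * (record layout: vint(keyLength), vint(valueLength), key bytes,
     * value bytes)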
* @param key
* @param value
* @throws IOException
*/
public void write(DataInputBuffer key, DataInputBuffer value)
throws IOException {
int keyLength = key.getLength() - key.getPosition();
int valueLength = value.getLength() - value.getPosition();
WritableUtils.writeVInt(dataOut, keyLength);
WritableUtils.writeVInt(dataOut, valueLength);
dataOut.write(key.getData(), key.getPosition(), keyLength);
dataOut.write(value.getData(), value.getPosition(), valueLength);
usedSize += keyLength + valueLength +
WritableUtils.getVIntSize(keyLength) +
WritableUtils.getVIntSize(valueLength);
LOG.debug("ID: " + segmentList.size() + " WRITE TO MEM");
}
/**
* This method creates a memory segment from the existing buffer
* @throws IOException
*/
void createInMemorySegment () throws IOException {
// If nothing was written in this block because the record size
// was greater than the allocated block size, just return.
if (usedSize == 0) {
ramManager.unreserve(blockSize);
return;
}
      // reserveSpace would have ensured that there is enough space
// left for the EOF markers.
assert ((blockSize - usedSize) >= EOF_MARKER_SIZE);
WritableUtils.writeVInt(dataOut, IFile.EOF_MARKER);
WritableUtils.writeVInt(dataOut, IFile.EOF_MARKER);
usedSize += EOF_MARKER_SIZE;
ramManager.unreserve(blockSize - usedSize);
Reader<K, V> reader =
new org.apache.hadoop.mapreduce.task.reduce.InMemoryReader<K, V>(null,
(org.apache.hadoop.mapred.TaskAttemptID) tid,
dataOut.getData(), 0, usedSize, conf);
Segment<K, V> segment = new Segment<K, V>(reader, false);
segmentList.add(segment);
LOG.debug("Added Memory Segment to List. List Size is " +
segmentList.size());
}
}
class FileCache {
private LocalDirAllocator lDirAlloc;
private final Configuration conf;
private final FileSystem fs;
private boolean isActive = false;
private Path file = null;
private IFile.Writer<K,V> writer = null;
private int spillNumber = 0;
public FileCache(Configuration conf)
throws IOException {
this.conf = conf;
this.fs = FileSystem.getLocal(conf);
this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
}
void write(DataInputBuffer key, DataInputBuffer value)
throws IOException {
if (writer == null) {
// If spillNumber is 0, we should have called activate and not
// come here at all
assert (spillNumber != 0);
writer = createSpillFile();
}
writer.append(key, value);
LOG.debug("ID: " + segmentList.size() + " WRITE TO DISK");
}
void reinitialize() {
spillNumber = 0;
writer = null;
isActive = false;
}
void activate() throws IOException {
isActive = true;
writer = createSpillFile();
}
void createInDiskSegment() throws IOException {
assert (writer != null);
writer.close();
Segment<K,V> s = new Segment<K, V>(conf, fs, file, null, true);
writer = null;
segmentList.add(s);
LOG.debug("Disk Segment added to List. Size is " + segmentList.size());
}
boolean isActive() { return isActive; }
private Writer<K,V> createSpillFile() throws IOException {
Path tmp =
new Path(MRJobConfig.OUTPUT + "/backup_" + tid.getId() + "_"
+ (spillNumber++) + ".out");
LOG.info("Created file: " + tmp);
file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(),
-1, conf);
FSDataOutputStream out = fs.create(file);
out = CryptoUtils.wrapIfNecessary(conf, out);
return new Writer<K, V>(conf, out, null, null, null, null, true);
}
}
static class BackupRamManager implements RamManager {
private int availableSize = 0;
private final int maxSize;
public BackupRamManager(int size) {
availableSize = maxSize = size;
}
public boolean reserve(int requestedSize, InputStream in) {
// Not used
LOG.warn("Reserve(int, InputStream) not supported by BackupRamManager");
return false;
}
int reserve(int requestedSize) {
if (availableSize == 0) {
return 0;
}
int reservedSize = Math.min(requestedSize, availableSize);
availableSize -= reservedSize;
LOG.debug("Reserving: " + reservedSize + " Requested: " + requestedSize);
return reservedSize;
}
int reserve(int requestedSize, int minSize) {
if (availableSize < minSize) {
LOG.debug("No space available. Available: " + availableSize +
" MinSize: " + minSize);
return 0;
} else {
return reserve(requestedSize);
}
}
public void unreserve(int requestedSize) {
availableSize += requestedSize;
LOG.debug("Unreserving: " + requestedSize +
". Available: " + availableSize);
}
void reinitialize() {
availableSize = maxSize;
}
}
}
| 18,979 | 29.174881 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.text.NumberFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.util.Progressable;
/** A base class for {@link OutputFormat}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileOutputFormat<K, V> implements OutputFormat<K, V> {
@Deprecated
public static enum Counter {
BYTES_WRITTEN
}
/**
* Set whether the output of the job is compressed.
* @param conf the {@link JobConf} to modify
* @param compress should the output of the job be compressed?
*/
public static void setCompressOutput(JobConf conf, boolean compress) {
conf.setBoolean(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS, compress);
}
/**
* Is the job output compressed?
* @param conf the {@link JobConf} to look in
* @return <code>true</code> if the job output should be compressed,
* <code>false</code> otherwise
*/
public static boolean getCompressOutput(JobConf conf) {
return conf.getBoolean(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS, false);
}
/**
* Set the {@link CompressionCodec} to be used to compress job outputs.
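   *
   * <p>A minimal usage sketch; <code>GzipCodec</code> is just an example
   * codec:</p>
   * <p><blockquote><pre>
   * FileOutputFormat.setCompressOutput(job, true);
   * FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
   * </pre></blockquote>
   *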
* @param conf the {@link JobConf} to modify
* @param codecClass the {@link CompressionCodec} to be used to
* compress the job outputs
*/
public static void
setOutputCompressorClass(JobConf conf,
Class<? extends CompressionCodec> codecClass) {
setCompressOutput(conf, true);
conf.setClass(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS_CODEC, codecClass,
CompressionCodec.class);
}
/**
* Get the {@link CompressionCodec} for compressing the job outputs.
* @param conf the {@link JobConf} to look in
* @param defaultValue the {@link CompressionCodec} to return if not set
* @return the {@link CompressionCodec} to be used to compress the
* job outputs
* @throws IllegalArgumentException if the class was specified, but not found
*/
public static Class<? extends CompressionCodec>
getOutputCompressorClass(JobConf conf,
Class<? extends CompressionCodec> defaultValue) {
Class<? extends CompressionCodec> codecClass = defaultValue;
String name = conf.get(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS_CODEC);
if (name != null) {
try {
codecClass =
conf.getClassByName(name).asSubclass(CompressionCodec.class);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Compression codec " + name +
" was not found.", e);
}
}
return codecClass;
}
public abstract RecordWriter<K, V> getRecordWriter(FileSystem ignored,
JobConf job, String name,
Progressable progress)
throws IOException;
public void checkOutputSpecs(FileSystem ignored, JobConf job)
throws FileAlreadyExistsException,
InvalidJobConfException, IOException {
// Ensure that the output directory is set and not already there
Path outDir = getOutputPath(job);
if (outDir == null && job.getNumReduceTasks() != 0) {
throw new InvalidJobConfException("Output directory not set in JobConf.");
}
if (outDir != null) {
FileSystem fs = outDir.getFileSystem(job);
// normalize the output directory
outDir = fs.makeQualified(outDir);
setOutputPath(job, outDir);
// get delegation token for the outDir's file system
TokenCache.obtainTokensForNamenodes(job.getCredentials(),
new Path[] {outDir}, job);
// check its existence
if (fs.exists(outDir)) {
throw new FileAlreadyExistsException("Output directory " + outDir +
" already exists");
}
}
}
/**
* Set the {@link Path} of the output directory for the map-reduce job.
*
* @param conf The configuration of the job.
* @param outputDir the {@link Path} of the output directory for
* the map-reduce job.
*/
public static void setOutputPath(JobConf conf, Path outputDir) {
outputDir = new Path(conf.getWorkingDirectory(), outputDir);
conf.set(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.OUTDIR, outputDir.toString());
}
/**
* Set the {@link Path} of the task's temporary output directory
* for the map-reduce job.
*
* <p><i>Note</i>: Task output path is set by the framework.
* </p>
* @param conf The configuration of the job.
* @param outputDir the {@link Path} of the output directory
* for the map-reduce job.
*/
@Private
public static void setWorkOutputPath(JobConf conf, Path outputDir) {
outputDir = new Path(conf.getWorkingDirectory(), outputDir);
conf.set(JobContext.TASK_OUTPUT_DIR, outputDir.toString());
}
/**
* Get the {@link Path} to the output directory for the map-reduce job.
*
* @return the {@link Path} to the output directory for the map-reduce job.
* @see FileOutputFormat#getWorkOutputPath(JobConf)
*/
public static Path getOutputPath(JobConf conf) {
String name = conf.get(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.OUTDIR);
return name == null ? null: new Path(name);
}
/**
* Get the {@link Path} to the task's temporary output directory
* for the map-reduce job
*
* <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
*
* <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
* is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not
* a <code>FileOutputCommitter</code>, the task's temporary output
   * directory is the same as {@link #getOutputPath(JobConf)}, i.e.
   * <tt>${mapreduce.output.fileoutputformat.outputdir}</tt></p>
*
* <p>Some applications need to create/write-to side-files, which differ from
* the actual job-outputs.
*
* <p>In such cases there could be issues with 2 instances of the same TIP
* (running simultaneously e.g. speculative tasks) trying to open/write-to the
* same file (path) on HDFS. Hence the application-writer will have to pick
* unique names per task-attempt (e.g. using the attemptid, say
* <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
*
* <p>To get around this the Map-Reduce framework helps the application-writer
* out by maintaining a special
* <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>
* sub-directory for each task-attempt on HDFS where the output of the
* task-attempt goes. On successful completion of the task-attempt the files
* in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only)
* are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the
* framework discards the sub-directory of unsuccessful task-attempts. This
* is completely transparent to the application.</p>
*
* <p>The application-writer can take advantage of this by creating any
* side-files required in <tt>${mapreduce.task.output.dir}</tt> during execution
   * of their reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
   * framework will move them out similarly - thus they don't have to pick
* unique paths per task-attempt.</p>
*
* <p><i>Note</i>: the value of <tt>${mapreduce.task.output.dir}</tt> during
* execution of a particular task-attempt is actually
   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>, and this value is
* set by the map-reduce framework. So, just create any side-files in the
* path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
* task to take advantage of this feature.</p>
*
* <p>The entire discussion holds true for maps of jobs with
* reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
* goes directly to HDFS.</p>
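   *
   * <p>A minimal sketch of writing such a side-file from within a task; the
   * file name <tt>side-data</tt> is illustrative only:</p>
   * <p><blockquote><pre>
   * Path workDir = FileOutputFormat.getWorkOutputPath(conf);
   * FileSystem fs = workDir.getFileSystem(conf);
   * FSDataOutputStream side = fs.create(new Path(workDir, "side-data"));
   * </pre></blockquote>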
*
* @return the {@link Path} to the task's temporary output directory
* for the map-reduce job.
*/
public static Path getWorkOutputPath(JobConf conf) {
String name = conf.get(JobContext.TASK_OUTPUT_DIR);
return name == null ? null: new Path(name);
}
/**
* Helper function to create the task's temporary output directory and
* return the path to the task's output file.
*
* @param conf job-configuration
* @param name temporary task-output filename
* @return path to the task's temporary output file
* @throws IOException
*/
public static Path getTaskOutputPath(JobConf conf, String name)
throws IOException {
// ${mapred.out.dir}
Path outputPath = getOutputPath(conf);
if (outputPath == null) {
throw new IOException("Undefined job output-path");
}
OutputCommitter committer = conf.getOutputCommitter();
Path workPath = outputPath;
TaskAttemptContext context =
new TaskAttemptContextImpl(conf,
TaskAttemptID.forName(conf.get(
JobContext.TASK_ATTEMPT_ID)));
if (committer instanceof FileOutputCommitter) {
workPath = ((FileOutputCommitter)committer).getWorkPath(context,
outputPath);
}
// ${mapred.out.dir}/_temporary/_${taskid}/${name}
return new Path(workPath, name);
}
/**
* Helper function to generate a name that is unique for the task.
*
* <p>The generated name can be used to create custom files from within the
   * different tasks for the job; the names for different tasks will not collide
* with each other.</p>
*
* <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
   * reduces, and the task partition number. For example, given a name 'test'
   * running on the first map of the job, the generated name will be
* 'test-m-00000'.</p>
*
* @param conf the configuration for the job.
* @param name the name to make unique.
   * @return a unique name across all tasks of the job.
*/
public static String getUniqueName(JobConf conf, String name) {
int partition = conf.getInt(JobContext.TASK_PARTITION, -1);
if (partition == -1) {
throw new IllegalArgumentException(
"This method can only be called from within a Job");
}
String taskType = conf.getBoolean(JobContext.TASK_ISMAP,
JobContext.DEFAULT_TASK_ISMAP) ? "m" : "r";
NumberFormat numberFormat = NumberFormat.getInstance();
numberFormat.setMinimumIntegerDigits(5);
numberFormat.setGroupingUsed(false);
return name + "-" + taskType + "-" + numberFormat.format(partition);
}
/**
* Helper function to generate a {@link Path} for a file that is unique for
* the task within the job output directory.
*
* <p>The path can be used to create custom files from within the map and
* reduce tasks. The path name will be unique for each task. The path parent
   * will be the job output directory.</p>
*
* <p>This method uses the {@link #getUniqueName} method to make the file name
* unique for the task.</p>
*
* @param conf the configuration for the job.
* @param name the name for the file.
   * @return a unique path across all tasks of the job.
*/
public static Path getPathForCustomFile(JobConf conf, String name) {
return new Path(getWorkOutputPath(conf), getUniqueName(conf, name));
}
}
| 12,882 | 39.38558 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SortedRanges.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Iterator;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Writable;
/**
* Keeps the Ranges sorted by startIndex.
* The added ranges are always ensured to be non-overlapping.
* Provides the SkipRangeIterator, which skips the Ranges
* stored in this object.
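 *
 * A small illustrative sketch (the API is package-internal):
 * <pre>
 * SortedRanges ranges = new SortedRanges();
 * ranges.add(new SortedRanges.Range(2, 3));            // covers indices 2,3,4
 * SortedRanges.SkipRangeIterator it = ranges.skipRangeIterator();
 * long a = it.next();   // 0
 * long b = it.next();   // 1
 * long c = it.next();   // 5 - indices 2..4 are skipped
 * </pre>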
*/
class SortedRanges implements Writable{
private static final Log LOG =
LogFactory.getLog(SortedRanges.class);
private TreeSet<Range> ranges = new TreeSet<Range>();
private long indicesCount;
/**
   * Get an Iterator which skips the stored ranges.
   * The Iterator.next() call returns the index starting from 0.
* @return SkipRangeIterator
*/
synchronized SkipRangeIterator skipRangeIterator(){
return new SkipRangeIterator(ranges.iterator());
}
/**
   * Get the number of indices stored in the ranges.
* @return indices count
*/
synchronized long getIndicesCount() {
return indicesCount;
}
/**
* Get the sorted set of ranges.
* @return ranges
*/
synchronized SortedSet<Range> getRanges() {
return ranges;
}
/**
* Add the range indices. It is ensured that the added range
* doesn't overlap the existing ranges. If it overlaps, the
   * existing overlapping ranges are removed and a single range
   * covering the union of the removed ranges and this range
   * is added. For example, adding [2,5) to a set already containing
   * [4,8) results in the single range [2,8).
   * If the range has 0 length, this method does nothing.
* @param range Range to be added.
*/
synchronized void add(Range range){
if(range.isEmpty()) {
return;
}
long startIndex = range.getStartIndex();
long endIndex = range.getEndIndex();
//make sure that there are no overlapping ranges
SortedSet<Range> headSet = ranges.headSet(range);
if(headSet.size()>0) {
Range previousRange = headSet.last();
LOG.debug("previousRange "+previousRange);
if(startIndex<previousRange.getEndIndex()) {
//previousRange overlaps this range
//remove the previousRange
if(ranges.remove(previousRange)) {
indicesCount-=previousRange.getLength();
}
//expand this range
startIndex = previousRange.getStartIndex();
endIndex = endIndex>=previousRange.getEndIndex() ?
endIndex : previousRange.getEndIndex();
}
}
Iterator<Range> tailSetIt = ranges.tailSet(range).iterator();
while(tailSetIt.hasNext()) {
Range nextRange = tailSetIt.next();
LOG.debug("nextRange "+nextRange +" startIndex:"+startIndex+
" endIndex:"+endIndex);
if(endIndex>=nextRange.getStartIndex()) {
//nextRange overlaps this range
//remove the nextRange
tailSetIt.remove();
indicesCount-=nextRange.getLength();
if(endIndex<nextRange.getEndIndex()) {
//expand this range
endIndex = nextRange.getEndIndex();
break;
}
} else {
break;
}
}
add(startIndex,endIndex);
}
/**
* Remove the range indices. If this range is
* found in existing ranges, the existing ranges
* are shrunk.
   * If the range has 0 length, this method does nothing.
* @param range Range to be removed.
*/
synchronized void remove(Range range) {
if(range.isEmpty()) {
return;
}
long startIndex = range.getStartIndex();
long endIndex = range.getEndIndex();
//make sure that there are no overlapping ranges
SortedSet<Range> headSet = ranges.headSet(range);
if(headSet.size()>0) {
Range previousRange = headSet.last();
LOG.debug("previousRange "+previousRange);
if(startIndex<previousRange.getEndIndex()) {
//previousRange overlaps this range
//narrow down the previousRange
if(ranges.remove(previousRange)) {
indicesCount-=previousRange.getLength();
LOG.debug("removed previousRange "+previousRange);
}
add(previousRange.getStartIndex(), startIndex);
if(endIndex<=previousRange.getEndIndex()) {
add(endIndex, previousRange.getEndIndex());
}
}
}
Iterator<Range> tailSetIt = ranges.tailSet(range).iterator();
while(tailSetIt.hasNext()) {
Range nextRange = tailSetIt.next();
LOG.debug("nextRange "+nextRange +" startIndex:"+startIndex+
" endIndex:"+endIndex);
if(endIndex>nextRange.getStartIndex()) {
//nextRange overlaps this range
//narrow down the nextRange
tailSetIt.remove();
indicesCount-=nextRange.getLength();
if(endIndex<nextRange.getEndIndex()) {
add(endIndex, nextRange.getEndIndex());
break;
}
} else {
break;
}
}
}
private void add(long start, long end) {
if(end>start) {
Range recRange = new Range(start, end-start);
ranges.add(recRange);
indicesCount+=recRange.getLength();
LOG.debug("added "+recRange);
}
}
public synchronized void readFields(DataInput in) throws IOException {
indicesCount = in.readLong();
ranges = new TreeSet<Range>();
int size = in.readInt();
for(int i=0;i<size;i++) {
Range range = new Range();
range.readFields(in);
ranges.add(range);
}
}
public synchronized void write(DataOutput out) throws IOException {
out.writeLong(indicesCount);
out.writeInt(ranges.size());
Iterator<Range> it = ranges.iterator();
while(it.hasNext()) {
Range range = it.next();
range.write(out);
}
}
public String toString() {
StringBuffer sb = new StringBuffer();
Iterator<Range> it = ranges.iterator();
while(it.hasNext()) {
Range range = it.next();
sb.append(range.toString()+"\n");
}
return sb.toString();
}
/**
* Index Range. Comprises of start index and length.
* A Range can be of 0 length also. The Range stores indices
* of type long.
*/
static class Range implements Comparable<Range>, Writable{
private long startIndex;
private long length;
Range(long startIndex, long length) {
if(length<0) {
throw new RuntimeException("length can't be negative");
}
this.startIndex = startIndex;
this.length = length;
}
Range() {
this(0,0);
}
/**
     * Get the start index. Start index is inclusive.
* @return startIndex.
*/
long getStartIndex() {
return startIndex;
}
/**
* Get the end index. End index is exclusive.
* @return endIndex.
*/
long getEndIndex() {
return startIndex + length;
}
/**
* Get Length.
* @return length
*/
long getLength() {
return length;
}
/**
* Range is empty if its length is zero.
* @return <code>true</code> if empty
* <code>false</code> otherwise.
*/
boolean isEmpty() {
return length==0;
}
public boolean equals(Object o) {
if (o instanceof Range) {
Range range = (Range)o;
return startIndex==range.startIndex &&
length==range.length;
}
return false;
}
public int hashCode() {
return Long.valueOf(startIndex).hashCode() +
Long.valueOf(length).hashCode();
}
public int compareTo(Range o) {
      // Ensure sgn(x.compareTo(y)) == -sgn(y.compareTo(x))
return this.startIndex < o.startIndex ? -1 :
(this.startIndex > o.startIndex ? 1 :
(this.length < o.length ? -1 :
(this.length > o.length ? 1 : 0)));
}
public void readFields(DataInput in) throws IOException {
startIndex = in.readLong();
length = in.readLong();
}
public void write(DataOutput out) throws IOException {
out.writeLong(startIndex);
out.writeLong(length);
}
public String toString() {
return startIndex +":" + length;
}
}
/**
* Index Iterator which skips the stored ranges.
*/
static class SkipRangeIterator implements Iterator<Long> {
Iterator<Range> rangeIterator;
Range range = new Range();
long next = -1;
/**
* Constructor
* @param rangeIterator the iterator which gives the ranges.
*/
SkipRangeIterator(Iterator<Range> rangeIterator) {
this.rangeIterator = rangeIterator;
doNext();
}
/**
* Returns true till the index reaches Long.MAX_VALUE.
     * @return <code>true</code> if the next index exists,
* <code>false</code> otherwise.
*/
public synchronized boolean hasNext() {
return next<Long.MAX_VALUE;
}
/**
* Get the next available index. The index starts from 0.
* @return next index
*/
public synchronized Long next() {
long ci = next;
doNext();
return ci;
}
private void doNext() {
next++;
LOG.debug("currentIndex "+next +" "+range);
skipIfInRange();
while(next>=range.getEndIndex() && rangeIterator.hasNext()) {
range = rangeIterator.next();
skipIfInRange();
}
}
private void skipIfInRange() {
if(next>=range.getStartIndex() &&
next<range.getEndIndex()) {
//need to skip the range
LOG.warn("Skipping index " + next +"-" + range.getEndIndex());
next = range.getEndIndex();
}
}
/**
* Get whether all the ranges have been skipped.
* @return <code>true</code> if all ranges have been skipped.
* <code>false</code> otherwise.
*/
synchronized boolean skippedAllRanges() {
return !rangeIterator.hasNext() && next>range.getEndIndex();
}
/**
* Remove is not supported. Doesn't apply.
*/
public void remove() {
throw new UnsupportedOperationException("remove not supported.");
}
}
}
| 10,894 | 27.298701 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Utility class to fetch block locations for specified Input paths using a
* configured number of threads.
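 *
 * <p>A minimal usage sketch; <code>inputDirs</code> and
 * <code>inputFilter</code> are assumed to be supplied by the caller:</p>
 * <pre>
 * LocatedFileStatusFetcher fetcher =
 *     new LocatedFileStatusFetcher(conf, inputDirs, true, inputFilter, false);
 * Iterable&lt;FileStatus&gt; statuses = fetcher.getFileStatuses();
 * </pre>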
*/
@Private
public class LocatedFileStatusFetcher {
private final Path[] inputDirs;
private final PathFilter inputFilter;
private final Configuration conf;
private final boolean recursive;
private final boolean newApi;
private final ExecutorService rawExec;
private final ListeningExecutorService exec;
private final BlockingQueue<List<FileStatus>> resultQueue;
private final List<IOException> invalidInputErrors = new LinkedList<IOException>();
private final ProcessInitialInputPathCallback processInitialInputPathCallback =
new ProcessInitialInputPathCallback();
private final ProcessInputDirCallback processInputDirCallback =
new ProcessInputDirCallback();
private final AtomicInteger runningTasks = new AtomicInteger(0);
private final ReentrantLock lock = new ReentrantLock();
private final Condition condition = lock.newCondition();
private volatile Throwable unknownError;
/**
* @param conf configuration for the job
* @param dirs the initial list of paths
 * @param recursive whether to traverse the paths recursively
* @param inputFilter inputFilter to apply to the resulting paths
* @param newApi whether using the mapred or mapreduce API
* @throws InterruptedException
* @throws IOException
*/
public LocatedFileStatusFetcher(Configuration conf, Path[] dirs,
boolean recursive, PathFilter inputFilter, boolean newApi) throws InterruptedException,
IOException {
int numThreads = conf.getInt(FileInputFormat.LIST_STATUS_NUM_THREADS,
FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
rawExec = Executors.newFixedThreadPool(
numThreads,
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("GetFileInfo #%d").build());
exec = MoreExecutors.listeningDecorator(rawExec);
resultQueue = new LinkedBlockingQueue<List<FileStatus>>();
this.conf = conf;
this.inputDirs = dirs;
this.recursive = recursive;
this.inputFilter = inputFilter;
this.newApi = newApi;
}
/**
* Start executing and return FileStatuses based on the parameters specified
* @return fetched file statuses
* @throws InterruptedException
* @throws IOException
*/
public Iterable<FileStatus> getFileStatuses() throws InterruptedException,
IOException {
// Increment to make sure a race between the first thread completing and the
// rest being scheduled does not lead to a termination.
runningTasks.incrementAndGet();
for (Path p : inputDirs) {
runningTasks.incrementAndGet();
ListenableFuture<ProcessInitialInputPathCallable.Result> future = exec
.submit(new ProcessInitialInputPathCallable(p, conf, inputFilter));
Futures.addCallback(future, processInitialInputPathCallback);
}
runningTasks.decrementAndGet();
lock.lock();
try {
while (runningTasks.get() != 0 && unknownError == null) {
condition.await();
}
} finally {
lock.unlock();
}
this.exec.shutdownNow();
if (this.unknownError != null) {
if (this.unknownError instanceof Error) {
throw (Error) this.unknownError;
} else if (this.unknownError instanceof RuntimeException) {
throw (RuntimeException) this.unknownError;
} else if (this.unknownError instanceof IOException) {
throw (IOException) this.unknownError;
} else if (this.unknownError instanceof InterruptedException) {
throw (InterruptedException) this.unknownError;
} else {
throw new IOException(this.unknownError);
}
}
if (this.invalidInputErrors.size() != 0) {
if (this.newApi) {
throw new org.apache.hadoop.mapreduce.lib.input.InvalidInputException(
invalidInputErrors);
} else {
throw new InvalidInputException(invalidInputErrors);
}
}
return Iterables.concat(resultQueue);
}
/**
* Collect misconfigured Input errors. Errors while actually reading file info
* are reported immediately
*/
private void registerInvalidInputError(List<IOException> errors) {
synchronized (this) {
this.invalidInputErrors.addAll(errors);
}
}
/**
   * Register fatal errors - for example an IOException while accessing a file,
   * or a full execution queue.
*/
private void registerError(Throwable t) {
lock.lock();
try {
      // Record only the first fatal error and wake up the waiting thread.
      if (unknownError == null) {
unknownError = t;
condition.signal();
}
} finally {
lock.unlock();
}
}
private void decrementRunningAndCheckCompletion() {
lock.lock();
try {
if (runningTasks.decrementAndGet() == 0) {
condition.signal();
}
} finally {
lock.unlock();
}
}
/**
   * Retrieves block locations for the given {@link FileStatus}, and adds
* additional paths to the process queue if required.
*/
private static class ProcessInputDirCallable implements
Callable<ProcessInputDirCallable.Result> {
private final FileSystem fs;
private final FileStatus fileStatus;
private final boolean recursive;
private final PathFilter inputFilter;
ProcessInputDirCallable(FileSystem fs, FileStatus fileStatus,
boolean recursive, PathFilter inputFilter) {
this.fs = fs;
this.fileStatus = fileStatus;
this.recursive = recursive;
this.inputFilter = inputFilter;
}
@Override
public Result call() throws Exception {
Result result = new Result();
result.fs = fs;
if (fileStatus.isDirectory()) {
RemoteIterator<LocatedFileStatus> iter = fs
.listLocatedStatus(fileStatus.getPath());
while (iter.hasNext()) {
LocatedFileStatus stat = iter.next();
if (inputFilter.accept(stat.getPath())) {
if (recursive && stat.isDirectory()) {
result.dirsNeedingRecursiveCalls.add(stat);
} else {
result.locatedFileStatuses.add(stat);
}
}
}
} else {
result.locatedFileStatuses.add(fileStatus);
}
return result;
}
private static class Result {
private List<FileStatus> locatedFileStatuses = new LinkedList<FileStatus>();
private List<FileStatus> dirsNeedingRecursiveCalls = new LinkedList<FileStatus>();
private FileSystem fs;
}
}
/**
* The callback handler to handle results generated by
* {@link ProcessInputDirCallable}. This populates the final result set.
*
*/
private class ProcessInputDirCallback implements
FutureCallback<ProcessInputDirCallable.Result> {
@Override
public void onSuccess(ProcessInputDirCallable.Result result) {
try {
if (result.locatedFileStatuses.size() != 0) {
resultQueue.add(result.locatedFileStatuses);
}
if (result.dirsNeedingRecursiveCalls.size() != 0) {
for (FileStatus fileStatus : result.dirsNeedingRecursiveCalls) {
runningTasks.incrementAndGet();
ListenableFuture<ProcessInputDirCallable.Result> future = exec
.submit(new ProcessInputDirCallable(result.fs, fileStatus,
recursive, inputFilter));
Futures.addCallback(future, processInputDirCallback);
}
}
decrementRunningAndCheckCompletion();
} catch (Throwable t) { // Error within the callback itself.
registerError(t);
}
}
@Override
public void onFailure(Throwable t) {
// Any generated exceptions. Leads to immediate termination.
registerError(t);
}
}
/**
* Processes an initial Input Path pattern through the globber and PathFilter
* to generate a list of files which need further processing.
*/
private static class ProcessInitialInputPathCallable implements
Callable<ProcessInitialInputPathCallable.Result> {
private final Path path;
private final Configuration conf;
private final PathFilter inputFilter;
public ProcessInitialInputPathCallable(Path path, Configuration conf,
PathFilter pathFilter) {
this.path = path;
this.conf = conf;
this.inputFilter = pathFilter;
}
@Override
public Result call() throws Exception {
Result result = new Result();
FileSystem fs = path.getFileSystem(conf);
result.fs = fs;
FileStatus[] matches = fs.globStatus(path, inputFilter);
if (matches == null) {
result.addError(new IOException("Input path does not exist: " + path));
} else if (matches.length == 0) {
result.addError(new IOException("Input Pattern " + path
+ " matches 0 files"));
} else {
result.matchedFileStatuses = matches;
}
return result;
}
private static class Result {
private List<IOException> errors;
private FileStatus[] matchedFileStatuses;
private FileSystem fs;
void addError(IOException ioe) {
if (errors == null) {
errors = new LinkedList<IOException>();
}
errors.add(ioe);
}
}
}
/**
* The callback handler to handle results generated by
* {@link ProcessInitialInputPathCallable}
*
*/
private class ProcessInitialInputPathCallback implements
FutureCallback<ProcessInitialInputPathCallable.Result> {
@Override
public void onSuccess(ProcessInitialInputPathCallable.Result result) {
try {
if (result.errors != null) {
registerInvalidInputError(result.errors);
}
if (result.matchedFileStatuses != null) {
for (FileStatus matched : result.matchedFileStatuses) {
runningTasks.incrementAndGet();
ListenableFuture<ProcessInputDirCallable.Result> future = exec
.submit(new ProcessInputDirCallable(result.fs, matched,
recursive, inputFilter));
Futures.addCallback(future, processInputDirCallback);
}
}
decrementRunningAndCheckCompletion();
} catch (Throwable t) { // Exception within the callback
registerError(t);
}
}
@Override
public void onFailure(Throwable t) {
// Any generated exceptions. Leads to immediate termination.
registerError(t);
}
}
}
| 12,489 | 32.665768 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* <code>JobClient</code> is the primary interface for the user-job to interact
* with the cluster.
*
* <code>JobClient</code> provides facilities to submit jobs, track their
* progress, access component-tasks' reports/logs, get the Map-Reduce cluster
* status information etc.
*
* <p>The job submission process involves:
* <ol>
* <li>
* Checking the input and output specifications of the job.
* </li>
* <li>
* Computing the {@link InputSplit}s for the job.
* </li>
* <li>
* Setup the requisite accounting information for the {@link DistributedCache}
* of the job, if necessary.
* </li>
* <li>
* Copying the job's jar and configuration to the map-reduce system directory
* on the distributed file-system.
* </li>
* <li>
* Submitting the job to the cluster and optionally monitoring
 *   its status.
* </li>
* </ol>
*
* Normally the user creates the application, describes various facets of the
* job via {@link JobConf} and then uses the <code>JobClient</code> to submit
* the job and monitor its progress.
*
* <p>Here is an example on how to use <code>JobClient</code>:</p>
* <p><blockquote><pre>
* // Create a new JobConf
* JobConf job = new JobConf(new Configuration(), MyJob.class);
*
* // Specify various job-specific parameters
* job.setJobName("myjob");
*
* job.setInputPath(new Path("in"));
* job.setOutputPath(new Path("out"));
*
* job.setMapperClass(MyJob.MyMapper.class);
* job.setReducerClass(MyJob.MyReducer.class);
*
* // Submit the job, then poll for progress until the job is complete
* JobClient.runJob(job);
* </pre></blockquote>
*
* <b id="JobControl">Job Control</b>
*
* <p>At times clients would chain map-reduce jobs to accomplish complex tasks
* which cannot be done via a single map-reduce job. This is fairly easy since
* the output of the job, typically, goes to distributed file-system and that
* can be used as the input for the next job.</p>
*
* <p>However, this also means that the onus on ensuring jobs are complete
* (success/failure) lies squarely on the clients. In such situations the
* various job-control options are:
* <ol>
* <li>
* {@link #runJob(JobConf)} : submits the job and returns only after
* the job has completed.
* </li>
* <li>
 *   {@link #submitJob(JobConf)} : only submits the job; the client then
 *   polls the returned handle to the {@link RunningJob} to query status
 *   and make scheduling decisions (see the sketch after this list).
* </li>
* <li>
* {@link JobConf#setJobEndNotificationURI(String)} : setup a notification
* on job-completion, thus avoiding polling.
* </li>
* </ol>
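 *
 * <p>For instance, a sketch of the second option (submit, then poll):</p>
 * <p><blockquote><pre>
 * JobClient jc = new JobClient(job);
 * RunningJob running = jc.submitJob(job);
 * while (!running.isComplete()) {
 *   Thread.sleep(5000);
 * }
 * </pre></blockquote>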
*
* @see JobConf
* @see ClusterStatus
* @see Tool
* @see DistributedCache
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobClient extends CLI {
@InterfaceAudience.Private
public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
"mapreduce.jobclient.retry.policy.enabled";
@InterfaceAudience.Private
public static final boolean MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_DEFAULT =
false;
@InterfaceAudience.Private
public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_KEY =
"mapreduce.jobclient.retry.policy.spec";
@InterfaceAudience.Private
public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
"10000,6,60000,10"; // t1,n1,t2,n2,...
public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED;
private int maxRetry = MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES;
private long retryInterval =
MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL;
static{
ConfigUtil.loadResources();
}
/**
* A NetworkedJob is an implementation of RunningJob. It holds
* a JobProfile object to provide some info, and interacts with the
* remote service to provide certain functionality.
*/
static class NetworkedJob implements RunningJob {
Job job;
/**
* We store a JobProfile and a timestamp for when we last
* acquired the job profile. If the job is null, then we cannot
* perform any of the tasks. The job might be null if the cluster
* has completely forgotten about the job. (eg, 24 hours after the
* job completes.)
*/
public NetworkedJob(JobStatus status, Cluster cluster) throws IOException {
this(status, cluster, new JobConf(status.getJobFile()));
}
private NetworkedJob(JobStatus status, Cluster cluster, JobConf conf)
throws IOException {
this(Job.getInstance(cluster, status, conf));
}
public NetworkedJob(Job job) throws IOException {
this.job = job;
}
public Configuration getConfiguration() {
return job.getConfiguration();
}
/**
* An identifier for the job
*/
public JobID getID() {
return JobID.downgrade(job.getJobID());
}
/** @deprecated This method is deprecated and will be removed. Applications should
* rather use {@link #getID()}.*/
@Deprecated
public String getJobID() {
return getID().toString();
}
/**
* The user-specified job name
*/
public String getJobName() {
return job.getJobName();
}
/**
* The name of the job file
*/
public String getJobFile() {
return job.getJobFile();
}
/**
* A URL where the job's status can be seen
*/
public String getTrackingURL() {
return job.getTrackingURL();
}
/**
* A float between 0.0 and 1.0, indicating the % of map work
* completed.
*/
public float mapProgress() throws IOException {
return job.mapProgress();
}
/**
* A float between 0.0 and 1.0, indicating the % of reduce work
* completed.
*/
public float reduceProgress() throws IOException {
return job.reduceProgress();
}
/**
* A float between 0.0 and 1.0, indicating the % of cleanup work
* completed.
*/
public float cleanupProgress() throws IOException {
try {
return job.cleanupProgress();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* A float between 0.0 and 1.0, indicating the % of setup work
* completed.
*/
public float setupProgress() throws IOException {
return job.setupProgress();
}
/**
* Returns immediately whether the whole job is done yet or not.
*/
public synchronized boolean isComplete() throws IOException {
return job.isComplete();
}
/**
* True iff job completed successfully.
*/
public synchronized boolean isSuccessful() throws IOException {
return job.isSuccessful();
}
/**
* Blocks until the job is finished
*/
public void waitForCompletion() throws IOException {
try {
job.waitForCompletion(false);
} catch (InterruptedException ie) {
throw new IOException(ie);
} catch (ClassNotFoundException ce) {
throw new IOException(ce);
}
}
/**
* Tells the service to get the state of the current job.
*/
public synchronized int getJobState() throws IOException {
try {
return job.getJobState().getValue();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Tells the service to terminate the current job.
*/
public synchronized void killJob() throws IOException {
job.killJob();
}
/** Set the priority of the job.
* @param priority new priority of the job.
*/
public synchronized void setJobPriority(String priority)
throws IOException {
try {
job.setPriority(
org.apache.hadoop.mapreduce.JobPriority.valueOf(priority));
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Kill indicated task attempt.
* @param taskId the id of the task to kill.
* @param shouldFail if true the task is failed and added to failed tasks list, otherwise
* it is just killed, w/o affecting job failure status.
*/
public synchronized void killTask(TaskAttemptID taskId,
boolean shouldFail) throws IOException {
if (shouldFail) {
job.failTask(taskId);
} else {
job.killTask(taskId);
}
}
/** @deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}*/
@Deprecated
public synchronized void killTask(String taskId, boolean shouldFail) throws IOException {
killTask(TaskAttemptID.forName(taskId), shouldFail);
}
/**
* Fetch task completion events from cluster for this job.
*/
public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
int startFrom) throws IOException {
try {
org.apache.hadoop.mapreduce.TaskCompletionEvent[] acls =
job.getTaskCompletionEvents(startFrom, 10);
TaskCompletionEvent[] ret = new TaskCompletionEvent[acls.length];
for (int i = 0 ; i < acls.length; i++ ) {
ret[i] = TaskCompletionEvent.downgrade(acls[i]);
}
return ret;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Dump stats to screen
*/
@Override
public String toString() {
return job.toString();
}
/**
* Returns the counters for this job
*/
public Counters getCounters() throws IOException {
Counters result = null;
org.apache.hadoop.mapreduce.Counters temp = job.getCounters();
if(temp != null) {
result = Counters.downgrade(temp);
}
return result;
}
@Override
public String[] getTaskDiagnostics(TaskAttemptID id) throws IOException {
try {
return job.getTaskDiagnostics(id);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
public String getHistoryUrl() throws IOException {
try {
return job.getHistoryUrl();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
public boolean isRetired() throws IOException {
try {
return job.isRetired();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
boolean monitorAndPrintJob() throws IOException, InterruptedException {
return job.monitorAndPrintJob();
}
@Override
public String getFailureInfo() throws IOException {
try {
return job.getStatus().getFailureInfo();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
@Override
public JobStatus getJobStatus() throws IOException {
try {
return JobStatus.downgrade(job.getStatus());
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
}
/**
* Ugi of the client. We store this ugi when the client is created and
* then make sure that the same ugi is used to run the various protocols.
*/
UserGroupInformation clientUgi;
/**
* Create a job client.
*/
public JobClient() {
}
/**
* Build a job client with the given {@link JobConf}, and connect to the
* default cluster
*
* @param conf the job configuration.
* @throws IOException
*/
public JobClient(JobConf conf) throws IOException {
init(conf);
}
/**
* Build a job client with the given {@link Configuration},
* and connect to the default cluster
*
* @param conf the configuration.
* @throws IOException
*/
public JobClient(Configuration conf) throws IOException {
init(new JobConf(conf));
}
/**
* Connect to the default cluster
* @param conf the job configuration.
* @throws IOException
*/
public void init(JobConf conf) throws IOException {
setConf(conf);
cluster = new Cluster(conf);
clientUgi = UserGroupInformation.getCurrentUser();
maxRetry = conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
retryInterval =
conf.getLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL,
MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL);
}
/**
* Build a job client, connect to the indicated job tracker.
*
* @param jobTrackAddr the job tracker to connect to.
* @param conf configuration.
*/
public JobClient(InetSocketAddress jobTrackAddr,
Configuration conf) throws IOException {
cluster = new Cluster(jobTrackAddr, conf);
clientUgi = UserGroupInformation.getCurrentUser();
}
/**
* Close the <code>JobClient</code>.
*/
public synchronized void close() throws IOException {
cluster.close();
}
/**
* Get a filesystem handle. We need this to prepare jobs
* for submission to the MapReduce system.
*
* @return the filesystem handle.
*/
public synchronized FileSystem getFs() throws IOException {
try {
return cluster.getFileSystem();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Get a handle to the Cluster
*/
public Cluster getClusterHandle() {
return cluster;
}
/**
* Submit a job to the MR system.
*
* This returns a handle to the {@link RunningJob} which can be used to track
* the running-job.
*
* @param jobFile the job configuration.
* @return a handle to the {@link RunningJob} which can be used to track the
* running-job.
* @throws FileNotFoundException
* @throws InvalidJobConfException
* @throws IOException
*/
public RunningJob submitJob(String jobFile) throws FileNotFoundException,
InvalidJobConfException,
IOException {
// Load in the submitted job details
JobConf job = new JobConf(jobFile);
return submitJob(job);
}
/**
* Submit a job to the MR system.
* This returns a handle to the {@link RunningJob} which can be used to track
* the running-job.
*
* @param conf the job configuration.
* @return a handle to the {@link RunningJob} which can be used to track the
* running-job.
* @throws FileNotFoundException
* @throws IOException
*/
public RunningJob submitJob(final JobConf conf) throws FileNotFoundException,
IOException {
return submitJobInternal(conf);
}
@InterfaceAudience.Private
public RunningJob submitJobInternal(final JobConf conf)
throws FileNotFoundException, IOException {
try {
conf.setBooleanIfUnset("mapred.mapper.new-api", false);
conf.setBooleanIfUnset("mapred.reducer.new-api", false);
Job job = clientUgi.doAs(new PrivilegedExceptionAction<Job> () {
@Override
public Job run() throws IOException, ClassNotFoundException,
InterruptedException {
Job job = Job.getInstance(conf);
job.submit();
return job;
}
});
// update our Cluster instance with the one created by Job for submission
// (we can't pass our Cluster instance to Job, since Job wraps the config
// instance, and the two configs would then diverge)
cluster = job.getCluster();
return new NetworkedJob(job);
} catch (InterruptedException ie) {
throw new IOException("interrupted", ie);
}
}
private Job getJobUsingCluster(final JobID jobid) throws IOException,
InterruptedException {
return clientUgi.doAs(new PrivilegedExceptionAction<Job>() {
public Job run() throws IOException, InterruptedException {
return cluster.getJob(jobid);
}
});
}
protected RunningJob getJobInner(final JobID jobid) throws IOException {
try {
Job job = getJobUsingCluster(jobid);
if (job != null) {
JobStatus status = JobStatus.downgrade(job.getStatus());
if (status != null) {
return new NetworkedJob(status, cluster,
new JobConf(job.getConfiguration()));
}
}
} catch (InterruptedException ie) {
throw new IOException(ie);
}
return null;
}
/**
* Get an {@link RunningJob} object to track an ongoing job. Returns
* null if the id does not correspond to any known job.
*
* @param jobid the jobid of the job.
* @return the {@link RunningJob} handle to track the job, null if the
* <code>jobid</code> doesn't correspond to any known job.
* @throws IOException
*/
public RunningJob getJob(final JobID jobid) throws IOException {
    for (int i = 0; i <= maxRetry; i++) {
      if (i > 0) {
        try {
          Thread.sleep(retryInterval);
        } catch (Exception e) {
          // ignore an interrupted back-off sleep and simply retry
        }
}
RunningJob job = getJobInner(jobid);
if (job != null) {
return job;
}
}
return null;
}
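  /*
   * A minimal lookup sketch, assuming a connected JobClient named "client";
   * the job id string is a placeholder:
   *
   *   RunningJob running = client.getJob(JobID.forName("job_1234567890123_0001"));
   *   if (running != null) {
   *     System.out.println("map " + running.mapProgress()
   *         + " reduce " + running.reduceProgress());
   *   }
   */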
/**@deprecated Applications should rather use {@link #getJob(JobID)}.
*/
@Deprecated
public RunningJob getJob(String jobid) throws IOException {
return getJob(JobID.forName(jobid));
}
private static final TaskReport[] EMPTY_TASK_REPORTS = new TaskReport[0];
/**
* Get the information of the current state of the map tasks of a job.
*
* @param jobId the job to query.
* @return the list of all of the map tips.
* @throws IOException
*/
public TaskReport[] getMapTaskReports(JobID jobId) throws IOException {
return getTaskReports(jobId, TaskType.MAP);
}
private TaskReport[] getTaskReports(final JobID jobId, TaskType type) throws
IOException {
try {
Job j = getJobUsingCluster(jobId);
if(j == null) {
return EMPTY_TASK_REPORTS;
}
return TaskReport.downgradeArray(j.getTaskReports(type));
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}*/
@Deprecated
public TaskReport[] getMapTaskReports(String jobId) throws IOException {
return getMapTaskReports(JobID.forName(jobId));
}
/**
* Get the information of the current state of the reduce tasks of a job.
*
* @param jobId the job to query.
* @return the list of all of the reduce tips.
* @throws IOException
*/
public TaskReport[] getReduceTaskReports(JobID jobId) throws IOException {
return getTaskReports(jobId, TaskType.REDUCE);
}
/**
* Get the information of the current state of the cleanup tasks of a job.
*
* @param jobId the job to query.
* @return the list of all of the cleanup tips.
* @throws IOException
*/
public TaskReport[] getCleanupTaskReports(JobID jobId) throws IOException {
return getTaskReports(jobId, TaskType.JOB_CLEANUP);
}
/**
* Get the information of the current state of the setup tasks of a job.
*
* @param jobId the job to query.
* @return the list of all of the setup tips.
* @throws IOException
*/
public TaskReport[] getSetupTaskReports(JobID jobId) throws IOException {
return getTaskReports(jobId, TaskType.JOB_SETUP);
}
/**@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}*/
@Deprecated
public TaskReport[] getReduceTaskReports(String jobId) throws IOException {
return getReduceTaskReports(JobID.forName(jobId));
}
/**
* Display the information about a job's tasks, of a particular type and
* in a particular state
*
* @param jobId the ID of the job
* @param type the type of the task (map/reduce/setup/cleanup)
* @param state the state of the task
* (pending/running/completed/failed/killed)
*/
public void displayTasks(final JobID jobId, String type, String state)
throws IOException {
try {
Job job = getJobUsingCluster(jobId);
super.displayTasks(job, type, state);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Get status information about the Map-Reduce cluster.
*
* @return the status information about the Map-Reduce cluster as an object
* of {@link ClusterStatus}.
* @throws IOException
*/
public ClusterStatus getClusterStatus() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
public ClusterStatus run() throws IOException, InterruptedException {
ClusterMetrics metrics = cluster.getClusterStatus();
return new ClusterStatus(metrics.getTaskTrackerCount(), metrics
.getBlackListedTaskTrackerCount(), cluster
.getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(),
metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
metrics.getReduceSlotCapacity(), cluster.getJobTrackerStatus(),
metrics.getDecommissionedTaskTrackerCount(), metrics
.getGrayListedTaskTrackerCount());
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
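  /*
   * A minimal sketch of reading the summary status, assuming a connected
   * JobClient named "client":
   *
   *   ClusterStatus status = client.getClusterStatus();
   *   System.out.println("trackers: " + status.getTaskTrackers()
   *       + ", map slots: " + status.getMaxMapTasks()
   *       + ", reduce slots: " + status.getMaxReduceTasks());
   */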
private Collection<String> arrayToStringList(TaskTrackerInfo[] objs) {
Collection<String> list = new ArrayList<String>();
for (TaskTrackerInfo info: objs) {
list.add(info.getTaskTrackerName());
}
return list;
}
private Collection<BlackListInfo> arrayToBlackListInfo(TaskTrackerInfo[] objs) {
Collection<BlackListInfo> list = new ArrayList<BlackListInfo>();
for (TaskTrackerInfo info: objs) {
BlackListInfo binfo = new BlackListInfo();
binfo.setTrackerName(info.getTaskTrackerName());
binfo.setReasonForBlackListing(info.getReasonForBlacklist());
binfo.setBlackListReport(info.getBlacklistReport());
list.add(binfo);
}
return list;
}
/**
* Get status information about the Map-Reduce cluster.
*
* @param detailed if true then get a detailed status including the
* tracker names
* @return the status information about the Map-Reduce cluster as an object
* of {@link ClusterStatus}.
* @throws IOException
*/
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
public ClusterStatus run() throws IOException, InterruptedException {
ClusterMetrics metrics = cluster.getClusterStatus();
return new ClusterStatus(arrayToStringList(cluster.getActiveTaskTrackers()),
arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
cluster.getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(),
metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
metrics.getReduceSlotCapacity(),
cluster.getJobTrackerStatus());
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Get the jobs that are not completed and not failed.
*
* @return array of {@link JobStatus} for the running/to-be-run jobs.
* @throws IOException
*/
public JobStatus[] jobsToComplete() throws IOException {
List<JobStatus> stats = new ArrayList<JobStatus>();
for (JobStatus stat : getAllJobs()) {
if (!stat.isJobComplete()) {
stats.add(stat);
}
}
return stats.toArray(new JobStatus[0]);
}
/**
* Get the jobs that are submitted.
*
* @return array of {@link JobStatus} for the submitted jobs.
* @throws IOException
*/
public JobStatus[] getAllJobs() throws IOException {
try {
org.apache.hadoop.mapreduce.JobStatus[] jobs =
clientUgi.doAs(new PrivilegedExceptionAction<
org.apache.hadoop.mapreduce.JobStatus[]> () {
public org.apache.hadoop.mapreduce.JobStatus[] run()
throws IOException, InterruptedException {
return cluster.getAllJobStatuses();
}
});
JobStatus[] stats = new JobStatus[jobs.length];
for (int i = 0; i < jobs.length; i++) {
stats[i] = JobStatus.downgrade(jobs[i]);
}
return stats;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Utility that submits a job, then polls for progress until the job is
* complete.
*
* @param job the job configuration.
* @throws IOException if the job fails
*/
public static RunningJob runJob(JobConf job) throws IOException {
JobClient jc = new JobClient(job);
RunningJob rj = jc.submitJob(job);
try {
if (!jc.monitorAndPrintJob(job, rj)) {
throw new IOException("Job failed!");
}
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
return rj;
}
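  /*
   * A minimal end-to-end sketch, assuming a driver class named MyJob and
   * input/output paths "in" and "out"; every name here is a placeholder:
   *
   *   JobConf conf = new JobConf(MyJob.class);
   *   conf.setJobName("myjob");
   *   FileInputFormat.setInputPaths(conf, new Path("in"));
   *   FileOutputFormat.setOutputPath(conf, new Path("out"));
   *   RunningJob rj = JobClient.runJob(conf);   // blocks until the job finishes
   */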
/**
* Monitor a job and print status in real-time as progress is made and tasks
* fail.
* @param conf the job's configuration
* @param job the job to track
* @return true if the job succeeded
* @throws IOException if communication to the JobTracker fails
*/
public boolean monitorAndPrintJob(JobConf conf,
RunningJob job
) throws IOException, InterruptedException {
return ((NetworkedJob)job).monitorAndPrintJob();
}
static String getTaskLogURL(TaskAttemptID taskId, String baseUrl) {
return (baseUrl + "/tasklog?plaintext=true&attemptid=" + taskId);
}
static Configuration getConfiguration(String jobTrackerSpec)
{
Configuration conf = new Configuration();
if (jobTrackerSpec != null) {
if (jobTrackerSpec.indexOf(":") >= 0) {
conf.set("mapred.job.tracker", jobTrackerSpec);
} else {
String classpathFile = "hadoop-" + jobTrackerSpec + ".xml";
URL validate = conf.getResource(classpathFile);
if (validate == null) {
throw new RuntimeException(classpathFile + " not found on CLASSPATH");
}
conf.addResource(classpathFile);
}
}
return conf;
}
/**
   * Sets the output filter for tasks. Only those tasks are printed whose
* output matches the filter.
* @param newValue task filter.
*/
@Deprecated
public void setTaskOutputFilter(TaskStatusFilter newValue){
this.taskOutputFilter = newValue;
}
/**
* Get the task output filter out of the JobConf.
*
* @param job the JobConf to examine.
* @return the filter level.
*/
public static TaskStatusFilter getTaskOutputFilter(JobConf job) {
return TaskStatusFilter.valueOf(job.get("jobclient.output.filter",
"FAILED"));
}
/**
* Modify the JobConf to set the task output filter.
*
* @param job the JobConf to modify.
* @param newValue the value to set.
*/
public static void setTaskOutputFilter(JobConf job,
TaskStatusFilter newValue) {
job.set("jobclient.output.filter", newValue.toString());
}
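  /*
   * A minimal configuration sketch, assuming "conf" is the job's JobConf; the
   * chosen filter level is arbitrary:
   *
   *   JobClient.setTaskOutputFilter(conf, TaskStatusFilter.ALL);
   *   TaskStatusFilter filter = JobClient.getTaskOutputFilter(conf); // ALL
   */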
/**
* Returns task output filter.
* @return task filter.
*/
@Deprecated
public TaskStatusFilter getTaskOutputFilter(){
return this.taskOutputFilter;
}
protected long getCounter(org.apache.hadoop.mapreduce.Counters cntrs,
String counterGroupName, String counterName) throws IOException {
Counters counters = Counters.downgrade(cntrs);
return counters.findCounter(counterGroupName, counterName).getValue();
}
/**
* Get status information about the max available Maps in the cluster.
*
* @return the max available Maps in the cluster
* @throws IOException
*/
public int getDefaultMaps() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws IOException, InterruptedException {
return cluster.getClusterStatus().getMapSlotCapacity();
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Get status information about the max available Reduces in the cluster.
*
* @return the max available Reduces in the cluster
* @throws IOException
*/
public int getDefaultReduces() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws IOException, InterruptedException {
return cluster.getClusterStatus().getReduceSlotCapacity();
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Grab the jobtracker system directory path where job-specific files are to be placed.
*
* @return the system directory where job-specific files are to be placed.
*/
public Path getSystemDir() {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<Path>() {
@Override
public Path run() throws IOException, InterruptedException {
return cluster.getSystemDir();
}
});
} catch (IOException ioe) {
return null;
} catch (InterruptedException ie) {
return null;
}
}
/**
* Checks if the job directory is clean and has all the required components
   * for (re)starting the job.
*/
public static boolean isJobDirValid(Path jobDirPath, FileSystem fs)
throws IOException {
FileStatus[] contents = fs.listStatus(jobDirPath);
int matchCount = 0;
if (contents != null && contents.length >= 2) {
for (FileStatus status : contents) {
if ("job.xml".equals(status.getPath().getName())) {
++matchCount;
}
if ("job.split".equals(status.getPath().getName())) {
++matchCount;
}
}
if (matchCount == 2) {
return true;
}
}
return false;
}
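  /*
   * A minimal sketch of the check, assuming a FileSystem handle "fs" and a job
   * directory path "jobDir" already known to the caller:
   *
   *   if (!JobClient.isJobDirValid(jobDir, fs)) {
   *     System.err.println(jobDir + " is missing job.xml or job.split");
   *   }
   */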
/**
* Fetch the staging area directory for the application
*
* @return path to staging area directory
* @throws IOException
*/
public Path getStagingAreaDir() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<Path>() {
@Override
public Path run() throws IOException, InterruptedException {
return cluster.getStagingAreaDir();
}
});
} catch (InterruptedException ie) {
// throw RuntimeException instead for compatibility reasons
throw new RuntimeException(ie);
}
}
private JobQueueInfo getJobQueueInfo(QueueInfo queue) {
JobQueueInfo ret = new JobQueueInfo(queue);
// make sure to convert any children
if (queue.getQueueChildren().size() > 0) {
List<JobQueueInfo> childQueues = new ArrayList<JobQueueInfo>(queue
.getQueueChildren().size());
for (QueueInfo child : queue.getQueueChildren()) {
childQueues.add(getJobQueueInfo(child));
}
ret.setChildren(childQueues);
}
return ret;
}
private JobQueueInfo[] getJobQueueInfoArray(QueueInfo[] queues)
throws IOException {
JobQueueInfo[] ret = new JobQueueInfo[queues.length];
for (int i = 0; i < queues.length; i++) {
ret[i] = getJobQueueInfo(queues[i]);
}
return ret;
}
/**
* Returns an array of queue information objects about root level queues
* configured
*
* @return the array of root level JobQueueInfo objects
* @throws IOException
*/
public JobQueueInfo[] getRootQueues() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
public JobQueueInfo[] run() throws IOException, InterruptedException {
return getJobQueueInfoArray(cluster.getRootQueues());
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Returns an array of queue information objects about immediate children
* of queue queueName.
*
   * @param queueName name of the parent queue
* @return the array of immediate children JobQueueInfo objects
* @throws IOException
*/
public JobQueueInfo[] getChildQueues(final String queueName) throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
public JobQueueInfo[] run() throws IOException, InterruptedException {
return getJobQueueInfoArray(cluster.getChildQueues(queueName));
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Return an array of queue information objects about all the Job Queues
* configured.
*
* @return Array of JobQueueInfo objects
* @throws IOException
*/
public JobQueueInfo[] getQueues() throws IOException {
try {
return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
public JobQueueInfo[] run() throws IOException, InterruptedException {
return getJobQueueInfoArray(cluster.getQueues());
}
});
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
   * Gets all the jobs which were added to a particular job queue.
*
* @param queueName name of the Job Queue
* @return Array of jobs present in the job queue
* @throws IOException
*/
public JobStatus[] getJobsFromQueue(final String queueName) throws IOException {
try {
QueueInfo queue = clientUgi.doAs(new PrivilegedExceptionAction<QueueInfo>() {
@Override
public QueueInfo run() throws IOException, InterruptedException {
return cluster.getQueue(queueName);
}
});
if (queue == null) {
return null;
}
org.apache.hadoop.mapreduce.JobStatus[] stats =
queue.getJobStatuses();
JobStatus[] ret = new JobStatus[stats.length];
for (int i = 0 ; i < stats.length; i++ ) {
ret[i] = JobStatus.downgrade(stats[i]);
}
return ret;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
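  /*
   * A minimal sketch that lists the jobs of one queue, assuming a connected
   * JobClient named "client" and a queue named "default":
   *
   *   JobStatus[] jobs = client.getJobsFromQueue("default");
   *   if (jobs != null) {
   *     for (JobStatus status : jobs) {
   *       System.out.println(status.getJobID() + " " + status.getRunState());
   *     }
   *   }
   */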
/**
   * Gets the queue information associated with a particular job queue
   *
   * @param queueName name of the job queue.
   * @return Queue information associated with the particular queue.
* @throws IOException
*/
public JobQueueInfo getQueueInfo(final String queueName) throws IOException {
try {
QueueInfo queueInfo = clientUgi.doAs(new
PrivilegedExceptionAction<QueueInfo>() {
public QueueInfo run() throws IOException, InterruptedException {
return cluster.getQueue(queueName);
}
});
if (queueInfo != null) {
return new JobQueueInfo(queueInfo);
}
return null;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
   * Gets the Queue ACLs for the current user
* @return array of QueueAclsInfo object for current user.
* @throws IOException
*/
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException {
try {
org.apache.hadoop.mapreduce.QueueAclsInfo[] acls =
clientUgi.doAs(new
PrivilegedExceptionAction
<org.apache.hadoop.mapreduce.QueueAclsInfo[]>() {
public org.apache.hadoop.mapreduce.QueueAclsInfo[] run()
throws IOException, InterruptedException {
return cluster.getQueueAclsForCurrentUser();
}
});
QueueAclsInfo[] ret = new QueueAclsInfo[acls.length];
for (int i = 0 ; i < acls.length; i++ ) {
ret[i] = QueueAclsInfo.downgrade(acls[i]);
}
return ret;
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
/**
* Get a delegation token for the user from the JobTracker.
* @param renewer the user who can renew the token
* @return the new token
* @throws IOException
*/
public Token<DelegationTokenIdentifier>
getDelegationToken(final Text renewer) throws IOException, InterruptedException {
return clientUgi.doAs(new
PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
public Token<DelegationTokenIdentifier> run() throws IOException,
InterruptedException {
return cluster.getDelegationToken(renewer);
}
});
}
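  /*
   * A minimal sketch of obtaining and storing a delegation token, assuming a
   * connected JobClient named "client"; the renewer principal is a placeholder:
   *
   *   Token<DelegationTokenIdentifier> token =
   *       client.getDelegationToken(new Text("renewer-principal"));
   *   Credentials creds = new Credentials();
   *   creds.addToken(token.getService(), token);
   */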
/**
* Renew a delegation token
* @param token the token to renew
* @return true if the renewal went well
* @throws InvalidToken
* @throws IOException
* @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
InterruptedException {
return token.renew(getConf());
}
/**
* Cancel a delegation token from the JobTracker
* @param token the token to cancel
* @throws IOException
* @deprecated Use {@link Token#cancel} instead
*/
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
InterruptedException {
token.cancel(getConf());
}
  /**
   * Run the <code>JobClient</code> as a command-line tool.
   */
public static void main(String argv[]) throws Exception {
int res = ToolRunner.run(new JobClient(), argv);
System.exit(res);
}
}
| 39,775 | 30.295043 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Progressable;
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface TaskAttemptContext
extends org.apache.hadoop.mapreduce.TaskAttemptContext {
public TaskAttemptID getTaskAttemptID();
public Progressable getProgressible();
public JobConf getJobConf();
}
| 1,283 | 34.666667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Queue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.QueueState;
import org.apache.hadoop.security.authorize.AccessControlList;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
/**
* A class for storing the properties of a job queue.
*/
class Queue implements Comparable<Queue>{
private static final Log LOG = LogFactory.getLog(Queue.class);
//Queue name
private String name = null;
//acls list
private Map<String, AccessControlList> acls;
//Queue State
private QueueState state = QueueState.RUNNING;
// An Object that can be used by schedulers to fill in
// arbitrary scheduling information. The toString method
// of these objects will be called by the framework to
// get a String that can be displayed on UI.
private Object schedulingInfo;
private Set<Queue> children;
private Properties props;
/**
* Default constructor is useful in creating the hierarchy.
* The variables are populated using mutator methods.
*/
Queue() {
}
/**
* Create a job queue
* @param name name of the queue
* @param acls ACLs for the queue
* @param state state of the queue
*/
Queue(String name, Map<String, AccessControlList> acls, QueueState state) {
this.name = name;
this.acls = acls;
this.state = state;
}
/**
* Return the name of the queue
*
* @return name of the queue
*/
String getName() {
return name;
}
/**
* Set the name of the queue
* @param name name of the queue
*/
void setName(String name) {
this.name = name;
}
/**
* Return the ACLs for the queue
*
* The keys in the map indicate the operations that can be performed,
* and the values indicate the list of users/groups who can perform
* the operation.
*
* @return Map containing the operations that can be performed and
* who can perform the operations.
*/
Map<String, AccessControlList> getAcls() {
return acls;
}
/**
* Set the ACLs for the queue
* @param acls Map containing the operations that can be performed and
* who can perform the operations.
*/
void setAcls(Map<String, AccessControlList> acls) {
this.acls = acls;
}
/**
* Return the state of the queue.
* @return state of the queue
*/
QueueState getState() {
return state;
}
/**
* Set the state of the queue.
* @param state state of the queue.
*/
void setState(QueueState state) {
this.state = state;
}
/**
* Return the scheduling information for the queue
* @return scheduling information for the queue.
*/
Object getSchedulingInfo() {
return schedulingInfo;
}
/**
* Set the scheduling information from the queue.
* @param schedulingInfo scheduling information for the queue.
*/
void setSchedulingInfo(Object schedulingInfo) {
this.schedulingInfo = schedulingInfo;
}
/**
* Copy the scheduling information from the sourceQueue into this queue
* recursively.
*
   * @param sourceQueue the queue to copy the scheduling information from
*/
void copySchedulingInfo(Queue sourceQueue) {
// First update the children queues recursively.
Set<Queue> destChildren = getChildren();
if (destChildren != null) {
Iterator<Queue> itr1 = destChildren.iterator();
Iterator<Queue> itr2 = sourceQueue.getChildren().iterator();
while (itr1.hasNext()) {
itr1.next().copySchedulingInfo(itr2.next());
}
}
// Now, copy the information for the root-queue itself
setSchedulingInfo(sourceQueue.getSchedulingInfo());
}
  /**
   * Add a child {@link Queue} to this queue.
   * @param child the queue to add as a child
   */
void addChild(Queue child) {
if(children == null) {
children = new TreeSet<Queue>();
}
children.add(child);
}
  /**
   * Return the children of this queue.
   * @return the set of child queues, or null if this is a leaf queue
   */
Set<Queue> getChildren() {
return children;
}
  /**
   * Set the properties of this queue.
   * @param props properties of the queue
   */
void setProperties(Properties props) {
this.props = props;
}
  /**
   * Return the properties of this queue.
   * @return properties of the queue
   */
Properties getProperties() {
return this.props;
}
/**
   * This method helps in traversing the tree hierarchy.
   *
   * Returns a map of all inner queues, i.e. nodes which have children,
   * below this level.
   *
   * If children is null, returns an empty map.
   * This helps when creating the union of inner and leaf queues.
   * @return map of queue name to inner queue
*/
Map<String,Queue> getInnerQueues() {
Map<String,Queue> l = new HashMap<String,Queue>();
//If no children , return empty set.
//This check is required for root node.
if(children == null) {
return l;
}
//check for children if they are parent.
for(Queue child:children) {
//check if children are themselves parent add them
if(child.getChildren() != null && child.getChildren().size() > 0) {
l.put(child.getName(),child);
l.putAll(child.getInnerQueues());
}
}
return l;
}
/**
   * This method helps in maintaining a single
   * data structure across the QueueManager.
   *
   * With it, maintaining just the list of root queues
   * is sufficient.
   *
   * Never returns null.
   * Adds itself if this is a leaf node.
   * @return map of queue name to leaf queue
*/
Map<String,Queue> getLeafQueues() {
Map<String,Queue> l = new HashMap<String,Queue>();
if(children == null) {
l.put(name,this);
return l;
}
for(Queue child:children) {
l.putAll(child.getLeafQueues());
}
return l;
}
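  /*
   * A small illustrative hierarchy (queue names are placeholders): "root" has
   * children "a" and "b", and "a" has a child "a1". getInnerQueues() on root
   * then returns only "a" (the sole node below root with children), while
   * getLeafQueues() returns "b" and "a1":
   *
   *   Queue root = new Queue();  root.setName("root");
   *   Queue a = new Queue();     a.setName("a");
   *   Queue b = new Queue();     b.setName("b");
   *   Queue a1 = new Queue();    a1.setName("a1");
   *   root.addChild(a);  root.addChild(b);  a.addChild(a1);
   *   Map<String, Queue> inner = root.getInnerQueues();   // {"a"=a}
   *   Map<String, Queue> leaves = root.getLeafQueues();   // {"a1"=a1, "b"=b}
   */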
@Override
public int compareTo(Queue queue) {
return name.compareTo(queue.getName());
}
@Override
public boolean equals(Object o) {
if(o == this) {
return true;
}
if(! (o instanceof Queue)) {
return false;
}
return ((Queue)o).getName().equals(name);
}
@Override
public String toString() {
return this.getName();
}
@Override
public int hashCode() {
return this.getName().hashCode();
}
/**
   * Return the hierarchy of {@link JobQueueInfo} objects
   * under this Queue.
   *
   * @return the JobQueueInfo for this queue, with any children attached
*/
JobQueueInfo getJobQueueInfo() {
JobQueueInfo queueInfo = new JobQueueInfo();
queueInfo.setQueueName(name);
LOG.debug("created jobQInfo " + queueInfo.getQueueName());
queueInfo.setQueueState(state.getStateName());
if (schedulingInfo != null) {
queueInfo.setSchedulingInfo(schedulingInfo.toString());
}
if (props != null) {
//Create deep copy of properties.
Properties newProps = new Properties();
for (Object key : props.keySet()) {
newProps.setProperty(key.toString(), props.getProperty(key.toString()));
}
queueInfo.setProperties(newProps);
}
if (children != null && children.size() > 0) {
List<JobQueueInfo> list = new ArrayList<JobQueueInfo>();
for (Queue child : children) {
list.add(child.getJobQueueInfo());
}
queueInfo.setChildren(list);
}
return queueInfo;
}
/**
   * For each node, validate whether the current node hierarchy is the same
   * as newState, recursively checking the child nodes.
   *
   * @param newState the queue hierarchy to compare against
   * @return true if the hierarchies are the same, false otherwise
*/
boolean isHierarchySameAs(Queue newState) {
if(newState == null) {
return false;
}
//First check if names are equal
if(!(name.equals(newState.getName())) ) {
LOG.info(" current name " + name + " not equal to " + newState.getName());
return false;
}
if (children == null || children.size() == 0) {
if(newState.getChildren() != null && newState.getChildren().size() > 0) {
LOG.info( newState + " has added children in refresh ");
return false;
}
} else if(children.size() > 0) {
//check for the individual children and then see if all of them
//are updated.
if (newState.getChildren() == null) {
LOG.fatal("In the current state, queue " + getName() + " has "
+ children.size() + " but the new state has none!");
return false;
}
int childrenSize = children.size();
int newChildrenSize = newState.getChildren().size();
if (childrenSize != newChildrenSize) {
LOG.fatal("Number of children for queue " + newState.getName()
+ " in newState is " + newChildrenSize + " which is not equal to "
+ childrenSize + " in the current state.");
return false;
}
      //children are pre-sorted as they are stored in a TreeSet,
      //hence the order should be the same.
Iterator<Queue> itr1 = children.iterator();
Iterator<Queue> itr2 = newState.getChildren().iterator();
while(itr1.hasNext()) {
Queue q = itr1.next();
Queue newq = itr2.next();
if(! (q.isHierarchySameAs(newq)) ) {
LOG.info(" Queue " + q.getName() + " not equal to " + newq.getName());
return false;
}
}
}
return true;
}
}
| 9,818 | 24.839474 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FixedLengthInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
/**
* FixedLengthInputFormat is an input format used to read input files
* which contain fixed length records. The content of a record need not be
* text. It can be arbitrary binary data. Users must configure the record
* length property by calling:
* FixedLengthInputFormat.setRecordLength(conf, recordLength);<br><br> or
* conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength);
* <br><br>
* @see FixedLengthRecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FixedLengthInputFormat
extends FileInputFormat<LongWritable, BytesWritable>
implements JobConfigurable {
private CompressionCodecFactory compressionCodecs = null;
public static final String FIXED_RECORD_LENGTH =
"fixedlengthinputformat.record.length";
/**
* Set the length of each record
* @param conf configuration
* @param recordLength the length of a record
*/
public static void setRecordLength(Configuration conf, int recordLength) {
conf.setInt(FIXED_RECORD_LENGTH, recordLength);
}
/**
* Get record length value
* @param conf configuration
* @return the record length, zero means none was set
*/
public static int getRecordLength(Configuration conf) {
return conf.getInt(FIXED_RECORD_LENGTH, 0);
}
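  /*
   * A minimal job-setup sketch, assuming a JobConf named "conf" and a record
   * length of 1024 bytes chosen purely for illustration:
   *
   *   FixedLengthInputFormat.setRecordLength(conf, 1024);
   *   conf.setInputFormat(FixedLengthInputFormat.class);
   *   // each input record then arrives as a LongWritable offset key and a
   *   // 1024-byte BytesWritable value
   */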
@Override
public void configure(JobConf conf) {
compressionCodecs = new CompressionCodecFactory(conf);
}
@Override
public RecordReader<LongWritable, BytesWritable>
getRecordReader(InputSplit genericSplit, JobConf job, Reporter reporter)
throws IOException {
reporter.setStatus(genericSplit.toString());
int recordLength = getRecordLength(job);
if (recordLength <= 0) {
throw new IOException("Fixed record length " + recordLength
+ " is invalid. It should be set to a value greater than zero");
}
return new FixedLengthRecordReader(job, (FileSplit)genericSplit,
recordLength);
}
@Override
protected boolean isSplitable(FileSystem fs, Path file) {
final CompressionCodec codec = compressionCodecs.getCodec(file);
return(null == codec);
}
}
| 3,466 | 34.377551 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@Private
@Unstable
public class Master {
public enum State {
INITIALIZING, RUNNING;
}
public static String getMasterUserName(Configuration conf) {
String framework = conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
if (framework.equals(MRConfig.CLASSIC_FRAMEWORK_NAME)) {
return conf.get(MRConfig.MASTER_USER_NAME);
}
else {
return conf.get(YarnConfiguration.RM_PRINCIPAL);
}
}
public static InetSocketAddress getMasterAddress(Configuration conf) {
String masterAddress;
String framework = conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
if (framework.equals(MRConfig.CLASSIC_FRAMEWORK_NAME)) {
masterAddress = conf.get(MRConfig.MASTER_ADDRESS, "localhost:8012");
return NetUtils.createSocketAddr(masterAddress, 8012, MRConfig.MASTER_ADDRESS);
}
else {
return conf.getSocketAddr(
YarnConfiguration.RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADDRESS,
YarnConfiguration.DEFAULT_RM_PORT);
}
}
public static String getMasterPrincipal(Configuration conf)
throws IOException {
String masterHostname = getMasterAddress(conf).getHostName();
// get kerberos principal for use as delegation token renewer
return SecurityUtil.getServerPrincipal(getMasterUserName(conf), masterHostname);
}
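  /*
   * A minimal sketch, assuming a Configuration named "conf" that already
   * carries the framework and principal settings of the target cluster:
   *
   *   InetSocketAddress addr = Master.getMasterAddress(conf);
   *   String principal = Master.getMasterPrincipal(conf);
   */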
}
| 2,626 | 34.986301 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.lib.HashPartitioner;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapred.lib.KeyFieldBasedComparator;
import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.util.ClassUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
import org.apache.log4j.Level;
/**
* A map/reduce job configuration.
*
* <p><code>JobConf</code> is the primary interface for a user to describe a
* map-reduce job to the Hadoop framework for execution. The framework tries to
* faithfully execute the job as-is described by <code>JobConf</code>, however:
* <ol>
* <li>
* Some configuration parameters might have been marked as
* <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
* final</a> by administrators and hence cannot be altered.
* </li>
* <li>
* While some job parameters are straight-forward to set
* (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly
 *   with the rest of the framework and/or job-configuration and are relatively
* more complex for the user to control finely
* (e.g. {@link #setNumMapTasks(int)}).
* </li>
* </ol>
*
* <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
* (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
* {@link OutputFormat} implementations to be used etc.
*
* <p>Optionally <code>JobConf</code> is used to specify other advanced facets
* of the job such as <code>Comparator</code>s to be used, files to be put in
* the {@link DistributedCache}, whether or not intermediate and/or job outputs
 * are to be compressed (and how), debuggability via user-provided scripts
 * ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
 * for doing post-processing on task logs, task's stdout, stderr, syslog,
 * etc.</p>
*
* <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
* <p><blockquote><pre>
* // Create a new JobConf
* JobConf job = new JobConf(new Configuration(), MyJob.class);
*
* // Specify various job-specific parameters
* job.setJobName("myjob");
*
* FileInputFormat.setInputPaths(job, new Path("in"));
* FileOutputFormat.setOutputPath(job, new Path("out"));
*
* job.setMapperClass(MyJob.MyMapper.class);
* job.setCombinerClass(MyJob.MyReducer.class);
* job.setReducerClass(MyJob.MyReducer.class);
*
* job.setInputFormat(SequenceFileInputFormat.class);
* job.setOutputFormat(SequenceFileOutputFormat.class);
* </pre></blockquote>
*
* @see JobClient
* @see ClusterStatus
* @see Tool
* @see DistributedCache
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobConf extends Configuration {
private static final Log LOG = LogFactory.getLog(JobConf.class);
static{
ConfigUtil.loadResources();
}
/**
* @deprecated Use {@link #MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY} and
* {@link #MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY}
*/
@Deprecated
public static final String MAPRED_TASK_MAXVMEM_PROPERTY =
"mapred.task.maxvmem";
/**
* @deprecated
*/
@Deprecated
public static final String UPPER_LIMIT_ON_TASK_VMEM_PROPERTY =
"mapred.task.limit.maxvmem";
/**
* @deprecated
*/
@Deprecated
public static final String MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY =
"mapred.task.default.maxvmem";
/**
* @deprecated
*/
@Deprecated
public static final String MAPRED_TASK_MAXPMEM_PROPERTY =
"mapred.task.maxpmem";
/**
   * A value which, if set for memory-related configuration options,
* indicates that the options are turned off.
* Deprecated because it makes no sense in the context of MR2.
*/
@Deprecated
public static final long DISABLED_MEMORY_LIMIT = -1L;
/**
* Property name for the configuration property mapreduce.cluster.local.dir
*/
public static final String MAPRED_LOCAL_DIR_PROPERTY = MRConfig.LOCAL_DIR;
/**
* Name of the queue to which jobs will be submitted, if no queue
* name is mentioned.
*/
public static final String DEFAULT_QUEUE_NAME = "default";
static final String MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY =
JobContext.MAP_MEMORY_MB;
static final String MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY =
JobContext.REDUCE_MEMORY_MB;
/**
* The variable is kept for M/R 1.x applications, while M/R 2.x applications
* should use {@link #MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY}
*/
@Deprecated
public static final String MAPRED_JOB_MAP_MEMORY_MB_PROPERTY =
"mapred.job.map.memory.mb";
/**
* The variable is kept for M/R 1.x applications, while M/R 2.x applications
* should use {@link #MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY}
*/
@Deprecated
public static final String MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY =
"mapred.job.reduce.memory.mb";
/** Pattern for the default unpacking behavior for job jars */
public static final Pattern UNPACK_JAR_PATTERN_DEFAULT =
Pattern.compile("(?:classes/|lib/).*");
/**
* Configuration key to set the java command line options for the child
* map and reduce tasks.
*
* Java opts for the task tracker child processes.
* The following symbol, if present, will be interpolated: @taskid@.
 * It is replaced by the current TaskID. Any other occurrences of '@' will go
* unchanged.
* For example, to enable verbose gc logging to a file named for the taskid in
* /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
* -Xmx1024m -verbose:gc -Xloggc:/tmp/@[email protected]
*
* The configuration variable {@link #MAPRED_TASK_ENV} can be used to pass
* other environment variables to the child processes.
*
* @deprecated Use {@link #MAPRED_MAP_TASK_JAVA_OPTS} or
* {@link #MAPRED_REDUCE_TASK_JAVA_OPTS}
*/
@Deprecated
public static final String MAPRED_TASK_JAVA_OPTS = "mapred.child.java.opts";
/**
* Configuration key to set the java command line options for the map tasks.
*
* Java opts for the task tracker child map processes.
* The following symbol, if present, will be interpolated: @taskid@.
 * It is replaced by the current TaskID. Any other occurrences of '@' will go
* unchanged.
* For example, to enable verbose gc logging to a file named for the taskid in
* /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
* -Xmx1024m -verbose:gc -Xloggc:/tmp/@[email protected]
*
* The configuration variable {@link #MAPRED_MAP_TASK_ENV} can be used to pass
* other environment variables to the map processes.
*/
public static final String MAPRED_MAP_TASK_JAVA_OPTS =
JobContext.MAP_JAVA_OPTS;
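  /*
   * A minimal sketch of setting map-task JVM options, assuming a JobConf named
   * "conf"; the heap size and gc-log path are illustrative only:
   *
   *   conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
   *       "-Xmx1024m -verbose:gc -Xloggc:/tmp/@[email protected]");
   */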
/**
* Configuration key to set the java command line options for the reduce tasks.
*
* Java opts for the task tracker child reduce processes.
* The following symbol, if present, will be interpolated: @taskid@.
 * It is replaced by the current TaskID. Any other occurrences of '@' will go
* unchanged.
* For example, to enable verbose gc logging to a file named for the taskid in
* /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
* -Xmx1024m -verbose:gc -Xloggc:/tmp/@[email protected]
*
* The configuration variable {@link #MAPRED_REDUCE_TASK_ENV} can be used to
* pass process environment variables to the reduce processes.
*/
public static final String MAPRED_REDUCE_TASK_JAVA_OPTS =
JobContext.REDUCE_JAVA_OPTS;
public static final String DEFAULT_MAPRED_TASK_JAVA_OPTS = "-Xmx200m";
/**
* @deprecated
* Configuration key to set the maximum virtual memory available to the child
* map and reduce tasks (in kilo-bytes). This has been deprecated and will no
* longer have any effect.
*/
@Deprecated
public static final String MAPRED_TASK_ULIMIT = "mapred.child.ulimit";
/**
* @deprecated
* Configuration key to set the maximum virtual memory available to the
* map tasks (in kilo-bytes). This has been deprecated and will no
* longer have any effect.
*/
@Deprecated
public static final String MAPRED_MAP_TASK_ULIMIT = "mapreduce.map.ulimit";
/**
* @deprecated
* Configuration key to set the maximum virtual memory available to the
* reduce tasks (in kilo-bytes). This has been deprecated and will no
* longer have any effect.
*/
@Deprecated
public static final String MAPRED_REDUCE_TASK_ULIMIT =
"mapreduce.reduce.ulimit";
/**
* Configuration key to set the environment of the child map/reduce tasks.
*
* The format of the value is <code>k1=v1,k2=v2</code>. Further it can
* reference existing environment variables via <code>$key</code> on
* Linux or <code>%key%</code> on Windows.
*
* Example:
* <ul>
* <li> A=foo - This will set the env variable A to foo. </li>
 *   <li> B=$X:c This inherits the tasktracker's X env variable on Linux. </li>
 *   <li> B=%X%;c This inherits the tasktracker's X env variable on Windows. </li>
* </ul>
*
* @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or
* {@link #MAPRED_REDUCE_TASK_ENV}
*/
@Deprecated
public static final String MAPRED_TASK_ENV = "mapred.child.env";
/**
* Configuration key to set the environment of the child map tasks.
*
* The format of the value is <code>k1=v1,k2=v2</code>. Further it can
* reference existing environment variables via <code>$key</code> on
* Linux or <code>%key%</code> on Windows.
*
* Example:
* <ul>
* <li> A=foo - This will set the env variable A to foo. </li>
 *   <li> B=$X:c This inherits the tasktracker's X env variable on Linux. </li>
 *   <li> B=%X%;c This inherits the tasktracker's X env variable on Windows. </li>
* </ul>
*/
public static final String MAPRED_MAP_TASK_ENV = JobContext.MAP_ENV;
/**
* Configuration key to set the environment of the child reduce tasks.
*
* The format of the value is <code>k1=v1,k2=v2</code>. Further it can
* reference existing environment variables via <code>$key</code> on
* Linux or <code>%key%</code> on Windows.
*
* Example:
* <ul>
* <li> A=foo - This will set the env variable A to foo. </li>
 *   <li> B=$X:c This inherits the tasktracker's X env variable on Linux. </li>
 *   <li> B=%X%;c This inherits the tasktracker's X env variable on Windows. </li>
* </ul>
*/
public static final String MAPRED_REDUCE_TASK_ENV = JobContext.REDUCE_ENV;
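  /*
   * A minimal sketch of passing environment variables to child tasks, assuming
   * a JobConf named "conf"; the variable names and values are placeholders:
   *
   *   conf.set(JobConf.MAPRED_MAP_TASK_ENV, "A=foo,B=$PATH:/opt/tools/bin");
   *   conf.set(JobConf.MAPRED_REDUCE_TASK_ENV, "A=bar");
   */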
private Credentials credentials = new Credentials();
/**
* Configuration key to set the logging {@link Level} for the map task.
*
* The allowed logging levels are:
* OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
*/
public static final String MAPRED_MAP_TASK_LOG_LEVEL =
JobContext.MAP_LOG_LEVEL;
/**
* Configuration key to set the logging {@link Level} for the reduce task.
*
* The allowed logging levels are:
* OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
*/
public static final String MAPRED_REDUCE_TASK_LOG_LEVEL =
JobContext.REDUCE_LOG_LEVEL;
/**
* Default logging level for map/reduce tasks.
*/
public static final Level DEFAULT_LOG_LEVEL = Level.INFO;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* use {@link MRJobConfig#WORKFLOW_ID} instead
*/
@Deprecated
public static final String WORKFLOW_ID = MRJobConfig.WORKFLOW_ID;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* use {@link MRJobConfig#WORKFLOW_NAME} instead
*/
@Deprecated
public static final String WORKFLOW_NAME = MRJobConfig.WORKFLOW_NAME;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* use {@link MRJobConfig#WORKFLOW_NODE_NAME} instead
*/
@Deprecated
public static final String WORKFLOW_NODE_NAME =
MRJobConfig.WORKFLOW_NODE_NAME;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* use {@link MRJobConfig#WORKFLOW_ADJACENCY_PREFIX_STRING} instead
*/
@Deprecated
public static final String WORKFLOW_ADJACENCY_PREFIX_STRING =
MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* use {@link MRJobConfig#WORKFLOW_ADJACENCY_PREFIX_PATTERN} instead
*/
@Deprecated
public static final String WORKFLOW_ADJACENCY_PREFIX_PATTERN =
MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_PATTERN;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* use {@link MRJobConfig#WORKFLOW_TAGS} instead
*/
@Deprecated
public static final String WORKFLOW_TAGS = MRJobConfig.WORKFLOW_TAGS;
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* not use it
*/
@Deprecated
public static final String MAPREDUCE_RECOVER_JOB =
"mapreduce.job.restart.recover";
/**
   * The variable is kept for M/R 1.x applications, while M/R 2.x applications should
* not use it
*/
@Deprecated
public static final boolean DEFAULT_MAPREDUCE_RECOVER_JOB = true;
/**
* Construct a map/reduce job configuration.
*/
public JobConf() {
checkAndWarnDeprecation();
}
/**
* Construct a map/reduce job configuration.
*
* @param exampleClass a class whose containing jar is used as the job's jar.
*/
public JobConf(Class exampleClass) {
setJarByClass(exampleClass);
checkAndWarnDeprecation();
}
/**
* Construct a map/reduce job configuration.
*
* @param conf a Configuration whose settings will be inherited.
*/
public JobConf(Configuration conf) {
super(conf);
if (conf instanceof JobConf) {
JobConf that = (JobConf)conf;
credentials = that.credentials;
}
checkAndWarnDeprecation();
}
/** Construct a map/reduce job configuration.
*
* @param conf a Configuration whose settings will be inherited.
* @param exampleClass a class whose containing jar is used as the job's jar.
*/
public JobConf(Configuration conf, Class exampleClass) {
this(conf);
setJarByClass(exampleClass);
}
/** Construct a map/reduce configuration.
*
* @param config a Configuration-format XML job description file.
*/
public JobConf(String config) {
this(new Path(config));
}
/** Construct a map/reduce configuration.
*
* @param config a Configuration-format XML job description file.
*/
public JobConf(Path config) {
super();
addResource(config);
checkAndWarnDeprecation();
}
/** A new map/reduce configuration where the behavior of reading from the
* default resources can be turned off.
* <p>
* If the parameter {@code loadDefaults} is false, the new instance
* will not load resources from the default files.
*
* @param loadDefaults specifies whether to load from the default files
*/
public JobConf(boolean loadDefaults) {
super(loadDefaults);
checkAndWarnDeprecation();
}
/**
* Get credentials for the job.
* @return credentials for the job
*/
public Credentials getCredentials() {
return credentials;
}
@Private
public void setCredentials(Credentials credentials) {
this.credentials = credentials;
}
/**
* Get the user jar for the map-reduce job.
*
* @return the user jar for the map-reduce job.
*/
public String getJar() { return get(JobContext.JAR); }
/**
* Set the user jar for the map-reduce job.
*
* @param jar the user jar for the map-reduce job.
*/
public void setJar(String jar) { set(JobContext.JAR, jar); }
/**
* Get the pattern for jar contents to unpack on the tasktracker
*/
public Pattern getJarUnpackPattern() {
return getPattern(JobContext.JAR_UNPACK_PATTERN, UNPACK_JAR_PATTERN_DEFAULT);
}
/**
* Set the job's jar file by finding an example class location.
*
* @param cls the example class.
*/
public void setJarByClass(Class cls) {
String jar = ClassUtil.findContainingJar(cls);
if (jar != null) {
setJar(jar);
}
}
public String[] getLocalDirs() throws IOException {
return getTrimmedStrings(MRConfig.LOCAL_DIR);
}
/**
* Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
*/
@Deprecated
public void deleteLocalFiles() throws IOException {
String[] localDirs = getLocalDirs();
for (int i = 0; i < localDirs.length; i++) {
FileSystem.getLocal(this).delete(new Path(localDirs[i]), true);
}
}
public void deleteLocalFiles(String subdir) throws IOException {
String[] localDirs = getLocalDirs();
for (int i = 0; i < localDirs.length; i++) {
FileSystem.getLocal(this).delete(new Path(localDirs[i], subdir), true);
}
}
/**
* Constructs a local file name. Files are distributed among configured
* local directories.
*/
public Path getLocalPath(String pathString) throws IOException {
return getLocalPath(MRConfig.LOCAL_DIR, pathString);
}
/**
* Get the reported username for this job.
*
* @return the username
*/
public String getUser() {
return get(JobContext.USER_NAME);
}
/**
* Set the reported username for this job.
*
* @param user the username for this job.
*/
public void setUser(String user) {
set(JobContext.USER_NAME, user);
}
/**
* Set whether the framework should keep the intermediate files for
* failed tasks.
*
* @param keep <code>true</code> if framework should keep the intermediate files
* for failed tasks, <code>false</code> otherwise.
*
*/
public void setKeepFailedTaskFiles(boolean keep) {
setBoolean(JobContext.PRESERVE_FAILED_TASK_FILES, keep);
}
/**
* Should the temporary files for failed tasks be kept?
*
* @return should the files be kept?
*/
public boolean getKeepFailedTaskFiles() {
return getBoolean(JobContext.PRESERVE_FAILED_TASK_FILES, false);
}
/**
* Set a regular expression for task names that should be kept.
* The regular expression ".*_m_000123_0" would keep the files
* for the first instance of map 123 that ran.
*
* @param pattern the java.util.regex.Pattern to match against the
* task names.
*/
public void setKeepTaskFilesPattern(String pattern) {
set(JobContext.PRESERVE_FILES_PATTERN, pattern);
}
/**
* Get the regular expression that is matched against the task names
* to see if we need to keep the files.
*
   * @return the pattern as a string, if it was set, otherwise null.
*/
public String getKeepTaskFilesPattern() {
return get(JobContext.PRESERVE_FILES_PATTERN);
}
/**
* Set the current working directory for the default file system.
*
* @param dir the new current working directory.
*/
public void setWorkingDirectory(Path dir) {
dir = new Path(getWorkingDirectory(), dir);
set(JobContext.WORKING_DIR, dir.toString());
}
/**
* Get the current working directory for the default file system.
*
* @return the directory name.
*/
public Path getWorkingDirectory() {
String name = get(JobContext.WORKING_DIR);
if (name != null) {
return new Path(name);
} else {
try {
Path dir = FileSystem.get(this).getWorkingDirectory();
set(JobContext.WORKING_DIR, dir.toString());
return dir;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
/**
* Sets the number of tasks that a spawned task JVM should run
* before it exits
* @param numTasks the number of tasks to execute; defaults to 1;
* -1 signifies no limit
*/
public void setNumTasksToExecutePerJvm(int numTasks) {
setInt(JobContext.JVM_NUMTASKS_TORUN, numTasks);
}
/**
* Get the number of tasks that a spawned JVM should execute
*/
public int getNumTasksToExecutePerJvm() {
return getInt(JobContext.JVM_NUMTASKS_TORUN, 1);
}
/**
* Get the {@link InputFormat} implementation for the map-reduce job,
   * defaults to {@link TextInputFormat} if not specified explicitly.
*
* @return the {@link InputFormat} implementation for the map-reduce job.
*/
public InputFormat getInputFormat() {
return ReflectionUtils.newInstance(getClass("mapred.input.format.class",
TextInputFormat.class,
InputFormat.class),
this);
}
/**
* Set the {@link InputFormat} implementation for the map-reduce job.
*
* @param theClass the {@link InputFormat} implementation for the map-reduce
* job.
*/
public void setInputFormat(Class<? extends InputFormat> theClass) {
setClass("mapred.input.format.class", theClass, InputFormat.class);
}
/**
* Get the {@link OutputFormat} implementation for the map-reduce job,
   * defaults to {@link TextOutputFormat} if not specified explicitly.
*
* @return the {@link OutputFormat} implementation for the map-reduce job.
*/
public OutputFormat getOutputFormat() {
return ReflectionUtils.newInstance(getClass("mapred.output.format.class",
TextOutputFormat.class,
OutputFormat.class),
this);
}
/**
* Get the {@link OutputCommitter} implementation for the map-reduce job,
* defaults to {@link FileOutputCommitter} if not specified explicitly.
*
* @return the {@link OutputCommitter} implementation for the map-reduce job.
*/
public OutputCommitter getOutputCommitter() {
return (OutputCommitter)ReflectionUtils.newInstance(
getClass("mapred.output.committer.class", FileOutputCommitter.class,
OutputCommitter.class), this);
}
/**
* Set the {@link OutputCommitter} implementation for the map-reduce job.
*
* @param theClass the {@link OutputCommitter} implementation for the map-reduce
* job.
*/
public void setOutputCommitter(Class<? extends OutputCommitter> theClass) {
setClass("mapred.output.committer.class", theClass, OutputCommitter.class);
}
/**
* Set the {@link OutputFormat} implementation for the map-reduce job.
*
* @param theClass the {@link OutputFormat} implementation for the map-reduce
* job.
*/
public void setOutputFormat(Class<? extends OutputFormat> theClass) {
setClass("mapred.output.format.class", theClass, OutputFormat.class);
}
/**
* Should the map outputs be compressed before transfer?
*
* @param compress should the map outputs be compressed?
*/
public void setCompressMapOutput(boolean compress) {
setBoolean(JobContext.MAP_OUTPUT_COMPRESS, compress);
}
/**
   * Are the outputs of the maps to be compressed?
*
* @return <code>true</code> if the outputs of the maps are to be compressed,
* <code>false</code> otherwise.
*/
public boolean getCompressMapOutput() {
return getBoolean(JobContext.MAP_OUTPUT_COMPRESS, false);
}
/**
* Set the given class as the {@link CompressionCodec} for the map outputs.
*
* @param codecClass the {@link CompressionCodec} class that will compress
* the map outputs.
*/
public void
setMapOutputCompressorClass(Class<? extends CompressionCodec> codecClass) {
setCompressMapOutput(true);
setClass(JobContext.MAP_OUTPUT_COMPRESS_CODEC, codecClass,
CompressionCodec.class);
}
/**
* Get the {@link CompressionCodec} for compressing the map outputs.
*
* @param defaultValue the {@link CompressionCodec} to return if not set
* @return the {@link CompressionCodec} class that should be used to compress the
* map outputs.
* @throws IllegalArgumentException if the class was specified, but not found
*/
public Class<? extends CompressionCodec>
getMapOutputCompressorClass(Class<? extends CompressionCodec> defaultValue) {
Class<? extends CompressionCodec> codecClass = defaultValue;
String name = get(JobContext.MAP_OUTPUT_COMPRESS_CODEC);
if (name != null) {
try {
codecClass = getClassByName(name).asSubclass(CompressionCodec.class);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Compression codec " + name +
" was not found.", e);
}
}
return codecClass;
}
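  // Illustrative sketch (not part of the original source): enabling map output
  // compression with an explicit codec. GzipCodec is used here only as an
  // example; any available CompressionCodec implementation could be supplied.
  //
  //   JobConf job = new JobConf();
  //   job.setMapOutputCompressorClass(
  //       org.apache.hadoop.io.compress.GzipCodec.class);   // also turns compression on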
/**
* Get the key class for the map output data. If it is not set, use the
* (final) output key class. This allows the map output key class to be
* different than the final output key class.
*
* @return the map output key class.
*/
public Class<?> getMapOutputKeyClass() {
Class<?> retv = getClass(JobContext.MAP_OUTPUT_KEY_CLASS, null, Object.class);
if (retv == null) {
retv = getOutputKeyClass();
}
return retv;
}
/**
* Set the key class for the map output data. This allows the user to
   * specify the map output key class to be different than the final output
   * key class.
*
* @param theClass the map output key class.
*/
public void setMapOutputKeyClass(Class<?> theClass) {
setClass(JobContext.MAP_OUTPUT_KEY_CLASS, theClass, Object.class);
}
/**
* Get the value class for the map output data. If it is not set, use the
   * (final) output value class. This allows the map output value class to be
* different than the final output value class.
*
* @return the map output value class.
*/
public Class<?> getMapOutputValueClass() {
Class<?> retv = getClass(JobContext.MAP_OUTPUT_VALUE_CLASS, null,
Object.class);
if (retv == null) {
retv = getOutputValueClass();
}
return retv;
}
/**
* Set the value class for the map output data. This allows the user to
* specify the map output value class to be different than the final output
* value class.
*
* @param theClass the map output value class.
*/
public void setMapOutputValueClass(Class<?> theClass) {
setClass(JobContext.MAP_OUTPUT_VALUE_CLASS, theClass, Object.class);
}
/**
* Get the key class for the job output data.
*
* @return the key class for the job output data.
*/
public Class<?> getOutputKeyClass() {
return getClass(JobContext.OUTPUT_KEY_CLASS,
LongWritable.class, Object.class);
}
/**
* Set the key class for the job output data.
*
* @param theClass the key class for the job output data.
*/
public void setOutputKeyClass(Class<?> theClass) {
setClass(JobContext.OUTPUT_KEY_CLASS, theClass, Object.class);
}
/**
* Get the {@link RawComparator} comparator used to compare keys.
*
* @return the {@link RawComparator} comparator used to compare keys.
*/
public RawComparator getOutputKeyComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.KEY_COMPARATOR, null, RawComparator.class);
if (theClass != null)
return ReflectionUtils.newInstance(theClass, this);
return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
}
/**
* Set the {@link RawComparator} comparator used to compare keys.
*
* @param theClass the {@link RawComparator} comparator used to
* compare keys.
* @see #setOutputValueGroupingComparator(Class)
*/
public void setOutputKeyComparatorClass(Class<? extends RawComparator> theClass) {
setClass(JobContext.KEY_COMPARATOR,
theClass, RawComparator.class);
}
/**
* Set the {@link KeyFieldBasedComparator} options used to compare keys.
*
* @param keySpec the key specification of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field). opts are ordering options. The supported options
* are:
* -n, (Sort numerically)
* -r, (Reverse the result of comparison)
*/
public void setKeyFieldComparatorOptions(String keySpec) {
setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
set(KeyFieldBasedComparator.COMPARATOR_OPTIONS, keySpec);
}
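  // Illustrative sketch (not part of the original source): sorting on the
  // second key field, numerically and in reverse order, using the key
  // specification syntax described above.
  //
  //   JobConf job = new JobConf();
  //   job.setKeyFieldComparatorOptions("-k2,2nr");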
/**
* Get the {@link KeyFieldBasedComparator} options
*/
public String getKeyFieldComparatorOption() {
return get(KeyFieldBasedComparator.COMPARATOR_OPTIONS);
}
/**
* Set the {@link KeyFieldBasedPartitioner} options used for
* {@link Partitioner}
*
* @param keySpec the key specification of the form -k pos1[,pos2], where,
* pos is of the form f[.c][opts], where f is the number
* of the key field to use, and c is the number of the first character from
* the beginning of the field. Fields and character posns are numbered
* starting with 1; a character position of zero in pos2 indicates the
* field's last character. If '.c' is omitted from pos1, it defaults to 1
* (the beginning of the field); if omitted from pos2, it defaults to 0
* (the end of the field).
*/
public void setKeyFieldPartitionerOptions(String keySpec) {
setPartitionerClass(KeyFieldBasedPartitioner.class);
set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, keySpec);
}
/**
* Get the {@link KeyFieldBasedPartitioner} options
*/
public String getKeyFieldPartitionerOption() {
return get(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS);
}
/**
* Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the combiner.
*
* @return comparator set by the user for grouping values.
* @see #setCombinerKeyGroupingComparator(Class) for details.
*/
public RawComparator getCombinerKeyGroupingComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.COMBINER_GROUP_COMPARATOR_CLASS, null, RawComparator.class);
if (theClass == null) {
return getOutputKeyComparator();
}
return ReflectionUtils.newInstance(theClass, this);
}
/**
* Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the reduce.
*
* @return comparator set by the user for grouping values.
* @see #setOutputValueGroupingComparator(Class) for details.
*/
public RawComparator getOutputValueGroupingComparator() {
Class<? extends RawComparator> theClass = getClass(
JobContext.GROUP_COMPARATOR_CLASS, null, RawComparator.class);
if (theClass == null) {
return getOutputKeyComparator();
}
return ReflectionUtils.newInstance(theClass, this);
}
/**
* Set the user defined {@link RawComparator} comparator for
* grouping keys in the input to the combiner.
*
* <p>This comparator should be provided if the equivalence rules for keys
* for sorting the intermediates are different from those for grouping keys
* before each call to
* {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
*
* <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
* in a single call to the reduce function if K1 and K2 compare as equal.</p>
*
* <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
* how keys are sorted, this can be used in conjunction to simulate
* <i>secondary sort on values</i>.</p>
*
* <p><i>Note</i>: This is not a guarantee of the combiner sort being
* <i>stable</i> in any sense. (In any case, with the order of available
* map-outputs to the combiner being non-deterministic, it wouldn't make
* that much sense.)</p>
*
* @param theClass the comparator class to be used for grouping keys for the
* combiner. It should implement <code>RawComparator</code>.
* @see #setOutputKeyComparatorClass(Class)
*/
public void setCombinerKeyGroupingComparator(
Class<? extends RawComparator> theClass) {
setClass(JobContext.COMBINER_GROUP_COMPARATOR_CLASS,
theClass, RawComparator.class);
}
/**
* Set the user defined {@link RawComparator} comparator for
* grouping keys in the input to the reduce.
*
* <p>This comparator should be provided if the equivalence rules for keys
* for sorting the intermediates are different from those for grouping keys
* before each call to
* {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
*
* <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
* in a single call to the reduce function if K1 and K2 compare as equal.</p>
*
* <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
* how keys are sorted, this can be used in conjunction to simulate
* <i>secondary sort on values</i>.</p>
*
* <p><i>Note</i>: This is not a guarantee of the reduce sort being
* <i>stable</i> in any sense. (In any case, with the order of available
* map-outputs to the reduce being non-deterministic, it wouldn't make
* that much sense.)</p>
*
* @param theClass the comparator class to be used for grouping keys.
* It should implement <code>RawComparator</code>.
* @see #setOutputKeyComparatorClass(Class)
* @see #setCombinerKeyGroupingComparator(Class)
*/
public void setOutputValueGroupingComparator(
Class<? extends RawComparator> theClass) {
setClass(JobContext.GROUP_COMPARATOR_CLASS,
theClass, RawComparator.class);
}
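  // Illustrative sketch (not part of the original source): simulating a
  // secondary sort on values. FullKeyComparator and FirstKeyGroupingComparator
  // are hypothetical user-supplied RawComparator implementations: the first
  // orders the full composite key, the second groups records by only the
  // "natural" part of the key.
  //
  //   JobConf job = new JobConf();
  //   job.setOutputKeyComparatorClass(FullKeyComparator.class);
  //   job.setOutputValueGroupingComparator(FirstKeyGroupingComparator.class);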
/**
* Should the framework use the new context-object code for running
* the mapper?
* @return true, if the new api should be used
*/
public boolean getUseNewMapper() {
return getBoolean("mapred.mapper.new-api", false);
}
/**
* Set whether the framework should use the new api for the mapper.
* This is the default for jobs submitted with the new Job api.
* @param flag true, if the new api should be used
*/
public void setUseNewMapper(boolean flag) {
setBoolean("mapred.mapper.new-api", flag);
}
/**
* Should the framework use the new context-object code for running
* the reducer?
* @return true, if the new api should be used
*/
public boolean getUseNewReducer() {
return getBoolean("mapred.reducer.new-api", false);
}
/**
* Set whether the framework should use the new api for the reducer.
* This is the default for jobs submitted with the new Job api.
* @param flag true, if the new api should be used
*/
public void setUseNewReducer(boolean flag) {
setBoolean("mapred.reducer.new-api", flag);
}
/**
* Get the value class for job outputs.
*
* @return the value class for job outputs.
*/
public Class<?> getOutputValueClass() {
return getClass(JobContext.OUTPUT_VALUE_CLASS, Text.class, Object.class);
}
/**
* Set the value class for job outputs.
*
* @param theClass the value class for job outputs.
*/
public void setOutputValueClass(Class<?> theClass) {
setClass(JobContext.OUTPUT_VALUE_CLASS, theClass, Object.class);
}
/**
* Get the {@link Mapper} class for the job.
*
* @return the {@link Mapper} class for the job.
*/
public Class<? extends Mapper> getMapperClass() {
return getClass("mapred.mapper.class", IdentityMapper.class, Mapper.class);
}
/**
* Set the {@link Mapper} class for the job.
*
* @param theClass the {@link Mapper} class for the job.
*/
public void setMapperClass(Class<? extends Mapper> theClass) {
setClass("mapred.mapper.class", theClass, Mapper.class);
}
/**
* Get the {@link MapRunnable} class for the job.
*
* @return the {@link MapRunnable} class for the job.
*/
public Class<? extends MapRunnable> getMapRunnerClass() {
return getClass("mapred.map.runner.class",
MapRunner.class, MapRunnable.class);
}
/**
* Expert: Set the {@link MapRunnable} class for the job.
*
* Typically used to exert greater control on {@link Mapper}s.
*
* @param theClass the {@link MapRunnable} class for the job.
*/
public void setMapRunnerClass(Class<? extends MapRunnable> theClass) {
setClass("mapred.map.runner.class", theClass, MapRunnable.class);
}
/**
* Get the {@link Partitioner} used to partition {@link Mapper}-outputs
* to be sent to the {@link Reducer}s.
*
* @return the {@link Partitioner} used to partition map-outputs.
*/
public Class<? extends Partitioner> getPartitionerClass() {
return getClass("mapred.partitioner.class",
HashPartitioner.class, Partitioner.class);
}
/**
* Set the {@link Partitioner} class used to partition
* {@link Mapper}-outputs to be sent to the {@link Reducer}s.
*
* @param theClass the {@link Partitioner} used to partition map-outputs.
*/
public void setPartitionerClass(Class<? extends Partitioner> theClass) {
setClass("mapred.partitioner.class", theClass, Partitioner.class);
}
/**
* Get the {@link Reducer} class for the job.
*
* @return the {@link Reducer} class for the job.
*/
public Class<? extends Reducer> getReducerClass() {
return getClass("mapred.reducer.class",
IdentityReducer.class, Reducer.class);
}
/**
* Set the {@link Reducer} class for the job.
*
* @param theClass the {@link Reducer} class for the job.
*/
public void setReducerClass(Class<? extends Reducer> theClass) {
setClass("mapred.reducer.class", theClass, Reducer.class);
}
/**
* Get the user-defined <i>combiner</i> class used to combine map-outputs
   * before being sent to the reducers. Typically the combiner is the same as
   * the {@link Reducer} for the job i.e. {@link #getReducerClass()}.
*
* @return the user-defined combiner class used to combine map-outputs.
*/
public Class<? extends Reducer> getCombinerClass() {
return getClass("mapred.combiner.class", null, Reducer.class);
}
/**
* Set the user-defined <i>combiner</i> class used to combine map-outputs
* before being sent to the reducers.
*
* <p>The combiner is an application-specified aggregation operation, which
* can help cut down the amount of data transferred between the
* {@link Mapper} and the {@link Reducer}, leading to better performance.</p>
*
* <p>The framework may invoke the combiner 0, 1, or multiple times, in both
* the mapper and reducer tasks. In general, the combiner is called as the
* sort/merge result is written to disk. The combiner must:
* <ul>
* <li> be side-effect free</li>
* <li> have the same input and output key types and the same input and
* output value types</li>
* </ul>
*
   * <p>Typically the combiner is the same as the <code>Reducer</code> for the
* job i.e. {@link #setReducerClass(Class)}.</p>
*
* @param theClass the user-defined combiner class used to combine
* map-outputs.
*/
public void setCombinerClass(Class<? extends Reducer> theClass) {
setClass("mapred.combiner.class", theClass, Reducer.class);
}
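  // Illustrative sketch (not part of the original source): a word-count style
  // job where the reducer doubles as the combiner, which is valid because the
  // aggregation is associative and the key/value types are unchanged.
  // WordCountReducer is a hypothetical Reducer<Text, IntWritable, Text, IntWritable>.
  //
  //   JobConf job = new JobConf();
  //   job.setReducerClass(WordCountReducer.class);
  //   job.setCombinerClass(WordCountReducer.class);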
/**
* Should speculative execution be used for this job?
* Defaults to <code>true</code>.
*
* @return <code>true</code> if speculative execution be used for this job,
* <code>false</code> otherwise.
*/
public boolean getSpeculativeExecution() {
return (getMapSpeculativeExecution() || getReduceSpeculativeExecution());
}
/**
* Turn speculative execution on or off for this job.
*
* @param speculativeExecution <code>true</code> if speculative execution
* should be turned on, else <code>false</code>.
*/
public void setSpeculativeExecution(boolean speculativeExecution) {
setMapSpeculativeExecution(speculativeExecution);
setReduceSpeculativeExecution(speculativeExecution);
}
/**
* Should speculative execution be used for this job for map tasks?
* Defaults to <code>true</code>.
*
* @return <code>true</code> if speculative execution be
* used for this job for map tasks,
* <code>false</code> otherwise.
*/
public boolean getMapSpeculativeExecution() {
return getBoolean(JobContext.MAP_SPECULATIVE, true);
}
/**
* Turn speculative execution on or off for this job for map tasks.
*
* @param speculativeExecution <code>true</code> if speculative execution
* should be turned on for map tasks,
* else <code>false</code>.
*/
public void setMapSpeculativeExecution(boolean speculativeExecution) {
setBoolean(JobContext.MAP_SPECULATIVE, speculativeExecution);
}
/**
* Should speculative execution be used for this job for reduce tasks?
* Defaults to <code>true</code>.
*
* @return <code>true</code> if speculative execution be used
* for reduce tasks for this job,
* <code>false</code> otherwise.
*/
public boolean getReduceSpeculativeExecution() {
return getBoolean(JobContext.REDUCE_SPECULATIVE, true);
}
/**
* Turn speculative execution on or off for this job for reduce tasks.
*
* @param speculativeExecution <code>true</code> if speculative execution
* should be turned on for reduce tasks,
* else <code>false</code>.
*/
public void setReduceSpeculativeExecution(boolean speculativeExecution) {
setBoolean(JobContext.REDUCE_SPECULATIVE,
speculativeExecution);
}
/**
   * Get the configured number of map tasks for this job.
   * Defaults to <code>1</code>.
   *
   * @return the number of map tasks for this job.
*/
public int getNumMapTasks() { return getInt(JobContext.NUM_MAPS, 1); }
/**
* Set the number of map tasks for this job.
*
* <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
* number of spawned map tasks depends on the number of {@link InputSplit}s
* generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
*
* A custom {@link InputFormat} is typically used to accurately control
* the number of map tasks for the job.</p>
*
* <b id="NoOfMaps">How many maps?</b>
*
* <p>The number of maps is usually driven by the total size of the inputs
* i.e. total number of blocks of the input files.</p>
*
* <p>The right level of parallelism for maps seems to be around 10-100 maps
* per-node, although it has been set up to 300 or so for very cpu-light map
* tasks. Task setup takes awhile, so it is best if the maps take at least a
* minute to execute.</p>
*
* <p>The default behavior of file-based {@link InputFormat}s is to split the
* input into <i>logical</i> {@link InputSplit}s based on the total size, in
* bytes, of input files. However, the {@link FileSystem} blocksize of the
* input files is treated as an upper bound for input splits. A lower bound
* on the split size can be set via
* <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
* mapreduce.input.fileinputformat.split.minsize</a>.</p>
*
* <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
* you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
* used to set it even higher.</p>
*
* @param n the number of map tasks for this job.
* @see InputFormat#getSplits(JobConf, int)
* @see FileInputFormat
* @see FileSystem#getDefaultBlockSize()
* @see FileStatus#getBlockSize()
*/
public void setNumMapTasks(int n) { setInt(JobContext.NUM_MAPS, n); }
/**
   * Get the configured number of reduce tasks for this job. Defaults to
* <code>1</code>.
*
* @return the number of reduce tasks for this job.
*/
public int getNumReduceTasks() { return getInt(JobContext.NUM_REDUCES, 1); }
/**
* Set the requisite number of reduce tasks for this job.
*
* <b id="NoOfReduces">How many reduces?</b>
*
* <p>The right number of reduces seems to be <code>0.95</code> or
* <code>1.75</code> multiplied by (<<i>no. of nodes</i>> *
* <a href="{@docRoot}/../mapred-default.html#mapreduce.tasktracker.reduce.tasks.maximum">
* mapreduce.tasktracker.reduce.tasks.maximum</a>).
* </p>
*
* <p>With <code>0.95</code> all of the reduces can launch immediately and
   * start transferring map outputs as the maps finish. With <code>1.75</code>
* the faster nodes will finish their first round of reduces and launch a
* second wave of reduces doing a much better job of load balancing.</p>
*
* <p>Increasing the number of reduces increases the framework overhead, but
* increases load balancing and lowers the cost of failures.</p>
*
* <p>The scaling factors above are slightly less than whole numbers to
* reserve a few reduce slots in the framework for speculative-tasks, failures
* etc.</p>
*
* <b id="ReducerNone">Reducer NONE</b>
*
* <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
*
* <p>In this case the output of the map-tasks directly go to distributed
* file-system, to the path set by
* {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
* framework doesn't sort the map-outputs before writing it out to HDFS.</p>
*
* @param n the number of reduce tasks for this job.
*/
public void setNumReduceTasks(int n) { setInt(JobContext.NUM_REDUCES, n); }
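  // Illustrative sketch (not part of the original source): applying the 0.95
  // heuristic from the javadoc above. The node count and per-node reduce slot
  // count are assumed values for an example cluster.
  //
  //   int nodes = 20;              // assumed cluster size
  //   int reduceSlotsPerNode = 4;  // assumed mapreduce.tasktracker.reduce.tasks.maximum
  //   JobConf job = new JobConf();
  //   job.setNumReduceTasks((int) (0.95 * nodes * reduceSlotsPerNode));  // 76 reduces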
/**
* Get the configured number of maximum attempts that will be made to run a
* map task, as specified by the <code>mapreduce.map.maxattempts</code>
* property. If this property is not already set, the default is 4 attempts.
*
* @return the max number of attempts per map task.
*/
public int getMaxMapAttempts() {
return getInt(JobContext.MAP_MAX_ATTEMPTS, 4);
}
/**
* Expert: Set the number of maximum attempts that will be made to run a
* map task.
*
* @param n the number of attempts per map task.
*/
public void setMaxMapAttempts(int n) {
setInt(JobContext.MAP_MAX_ATTEMPTS, n);
}
/**
* Get the configured number of maximum attempts that will be made to run a
* reduce task, as specified by the <code>mapreduce.reduce.maxattempts</code>
* property. If this property is not already set, the default is 4 attempts.
*
* @return the max number of attempts per reduce task.
*/
public int getMaxReduceAttempts() {
return getInt(JobContext.REDUCE_MAX_ATTEMPTS, 4);
}
/**
* Expert: Set the number of maximum attempts that will be made to run a
* reduce task.
*
* @param n the number of attempts per reduce task.
*/
public void setMaxReduceAttempts(int n) {
setInt(JobContext.REDUCE_MAX_ATTEMPTS, n);
}
/**
* Get the user-specified job name. This is only used to identify the
* job to the user.
*
* @return the job's name, defaulting to "".
*/
public String getJobName() {
return get(JobContext.JOB_NAME, "");
}
/**
* Set the user-specified job name.
*
* @param name the job's new name.
*/
public void setJobName(String name) {
set(JobContext.JOB_NAME, name);
}
/**
* Get the user-specified session identifier. The default is the empty string.
*
* The session identifier is used to tag metric data that is reported to some
* performance metrics system via the org.apache.hadoop.metrics API. The
* session identifier is intended, in particular, for use by Hadoop-On-Demand
* (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
* HOD will set the session identifier by modifying the mapred-site.xml file
* before starting the cluster.
*
   * When not running under HOD, this identifier is expected to remain set to
* the empty string.
*
* @return the session identifier, defaulting to "".
*/
@Deprecated
public String getSessionId() {
return get("session.id", "");
}
/**
* Set the user-specified session identifier.
*
* @param sessionId the new session id.
*/
@Deprecated
public void setSessionId(String sessionId) {
set("session.id", sessionId);
}
/**
* Set the maximum no. of failures of a given job per tasktracker.
* If the no. of task failures exceeds <code>noFailures</code>, the
* tasktracker is <i>blacklisted</i> for this job.
*
* @param noFailures maximum no. of failures of a given job per tasktracker.
*/
public void setMaxTaskFailuresPerTracker(int noFailures) {
setInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, noFailures);
}
/**
* Expert: Get the maximum no. of failures of a given job per tasktracker.
* If the no. of task failures exceeds this, the tasktracker is
* <i>blacklisted</i> for this job.
*
* @return the maximum no. of failures of a given job per tasktracker.
*/
public int getMaxTaskFailuresPerTracker() {
return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 3);
}
/**
* Get the maximum percentage of map tasks that can fail without
* the job being aborted.
*
* Each map task is executed a minimum of {@link #getMaxMapAttempts()}
* attempts before being declared as <i>failed</i>.
*
* Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
* the job being declared as {@link JobStatus#FAILED}.
*
* @return the maximum percentage of map tasks that can fail without
* the job being aborted.
*/
public int getMaxMapTaskFailuresPercent() {
return getInt(JobContext.MAP_FAILURES_MAX_PERCENT, 0);
}
/**
* Expert: Set the maximum percentage of map tasks that can fail without the
* job being aborted.
*
* Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
* before being declared as <i>failed</i>.
*
* @param percent the maximum percentage of map tasks that can fail without
* the job being aborted.
*/
public void setMaxMapTaskFailuresPercent(int percent) {
setInt(JobContext.MAP_FAILURES_MAX_PERCENT, percent);
}
/**
* Get the maximum percentage of reduce tasks that can fail without
* the job being aborted.
*
* Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
* attempts before being declared as <i>failed</i>.
*
* Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
* in the job being declared as {@link JobStatus#FAILED}.
*
* @return the maximum percentage of reduce tasks that can fail without
* the job being aborted.
*/
public int getMaxReduceTaskFailuresPercent() {
return getInt(JobContext.REDUCE_FAILURES_MAXPERCENT, 0);
}
/**
* Set the maximum percentage of reduce tasks that can fail without the job
* being aborted.
*
* Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
* attempts before being declared as <i>failed</i>.
*
* @param percent the maximum percentage of reduce tasks that can fail without
* the job being aborted.
*/
public void setMaxReduceTaskFailuresPercent(int percent) {
setInt(JobContext.REDUCE_FAILURES_MAXPERCENT, percent);
}
/**
* Set {@link JobPriority} for this job.
*
* @param prio the {@link JobPriority} for this job.
*/
public void setJobPriority(JobPriority prio) {
set(JobContext.PRIORITY, prio.toString());
}
/**
* Get the {@link JobPriority} for this job.
*
* @return the {@link JobPriority} for this job.
*/
public JobPriority getJobPriority() {
String prio = get(JobContext.PRIORITY);
if(prio == null) {
return JobPriority.NORMAL;
}
return JobPriority.valueOf(prio);
}
/**
* Set JobSubmitHostName for this job.
*
* @param hostname the JobSubmitHostName for this job.
*/
void setJobSubmitHostName(String hostname) {
set(MRJobConfig.JOB_SUBMITHOST, hostname);
}
/**
* Get the JobSubmitHostName for this job.
*
* @return the JobSubmitHostName for this job.
*/
String getJobSubmitHostName() {
String hostname = get(MRJobConfig.JOB_SUBMITHOST);
return hostname;
}
/**
* Set JobSubmitHostAddress for this job.
*
* @param hostadd the JobSubmitHostAddress for this job.
*/
void setJobSubmitHostAddress(String hostadd) {
set(MRJobConfig.JOB_SUBMITHOSTADDR, hostadd);
}
/**
* Get JobSubmitHostAddress for this job.
*
* @return JobSubmitHostAddress for this job.
*/
String getJobSubmitHostAddress() {
String hostadd = get(MRJobConfig.JOB_SUBMITHOSTADDR);
return hostadd;
}
/**
* Get whether the task profiling is enabled.
* @return true if some tasks will be profiled
*/
public boolean getProfileEnabled() {
return getBoolean(JobContext.TASK_PROFILE, false);
}
/**
* Set whether the system should collect profiler information for some of
   * the tasks in this job. The information is stored in the user log
* directory.
* @param newValue true means it should be gathered
*/
public void setProfileEnabled(boolean newValue) {
setBoolean(JobContext.TASK_PROFILE, newValue);
}
/**
* Get the profiler configuration arguments.
*
* The default value for this property is
* "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
*
* @return the parameters to pass to the task child to configure profiling
*/
public String getProfileParams() {
return get(JobContext.TASK_PROFILE_PARAMS,
MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS);
}
/**
* Set the profiler configuration arguments. If the string contains a '%s' it
* will be replaced with the name of the profiling output file when the task
* runs.
*
* This value is passed to the task child JVM on the command line.
*
* @param value the configuration string
*/
public void setProfileParams(String value) {
set(JobContext.TASK_PROFILE_PARAMS, value);
}
/**
* Get the range of maps or reduces to profile.
* @param isMap is the task a map?
* @return the task ranges
*/
public IntegerRanges getProfileTaskRange(boolean isMap) {
return getRange((isMap ? JobContext.NUM_MAP_PROFILES :
JobContext.NUM_REDUCE_PROFILES), "0-2");
}
/**
* Set the ranges of maps or reduces to profile. setProfileEnabled(true)
* must also be called.
   * @param isMap whether the range applies to map tasks or reduce tasks
   * @param newValue a set of integer ranges of the task ids
*/
public void setProfileTaskRange(boolean isMap, String newValue) {
// parse the value to make sure it is legal
new Configuration.IntegerRanges(newValue);
set((isMap ? JobContext.NUM_MAP_PROFILES : JobContext.NUM_REDUCE_PROFILES),
newValue);
}
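  // Illustrative sketch (not part of the original source): profiling the first
  // three map tasks of a job. Both calls are needed, as noted in the javadoc
  // above; profiler arguments fall back to the default hprof-style parameters.
  //
  //   JobConf job = new JobConf();
  //   job.setProfileEnabled(true);
  //   job.setProfileTaskRange(true, "0-2");   // maps 0, 1 and 2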
/**
* Set the debug script to run when the map tasks fail.
*
* <p>The debug script can aid debugging of failed map tasks. The script is
* given task's stdout, stderr, syslog, jobconf files as arguments.</p>
*
* <p>The debug command, run on the node where the map failed, is:</p>
* <p><blockquote><pre>
* $script $stdout $stderr $syslog $jobconf.
* </pre></blockquote>
*
* <p> The script file is distributed through {@link DistributedCache}
* APIs. The script needs to be symlinked. </p>
*
* <p>Here is an example on how to submit a script
* <p><blockquote><pre>
* job.setMapDebugScript("./myscript");
* DistributedCache.createSymlink(job);
* DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
* </pre></blockquote>
*
* @param mDbgScript the script name
*/
public void setMapDebugScript(String mDbgScript) {
set(JobContext.MAP_DEBUG_SCRIPT, mDbgScript);
}
/**
* Get the map task's debug script.
*
* @return the debug Script for the mapred job for failed map tasks.
* @see #setMapDebugScript(String)
*/
public String getMapDebugScript() {
return get(JobContext.MAP_DEBUG_SCRIPT);
}
/**
* Set the debug script to run when the reduce tasks fail.
*
* <p>The debug script can aid debugging of failed reduce tasks. The script
* is given task's stdout, stderr, syslog, jobconf files as arguments.</p>
*
   * <p>The debug command, run on the node where the reduce failed, is:</p>
* <p><blockquote><pre>
* $script $stdout $stderr $syslog $jobconf.
* </pre></blockquote>
*
* <p> The script file is distributed through {@link DistributedCache}
* APIs. The script file needs to be symlinked </p>
*
* <p>Here is an example on how to submit a script
* <p><blockquote><pre>
* job.setReduceDebugScript("./myscript");
* DistributedCache.createSymlink(job);
* DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
* </pre></blockquote>
*
* @param rDbgScript the script name
*/
public void setReduceDebugScript(String rDbgScript) {
set(JobContext.REDUCE_DEBUG_SCRIPT, rDbgScript);
}
/**
* Get the reduce task's debug Script
*
* @return the debug script for the mapred job for failed reduce tasks.
* @see #setReduceDebugScript(String)
*/
public String getReduceDebugScript() {
return get(JobContext.REDUCE_DEBUG_SCRIPT);
}
/**
* Get the uri to be invoked in-order to send a notification after the job
* has completed (success/failure).
*
* @return the job end notification uri, <code>null</code> if it hasn't
* been set.
* @see #setJobEndNotificationURI(String)
*/
public String getJobEndNotificationURI() {
return get(JobContext.MR_JOB_END_NOTIFICATION_URL);
}
/**
* Set the uri to be invoked in-order to send a notification after the job
* has completed (success/failure).
*
* <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
* <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
* identifier and completion-status respectively.</p>
*
* <p>This is typically used by application-writers to implement chaining of
* Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
*
* @param uri the job end notification uri
* @see JobStatus
*/
public void setJobEndNotificationURI(String uri) {
set(JobContext.MR_JOB_END_NOTIFICATION_URL, uri);
}
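  // Illustrative sketch (not part of the original source): registering a
  // notification callback. $jobId and $jobStatus are expanded by the framework
  // as described above; the host and path below are hypothetical.
  //
  //   JobConf job = new JobConf();
  //   job.setJobEndNotificationURI(
  //       "http://workflow.example.com/notify?jobid=$jobId&status=$jobStatus");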
/**
* Get job-specific shared directory for use as scratch space
*
* <p>
* When a job starts, a shared directory is created at location
* <code>
* ${mapreduce.cluster.local.dir}/taskTracker/$user/jobcache/$jobid/work/ </code>.
* This directory is exposed to the users through
* <code>mapreduce.job.local.dir </code>.
* So, the tasks can use this space
* as scratch space and share files among them. </p>
   * This value is also available as a system property.
*
* @return The localized job specific shared directory
*/
public String getJobLocalDir() {
return get(JobContext.JOB_LOCAL_DIR);
}
/**
* Get memory required to run a map task of the job, in MB.
*
* If a value is specified in the configuration, it is returned.
* Else, it returns {@link JobContext#DEFAULT_MAP_MEMORY_MB}.
* <p>
* For backward compatibility, if the job configuration sets the
* key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
* from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
* after converting it from bytes to MB.
   * @return memory required to run a map task of the job, in MB.
*/
public long getMemoryForMapTask() {
long value = getDeprecatedMemoryValue();
if (value < 0) {
return getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
JobContext.DEFAULT_MAP_MEMORY_MB);
}
return value;
}
public void setMemoryForMapTask(long mem) {
setLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY, mem);
// In case that M/R 1.x applications use the old property name
setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, mem);
}
/**
* Get memory required to run a reduce task of the job, in MB.
*
* If a value is specified in the configuration, it is returned.
* Else, it returns {@link JobContext#DEFAULT_REDUCE_MEMORY_MB}.
* <p>
* For backward compatibility, if the job configuration sets the
* key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
* from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
* after converting it from bytes to MB.
* @return memory required to run a reduce task of the job, in MB.
*/
public long getMemoryForReduceTask() {
long value = getDeprecatedMemoryValue();
if (value < 0) {
return getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
JobContext.DEFAULT_REDUCE_MEMORY_MB);
}
return value;
}
// Return the value set to the key MAPRED_TASK_MAXVMEM_PROPERTY,
// converted into MBs.
// Returns DISABLED_MEMORY_LIMIT if unset, or set to a negative
// value.
private long getDeprecatedMemoryValue() {
long oldValue = getLong(MAPRED_TASK_MAXVMEM_PROPERTY,
DISABLED_MEMORY_LIMIT);
if (oldValue > 0) {
oldValue /= (1024*1024);
}
return oldValue;
}
public void setMemoryForReduceTask(long mem) {
setLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY, mem);
// In case that M/R 1.x applications use the old property name
setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, mem);
}
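  // Illustrative sketch (not part of the original source): requesting 1024 MB
  // for map tasks and 2048 MB for reduce tasks of the job. The values are in
  // MB, as documented by the getters above; the sizes are arbitrary examples.
  //
  //   JobConf job = new JobConf();
  //   job.setMemoryForMapTask(1024);
  //   job.setMemoryForReduceTask(2048);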
/**
* Return the name of the queue to which this job is submitted.
* Defaults to 'default'.
*
* @return name of the queue
*/
public String getQueueName() {
return get(JobContext.QUEUE_NAME, DEFAULT_QUEUE_NAME);
}
/**
* Set the name of the queue to which this job should be submitted.
*
* @param queueName Name of the queue
*/
public void setQueueName(String queueName) {
set(JobContext.QUEUE_NAME, queueName);
}
/**
* Normalize the negative values in configuration
*
* @param val
* @return normalized value
*/
public static long normalizeMemoryConfigValue(long val) {
if (val < 0) {
val = DISABLED_MEMORY_LIMIT;
}
return val;
}
/**
* Find a jar that contains a class of the same name, if any.
* It will return a jar file, even if that is not the first thing
* on the class path that has a class with the same name.
*
* @param my_class the class to find.
* @return a jar file that contains the class, or null.
*/
public static String findContainingJar(Class my_class) {
return ClassUtil.findContainingJar(my_class);
}
/**
* Get the memory required to run a task of this job, in bytes. See
* {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
* <p>
* This method is deprecated. Now, different memory limits can be
* set for map and reduce tasks of a job, in MB.
* <p>
* For backward compatibility, if the job configuration sets the
* key {@link #MAPRED_TASK_MAXVMEM_PROPERTY}, that value is returned.
* Otherwise, this method will return the larger of the values returned by
* {@link #getMemoryForMapTask()} and {@link #getMemoryForReduceTask()}
* after converting them into bytes.
*
* @return Memory required to run a task of this job, in bytes.
* @see #setMaxVirtualMemoryForTask(long)
* @deprecated Use {@link #getMemoryForMapTask()} and
* {@link #getMemoryForReduceTask()}
*/
@Deprecated
public long getMaxVirtualMemoryForTask() {
LOG.warn(
"getMaxVirtualMemoryForTask() is deprecated. " +
"Instead use getMemoryForMapTask() and getMemoryForReduceTask()");
long value = getLong(MAPRED_TASK_MAXVMEM_PROPERTY,
Math.max(getMemoryForMapTask(), getMemoryForReduceTask()) * 1024 * 1024);
return value;
}
/**
* Set the maximum amount of memory any task of this job can use. See
* {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
* <p>
   * mapred.task.maxvmem is split into mapreduce.map.memory.mb and
   * mapreduce.reduce.memory.mb. Each of the new keys is set to
   * mapred.task.maxvmem / (1024 * 1024), as the new values are in MB
   * while the old value was in bytes.
*
* @param vmem Maximum amount of virtual memory in bytes any task of this job
* can use.
* @see #getMaxVirtualMemoryForTask()
* @deprecated
* Use {@link #setMemoryForMapTask(long mem)} and
* Use {@link #setMemoryForReduceTask(long mem)}
*/
@Deprecated
public void setMaxVirtualMemoryForTask(long vmem) {
    LOG.warn("setMaxVirtualMemoryForTask() is deprecated. " +
      "Instead use setMemoryForMapTask() and setMemoryForReduceTask()");
if (vmem < 0) {
throw new IllegalArgumentException("Task memory allocation may not be < 0");
}
if(get(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) == null) {
setMemoryForMapTask(vmem / (1024 * 1024)); //Changing bytes to mb
setMemoryForReduceTask(vmem / (1024 * 1024));//Changing bytes to mb
}else{
this.setLong(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY,vmem);
}
}
/**
   * @deprecated this method is deprecated and no longer in use.
*/
@Deprecated
public long getMaxPhysicalMemoryForTask() {
LOG.warn("The API getMaxPhysicalMemoryForTask() is deprecated."
+ " Refer to the APIs getMemoryForMapTask() and"
+ " getMemoryForReduceTask() for details.");
return -1;
}
  /**
   * @deprecated this method is deprecated and no longer in use.
   */
@Deprecated
public void setMaxPhysicalMemoryForTask(long mem) {
LOG.warn("The API setMaxPhysicalMemoryForTask() is deprecated."
+ " The value set is ignored. Refer to "
+ " setMemoryForMapTask() and setMemoryForReduceTask() for details.");
}
static String deprecatedString(String key) {
return "The variable " + key + " is no longer used.";
}
private void checkAndWarnDeprecation() {
if(get(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY) != null) {
LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY)
+ " Instead use " + JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY
+ " and " + JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY);
}
if(get(JobConf.MAPRED_TASK_ULIMIT) != null ) {
LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_TASK_ULIMIT));
}
if(get(JobConf.MAPRED_MAP_TASK_ULIMIT) != null ) {
LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_MAP_TASK_ULIMIT));
}
if(get(JobConf.MAPRED_REDUCE_TASK_ULIMIT) != null ) {
LOG.warn(JobConf.deprecatedString(JobConf.MAPRED_REDUCE_TASK_ULIMIT));
}
}
  /* For debugging. Dump configurations to system output in XML format. */
public static void main(String[] args) throws Exception {
new JobConf(new Configuration()).writeXml(System.out);
}
}
| 69,457 | 33.198917 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapred.Merger.Segment;
import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.util.IndexedSortable;
import org.apache.hadoop.util.IndexedSorter;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.QuickSort;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
/** A Map task. */
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class MapTask extends Task {
/**
* The size of each record in the index file for the map-outputs.
*/
public static final int MAP_OUTPUT_INDEX_RECORD_LENGTH = 24;
private TaskSplitIndex splitMetaInfo = new TaskSplitIndex();
private final static int APPROX_HEADER_LENGTH = 150;
private static final Log LOG = LogFactory.getLog(MapTask.class.getName());
private Progress mapPhase;
private Progress sortPhase;
{ // set phase for this task
setPhase(TaskStatus.Phase.MAP);
getProgress().setStatus("map");
}
public MapTask() {
super();
}
public MapTask(String jobFile, TaskAttemptID taskId,
int partition, TaskSplitIndex splitIndex,
int numSlotsRequired) {
super(jobFile, taskId, partition, numSlotsRequired);
this.splitMetaInfo = splitIndex;
}
@Override
public boolean isMapTask() {
return true;
}
@Override
public void localizeConfiguration(JobConf conf)
throws IOException {
super.localizeConfiguration(conf);
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
if (isMapOrReduce()) {
splitMetaInfo.write(out);
splitMetaInfo = null;
}
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
if (isMapOrReduce()) {
splitMetaInfo.readFields(in);
}
}
/**
* This class wraps the user's record reader to update the counters and progress
* as records are read.
* @param <K>
* @param <V>
*/
class TrackedRecordReader<K, V>
implements RecordReader<K,V> {
private RecordReader<K,V> rawIn;
private Counters.Counter fileInputByteCounter;
private Counters.Counter inputRecordCounter;
private TaskReporter reporter;
private long bytesInPrev = -1;
private long bytesInCurr = -1;
private final List<Statistics> fsStats;
TrackedRecordReader(TaskReporter reporter, JobConf job)
throws IOException{
inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
this.reporter = reporter;
List<Statistics> matchedStats = null;
if (this.reporter.getInputSplit() instanceof FileSplit) {
matchedStats = getFsStatistics(((FileSplit) this.reporter
.getInputSplit()).getPath(), job);
}
fsStats = matchedStats;
bytesInPrev = getInputBytes(fsStats);
rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
job, reporter);
bytesInCurr = getInputBytes(fsStats);
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
public K createKey() {
return rawIn.createKey();
}
public V createValue() {
return rawIn.createValue();
}
public synchronized boolean next(K key, V value)
throws IOException {
boolean ret = moveToNext(key, value);
if (ret) {
incrCounters();
}
return ret;
}
protected void incrCounters() {
inputRecordCounter.increment(1);
}
protected synchronized boolean moveToNext(K key, V value)
throws IOException {
bytesInPrev = getInputBytes(fsStats);
boolean ret = rawIn.next(key, value);
bytesInCurr = getInputBytes(fsStats);
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
reporter.setProgress(getProgress());
return ret;
}
public long getPos() throws IOException { return rawIn.getPos(); }
public void close() throws IOException {
bytesInPrev = getInputBytes(fsStats);
rawIn.close();
bytesInCurr = getInputBytes(fsStats);
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
public float getProgress() throws IOException {
return rawIn.getProgress();
}
TaskReporter getTaskReporter() {
return reporter;
}
private long getInputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesRead = 0;
for (Statistics stat: stats) {
bytesRead = bytesRead + stat.getBytesRead();
}
return bytesRead;
}
}
/**
* This class skips the records based on the failed ranges from previous
* attempts.
*/
class SkippingRecordReader<K, V> extends TrackedRecordReader<K,V> {
private SkipRangeIterator skipIt;
private SequenceFile.Writer skipWriter;
private boolean toWriteSkipRecs;
private TaskUmbilicalProtocol umbilical;
private Counters.Counter skipRecCounter;
private long recIndex = -1;
SkippingRecordReader(TaskUmbilicalProtocol umbilical,
TaskReporter reporter, JobConf job) throws IOException{
super(reporter, job);
this.umbilical = umbilical;
this.skipRecCounter = reporter.getCounter(TaskCounter.MAP_SKIPPED_RECORDS);
this.toWriteSkipRecs = toWriteSkipRecs() &&
SkipBadRecords.getSkipOutputPath(conf)!=null;
skipIt = getSkipRanges().skipRangeIterator();
}
public synchronized boolean next(K key, V value)
throws IOException {
if(!skipIt.hasNext()) {
LOG.warn("Further records got skipped.");
return false;
}
boolean ret = moveToNext(key, value);
long nextRecIndex = skipIt.next();
long skip = 0;
while(recIndex<nextRecIndex && ret) {
if(toWriteSkipRecs) {
writeSkippedRec(key, value);
}
ret = moveToNext(key, value);
skip++;
}
//close the skip writer once all the ranges are skipped
if(skip>0 && skipIt.skippedAllRanges() && skipWriter!=null) {
skipWriter.close();
}
skipRecCounter.increment(skip);
reportNextRecordRange(umbilical, recIndex);
if (ret) {
incrCounters();
}
return ret;
}
protected synchronized boolean moveToNext(K key, V value)
throws IOException {
recIndex++;
return super.moveToNext(key, value);
}
@SuppressWarnings("unchecked")
private void writeSkippedRec(K key, V value) throws IOException{
if(skipWriter==null) {
Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
Path skipFile = new Path(skipDir, getTaskID().toString());
skipWriter =
SequenceFile.createWriter(
skipFile.getFileSystem(conf), conf, skipFile,
(Class<K>) createKey().getClass(),
(Class<V>) createValue().getClass(),
CompressionType.BLOCK, getTaskReporter());
}
skipWriter.append(key, value);
}
}
@Override
public void run(final JobConf job, final TaskUmbilicalProtocol umbilical)
throws IOException, ClassNotFoundException, InterruptedException {
this.umbilical = umbilical;
if (isMapTask()) {
// If there are no reducers then there won't be any sort. Hence the map
// phase will govern the entire attempt's progress.
if (conf.getNumReduceTasks() == 0) {
mapPhase = getProgress().addPhase("map", 1.0f);
} else {
// If there are reducers then the entire attempt's progress will be
// split between the map phase (67%) and the sort phase (33%).
mapPhase = getProgress().addPhase("map", 0.667f);
sortPhase = getProgress().addPhase("sort", 0.333f);
}
}
TaskReporter reporter = startReporter(umbilical);
boolean useNewApi = job.getUseNewMapper();
initialize(job, getJobID(), reporter, useNewApi);
// check if it is a cleanupJobTask
if (jobCleanup) {
runJobCleanupTask(umbilical, reporter);
return;
}
if (jobSetup) {
runJobSetupTask(umbilical, reporter);
return;
}
if (taskCleanup) {
runTaskCleanupTask(umbilical, reporter);
return;
}
if (useNewApi) {
runNewMapper(job, splitMetaInfo, umbilical, reporter);
} else {
runOldMapper(job, splitMetaInfo, umbilical, reporter);
}
done(umbilical, reporter);
}
public Progress getSortPhase() {
return sortPhase;
}
@SuppressWarnings("unchecked")
private <T> T getSplitDetails(Path file, long offset)
throws IOException {
FileSystem fs = file.getFileSystem(conf);
FSDataInputStream inFile = fs.open(file);
inFile.seek(offset);
String className = StringInterner.weakIntern(Text.readString(inFile));
Class<T> cls;
try {
cls = (Class<T>) conf.getClassByName(className);
} catch (ClassNotFoundException ce) {
IOException wrap = new IOException("Split class " + className +
" not found");
wrap.initCause(ce);
throw wrap;
}
SerializationFactory factory = new SerializationFactory(conf);
Deserializer<T> deserializer =
(Deserializer<T>) factory.getDeserializer(cls);
deserializer.open(inFile);
T split = deserializer.deserialize(null);
long pos = inFile.getPos();
getCounters().findCounter(
TaskCounter.SPLIT_RAW_BYTES).increment(pos - offset);
inFile.close();
return split;
}
@SuppressWarnings("unchecked")
private <KEY, VALUE> MapOutputCollector<KEY, VALUE>
createSortingCollector(JobConf job, TaskReporter reporter)
throws IOException, ClassNotFoundException {
MapOutputCollector.Context context =
new MapOutputCollector.Context(this, job, reporter);
Class<?>[] collectorClasses = job.getClasses(
JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR, MapOutputBuffer.class);
int remainingCollectors = collectorClasses.length;
Exception lastException = null;
for (Class clazz : collectorClasses) {
try {
if (!MapOutputCollector.class.isAssignableFrom(clazz)) {
throw new IOException("Invalid output collector class: " + clazz.getName() +
" (does not implement MapOutputCollector)");
}
Class<? extends MapOutputCollector> subclazz =
clazz.asSubclass(MapOutputCollector.class);
LOG.debug("Trying map output collector class: " + subclazz.getName());
MapOutputCollector<KEY, VALUE> collector =
ReflectionUtils.newInstance(subclazz, job);
collector.init(context);
LOG.info("Map output collector class = " + collector.getClass().getName());
return collector;
} catch (Exception e) {
String msg = "Unable to initialize MapOutputCollector " + clazz.getName();
if (--remainingCollectors > 0) {
msg += " (" + remainingCollectors + " more collector(s) to try)";
}
lastException = e;
LOG.warn(msg, e);
}
}
throw new IOException("Initialization of all the collectors failed. " +
"Error in last collector was :" + lastException.getMessage(), lastException);
}
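  // Illustrative note (not part of the original source): the collector classes
  // tried above come from JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR, which may
  // hold a comma-separated list; each entry is attempted in order and the first
  // one that initializes successfully is used, with MapOutputBuffer as the
  // default when the property is unset. A hypothetical configuration that
  // prefers a custom collector but keeps the default as a fallback:
  //
  //   conf.set(JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR,
  //       "com.example.NativeMapOutputCollector," + MapOutputBuffer.class.getName());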
@SuppressWarnings("unchecked")
private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void runOldMapper(final JobConf job,
final TaskSplitIndex splitIndex,
final TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, InterruptedException,
ClassNotFoundException {
InputSplit inputSplit = getSplitDetails(new Path(splitIndex.getSplitLocation()),
splitIndex.getStartOffset());
updateJobWithSplit(job, inputSplit);
reporter.setInputSplit(inputSplit);
RecordReader<INKEY,INVALUE> in = isSkipping() ?
new SkippingRecordReader<INKEY,INVALUE>(umbilical, reporter, job) :
new TrackedRecordReader<INKEY,INVALUE>(reporter, job);
job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
int numReduceTasks = conf.getNumReduceTasks();
LOG.info("numReduceTasks: " + numReduceTasks);
MapOutputCollector<OUTKEY, OUTVALUE> collector = null;
if (numReduceTasks > 0) {
collector = createSortingCollector(job, reporter);
} else {
collector = new DirectMapOutputCollector<OUTKEY, OUTVALUE>();
MapOutputCollector.Context context =
new MapOutputCollector.Context(this, job, reporter);
collector.init(context);
}
MapRunnable<INKEY,INVALUE,OUTKEY,OUTVALUE> runner =
ReflectionUtils.newInstance(job.getMapRunnerClass(), job);
try {
runner.run(in, new OldOutputCollector(collector, conf), reporter);
mapPhase.complete();
// start the sort phase only if there are reducers
if (numReduceTasks > 0) {
setPhase(TaskStatus.Phase.SORT);
}
statusUpdate(umbilical);
collector.flush();
in.close();
in = null;
collector.close();
collector = null;
} finally {
closeQuietly(in);
closeQuietly(collector);
}
}
/**
* Update the job with details about the file split
* @param job the job configuration to update
* @param inputSplit the file split
*/
private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) {
if (inputSplit instanceof FileSplit) {
FileSplit fileSplit = (FileSplit) inputSplit;
job.set(JobContext.MAP_INPUT_FILE, fileSplit.getPath().toString());
job.setLong(JobContext.MAP_INPUT_START, fileSplit.getStart());
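      // note: the split length (not a path) is recorded under the
      // MAP_INPUT_PATH key here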
job.setLong(JobContext.MAP_INPUT_PATH, fileSplit.getLength());
}
LOG.info("Processing split: " + inputSplit);
}
static class NewTrackingRecordReader<K,V>
extends org.apache.hadoop.mapreduce.RecordReader<K,V> {
private final org.apache.hadoop.mapreduce.RecordReader<K,V> real;
private final org.apache.hadoop.mapreduce.Counter inputRecordCounter;
private final org.apache.hadoop.mapreduce.Counter fileInputByteCounter;
private final TaskReporter reporter;
private final List<Statistics> fsStats;
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
TaskReporter reporter,
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
throws InterruptedException, IOException {
this.reporter = reporter;
this.inputRecordCounter = reporter
.getCounter(TaskCounter.MAP_INPUT_RECORDS);
this.fileInputByteCounter = reporter
.getCounter(FileInputFormatCounter.BYTES_READ);
List <Statistics> matchedStats = null;
if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
.getPath(), taskContext.getConfiguration());
}
fsStats = matchedStats;
long bytesInPrev = getInputBytes(fsStats);
this.real = inputFormat.createRecordReader(split, taskContext);
long bytesInCurr = getInputBytes(fsStats);
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
@Override
public void close() throws IOException {
long bytesInPrev = getInputBytes(fsStats);
real.close();
long bytesInCurr = getInputBytes(fsStats);
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
@Override
public K getCurrentKey() throws IOException, InterruptedException {
return real.getCurrentKey();
}
@Override
public V getCurrentValue() throws IOException, InterruptedException {
return real.getCurrentValue();
}
@Override
public float getProgress() throws IOException, InterruptedException {
return real.getProgress();
}
@Override
public void initialize(org.apache.hadoop.mapreduce.InputSplit split,
org.apache.hadoop.mapreduce.TaskAttemptContext context
) throws IOException, InterruptedException {
long bytesInPrev = getInputBytes(fsStats);
real.initialize(split, context);
long bytesInCurr = getInputBytes(fsStats);
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
long bytesInPrev = getInputBytes(fsStats);
boolean result = real.nextKeyValue();
long bytesInCurr = getInputBytes(fsStats);
if (result) {
inputRecordCounter.increment(1);
}
fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
reporter.setProgress(getProgress());
return result;
}
private long getInputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesRead = 0;
for (Statistics stat: stats) {
bytesRead = bytesRead + stat.getBytesRead();
}
return bytesRead;
}
}
/**
* Since the mapred and mapreduce Partitioners don't share a common interface
* (JobConfigurable is deprecated and a subtype of mapred.Partitioner), the
 * partitioner lives in Old/NewOutputCollector. Note that, for map-only jobs,
 * the configured partitioner should not be called: it's common for
 * partitioners to compute a result mod numReduces, which would cause a
 * divide-by-zero error when there are no reduces.
 */
private static class OldOutputCollector<K,V> implements OutputCollector<K,V> {
private final Partitioner<K,V> partitioner;
private final MapOutputCollector<K,V> collector;
private final int numPartitions;
@SuppressWarnings("unchecked")
OldOutputCollector(MapOutputCollector<K,V> collector, JobConf conf) {
numPartitions = conf.getNumReduceTasks();
if (numPartitions > 1) {
partitioner = (Partitioner<K,V>)
ReflectionUtils.newInstance(conf.getPartitionerClass(), conf);
} else {
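        // 0 or 1 reduces: skip the user-configured partitioner (see the class
        // comment). With no reduces this dummy returns -1, which the
        // DirectMapOutputCollector simply ignores.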
partitioner = new Partitioner<K,V>() {
@Override
public void configure(JobConf job) { }
@Override
public int getPartition(K key, V value, int numPartitions) {
return numPartitions - 1;
}
};
}
this.collector = collector;
}
@Override
public void collect(K key, V value) throws IOException {
try {
collector.collect(key, value,
partitioner.getPartition(key, value, numPartitions));
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new IOException("interrupt exception", ie);
}
}
}
private class NewDirectOutputCollector<K,V>
extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
private final org.apache.hadoop.mapreduce.RecordWriter out;
private final TaskReporter reporter;
private final Counters.Counter mapOutputRecordCounter;
private final Counters.Counter fileOutputByteCounter;
private final List<Statistics> fsStats;
@SuppressWarnings("unchecked")
NewDirectOutputCollector(MRJobConfig jobContext,
JobConf job, TaskUmbilicalProtocol umbilical, TaskReporter reporter)
throws IOException, ClassNotFoundException, InterruptedException {
this.reporter = reporter;
mapOutputRecordCounter = reporter
.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
fileOutputByteCounter = reporter
.getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
List<Statistics> matchedStats = null;
if (outputFormat instanceof org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) {
matchedStats = getFsStatistics(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
.getOutputPath(taskContext), taskContext.getConfiguration());
}
fsStats = matchedStats;
long bytesOutPrev = getOutputBytes(fsStats);
out = outputFormat.getRecordWriter(taskContext);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
@Override
@SuppressWarnings("unchecked")
public void write(K key, V value)
throws IOException, InterruptedException {
reporter.progress();
long bytesOutPrev = getOutputBytes(fsStats);
out.write(key, value);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
mapOutputRecordCounter.increment(1);
}
@Override
public void close(TaskAttemptContext context)
throws IOException,InterruptedException {
reporter.progress();
if (out != null) {
long bytesOutPrev = getOutputBytes(fsStats);
out.close(context);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
}
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}
private class NewOutputCollector<K,V>
extends org.apache.hadoop.mapreduce.RecordWriter<K,V> {
private final MapOutputCollector<K,V> collector;
private final org.apache.hadoop.mapreduce.Partitioner<K,V> partitioner;
private final int partitions;
@SuppressWarnings("unchecked")
NewOutputCollector(org.apache.hadoop.mapreduce.JobContext jobContext,
JobConf job,
TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, ClassNotFoundException {
collector = createSortingCollector(job, reporter);
partitions = jobContext.getNumReduceTasks();
if (partitions > 1) {
partitioner = (org.apache.hadoop.mapreduce.Partitioner<K,V>)
ReflectionUtils.newInstance(jobContext.getPartitionerClass(), job);
} else {
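        // a single reduce: every record goes to partition 0, so the
        // user-configured partitioner is never consulted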
partitioner = new org.apache.hadoop.mapreduce.Partitioner<K,V>() {
@Override
public int getPartition(K key, V value, int numPartitions) {
return partitions - 1;
}
};
}
}
@Override
public void write(K key, V value) throws IOException, InterruptedException {
collector.collect(key, value,
partitioner.getPartition(key, value, partitions));
}
@Override
public void close(TaskAttemptContext context
) throws IOException,InterruptedException {
try {
collector.flush();
} catch (ClassNotFoundException cnf) {
throw new IOException("can't find class ", cnf);
}
collector.close();
}
}
@SuppressWarnings("unchecked")
private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void runNewMapper(final JobConf job,
final TaskSplitIndex splitIndex,
final TaskUmbilicalProtocol umbilical,
TaskReporter reporter
) throws IOException, ClassNotFoundException,
InterruptedException {
// make a task context so we can get the classes
org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
getTaskID(),
reporter);
// make a mapper
org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE> mapper =
(org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>)
ReflectionUtils.newInstance(taskContext.getMapperClass(), job);
// make the input format
org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE> inputFormat =
(org.apache.hadoop.mapreduce.InputFormat<INKEY,INVALUE>)
ReflectionUtils.newInstance(taskContext.getInputFormatClass(), job);
// rebuild the input split
org.apache.hadoop.mapreduce.InputSplit split = null;
split = getSplitDetails(new Path(splitIndex.getSplitLocation()),
splitIndex.getStartOffset());
LOG.info("Processing split: " + split);
org.apache.hadoop.mapreduce.RecordReader<INKEY,INVALUE> input =
new NewTrackingRecordReader<INKEY,INVALUE>
(split, inputFormat, reporter, taskContext);
job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
org.apache.hadoop.mapreduce.RecordWriter output = null;
// get an output object
if (job.getNumReduceTasks() == 0) {
output =
new NewDirectOutputCollector(taskContext, job, umbilical, reporter);
} else {
output = new NewOutputCollector(taskContext, job, umbilical, reporter);
}
org.apache.hadoop.mapreduce.MapContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
mapContext =
new MapContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, getTaskID(),
input, output,
committer,
reporter, split);
org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
mapperContext =
new WrappedMapper<INKEY, INVALUE, OUTKEY, OUTVALUE>().getMapContext(
mapContext);
try {
input.initialize(split, mapperContext);
mapper.run(mapperContext);
mapPhase.complete();
setPhase(TaskStatus.Phase.SORT);
statusUpdate(umbilical);
input.close();
input = null;
output.close(mapperContext);
output = null;
} finally {
closeQuietly(input);
closeQuietly(output, mapperContext);
}
}
class DirectMapOutputCollector<K, V>
implements MapOutputCollector<K, V> {
private RecordWriter<K, V> out = null;
private TaskReporter reporter = null;
private Counters.Counter mapOutputRecordCounter;
private Counters.Counter fileOutputByteCounter;
private List<Statistics> fsStats;
public DirectMapOutputCollector() {
}
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context
) throws IOException, ClassNotFoundException {
this.reporter = context.getReporter();
JobConf job = context.getJobConf();
String finalName = getOutputName(getPartition());
FileSystem fs = FileSystem.get(job);
OutputFormat<K, V> outputFormat = job.getOutputFormat();
mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
fileOutputByteCounter = reporter
.getCounter(FileOutputFormatCounter.BYTES_WRITTEN);
List<Statistics> matchedStats = null;
if (outputFormat instanceof FileOutputFormat) {
matchedStats = getFsStatistics(FileOutputFormat.getOutputPath(job), job);
}
fsStats = matchedStats;
long bytesOutPrev = getOutputBytes(fsStats);
out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
public void close() throws IOException {
if (this.out != null) {
long bytesOutPrev = getOutputBytes(fsStats);
out.close(this.reporter);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
}
}
public void flush() throws IOException, InterruptedException,
ClassNotFoundException {
}
public void collect(K key, V value, int partition) throws IOException {
reporter.progress();
long bytesOutPrev = getOutputBytes(fsStats);
out.write(key, value);
long bytesOutCurr = getOutputBytes(fsStats);
fileOutputByteCounter.increment(bytesOutCurr - bytesOutPrev);
mapOutputRecordCounter.increment(1);
}
private long getOutputBytes(List<Statistics> stats) {
if (stats == null) return 0;
long bytesWritten = 0;
for (Statistics stat: stats) {
bytesWritten = bytesWritten + stat.getBytesWritten();
}
return bytesWritten;
}
}
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public static class MapOutputBuffer<K extends Object, V extends Object>
implements MapOutputCollector<K, V>, IndexedSortable {
private int partitions;
private JobConf job;
private TaskReporter reporter;
private Class<K> keyClass;
private Class<V> valClass;
private RawComparator<K> comparator;
private SerializationFactory serializationFactory;
private Serializer<K> keySerializer;
private Serializer<V> valSerializer;
private CombinerRunner<K,V> combinerRunner;
private CombineOutputCollector<K, V> combineCollector;
// Compression for map-outputs
private CompressionCodec codec;
// k/v accounting
private IntBuffer kvmeta; // metadata overlay on backing store
int kvstart; // marks origin of spill metadata
int kvend; // marks end of spill metadata
int kvindex; // marks end of fully serialized records
int equator; // marks origin of meta/serialization
int bufstart; // marks beginning of spill
int bufend; // marks beginning of collectable
int bufmark; // marks end of record
int bufindex; // marks end of collected
int bufvoid; // marks the point where we should stop
// reading at the end of the buffer
byte[] kvbuffer; // main output buffer
private final byte[] b0 = new byte[0];
private static final int VALSTART = 0; // val offset in acct
private static final int KEYSTART = 1; // key offset in acct
private static final int PARTITION = 2; // partition offset in acct
private static final int VALLEN = 3; // length of value
private static final int NMETA = 4; // num meta ints
private static final int METASIZE = NMETA * 4; // size in bytes
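    // Each record therefore costs METASIZE (16) bytes of accounting data in
    // addition to its serialized key/value bytes; both live in the same
    // circular kvbuffer, with kvmeta providing an int view of the metadata.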
// spill accounting
private int maxRec;
private int softLimit;
    boolean spillInProgress;
int bufferRemaining;
volatile Throwable sortSpillException = null;
int numSpills = 0;
private int minSpillsForCombine;
private IndexedSorter sorter;
final ReentrantLock spillLock = new ReentrantLock();
final Condition spillDone = spillLock.newCondition();
final Condition spillReady = spillLock.newCondition();
final BlockingBuffer bb = new BlockingBuffer();
volatile boolean spillThreadRunning = false;
final SpillThread spillThread = new SpillThread();
private FileSystem rfs;
// Counters
private Counters.Counter mapOutputByteCounter;
private Counters.Counter mapOutputRecordCounter;
private Counters.Counter fileOutputByteCounter;
final ArrayList<SpillRecord> indexCacheList =
new ArrayList<SpillRecord>();
private int totalIndexCacheMemory;
private int indexCacheMemoryLimit;
private static final int INDEX_CACHE_MEMORY_LIMIT_DEFAULT = 1024 * 1024;
private MapTask mapTask;
private MapOutputFile mapOutputFile;
private Progress sortPhase;
private Counters.Counter spilledRecordsCounter;
public MapOutputBuffer() {
}
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context
) throws IOException, ClassNotFoundException {
job = context.getJobConf();
reporter = context.getReporter();
mapTask = context.getMapTask();
mapOutputFile = mapTask.getMapOutputFile();
sortPhase = mapTask.getSortPhase();
spilledRecordsCounter = reporter.getCounter(TaskCounter.SPILLED_RECORDS);
partitions = job.getNumReduceTasks();
rfs = ((LocalFileSystem)FileSystem.getLocal(job)).getRaw();
//sanity checks
final float spillper =
job.getFloat(JobContext.MAP_SORT_SPILL_PERCENT, (float)0.8);
final int sortmb = job.getInt(JobContext.IO_SORT_MB, 100);
indexCacheMemoryLimit = job.getInt(JobContext.INDEX_CACHE_MEMORY_LIMIT,
INDEX_CACHE_MEMORY_LIMIT_DEFAULT);
if (spillper > (float)1.0 || spillper <= (float)0.0) {
throw new IOException("Invalid \"" + JobContext.MAP_SORT_SPILL_PERCENT +
"\": " + spillper);
}
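      // IO_SORT_MB must be at most 2047 so that (sortmb << 20) below cannot
      // overflow an int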
if ((sortmb & 0x7FF) != sortmb) {
throw new IOException(
"Invalid \"" + JobContext.IO_SORT_MB + "\": " + sortmb);
}
sorter = ReflectionUtils.newInstance(job.getClass("map.sort.class",
QuickSort.class, IndexedSorter.class), job);
// buffers and accounting
int maxMemUsage = sortmb << 20;
maxMemUsage -= maxMemUsage % METASIZE;
kvbuffer = new byte[maxMemUsage];
bufvoid = kvbuffer.length;
kvmeta = ByteBuffer.wrap(kvbuffer)
.order(ByteOrder.nativeOrder())
.asIntBuffer();
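      // serialized key/value bytes grow "up" from the equator while metadata
      // entries grow "down" from it; setEquator() places kvindex just below
      // the equator, aligned to a METASIZE boundary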
setEquator(0);
bufstart = bufend = bufindex = equator;
kvstart = kvend = kvindex;
maxRec = kvmeta.capacity() / NMETA;
softLimit = (int)(kvbuffer.length * spillper);
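      // bufferRemaining counts down toward the next point at which the
      // spill/limit checks in collect() and Buffer.write() must run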
bufferRemaining = softLimit;
LOG.info(JobContext.IO_SORT_MB + ": " + sortmb);
LOG.info("soft limit at " + softLimit);
LOG.info("bufstart = " + bufstart + "; bufvoid = " + bufvoid);
LOG.info("kvstart = " + kvstart + "; length = " + maxRec);
// k/v serialization
comparator = job.getOutputKeyComparator();
keyClass = (Class<K>)job.getMapOutputKeyClass();
valClass = (Class<V>)job.getMapOutputValueClass();
serializationFactory = new SerializationFactory(job);
keySerializer = serializationFactory.getSerializer(keyClass);
keySerializer.open(bb);
valSerializer = serializationFactory.getSerializer(valClass);
valSerializer.open(bb);
// output counters
mapOutputByteCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
mapOutputRecordCounter =
reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
fileOutputByteCounter = reporter
.getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
// compression
if (job.getCompressMapOutput()) {
Class<? extends CompressionCodec> codecClass =
job.getMapOutputCompressorClass(DefaultCodec.class);
codec = ReflectionUtils.newInstance(codecClass, job);
} else {
codec = null;
}
// combiner
final Counters.Counter combineInputCounter =
reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
combinerRunner = CombinerRunner.create(job, getTaskID(),
combineInputCounter,
reporter, null);
if (combinerRunner != null) {
final Counters.Counter combineOutputCounter =
reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
combineCollector= new CombineOutputCollector<K,V>(combineOutputCounter, reporter, job);
} else {
combineCollector = null;
}
spillInProgress = false;
minSpillsForCombine = job.getInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
spillThread.setDaemon(true);
spillThread.setName("SpillThread");
spillLock.lock();
try {
spillThread.start();
while (!spillThreadRunning) {
spillDone.await();
}
} catch (InterruptedException e) {
throw new IOException("Spill thread failed to initialize", e);
} finally {
spillLock.unlock();
}
if (sortSpillException != null) {
throw new IOException("Spill thread failed to initialize",
sortSpillException);
}
}
/**
* Serialize the key, value to intermediate storage.
* When this method returns, kvindex must refer to sufficient unused
* storage to store one METADATA.
*/
public synchronized void collect(K key, V value, final int partition
) throws IOException {
reporter.progress();
if (key.getClass() != keyClass) {
throw new IOException("Type mismatch in key from map: expected "
+ keyClass.getName() + ", received "
+ key.getClass().getName());
}
if (value.getClass() != valClass) {
throw new IOException("Type mismatch in value from map: expected "
+ valClass.getName() + ", received "
+ value.getClass().getName());
}
if (partition < 0 || partition >= partitions) {
throw new IOException("Illegal partition for " + key + " (" +
partition + ")");
}
checkSpillException();
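      // reserve metadata space for this record up front; if that crosses the
      // soft limit, the block below decides whether to start a spill or to
      // reclaim space from a spill that has already finished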
bufferRemaining -= METASIZE;
if (bufferRemaining <= 0) {
// start spill if the thread is not running and the soft limit has been
// reached
spillLock.lock();
try {
do {
if (!spillInProgress) {
final int kvbidx = 4 * kvindex;
final int kvbend = 4 * kvend;
// serialized, unspilled bytes always lie between kvindex and
// bufindex, crossing the equator. Note that any void space
// created by a reset must be included in "used" bytes
final int bUsed = distanceTo(kvbidx, bufindex);
final boolean bufsoftlimit = bUsed >= softLimit;
if ((kvbend + METASIZE) % kvbuffer.length !=
equator - (equator % METASIZE)) {
// spill finished, reclaim space
resetSpill();
bufferRemaining = Math.min(
distanceTo(bufindex, kvbidx) - 2 * METASIZE,
softLimit - bUsed) - METASIZE;
continue;
} else if (bufsoftlimit && kvindex != kvend) {
// spill records, if any collected; check latter, as it may
// be possible for metadata alignment to hit spill pcnt
startSpill();
final int avgRec = (int)
(mapOutputByteCounter.getCounter() /
mapOutputRecordCounter.getCounter());
// leave at least half the split buffer for serialization data
// ensure that kvindex >= bufindex
final int distkvi = distanceTo(bufindex, kvbidx);
final int newPos = (bufindex +
Math.max(2 * METASIZE - 1,
Math.min(distkvi / 2,
distkvi / (METASIZE + avgRec) * METASIZE)))
% kvbuffer.length;
setEquator(newPos);
bufmark = bufindex = newPos;
final int serBound = 4 * kvend;
// bytes remaining before the lock must be held and limits
// checked is the minimum of three arcs: the metadata space, the
// serialization space, and the soft limit
bufferRemaining = Math.min(
// metadata max
distanceTo(bufend, newPos),
Math.min(
// serialization max
distanceTo(newPos, serBound),
// soft limit
softLimit)) - 2 * METASIZE;
}
}
} while (false);
} finally {
spillLock.unlock();
}
}
try {
// serialize key bytes into buffer
int keystart = bufindex;
keySerializer.serialize(key);
if (bufindex < keystart) {
// wrapped the key; must make contiguous
bb.shiftBufferedKey();
keystart = 0;
}
// serialize value bytes into buffer
final int valstart = bufindex;
valSerializer.serialize(value);
// It's possible for records to have zero length, i.e. the serializer
// will perform no writes. To ensure that the boundary conditions are
// checked and that the kvindex invariant is maintained, perform a
// zero-length write into the buffer. The logic monitoring this could be
// moved into collect, but this is cleaner and inexpensive. For now, it
// is acceptable.
bb.write(b0, 0, 0);
// the record must be marked after the preceding write, as the metadata
// for this record are not yet written
int valend = bb.markRecord();
mapOutputRecordCounter.increment(1);
mapOutputByteCounter.increment(
distanceTo(keystart, valend, bufvoid));
// write accounting info
kvmeta.put(kvindex + PARTITION, partition);
kvmeta.put(kvindex + KEYSTART, keystart);
kvmeta.put(kvindex + VALSTART, valstart);
kvmeta.put(kvindex + VALLEN, distanceTo(valstart, valend));
// advance kvindex
kvindex = (kvindex - NMETA + kvmeta.capacity()) % kvmeta.capacity();
} catch (MapBufferTooSmallException e) {
LOG.info("Record too large for in-memory buffer: " + e.getMessage());
spillSingleRecord(key, value, partition);
mapOutputRecordCounter.increment(1);
return;
}
}
private TaskAttemptID getTaskID() {
return mapTask.getTaskID();
}
/**
* Set the point from which meta and serialization data expand. The meta
* indices are aligned with the buffer, so metadata never spans the ends of
* the circular buffer.
*/
private void setEquator(int pos) {
equator = pos;
// set index prior to first entry, aligned at meta boundary
final int aligned = pos - (pos % METASIZE);
// Cast one of the operands to long to avoid integer overflow
kvindex = (int)
(((long)aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
LOG.info("(EQUATOR) " + pos + " kvi " + kvindex +
"(" + (kvindex * 4) + ")");
}
/**
* The spill is complete, so set the buffer and meta indices to be equal to
* the new equator to free space for continuing collection. Note that when
* kvindex == kvend == kvstart, the buffer is empty.
*/
private void resetSpill() {
final int e = equator;
bufstart = bufend = e;
final int aligned = e - (e % METASIZE);
// set start/end to point to first meta record
// Cast one of the operands to long to avoid integer overflow
kvstart = kvend = (int)
(((long)aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
LOG.info("(RESET) equator " + e + " kv " + kvstart + "(" +
(kvstart * 4) + ")" + " kvi " + kvindex + "(" + (kvindex * 4) + ")");
}
/**
* Compute the distance in bytes between two indices in the serialization
* buffer.
* @see #distanceTo(int,int,int)
*/
final int distanceTo(final int i, final int j) {
return distanceTo(i, j, kvbuffer.length);
}
/**
* Compute the distance between two indices in the circular buffer given the
* max distance.
*/
int distanceTo(final int i, final int j, final int mod) {
return i <= j
? j - i
: mod - i + j;
}
/**
* For the given meta position, return the offset into the int-sized
* kvmeta buffer.
*/
int offsetFor(int metapos) {
return metapos * NMETA;
}
/**
* Compare logical range, st i, j MOD offset capacity.
* Compare by partition, then by key.
* @see IndexedSortable#compare
*/
@Override
public int compare(final int mi, final int mj) {
final int kvi = offsetFor(mi % maxRec);
final int kvj = offsetFor(mj % maxRec);
final int kvip = kvmeta.get(kvi + PARTITION);
final int kvjp = kvmeta.get(kvj + PARTITION);
// sort by partition
if (kvip != kvjp) {
return kvip - kvjp;
}
// sort by key
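      // (the key occupies [KEYSTART, VALSTART) because the value is always
      // serialized immediately after its key)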
return comparator.compare(kvbuffer,
kvmeta.get(kvi + KEYSTART),
kvmeta.get(kvi + VALSTART) - kvmeta.get(kvi + KEYSTART),
kvbuffer,
kvmeta.get(kvj + KEYSTART),
kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART));
}
    final byte[] META_BUFFER_TMP = new byte[METASIZE];
/**
* Swap metadata for items i, j
* @see IndexedSortable#swap
*/
@Override
public void swap(final int mi, final int mj) {
int iOff = (mi % maxRec) * METASIZE;
int jOff = (mj % maxRec) * METASIZE;
System.arraycopy(kvbuffer, iOff, META_BUFFER_TMP, 0, METASIZE);
System.arraycopy(kvbuffer, jOff, kvbuffer, iOff, METASIZE);
System.arraycopy(META_BUFFER_TMP, 0, kvbuffer, jOff, METASIZE);
}
    /**
     * Inner stream used to serialize records into the circular collection
     * buffer; writes block while a spill of serialized records to disk is in
     * progress and there is no room to continue collecting.
     */
protected class BlockingBuffer extends DataOutputStream {
public BlockingBuffer() {
super(new Buffer());
}
/**
* Mark end of record. Note that this is required if the buffer is to
* cut the spill in the proper place.
*/
public int markRecord() {
bufmark = bufindex;
return bufindex;
}
/**
* Set position from last mark to end of writable buffer, then rewrite
* the data between last mark and kvindex.
* This handles a special case where the key wraps around the buffer.
* If the key is to be passed to a RawComparator, then it must be
* contiguous in the buffer. This recopies the data in the buffer back
* into itself, but starting at the beginning of the buffer. Note that
* this method should <b>only</b> be called immediately after detecting
* this condition. To call it at any other time is undefined and would
* likely result in data loss or corruption.
* @see #markRecord()
*/
protected void shiftBufferedKey() throws IOException {
// spillLock unnecessary; both kvend and kvindex are current
int headbytelen = bufvoid - bufmark;
bufvoid = bufmark;
final int kvbidx = 4 * kvindex;
final int kvbend = 4 * kvend;
final int avail =
Math.min(distanceTo(0, kvbidx), distanceTo(0, kvbend));
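        // if the wrapped key (tail at the end of the buffer plus head at the
        // start) fits below the metadata region, shift it into place within
        // the buffer; otherwise re-write it through the blocking stream, which
        // may wait for a spill to free space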
if (bufindex + headbytelen < avail) {
System.arraycopy(kvbuffer, 0, kvbuffer, headbytelen, bufindex);
System.arraycopy(kvbuffer, bufvoid, kvbuffer, 0, headbytelen);
bufindex += headbytelen;
bufferRemaining -= kvbuffer.length - bufvoid;
} else {
byte[] keytmp = new byte[bufindex];
System.arraycopy(kvbuffer, 0, keytmp, 0, bufindex);
bufindex = 0;
out.write(kvbuffer, bufmark, headbytelen);
out.write(keytmp);
}
}
}
public class Buffer extends OutputStream {
private final byte[] scratch = new byte[1];
@Override
public void write(int v)
throws IOException {
scratch[0] = (byte)v;
write(scratch, 0, 1);
}
/**
* Attempt to write a sequence of bytes to the collection buffer.
* This method will block if the spill thread is running and it
* cannot write.
       * @throws MapBufferTooSmallException if record is too large to
       *                                     serialize into the collection buffer.
*/
@Override
public void write(byte b[], int off, int len)
throws IOException {
// must always verify the invariant that at least METASIZE bytes are
// available beyond kvindex, even when len == 0
bufferRemaining -= len;
if (bufferRemaining <= 0) {
// writing these bytes could exhaust available buffer space or fill
// the buffer to soft limit. check if spill or blocking are necessary
boolean blockwrite = false;
spillLock.lock();
try {
do {
checkSpillException();
final int kvbidx = 4 * kvindex;
final int kvbend = 4 * kvend;
// ser distance to key index
final int distkvi = distanceTo(bufindex, kvbidx);
// ser distance to spill end index
final int distkve = distanceTo(bufindex, kvbend);
// if kvindex is closer than kvend, then a spill is neither in
// progress nor complete and reset since the lock was held. The
// write should block only if there is insufficient space to
// complete the current write, write the metadata for this record,
// and write the metadata for the next record. If kvend is closer,
// then the write should block if there is too little space for
// either the metadata or the current write. Note that collect
// ensures its metadata requirement with a zero-length write
blockwrite = distkvi <= distkve
? distkvi <= len + 2 * METASIZE
: distkve <= len || distanceTo(bufend, kvbidx) < 2 * METASIZE;
if (!spillInProgress) {
if (blockwrite) {
if ((kvbend + METASIZE) % kvbuffer.length !=
equator - (equator % METASIZE)) {
// spill finished, reclaim space
// need to use meta exclusively; zero-len rec & 100% spill
// pcnt would fail
resetSpill(); // resetSpill doesn't move bufindex, kvindex
bufferRemaining = Math.min(
distkvi - 2 * METASIZE,
softLimit - distanceTo(kvbidx, bufindex)) - len;
continue;
}
// we have records we can spill; only spill if blocked
if (kvindex != kvend) {
startSpill();
// Blocked on this write, waiting for the spill just
// initiated to finish. Instead of repositioning the marker
// and copying the partial record, we set the record start
// to be the new equator
setEquator(bufmark);
} else {
// We have no buffered records, and this record is too large
// to write into kvbuffer. We must spill it directly from
// collect
final int size = distanceTo(bufstart, bufindex) + len;
setEquator(0);
bufstart = bufend = bufindex = equator;
kvstart = kvend = kvindex;
bufvoid = kvbuffer.length;
throw new MapBufferTooSmallException(size + " bytes");
}
}
}
if (blockwrite) {
// wait for spill
try {
while (spillInProgress) {
reporter.progress();
spillDone.await();
}
} catch (InterruptedException e) {
throw new IOException(
"Buffer interrupted while waiting for the writer", e);
}
}
} while (blockwrite);
} finally {
spillLock.unlock();
}
}
// here, we know that we have sufficient space to write
if (bufindex + len > bufvoid) {
final int gaplen = bufvoid - bufindex;
System.arraycopy(b, off, kvbuffer, bufindex, gaplen);
len -= gaplen;
off += gaplen;
bufindex = 0;
}
System.arraycopy(b, off, kvbuffer, bufindex, len);
bufindex += len;
}
}
public void flush() throws IOException, ClassNotFoundException,
InterruptedException {
LOG.info("Starting flush of map output");
if (kvbuffer == null) {
LOG.info("kvbuffer is null. Skipping flush.");
return;
}
spillLock.lock();
try {
while (spillInProgress) {
reporter.progress();
spillDone.await();
}
checkSpillException();
final int kvbend = 4 * kvend;
if ((kvbend + METASIZE) % kvbuffer.length !=
equator - (equator % METASIZE)) {
// spill finished
resetSpill();
}
if (kvindex != kvend) {
kvend = (kvindex + NMETA) % kvmeta.capacity();
bufend = bufmark;
LOG.info("Spilling map output");
LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
"; bufvoid = " + bufvoid);
LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
"); kvend = " + kvend + "(" + (kvend * 4) +
"); length = " + (distanceTo(kvend, kvstart,
kvmeta.capacity()) + 1) + "/" + maxRec);
sortAndSpill();
}
} catch (InterruptedException e) {
throw new IOException("Interrupted while waiting for the writer", e);
} finally {
spillLock.unlock();
}
assert !spillLock.isHeldByCurrentThread();
      // shut down the spill thread and wait for it to exit. Since the preceding
      // code ensures that it is finished with its work (and sortAndSpill did
      // not throw), we elect to use an interrupt instead of setting a flag.
      // Spilling from this thread while the spill thread finishes its own work
      // could be a useful extension, and would also be a reason to prefer the
      // flag-based approach.
try {
spillThread.interrupt();
spillThread.join();
} catch (InterruptedException e) {
throw new IOException("Spill failed", e);
}
// release sort buffer before the merge
kvbuffer = null;
mergeParts();
Path outputPath = mapOutputFile.getOutputFile();
fileOutputByteCounter.increment(rfs.getFileStatus(outputPath).getLen());
}
public void close() { }
protected class SpillThread extends Thread {
@Override
public void run() {
spillLock.lock();
spillThreadRunning = true;
try {
while (true) {
spillDone.signal();
while (!spillInProgress) {
spillReady.await();
}
try {
spillLock.unlock();
sortAndSpill();
} catch (Throwable t) {
sortSpillException = t;
} finally {
spillLock.lock();
if (bufend < bufstart) {
bufvoid = kvbuffer.length;
}
kvstart = kvend;
bufstart = bufend;
spillInProgress = false;
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} finally {
spillLock.unlock();
spillThreadRunning = false;
}
}
}
private void checkSpillException() throws IOException {
final Throwable lspillException = sortSpillException;
if (lspillException != null) {
if (lspillException instanceof Error) {
final String logMsg = "Task " + getTaskID() + " failed : " +
StringUtils.stringifyException(lspillException);
mapTask.reportFatalError(getTaskID(), lspillException, logMsg);
}
throw new IOException("Spill failed", lspillException);
}
}
private void startSpill() {
assert !spillInProgress;
kvend = (kvindex + NMETA) % kvmeta.capacity();
bufend = bufmark;
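      // kvend/bufend now freeze the range handed to the spill thread; new
      // records keep accumulating beyond them while the spill runs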
spillInProgress = true;
LOG.info("Spilling map output");
LOG.info("bufstart = " + bufstart + "; bufend = " + bufmark +
"; bufvoid = " + bufvoid);
LOG.info("kvstart = " + kvstart + "(" + (kvstart * 4) +
"); kvend = " + kvend + "(" + (kvend * 4) +
"); length = " + (distanceTo(kvend, kvstart,
kvmeta.capacity()) + 1) + "/" + maxRec);
spillReady.signal();
}
private void sortAndSpill() throws IOException, ClassNotFoundException,
InterruptedException {
//approximate the length of the output file to be the length of the
//buffer + header lengths for the partitions
final long size = distanceTo(bufstart, bufend, bufvoid) +
partitions * APPROX_HEADER_LENGTH;
FSDataOutputStream out = null;
try {
// create spill file
final SpillRecord spillRec = new SpillRecord(partitions);
final Path filename =
mapOutputFile.getSpillFileForWrite(numSpills, size);
out = rfs.create(filename);
final int mstart = kvend / NMETA;
final int mend = 1 + // kvend is a valid record
(kvstart >= kvend
? kvstart
: kvmeta.capacity() + kvstart) / NMETA;
sorter.sort(MapOutputBuffer.this, mstart, mend, reporter);
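        // after the sort, metadata records are grouped by partition and
        // ordered by key within each partition (see compare()), so each
        // partition can be written out as one contiguous run below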
int spindex = mstart;
final IndexRecord rec = new IndexRecord();
final InMemValBytes value = new InMemValBytes();
for (int i = 0; i < partitions; ++i) {
IFile.Writer<K, V> writer = null;
try {
long segmentStart = out.getPos();
FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
writer = new Writer<K, V>(job, partitionOut, keyClass, valClass, codec,
spilledRecordsCounter);
if (combinerRunner == null) {
// spill directly
DataInputBuffer key = new DataInputBuffer();
while (spindex < mend &&
kvmeta.get(offsetFor(spindex % maxRec) + PARTITION) == i) {
final int kvoff = offsetFor(spindex % maxRec);
int keystart = kvmeta.get(kvoff + KEYSTART);
int valstart = kvmeta.get(kvoff + VALSTART);
key.reset(kvbuffer, keystart, valstart - keystart);
getVBytesForOffset(kvoff, value);
writer.append(key, value);
++spindex;
}
} else {
int spstart = spindex;
while (spindex < mend &&
kvmeta.get(offsetFor(spindex % maxRec)
+ PARTITION) == i) {
++spindex;
}
              // Note: we would like to avoid the combiner if we have fewer
              // than some threshold of records for a partition
if (spstart != spindex) {
combineCollector.setWriter(writer);
RawKeyValueIterator kvIter =
new MRResultIterator(spstart, spindex);
combinerRunner.combine(kvIter, combineCollector);
}
}
// close the writer
writer.close();
// record offsets
rec.startOffset = segmentStart;
rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
spillRec.putIndex(rec, i);
writer = null;
} finally {
if (null != writer) writer.close();
}
}
if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
// create spill index file
Path indexFilename =
mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
* MAP_OUTPUT_INDEX_RECORD_LENGTH);
spillRec.writeToFile(indexFilename, job);
} else {
indexCacheList.add(spillRec);
totalIndexCacheMemory +=
spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
}
LOG.info("Finished spill " + numSpills);
++numSpills;
} finally {
if (out != null) out.close();
}
}
/**
* Handles the degenerate case where serialization fails to fit in
* the in-memory buffer, so we must spill the record from collect
* directly to a spill file. Consider this "losing".
*/
private void spillSingleRecord(final K key, final V value,
int partition) throws IOException {
long size = kvbuffer.length + partitions * APPROX_HEADER_LENGTH;
FSDataOutputStream out = null;
try {
// create spill file
final SpillRecord spillRec = new SpillRecord(partitions);
final Path filename =
mapOutputFile.getSpillFileForWrite(numSpills, size);
out = rfs.create(filename);
// we don't run the combiner for a single record
IndexRecord rec = new IndexRecord();
for (int i = 0; i < partitions; ++i) {
IFile.Writer<K, V> writer = null;
try {
long segmentStart = out.getPos();
// Create a new codec, don't care!
FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
writer = new IFile.Writer<K,V>(job, partitionOut, keyClass, valClass, codec,
spilledRecordsCounter);
if (i == partition) {
final long recordStart = out.getPos();
writer.append(key, value);
// Note that our map byte count will not be accurate with
// compression
mapOutputByteCounter.increment(out.getPos() - recordStart);
}
writer.close();
// record offsets
rec.startOffset = segmentStart;
rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
spillRec.putIndex(rec, i);
writer = null;
} catch (IOException e) {
if (null != writer) writer.close();
throw e;
}
}
if (totalIndexCacheMemory >= indexCacheMemoryLimit) {
// create spill index file
Path indexFilename =
mapOutputFile.getSpillIndexFileForWrite(numSpills, partitions
* MAP_OUTPUT_INDEX_RECORD_LENGTH);
spillRec.writeToFile(indexFilename, job);
} else {
indexCacheList.add(spillRec);
totalIndexCacheMemory +=
spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
}
++numSpills;
} finally {
if (out != null) out.close();
}
}
/**
* Given an offset, populate vbytes with the associated set of
     * serialized value bytes. Should only be called during a spill.
*/
private void getVBytesForOffset(int kvoff, InMemValBytes vbytes) {
      // the value length is recorded directly in the metadata (VALLEN), so the
      // value bytes occupy [VALSTART, VALSTART + vallen) in the buffer
final int vallen = kvmeta.get(kvoff + VALLEN);
assert vallen >= 0;
vbytes.reset(kvbuffer, kvmeta.get(kvoff + VALSTART), vallen);
}
/**
* Inner class wrapping valuebytes, used for appendRaw.
*/
protected class InMemValBytes extends DataInputBuffer {
private byte[] buffer;
private int start;
private int length;
public void reset(byte[] buffer, int start, int length) {
this.buffer = buffer;
this.start = start;
this.length = length;
if (start + length > bufvoid) {
this.buffer = new byte[this.length];
final int taillen = bufvoid - start;
System.arraycopy(buffer, start, this.buffer, 0, taillen);
System.arraycopy(buffer, 0, this.buffer, taillen, length-taillen);
this.start = 0;
}
super.reset(this.buffer, this.start, this.length);
}
}
protected class MRResultIterator implements RawKeyValueIterator {
private final DataInputBuffer keybuf = new DataInputBuffer();
private final InMemValBytes vbytes = new InMemValBytes();
private final int end;
private int current;
public MRResultIterator(int start, int end) {
this.end = end;
current = start - 1;
}
public boolean next() throws IOException {
return ++current < end;
}
public DataInputBuffer getKey() throws IOException {
final int kvoff = offsetFor(current % maxRec);
keybuf.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART),
kvmeta.get(kvoff + VALSTART) - kvmeta.get(kvoff + KEYSTART));
return keybuf;
}
public DataInputBuffer getValue() throws IOException {
getVBytesForOffset(offsetFor(current % maxRec), vbytes);
return vbytes;
}
public Progress getProgress() {
return null;
}
public void close() { }
}
private void mergeParts() throws IOException, InterruptedException,
ClassNotFoundException {
// get the approximate size of the final output/index files
long finalOutFileSize = 0;
long finalIndexFileSize = 0;
final Path[] filename = new Path[numSpills];
final TaskAttemptID mapId = getTaskID();
for(int i = 0; i < numSpills; i++) {
filename[i] = mapOutputFile.getSpillFile(i);
finalOutFileSize += rfs.getFileStatus(filename[i]).getLen();
}
if (numSpills == 1) { //the spill is the final output
sameVolRename(filename[0],
mapOutputFile.getOutputFileForWriteInVolume(filename[0]));
if (indexCacheList.size() == 0) {
sameVolRename(mapOutputFile.getSpillIndexFile(0),
mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]));
} else {
indexCacheList.get(0).writeToFile(
mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]), job);
}
sortPhase.complete();
return;
}
// read in paged indices
for (int i = indexCacheList.size(); i < numSpills; ++i) {
Path indexFileName = mapOutputFile.getSpillIndexFile(i);
indexCacheList.add(new SpillRecord(indexFileName, job));
}
//make correction in the length to include the sequence file header
//lengths for each partition
finalOutFileSize += partitions * APPROX_HEADER_LENGTH;
finalIndexFileSize = partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH;
Path finalOutputFile =
mapOutputFile.getOutputFileForWrite(finalOutFileSize);
Path finalIndexFile =
mapOutputFile.getOutputIndexFileForWrite(finalIndexFileSize);
//The output stream for the final single output file
FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096);
if (numSpills == 0) {
//create dummy files
IndexRecord rec = new IndexRecord();
SpillRecord sr = new SpillRecord(partitions);
try {
for (int i = 0; i < partitions; i++) {
long segmentStart = finalOut.getPos();
FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
Writer<K, V> writer =
new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec, null);
writer.close();
rec.startOffset = segmentStart;
rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
sr.putIndex(rec, i);
}
sr.writeToFile(finalIndexFile, job);
} finally {
finalOut.close();
}
sortPhase.complete();
return;
}
{
sortPhase.addPhases(partitions); // Divide sort phase into sub-phases
IndexRecord rec = new IndexRecord();
final SpillRecord spillRec = new SpillRecord(partitions);
for (int parts = 0; parts < partitions; parts++) {
//create the segments to be merged
List<Segment<K,V>> segmentList =
new ArrayList<Segment<K, V>>(numSpills);
for(int i = 0; i < numSpills; i++) {
IndexRecord indexRecord = indexCacheList.get(i).getIndex(parts);
Segment<K,V> s =
new Segment<K,V>(job, rfs, filename[i], indexRecord.startOffset,
indexRecord.partLength, codec, true);
segmentList.add(i, s);
if (LOG.isDebugEnabled()) {
LOG.debug("MapId=" + mapId + " Reducer=" + parts +
"Spill =" + i + "(" + indexRecord.startOffset + "," +
indexRecord.rawLength + ", " + indexRecord.partLength + ")");
}
}
int mergeFactor = job.getInt(JobContext.IO_SORT_FACTOR, 100);
// sort the segments only if there are intermediate merges
boolean sortSegments = segmentList.size() > mergeFactor;
//merge
@SuppressWarnings("unchecked")
RawKeyValueIterator kvIter = Merger.merge(job, rfs,
keyClass, valClass, codec,
segmentList, mergeFactor,
new Path(mapId.toString()),
job.getOutputKeyComparator(), reporter, sortSegments,
null, spilledRecordsCounter, sortPhase.phase(),
TaskType.MAP);
//write merged output to disk
long segmentStart = finalOut.getPos();
FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
Writer<K, V> writer =
new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec,
spilledRecordsCounter);
if (combinerRunner == null || numSpills < minSpillsForCombine) {
Merger.writeFile(kvIter, writer, reporter, job);
} else {
combineCollector.setWriter(writer);
combinerRunner.combine(kvIter, combineCollector);
}
//close
writer.close();
sortPhase.startNextPhase();
// record offsets
rec.startOffset = segmentStart;
rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
spillRec.putIndex(rec, parts);
}
spillRec.writeToFile(finalIndexFile, job);
finalOut.close();
for(int i = 0; i < numSpills; i++) {
rfs.delete(filename[i],true);
}
}
}
/**
* Rename srcPath to dstPath on the same volume. This is the same
* as RawLocalFileSystem's rename method, except that it will not
* fall back to a copy, and it will create the target directory
* if it doesn't exist.
*/
private void sameVolRename(Path srcPath,
Path dstPath) throws IOException {
RawLocalFileSystem rfs = (RawLocalFileSystem)this.rfs;
File src = rfs.pathToFile(srcPath);
File dst = rfs.pathToFile(dstPath);
if (!dst.getParentFile().exists()) {
if (!dst.getParentFile().mkdirs()) {
throw new IOException("Unable to rename " + src + " to "
+ dst + ": couldn't create parent directory");
}
}
if (!src.renameTo(dst)) {
throw new IOException("Unable to rename " + src + " to " + dst);
}
}
} // MapOutputBuffer
/**
* Exception indicating that the allocated sort buffer is insufficient
* to hold the current record.
*/
@SuppressWarnings("serial")
private static class MapBufferTooSmallException extends IOException {
public MapBufferTooSmallException(String s) {
super(s);
}
}
private <INKEY,INVALUE,OUTKEY,OUTVALUE>
void closeQuietly(RecordReader<INKEY, INVALUE> c) {
if (c != null) {
try {
c.close();
} catch (IOException ie) {
// Ignore
LOG.info("Ignoring exception during close for " + c, ie);
}
}
}
private <OUTKEY, OUTVALUE>
void closeQuietly(MapOutputCollector<OUTKEY, OUTVALUE> c) {
if (c != null) {
try {
c.close();
} catch (Exception ie) {
// Ignore
LOG.info("Ignoring exception during close for " + c, ie);
}
}
}
private <INKEY, INVALUE, OUTKEY, OUTVALUE>
void closeQuietly(
org.apache.hadoop.mapreduce.RecordReader<INKEY, INVALUE> c) {
if (c != null) {
try {
c.close();
} catch (Exception ie) {
// Ignore
LOG.info("Ignoring exception during close for " + c, ie);
}
}
}
private <INKEY, INVALUE, OUTKEY, OUTVALUE>
void closeQuietly(
org.apache.hadoop.mapreduce.RecordWriter<OUTKEY, OUTVALUE> c,
org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
mapperContext) {
if (c != null) {
try {
c.close(mapperContext);
} catch (Exception ie) {
// Ignore
LOG.info("Ignoring exception during close for " + c, ie);
}
}
}
}
| 76,074 | 36.493839 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskAttemptID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskType;
/**
* TaskAttemptID represents the immutable and unique identifier for
* a task attempt. Each task attempt is one particular instance of a Map or
* Reduce Task identified by its TaskID.
*
* TaskAttemptID consists of 2 parts. First part is the
* {@link TaskID}, that this TaskAttemptID belongs to.
* Second part is the task attempt number. <br>
 * An example TaskAttemptID is:
 * <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
 * zeroth task attempt for the fifth map task in the third job
* running at the jobtracker started at <code>200707121733</code>.
* <p>
 * Applications should never construct or parse TaskAttemptID strings,
 * but rather use the appropriate constructors or the {@link #forName(String)}
 * method.
*
* @see JobID
* @see TaskID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TaskAttemptID extends org.apache.hadoop.mapreduce.TaskAttemptID {
/**
* Constructs a TaskAttemptID object from given {@link TaskID}.
* @param taskId TaskID that this task belongs to
* @param id the task attempt number
*/
public TaskAttemptID(TaskID taskId, int id) {
super(taskId, id);
}
/**
   * Constructs a TaskAttemptID object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param isMap whether the tip is a map
* @param taskId taskId number
* @param id the task attempt number
* @deprecated Use {@link #TaskAttemptID(String, int, TaskType, int, int)}.
*/
@Deprecated
public TaskAttemptID(String jtIdentifier, int jobId, boolean isMap,
int taskId, int id) {
this(jtIdentifier, jobId, isMap ? TaskType.MAP : TaskType.REDUCE, taskId,
id);
}
/**
   * Constructs a TaskAttemptID object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param type the TaskType
* @param taskId taskId number
* @param id the task attempt number
*/
public TaskAttemptID(String jtIdentifier, int jobId, TaskType type,
int taskId, int id) {
this(new TaskID(jtIdentifier, jobId, type, taskId), id);
}
public TaskAttemptID() {
super(new TaskID(), 0);
}
/**
* Downgrade a new TaskAttemptID to an old one
* @param old the new id
* @return either old or a new TaskAttemptID constructed to match old
*/
public static
TaskAttemptID downgrade(org.apache.hadoop.mapreduce.TaskAttemptID old) {
if (old instanceof TaskAttemptID) {
return (TaskAttemptID) old;
} else {
return new TaskAttemptID(TaskID.downgrade(old.getTaskID()), old.getId());
}
}
public TaskID getTaskID() {
return (TaskID) super.getTaskID();
}
public JobID getJobID() {
return (JobID) super.getJobID();
}
@Deprecated
public static TaskAttemptID read(DataInput in) throws IOException {
TaskAttemptID taskId = new TaskAttemptID();
taskId.readFields(in);
return taskId;
}
  /** Construct a TaskAttemptID object from a given string.
   * @param str the string to parse
   * @return constructed TaskAttemptID object, or null if the given String is null
   * @throws IllegalArgumentException if the given string is malformed
   */
public static TaskAttemptID forName(String str
) throws IllegalArgumentException {
return (TaskAttemptID)
org.apache.hadoop.mapreduce.TaskAttemptID.forName(str);
}
/**
* Returns a regex pattern which matches task attempt IDs. Arguments can
* be given null, in which case that part of the regex will be generic.
* For example to obtain a regex matching <i>all task attempt IDs</i>
* of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
* map task</i>, we would use :
* <pre>
* TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
* </pre>
* which will return :
* <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
* @param jtIdentifier jobTracker identifier, or null
* @param jobId job number, or null
* @param isMap whether the tip is a map, or null
* @param taskId taskId number, or null
* @param attemptId the task attempt number, or null
* @return a regex pattern matching TaskAttemptIDs
*/
@Deprecated
public static String getTaskAttemptIDsPattern(String jtIdentifier,
Integer jobId, Boolean isMap, Integer taskId, Integer attemptId) {
return getTaskAttemptIDsPattern(jtIdentifier, jobId,
isMap ? TaskType.MAP : TaskType.REDUCE, taskId, attemptId);
}
/**
* Returns a regex pattern which matches task attempt IDs. Arguments can
* be given null, in which case that part of the regex will be generic.
* For example to obtain a regex matching <i>all task attempt IDs</i>
* of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
* map task</i>, we would use :
* <pre>
* TaskAttemptID.getTaskAttemptIDsPattern(null, null, TaskType.MAP, 1, null);
* </pre>
* which will return :
* <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
* @param jtIdentifier jobTracker identifier, or null
* @param jobId job number, or null
* @param type the {@link TaskType}
* @param taskId taskId number, or null
* @param attemptId the task attempt number, or null
* @return a regex pattern matching TaskAttemptIDs
*/
@Deprecated
public static String getTaskAttemptIDsPattern(String jtIdentifier,
Integer jobId, TaskType type, Integer taskId, Integer attemptId) {
StringBuilder builder = new StringBuilder(ATTEMPT).append(SEPARATOR);
builder.append(getTaskAttemptIDsPatternWOPrefix(jtIdentifier, jobId,
type, taskId, attemptId));
return builder.toString();
}
@Deprecated
static StringBuilder getTaskAttemptIDsPatternWOPrefix(String jtIdentifier
, Integer jobId, TaskType type, Integer taskId, Integer attemptId) {
StringBuilder builder = new StringBuilder();
builder.append(TaskID.getTaskIDsPatternWOPrefix(jtIdentifier
, jobId, type, taskId))
.append(SEPARATOR)
.append(attemptId != null ? attemptId : "[0-9]*");
return builder;
}
}
| 7,177 | 35.622449 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* <code>OutputCommitter</code> describes the commit of task output for a
* Map-Reduce job.
*
* <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
* the job to:<p>
* <ol>
* <li>
* Setup the job during initialization. For example, create the temporary
* output directory for the job during the initialization of the job.
* </li>
* <li>
* Cleanup the job after the job completion. For example, remove the
* temporary output directory after the job completion.
* </li>
* <li>
* Setup the task temporary output.
* </li>
* <li>
* Check whether a task needs a commit. This is to avoid the commit
* procedure if a task does not need commit.
* </li>
* <li>
* Commit of the task output.
* </li>
* <li>
* Discard the task commit.
* </li>
* </ol>
* The methods in this class can be called from several different processes and
* from several different contexts. It is important to know which process and
* which context each is called from. Each method should be marked accordingly
* in its documentation. It is also important to note that not all methods are
* guaranteed to be called once and only once. If a method is not guaranteed to
* have this property the output committer needs to handle this appropriately.
 * Also note that it will only be in rare situations that they may be called
 * multiple times for the same task.
*
* @see FileOutputCommitter
* @see JobContext
* @see TaskAttemptContext
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class OutputCommitter
extends org.apache.hadoop.mapreduce.OutputCommitter {
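  // A minimal sketch of the lifecycle described above (a hypothetical
  // committer, not part of this class): setupJob/setupTask prepare temporary
  // output, needsTaskCommit gates commitTask, and commitJob/abortJob finish
  // the job as a whole.
  //
  //   class NoOpOutputCommitter extends OutputCommitter {
  //     public void setupJob(JobContext jobContext) { }
  //     public void setupTask(TaskAttemptContext taskContext) { }
  //     public boolean needsTaskCommit(TaskAttemptContext taskContext) {
  //       return false;                          // nothing to promote
  //     }
  //     public void commitTask(TaskAttemptContext taskContext) { }
  //     public void abortTask(TaskAttemptContext taskContext) { }
  //   }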
/**
* For the framework to setup the job output during initialization. This is
* called from the application master process for the entire job. This will be
* called multiple times, once per job attempt.
*
* @param jobContext Context of the job whose output is being written.
* @throws IOException if temporary output could not be created
*/
public abstract void setupJob(JobContext jobContext) throws IOException;
/**
* For cleaning up the job's output after job completion. This is called
* from the application master process for the entire job. This may be called
* multiple times.
*
* @param jobContext Context of the job whose output is being written.
* @throws IOException
* @deprecated Use {@link #commitJob(JobContext)} or
* {@link #abortJob(JobContext, int)} instead.
*/
@Deprecated
public void cleanupJob(JobContext jobContext) throws IOException { }
/**
* For committing job's output after successful job completion. Note that this
* is invoked for jobs with final runstate as SUCCESSFUL. This is called
* from the application master process for the entire job. This is guaranteed
* to only be called once. If it throws an exception the entire job will
* fail.
*
* @param jobContext Context of the job whose output is being written.
* @throws IOException
*/
public void commitJob(JobContext jobContext) throws IOException {
cleanupJob(jobContext);
}
/**
* For aborting an unsuccessful job's output. Note that this is invoked for
* jobs with final runstate as {@link JobStatus#FAILED} or
* {@link JobStatus#KILLED}. This is called from the application
* master process for the entire job. This may be called multiple times.
*
* @param jobContext Context of the job whose output is being written.
* @param status final runstate of the job
* @throws IOException
*/
public void abortJob(JobContext jobContext, int status)
throws IOException {
cleanupJob(jobContext);
}
/**
* Sets up output for the task. This is called from each individual task's
* process that will output to HDFS, and it is called just for that task. This
* may be called multiple times for the same task, but for different task
* attempts.
*
* @param taskContext Context of the task whose output is being written.
* @throws IOException
*/
public abstract void setupTask(TaskAttemptContext taskContext)
throws IOException;
/**
* Check whether task needs a commit. This is called from each individual
* task's process that will output to HDFS, and it is called just for that
* task.
*
   * @param taskContext Context of the task whose output is being written.
   * @return true if the task output needs a commit, false otherwise
* @throws IOException
*/
public abstract boolean needsTaskCommit(TaskAttemptContext taskContext)
throws IOException;
/**
* To promote the task's temporary output to final output location.
* If {@link #needsTaskCommit(TaskAttemptContext)} returns true and this
* task is the task that the AM determines finished first, this method
* is called to commit an individual task's output. This is to mark
   * that task's output as complete, as {@link #commitJob(JobContext)} will
* also be called later on if the entire job finished successfully. This
* is called from a task's process. This may be called multiple times for the
* same task, but different task attempts. It should be very rare for this to
* be called multiple times and requires odd networking failures to make this
* happen. In the future the Hadoop framework may eliminate this race.
*
* @param taskContext Context of the task whose output is being written.
   * @throws IOException if commit is not successful
*/
public abstract void commitTask(TaskAttemptContext taskContext)
throws IOException;
/**
* Discard the task output. This is called from a task's process to clean
   * up a single task's output that has not yet been committed. This may be
   * called multiple times for the same task, but for different task attempts.
   * 
   * @param taskContext Context of the task whose output is being discarded.
* @throws IOException
*/
public abstract void abortTask(TaskAttemptContext taskContext)
throws IOException;
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this is
* a bridge between the two.
*
* @deprecated Use {@link #isRecoverySupported(JobContext)} instead.
*/
@Deprecated
@Override
public boolean isRecoverySupported() {
return false;
}
/**
* Is task output recovery supported for restarting jobs?
*
* If task output recovery is supported, job restart can be done more
* efficiently.
*
* @param jobContext
* Context of the job whose output is being written.
* @return <code>true</code> if task output recovery is supported,
* <code>false</code> otherwise
* @throws IOException
* @see #recoverTask(TaskAttemptContext)
*/
public boolean isRecoverySupported(JobContext jobContext) throws IOException {
return isRecoverySupported();
}
/**
* Recover the task output.
*
* The retry-count for the job will be passed via the
* {@link MRConstants#APPLICATION_ATTEMPT_ID} key in
* {@link TaskAttemptContext#getConfiguration()} for the
* <code>OutputCommitter</code>. This is called from the application master
* process, but it is called individually for each task.
*
* If an exception is thrown the task will be attempted again.
*
* @param taskContext Context of the task whose output is being recovered
* @throws IOException
*/
public void recoverTask(TaskAttemptContext taskContext)
throws IOException {
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final void setupJob(org.apache.hadoop.mapreduce.JobContext jobContext
) throws IOException {
setupJob((JobContext) jobContext);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
* @deprecated Use {@link #commitJob(org.apache.hadoop.mapreduce.JobContext)}
* or {@link #abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State)}
* instead.
*/
@Override
@Deprecated
public final void cleanupJob(org.apache.hadoop.mapreduce.JobContext context
) throws IOException {
cleanupJob((JobContext) context);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final void commitJob(org.apache.hadoop.mapreduce.JobContext context
) throws IOException {
commitJob((JobContext) context);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final void abortJob(org.apache.hadoop.mapreduce.JobContext context,
org.apache.hadoop.mapreduce.JobStatus.State runState)
throws IOException {
int state = JobStatus.getOldNewJobRunState(runState);
if (state != JobStatus.FAILED && state != JobStatus.KILLED) {
throw new IOException ("Invalid job run state : " + runState.name());
}
abortJob((JobContext) context, state);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final
void setupTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext
) throws IOException {
setupTask((TaskAttemptContext) taskContext);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final boolean
needsTaskCommit(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext
) throws IOException {
return needsTaskCommit((TaskAttemptContext) taskContext);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final
void commitTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext
) throws IOException {
commitTask((TaskAttemptContext) taskContext);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final
void abortTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext
) throws IOException {
abortTask((TaskAttemptContext) taskContext);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
* is a bridge between the two.
*/
@Override
public final
void recoverTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext
) throws IOException {
recoverTask((TaskAttemptContext) taskContext);
}
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this is
* a bridge between the two.
*/
@Override
public final boolean isRecoverySupported(
org.apache.hadoop.mapreduce.JobContext context) throws IOException {
return isRecoverySupported((JobContext) context);
}
}
| 12,929 | 35.942857 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.apache.hadoop.mapreduce.util.CountersStrings.parseEscapedCompactString;
import static org.apache.hadoop.mapreduce.util.CountersStrings.toEscapedCompactString;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.ParseException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.commons.collections.IteratorUtils;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.counters.AbstractCounterGroup;
import org.apache.hadoop.mapreduce.counters.AbstractCounters;
import org.apache.hadoop.mapreduce.counters.CounterGroupBase;
import org.apache.hadoop.mapreduce.counters.CounterGroupFactory;
import org.apache.hadoop.mapreduce.counters.FileSystemCounterGroup;
import org.apache.hadoop.mapreduce.counters.FrameworkCounterGroup;
import org.apache.hadoop.mapreduce.counters.GenericCounter;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
import com.google.common.collect.Iterators;
/**
* A set of named counters.
*
* <p><code>Counters</code> represent global counters, defined either by the
* Map-Reduce framework or applications. Each <code>Counter</code> can be of
* any {@link Enum} type.</p>
*
 * <p><code>Counters</code> are bunched into {@link Group}s, each comprising
 * counters from a particular <code>Enum</code> class.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Counters
extends AbstractCounters<Counters.Counter, Counters.Group> {
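  // Typical use, sketched as a comment (MyCounters is a hypothetical
  // user-defined enum, not part of this class):
  //
  //   Counters counters = new Counters();
  //   counters.incrCounter(MyCounters.RECORDS, 1);        // enum-based counter
  //   counters.incrCounter("MyGroup", "parsed", 5);       // string-based counter
  //   long parsed = counters.findCounter("MyGroup", "parsed").getValue();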
public static final int MAX_COUNTER_LIMIT = Limits.getCountersMax();
public static final int MAX_GROUP_LIMIT = Limits.getGroupsMax();
private static final HashMap<String, String> depricatedCounterMap =
new HashMap<String, String>();
static {
initDepricatedMap();
}
public Counters() {
super(groupFactory);
}
public Counters(org.apache.hadoop.mapreduce.Counters newCounters) {
super(newCounters, groupFactory);
}
@SuppressWarnings({ "deprecation" })
private static void initDepricatedMap() {
depricatedCounterMap.put(FileInputFormat.Counter.class.getName(),
FileInputFormatCounter.class.getName());
depricatedCounterMap.put(FileOutputFormat.Counter.class.getName(),
FileOutputFormatCounter.class.getName());
depricatedCounterMap.put(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.class
.getName(), FileInputFormatCounter.class.getName());
depricatedCounterMap.put(
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.class
.getName(), FileOutputFormatCounter.class.getName());
}
private static String getNewGroupKey(String oldGroup) {
if (depricatedCounterMap.containsKey(oldGroup)) {
return depricatedCounterMap.get(oldGroup);
}
return null;
}
/**
* Downgrade new {@link org.apache.hadoop.mapreduce.Counters} to old Counters
* @param newCounters new Counters
* @return old Counters instance corresponding to newCounters
*/
static Counters downgrade(org.apache.hadoop.mapreduce.Counters newCounters) {
return new Counters(newCounters);
}
public synchronized Group getGroup(String groupName) {
return super.getGroup(groupName);
}
@SuppressWarnings("unchecked")
public synchronized Collection<String> getGroupNames() {
return IteratorUtils.toList(super.getGroupNames().iterator());
}
public synchronized String makeCompactString() {
StringBuilder builder = new StringBuilder();
boolean first = true;
for(Group group: this){
for(Counter counter: group) {
if (first) {
first = false;
} else {
builder.append(',');
}
builder.append(group.getDisplayName());
builder.append('.');
builder.append(counter.getDisplayName());
builder.append(':');
builder.append(counter.getCounter());
}
}
return builder.toString();
}
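  // For example, a job with two counters in one group might produce the compact
  // string "MyGroup.parsed:5,MyGroup.skipped:2" (display names joined with '.'
  // and ':' as built above; the names and values are illustrative only).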
/**
* A counter record, comprising its name and value.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public static class Counter implements org.apache.hadoop.mapreduce.Counter {
org.apache.hadoop.mapreduce.Counter realCounter;
Counter(org.apache.hadoop.mapreduce.Counter counter) {
this.realCounter = counter;
}
public Counter() {
this(new GenericCounter());
}
@SuppressWarnings("deprecation")
@Override
public void setDisplayName(String displayName) {
realCounter.setDisplayName(displayName);
}
@Override
public String getName() {
return realCounter.getName();
}
@Override
public String getDisplayName() {
return realCounter.getDisplayName();
}
@Override
public long getValue() {
return realCounter.getValue();
}
@Override
public void setValue(long value) {
realCounter.setValue(value);
}
@Override
public void increment(long incr) {
realCounter.increment(incr);
}
@Override
public void write(DataOutput out) throws IOException {
realCounter.write(out);
}
@Override
public void readFields(DataInput in) throws IOException {
realCounter.readFields(in);
}
/**
* Returns the compact stringified version of the counter in the format
* [(actual-name)(display-name)(value)]
* @return the stringified result
*/
public String makeEscapedCompactString() {
return toEscapedCompactString(realCounter);
}
/**
* Checks for (content) equality of two (basic) counters
* @param counter to compare
* @return true if content equals
* @deprecated
*/
@Deprecated
public boolean contentEquals(Counter counter) {
return realCounter.equals(counter.getUnderlyingCounter());
}
/**
* @return the value of the counter
*/
public long getCounter() {
return realCounter.getValue();
}
@Override
public org.apache.hadoop.mapreduce.Counter getUnderlyingCounter() {
return realCounter;
}
@Override
public synchronized boolean equals(Object genericRight) {
if (genericRight instanceof Counter) {
synchronized (genericRight) {
Counter right = (Counter) genericRight;
return getName().equals(right.getName()) &&
getDisplayName().equals(right.getDisplayName()) &&
getValue() == right.getValue();
}
}
return false;
}
@Override
public int hashCode() {
return realCounter.hashCode();
}
}
/**
   * <code>Group</code> of counters, comprising counters from a particular
   * counter {@link Enum} class.
   *
   * <p><code>Group</code> handles localization of the class name and the
* counter names.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public static class Group implements CounterGroupBase<Counter> {
private CounterGroupBase<Counter> realGroup;
protected Group() {
realGroup = null;
}
Group(GenericGroup group) {
this.realGroup = group;
}
Group(FSGroupImpl group) {
this.realGroup = group;
}
@SuppressWarnings({ "unchecked", "rawtypes" })
Group(FrameworkGroupImpl group) {
this.realGroup = group;
}
/**
* @param counterName the name of the counter
* @return the value of the specified counter, or 0 if the counter does
* not exist.
*/
public long getCounter(String counterName) {
return getCounterValue(realGroup, counterName);
}
/**
* @return the compact stringified version of the group in the format
* {(actual-name)(display-name)(value)[][][]} where [] are compact strings
* for the counters within.
*/
public String makeEscapedCompactString() {
return toEscapedCompactString(realGroup);
}
/**
* Get the counter for the given id and create it if it doesn't exist.
* @param id the numeric id of the counter within the group
* @param name the internal counter name
* @return the counter
* @deprecated use {@link #findCounter(String)} instead
*/
@Deprecated
public Counter getCounter(int id, String name) {
return findCounter(name);
}
/**
* Get the counter for the given name and create it if it doesn't exist.
* @param name the internal counter name
* @return the counter
*/
public Counter getCounterForName(String name) {
return findCounter(name);
}
@Override
public void write(DataOutput out) throws IOException {
realGroup.write(out);
}
@Override
public void readFields(DataInput in) throws IOException {
realGroup.readFields(in);
}
@Override
public Iterator<Counter> iterator() {
return realGroup.iterator();
}
@Override
public String getName() {
return realGroup.getName();
}
@Override
public String getDisplayName() {
return realGroup.getDisplayName();
}
@Override
public void setDisplayName(String displayName) {
realGroup.setDisplayName(displayName);
}
@Override
public void addCounter(Counter counter) {
realGroup.addCounter(counter);
}
@Override
public Counter addCounter(String name, String displayName, long value) {
return realGroup.addCounter(name, displayName, value);
}
@Override
public Counter findCounter(String counterName, String displayName) {
return realGroup.findCounter(counterName, displayName);
}
@Override
public Counter findCounter(String counterName, boolean create) {
return realGroup.findCounter(counterName, create);
}
@Override
public Counter findCounter(String counterName) {
return realGroup.findCounter(counterName);
}
@Override
public int size() {
return realGroup.size();
}
@Override
public void incrAllCounters(CounterGroupBase<Counter> rightGroup) {
realGroup.incrAllCounters(rightGroup);
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return realGroup;
}
@Override
public synchronized boolean equals(Object genericRight) {
if (genericRight instanceof CounterGroupBase<?>) {
@SuppressWarnings("unchecked")
CounterGroupBase<Counter> right = ((CounterGroupBase<Counter>)
genericRight).getUnderlyingGroup();
return Iterators.elementsEqual(iterator(), right.iterator());
}
return false;
}
@Override
public int hashCode() {
return realGroup.hashCode();
}
}
// All the group impls need this for legacy group interface
static long getCounterValue(CounterGroupBase<Counter> group, String counterName) {
Counter counter = group.findCounter(counterName, false);
if (counter != null) return counter.getValue();
return 0L;
}
// Mix the generic group implementation into the Group interface
private static class GenericGroup extends AbstractCounterGroup<Counter> {
GenericGroup(String name, String displayName, Limits limits) {
super(name, displayName, limits);
}
@Override
protected Counter newCounter(String counterName, String displayName,
long value) {
return new Counter(new GenericCounter(counterName, displayName, value));
}
@Override
protected Counter newCounter() {
return new Counter();
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return this;
}
}
// Mix the framework group implementation into the Group interface
private static class FrameworkGroupImpl<T extends Enum<T>>
extends FrameworkCounterGroup<T, Counter> {
FrameworkGroupImpl(Class<T> cls) {
super(cls);
}
@Override
protected Counter newCounter(T key) {
return new Counter(new FrameworkCounter<T>(key, getName()));
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return this;
}
}
// Mix the file system counter group implementation into the Group interface
private static class FSGroupImpl extends FileSystemCounterGroup<Counter> {
@Override
protected Counter newCounter(String scheme, FileSystemCounter key) {
return new Counter(new FSCounter(scheme, key));
}
@Override
public CounterGroupBase<Counter> getUnderlyingGroup() {
return this;
}
}
public synchronized Counter findCounter(String group, String name) {
if (name.equals("MAP_INPUT_BYTES")) {
LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
"Use FileInputFormatCounters as group name and " +
" BYTES_READ as counter name instead");
return findCounter(FileInputFormatCounter.BYTES_READ);
}
String newGroupKey = getNewGroupKey(group);
if (newGroupKey != null) {
group = newGroupKey;
}
return getGroup(group).getCounterForName(name);
}
/**
* Provide factory methods for counter group factory implementation.
* See also the GroupFactory in
* {@link org.apache.hadoop.mapreduce.Counters mapreduce.Counters}
*/
static class GroupFactory extends CounterGroupFactory<Counter, Group> {
@Override
protected <T extends Enum<T>>
FrameworkGroupFactory<Group> newFrameworkGroupFactory(final Class<T> cls) {
return new FrameworkGroupFactory<Group>() {
@Override public Group newGroup(String name) {
return new Group(new FrameworkGroupImpl<T>(cls)); // impl in this package
}
};
}
@Override
protected Group newGenericGroup(String name, String displayName,
Limits limits) {
return new Group(new GenericGroup(name, displayName, limits));
}
@Override
protected Group newFileSystemGroup() {
return new Group(new FSGroupImpl());
}
}
private static final GroupFactory groupFactory = new GroupFactory();
/**
* Find a counter by using strings
* @param group the name of the group
* @param id the id of the counter within the group (0 to N-1)
* @param name the internal name of the counter
* @return the counter for that name
* @deprecated use {@link #findCounter(String, String)} instead
*/
@Deprecated
public Counter findCounter(String group, int id, String name) {
return findCounter(group, name);
}
/**
* Increments the specified counter by the specified amount, creating it if
* it didn't already exist.
* @param key identifies a counter
* @param amount amount by which counter is to be incremented
*/
public void incrCounter(Enum<?> key, long amount) {
findCounter(key).increment(amount);
}
/**
* Increments the specified counter by the specified amount, creating it if
* it didn't already exist.
* @param group the name of the group
* @param counter the internal name of the counter
* @param amount amount by which counter is to be incremented
*/
public void incrCounter(String group, String counter, long amount) {
findCounter(group, counter).increment(amount);
}
/**
* Returns current value of the specified counter, or 0 if the counter
* does not exist.
* @param key the counter enum to lookup
* @return the counter value or 0 if counter not found
*/
public synchronized long getCounter(Enum<?> key) {
return findCounter(key).getValue();
}
/**
* Increments multiple counters by their amounts in another Counters
* instance.
* @param other the other Counters instance
*/
public synchronized void incrAllCounters(Counters other) {
for (Group otherGroup: other) {
Group group = getGroup(otherGroup.getName());
group.setDisplayName(otherGroup.getDisplayName());
for (Counter otherCounter : otherGroup) {
Counter counter = group.getCounterForName(otherCounter.getName());
counter.setDisplayName(otherCounter.getDisplayName());
counter.increment(otherCounter.getValue());
}
}
}
/**
* @return the total number of counters
* @deprecated use {@link #countCounters()} instead
*/
public int size() {
return countCounters();
}
/**
* Convenience method for computing the sum of two sets of counters.
* @param a the first counters
* @param b the second counters
* @return a new summed counters object
*/
public static Counters sum(Counters a, Counters b) {
Counters counters = new Counters();
counters.incrAllCounters(a);
counters.incrAllCounters(b);
return counters;
}
/**
* Logs the current counter values.
* @param log The log to use.
*/
public void log(Log log) {
log.info("Counters: " + size());
for(Group group: this) {
log.info(" " + group.getDisplayName());
for (Counter counter: group) {
log.info(" " + counter.getDisplayName() + "=" +
counter.getCounter());
}
}
}
/**
* Represent the counter in a textual format that can be converted back to
* its object form
* @return the string in the following format
* {(groupName)(group-displayName)[(counterName)(displayName)(value)][]*}*
*/
public String makeEscapedCompactString() {
return toEscapedCompactString(this);
}
/**
   * Convert a stringified (by {@link #makeEscapedCompactString()}) counter
   * representation into a counter object.
* @param compactString to parse
* @return a new counters object
* @throws ParseException
*/
public static Counters fromEscapedCompactString(String compactString)
throws ParseException {
return parseEscapedCompactString(compactString, new Counters());
}
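  // Round-trip sketch (illustrative only): the escaped compact string produced
  // by makeEscapedCompactString() can be parsed back into an equivalent object.
  //
  //   String s = counters.makeEscapedCompactString();
  //   Counters copy = Counters.fromEscapedCompactString(s);  // may throw ParseException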
/**
* Counter exception thrown when the number of counters exceed the limit
*/
public static class CountersExceededException extends RuntimeException {
private static final long serialVersionUID = 1L;
public CountersExceededException(String msg) {
super(msg);
}
// Only allows chaining of related exceptions
public CountersExceededException(CountersExceededException cause) {
super(cause);
}
}
}
| 19,351 | 28.818182 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SkipBadRecords.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
/**
* Utility class for skip bad records functionality. It contains various
* settings related to skipping of bad records.
*
* <p>Hadoop provides an optional mode of execution in which the bad records
* are detected and skipped in further attempts.
*
 * <p>This feature can be used when map/reduce tasks crash deterministically on
 * certain input, typically due to bugs in the map/reduce function. The usual
 * course would be to fix these bugs, but sometimes that is not possible;
 * perhaps the bug is in third-party libraries for which the source code is
 * not available. In such cases the task never reaches completion even with
 * multiple attempts, and the complete data for that task is lost.</p>
*
* <p>With this feature, only a small portion of data is lost surrounding
 * the bad record, which may be acceptable for some user applications;
 * see {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}</p>
*
 * <p>The skipping mode is kicked off after a certain number of failures;
 * see {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}</p>
*
 * <p>In skipping mode, the map/reduce task maintains the record range which
 * is being processed at all times. Before giving the input to the
 * map/reduce function, it sends this record range to the Task tracker.
 * If the task crashes, the Task tracker knows which range was the last one
 * reported, and on further attempts that range is skipped.</p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SkipBadRecords {
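  // Sketch of enabling the feature on a job configuration (the values shown are
  // illustrative, not recommendations):
  //
  //   JobConf job = new JobConf();
  //   SkipBadRecords.setAttemptsToStartSkipping(job, 2);  // start skipping after 2 failed attempts
  //   SkipBadRecords.setMapperMaxSkipRecords(job, 1);     // at most the bad record itself
  //   SkipBadRecords.setReducerMaxSkipGroups(job, 1);     // at most the bad group itself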
/**
* Special counters which are written by the application and are
* used by the framework for detecting bad records. For detecting bad records
* these counters must be incremented by the application.
*/
public static final String COUNTER_GROUP = "SkippingTaskCounters";
/**
* Number of processed map records.
* @see SkipBadRecords#getAutoIncrMapperProcCount(Configuration)
*/
public static final String COUNTER_MAP_PROCESSED_RECORDS =
"MapProcessedRecords";
/**
* Number of processed reduce groups.
* @see SkipBadRecords#getAutoIncrReducerProcCount(Configuration)
*/
public static final String COUNTER_REDUCE_PROCESSED_GROUPS =
"ReduceProcessedGroups";
private static final String ATTEMPTS_TO_START_SKIPPING =
JobContext.SKIP_START_ATTEMPTS;
private static final String AUTO_INCR_MAP_PROC_COUNT =
JobContext.MAP_SKIP_INCR_PROC_COUNT;
private static final String AUTO_INCR_REDUCE_PROC_COUNT =
JobContext.REDUCE_SKIP_INCR_PROC_COUNT;
private static final String OUT_PATH = JobContext.SKIP_OUTDIR;
private static final String MAPPER_MAX_SKIP_RECORDS =
JobContext.MAP_SKIP_MAX_RECORDS;
private static final String REDUCER_MAX_SKIP_GROUPS =
JobContext.REDUCE_SKIP_MAXGROUPS;
/**
   * Get the number of Task attempts AFTER which skip mode
   * will be kicked off. When skip mode is kicked off, the
   * task reports the range of records which it will process
   * next to the TaskTracker, so that on failure the TaskTracker knows
   * which records are possibly the bad ones. On further executions,
   * those records are skipped.
   * Default value is 2.
   * 
   * @param conf the configuration
   * @return the number of task attempts after which skip mode starts
*/
public static int getAttemptsToStartSkipping(Configuration conf) {
return conf.getInt(ATTEMPTS_TO_START_SKIPPING, 2);
}
/**
   * Set the number of Task attempts AFTER which skip mode
   * will be kicked off. When skip mode is kicked off, the
   * task reports the range of records which it will process
   * next to the TaskTracker, so that on failure the TaskTracker knows
   * which records are possibly the bad ones. On further executions,
   * those records are skipped.
   * Default value is 2.
   * 
   * @param conf the configuration
   * @param attemptsToStartSkipping the number of task attempts after which skip mode starts
*/
public static void setAttemptsToStartSkipping(Configuration conf,
int attemptsToStartSkipping) {
conf.setInt(ATTEMPTS_TO_START_SKIPPING, attemptsToStartSkipping);
}
/**
* Get the flag which if set to true,
* {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented
* by MapRunner after invoking the map function. This value must be set to
* false for applications which process the records asynchronously
   * or buffer the input records, for example streaming applications.
* In such cases applications should increment this counter on their own.
* Default value is true.
*
* @param conf the configuration
* @return <code>true</code> if auto increment
* {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
* <code>false</code> otherwise.
*/
public static boolean getAutoIncrMapperProcCount(Configuration conf) {
return conf.getBoolean(AUTO_INCR_MAP_PROC_COUNT, true);
}
/**
* Set the flag which if set to true,
* {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented
* by MapRunner after invoking the map function. This value must be set to
* false for applications which process the records asynchronously
   * or buffer the input records, for example streaming applications.
* In such cases applications should increment this counter on their own.
* Default value is true.
*
* @param conf the configuration
* @param autoIncr whether to auto increment
* {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
*/
public static void setAutoIncrMapperProcCount(Configuration conf,
boolean autoIncr) {
conf.setBoolean(AUTO_INCR_MAP_PROC_COUNT, autoIncr);
}
/**
* Get the flag which if set to true,
* {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} is incremented
* by framework after invoking the reduce function. This value must be set to
* false for applications which process the records asynchronously
   * or buffer the input records, for example streaming applications.
* In such cases applications should increment this counter on their own.
* Default value is true.
*
* @param conf the configuration
* @return <code>true</code> if auto increment
* {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.
* <code>false</code> otherwise.
*/
public static boolean getAutoIncrReducerProcCount(Configuration conf) {
return conf.getBoolean(AUTO_INCR_REDUCE_PROC_COUNT, true);
}
/**
* Set the flag which if set to true,
* {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} is incremented
* by framework after invoking the reduce function. This value must be set to
* false for applications which process the records asynchronously
   * or buffer the input records, for example streaming applications.
* In such cases applications should increment this counter on their own.
* Default value is true.
*
* @param conf the configuration
* @param autoIncr whether to auto increment
* {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.
*/
public static void setAutoIncrReducerProcCount(Configuration conf,
boolean autoIncr) {
conf.setBoolean(AUTO_INCR_REDUCE_PROC_COUNT, autoIncr);
}
/**
* Get the directory to which skipped records are written. By default it is
   * a subdirectory of the output _logs directory.
   * Users can stop writing skipped records by setting the value to null.
*
* @param conf the configuration.
* @return path skip output directory. Null is returned if this is not set
* and output directory is also not set.
*/
public static Path getSkipOutputPath(Configuration conf) {
String name = conf.get(OUT_PATH);
if(name!=null) {
if("none".equals(name)) {
return null;
}
return new Path(name);
}
Path outPath = FileOutputFormat.getOutputPath(new JobConf(conf));
return outPath==null ? null : new Path(outPath,
"_logs"+Path.SEPARATOR+"skip");
}
/**
* Set the directory to which skipped records are written. By default it is
   * a subdirectory of the output _logs directory.
   * Users can stop writing skipped records by setting the value to null.
*
* @param conf the configuration.
* @param path skip output directory path
*/
public static void setSkipOutputPath(JobConf conf, Path path) {
String pathStr = null;
if(path==null) {
pathStr = "none";
} else {
pathStr = path.toString();
}
conf.set(OUT_PATH, pathStr);
}
/**
* Get the number of acceptable skip records surrounding the bad record PER
* bad record in mapper. The number includes the bad record as well.
* To turn the feature of detection/skipping of bad records off, set the
* value to 0.
* The framework tries to narrow down the skipped range by retrying
* until this threshold is met OR all attempts get exhausted for this task.
   * Set the value to Long.MAX_VALUE to indicate that the framework need not try to
   * narrow down. Whatever records (depending on the application) get skipped are
* acceptable.
* Default value is 0.
*
* @param conf the configuration
* @return maxSkipRecs acceptable skip records.
*/
public static long getMapperMaxSkipRecords(Configuration conf) {
return conf.getLong(MAPPER_MAX_SKIP_RECORDS, 0);
}
/**
* Set the number of acceptable skip records surrounding the bad record PER
* bad record in mapper. The number includes the bad record as well.
* To turn the feature of detection/skipping of bad records off, set the
* value to 0.
* The framework tries to narrow down the skipped range by retrying
* until this threshold is met OR all attempts get exhausted for this task.
   * Set the value to Long.MAX_VALUE to indicate that the framework need not try to
   * narrow down. Whatever records (depending on the application) get skipped are
* acceptable.
* Default value is 0.
*
* @param conf the configuration
* @param maxSkipRecs acceptable skip records.
*/
public static void setMapperMaxSkipRecords(Configuration conf,
long maxSkipRecs) {
conf.setLong(MAPPER_MAX_SKIP_RECORDS, maxSkipRecs);
}
/**
* Get the number of acceptable skip groups surrounding the bad group PER
* bad group in reducer. The number includes the bad group as well.
* To turn the feature of detection/skipping of bad groups off, set the
* value to 0.
* The framework tries to narrow down the skipped range by retrying
* until this threshold is met OR all attempts get exhausted for this task.
   * Set the value to Long.MAX_VALUE to indicate that the framework need not try to
   * narrow down. Whatever groups (depending on the application) get skipped are
* acceptable.
* Default value is 0.
*
* @param conf the configuration
* @return maxSkipGrps acceptable skip groups.
*/
public static long getReducerMaxSkipGroups(Configuration conf) {
return conf.getLong(REDUCER_MAX_SKIP_GROUPS, 0);
}
/**
* Set the number of acceptable skip groups surrounding the bad group PER
* bad group in reducer. The number includes the bad group as well.
* To turn the feature of detection/skipping of bad groups off, set the
* value to 0.
* The framework tries to narrow down the skipped range by retrying
* until this threshold is met OR all attempts get exhausted for this task.
   * Set the value to Long.MAX_VALUE to indicate that the framework need not try to
   * narrow down. Whatever groups (depending on the application) get skipped are
* acceptable.
* Default value is 0.
*
* @param conf the configuration
* @param maxSkipGrps acceptable skip groups.
*/
public static void setReducerMaxSkipGroups(Configuration conf,
long maxSkipGrps) {
conf.setLong(REDUCER_MAX_SKIP_GROUPS, maxSkipGrps);
}
}
| 12,863 | 40.099042 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* JobID represents the immutable and unique identifier for
 * the job. A JobID consists of two parts. The first part
 * represents the jobtracker identifier, which defines the jobID-to-jobtracker
 * mapping. For a cluster setup this string is the jobtracker
 * start time, while for a local setting it is "local".
 * The second part of the JobID is the job number. <br>
 * An example JobID is
 * <code>job_200707121733_0003</code>, which represents the third job
 * running at the jobtracker started at <code>200707121733</code>.
* <p>
* Applications should never construct or parse JobID strings, but rather
* use appropriate constructors or {@link #forName(String)} method.
*
* @see TaskID
* @see TaskAttemptID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobID extends org.apache.hadoop.mapreduce.JobID {
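  // Construction and parsing sketch (the identifier below is the example used
  // in the class comment, not a live value):
  //
  //   JobID byParts = new JobID("200707121733", 3);           // job_200707121733_0003
  //   JobID byName  = JobID.forName("job_200707121733_0003"); // equivalent object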
/**
* Constructs a JobID object
* @param jtIdentifier jobTracker identifier
* @param id job number
*/
public JobID(String jtIdentifier, int id) {
super(jtIdentifier, id);
}
public JobID() { }
/**
* Downgrade a new JobID to an old one
* @param old a new or old JobID
* @return either old or a new JobID build to match old
*/
public static JobID downgrade(org.apache.hadoop.mapreduce.JobID old) {
if (old instanceof JobID) {
return (JobID) old;
} else {
return new JobID(old.getJtIdentifier(), old.getId());
}
}
@Deprecated
public static JobID read(DataInput in) throws IOException {
JobID jobId = new JobID();
jobId.readFields(in);
return jobId;
}
/** Construct a JobId object from given string
* @return constructed JobId object or null if the given String is null
* @throws IllegalArgumentException if the given string is malformed
*/
public static JobID forName(String str) throws IllegalArgumentException {
return (JobID) org.apache.hadoop.mapreduce.JobID.forName(str);
}
/**
   * Returns a regex pattern which matches job IDs. Arguments can
   * be given null, in which case that part of the regex will be generic.
   * For example to obtain a regex matching <i>any job</i>
   * run on the jobtracker started at <i>200707121733</i>, we would use:
   * <pre>
   * JobID.getJobIDsPattern("200707121733", null);
* </pre>
* which will return :
* <pre> "job_200707121733_[0-9]*" </pre>
* @param jtIdentifier jobTracker identifier, or null
* @param jobId job number, or null
* @return a regex pattern matching JobIDs
*/
@Deprecated
public static String getJobIDsPattern(String jtIdentifier, Integer jobId) {
StringBuilder builder = new StringBuilder(JOB).append(SEPARATOR);
builder.append(getJobIDsPatternWOPrefix(jtIdentifier, jobId));
return builder.toString();
}
@Deprecated
static StringBuilder getJobIDsPatternWOPrefix(String jtIdentifier,
Integer jobId) {
StringBuilder builder = new StringBuilder();
if (jtIdentifier != null) {
builder.append(jtIdentifier);
} else {
builder.append("[^").append(SEPARATOR).append("]*");
}
builder.append(SEPARATOR)
.append(jobId != null ? idFormat.format(jobId) : "[0-9]*");
return builder;
}
}
| 4,252 | 33.860656 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LineRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.SplitCompressionInputStream;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.mapreduce.lib.input.CompressedSplitLineReader;
import org.apache.hadoop.mapreduce.lib.input.SplitLineReader;
import org.apache.hadoop.mapreduce.lib.input.UncompressedSplitLineReader;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
/**
* Treats keys as offset in file and value as line.
*/
@InterfaceAudience.LimitedPrivate({"MapReduce", "Pig"})
@InterfaceStability.Unstable
public class LineRecordReader implements RecordReader<LongWritable, Text> {
private static final Log LOG
= LogFactory.getLog(LineRecordReader.class.getName());
private CompressionCodecFactory compressionCodecs = null;
private long start;
private long pos;
private long end;
private SplitLineReader in;
private FSDataInputStream fileIn;
private final Seekable filePosition;
int maxLineLength;
private CompressionCodec codec;
private Decompressor decompressor;
/**
* A class that provides a line reader from an input stream.
* @deprecated Use {@link org.apache.hadoop.util.LineReader} instead.
*/
@Deprecated
public static class LineReader extends org.apache.hadoop.util.LineReader {
LineReader(InputStream in) {
super(in);
}
LineReader(InputStream in, int bufferSize) {
super(in, bufferSize);
}
public LineReader(InputStream in, Configuration conf) throws IOException {
super(in, conf);
}
LineReader(InputStream in, byte[] recordDelimiter) {
super(in, recordDelimiter);
}
LineReader(InputStream in, int bufferSize, byte[] recordDelimiter) {
super(in, bufferSize, recordDelimiter);
}
public LineReader(InputStream in, Configuration conf,
byte[] recordDelimiter) throws IOException {
super(in, conf, recordDelimiter);
}
}
public LineRecordReader(Configuration job,
FileSplit split) throws IOException {
this(job, split, null);
}
public LineRecordReader(Configuration job, FileSplit split,
byte[] recordDelimiter) throws IOException {
this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
start = split.getStart();
end = start + split.getLength();
final Path file = split.getPath();
compressionCodecs = new CompressionCodecFactory(job);
codec = compressionCodecs.getCodec(file);
// open the file and seek to the start of the split
final FileSystem fs = file.getFileSystem(job);
fileIn = fs.open(file);
if (isCompressedInput()) {
decompressor = CodecPool.getDecompressor(codec);
if (codec instanceof SplittableCompressionCodec) {
final SplitCompressionInputStream cIn =
((SplittableCompressionCodec)codec).createInputStream(
fileIn, decompressor, start, end,
SplittableCompressionCodec.READ_MODE.BYBLOCK);
in = new CompressedSplitLineReader(cIn, job, recordDelimiter);
start = cIn.getAdjustedStart();
end = cIn.getAdjustedEnd();
filePosition = cIn; // take pos from compressed stream
} else {
if (start != 0) {
// So we have a split that is part of a file stored using
// a Compression codec that cannot be split.
throw new IOException("Cannot seek in " +
codec.getClass().getSimpleName() + " compressed stream");
}
in = new SplitLineReader(codec.createInputStream(fileIn,
decompressor), job, recordDelimiter);
filePosition = fileIn;
}
} else {
fileIn.seek(start);
in = new UncompressedSplitLineReader(
fileIn, job, recordDelimiter, split.getLength());
filePosition = fileIn;
}
// If this is not the first split, we always throw away first record
// because we always (except the last split) read one extra line in
// next() method.
if (start != 0) {
start += in.readLine(new Text(), 0, maxBytesToConsume(start));
}
this.pos = start;
}
public LineRecordReader(InputStream in, long offset, long endOffset,
int maxLineLength) {
this(in, offset, endOffset, maxLineLength, null);
}
public LineRecordReader(InputStream in, long offset, long endOffset,
int maxLineLength, byte[] recordDelimiter) {
this.maxLineLength = maxLineLength;
this.in = new SplitLineReader(in, recordDelimiter);
this.start = offset;
this.pos = offset;
this.end = endOffset;
filePosition = null;
}
public LineRecordReader(InputStream in, long offset, long endOffset,
Configuration job)
throws IOException{
this(in, offset, endOffset, job, null);
}
public LineRecordReader(InputStream in, long offset, long endOffset,
Configuration job, byte[] recordDelimiter)
throws IOException{
this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
this.in = new SplitLineReader(in, job, recordDelimiter);
this.start = offset;
this.pos = offset;
this.end = endOffset;
filePosition = null;
}
public LongWritable createKey() {
return new LongWritable();
}
public Text createValue() {
return new Text();
}
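  // Typical read loop over a split, sketched as a comment (a reader is normally
  // obtained from an InputFormat such as TextInputFormat rather than built here):
  //
  //   LongWritable key = reader.createKey();
  //   Text value = reader.createValue();
  //   while (reader.next(key, value)) {
  //     // key holds the byte offset of the line, value holds the line itself
  //   }
  //   reader.close();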
private boolean isCompressedInput() {
return (codec != null);
}
private int maxBytesToConsume(long pos) {
return isCompressedInput()
? Integer.MAX_VALUE
: (int) Math.max(Math.min(Integer.MAX_VALUE, end - pos), maxLineLength);
}
private long getFilePosition() throws IOException {
long retVal;
if (isCompressedInput() && null != filePosition) {
retVal = filePosition.getPos();
} else {
retVal = pos;
}
return retVal;
}
private int skipUtfByteOrderMark(Text value) throws IOException {
// Strip BOM(Byte Order Mark)
// Text only support UTF-8, we only need to check UTF-8 BOM
// (0xEF,0xBB,0xBF) at the start of the text stream.
int newMaxLineLength = (int) Math.min(3L + (long) maxLineLength,
Integer.MAX_VALUE);
int newSize = in.readLine(value, newMaxLineLength, maxBytesToConsume(pos));
    // Even though we read 3 extra bytes for the first line,
// we won't alter existing behavior (no backwards incompat issue).
// Because the newSize is less than maxLineLength and
// the number of bytes copied to Text is always no more than newSize.
// If the return size from readLine is not less than maxLineLength,
// we will discard the current line and read the next line.
pos += newSize;
int textLength = value.getLength();
byte[] textBytes = value.getBytes();
if ((textLength >= 3) && (textBytes[0] == (byte)0xEF) &&
(textBytes[1] == (byte)0xBB) && (textBytes[2] == (byte)0xBF)) {
// find UTF-8 BOM, strip it.
LOG.info("Found UTF-8 BOM and skipped it");
textLength -= 3;
newSize -= 3;
if (textLength > 0) {
// It may work to use the same buffer and not do the copyBytes
textBytes = value.copyBytes();
value.set(textBytes, 3, textLength);
} else {
value.clear();
}
}
return newSize;
}
/** Read a line. */
public synchronized boolean next(LongWritable key, Text value)
throws IOException {
// We always read one extra line, which lies outside the upper
// split limit i.e. (end - 1)
while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
key.set(pos);
int newSize = 0;
if (pos == 0) {
newSize = skipUtfByteOrderMark(value);
} else {
newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
pos += newSize;
}
if (newSize == 0) {
return false;
}
if (newSize < maxLineLength) {
return true;
}
// line too long. try again
LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize));
}
return false;
}
/**
* Get the progress within the split
*/
public synchronized float getProgress() throws IOException {
if (start == end) {
return 0.0f;
} else {
return Math.min(1.0f, (getFilePosition() - start) / (float)(end - start));
}
}
public synchronized long getPos() throws IOException {
return pos;
}
public synchronized void close() throws IOException {
try {
if (in != null) {
in.close();
}
} finally {
if (decompressor != null) {
CodecPool.returnDecompressor(decompressor);
decompressor = null;
}
}
}
}
| 10,279 | 33.152824 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CumulativePeriodicStats.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
*
* This class is a concrete PeriodicStatsAccumulator that deals with
* measurements where the raw data are a measurement of an
* accumulation. The result in each bucket is the estimate
* of the progress-weighted change in that quantity over the
* progress range covered by the bucket.
*
* <p>An easy-to-understand example of this kind of quantity would be
* a distance traveled. It makes sense to consider that portion of
* the total travel that can be apportioned to each bucket.
*
*/
class CumulativePeriodicStats extends PeriodicStatsAccumulator {
// int's are acceptable here, even though times are normally
// long's, because these are a difference and an int won't
// overflow for 24 days. Tasks can't run for more than about a
// week for other reasons, and most jobs would be written
int previousValue = 0;
CumulativePeriodicStats(int count) {
super(count);
}
/**
*
* accumulates a new reading by keeping a running account of the
* value distance from the beginning of the bucket to the end of
* this reading
*/
@Override
protected void extendInternal(double newProgress, int newValue) {
if (state == null) {
return;
}
state.currentAccumulation += (double)(newValue - previousValue);
previousValue = newValue;
}
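  // Worked example (illustrative): successive readings of 0, 10 and 25 add
  // 10 and then 15 to the current bucket, i.e. only the change in the
  // cumulative quantity since the previous reading is credited to the bucket.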
}
| 2,162 | 35.05 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MergeSorter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.Comparator;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.util.MergeSort;
import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator;
/** This class implements the sort method from BasicTypeSorterBase class as
* MergeSort. Note that this class is really a wrapper over the actual
* mergesort implementation that is there in the util package. The main intent
 * of providing this class is to set up the input data for the util.MergeSort
 * algorithm so that the latter doesn't need to bother about the various data
* structures that have been created for the Map output but rather concentrate
* on the core algorithm (thereby allowing easy integration of a mergesort
* implementation). The bridge between this class and the util.MergeSort class
* is the Comparator.
*/
class MergeSorter extends BasicTypeSorterBase
implements Comparator<IntWritable> {
private static int progressUpdateFrequency = 10000;
private int progressCalls = 0;
/** The sort method derived from BasicTypeSorterBase and overridden here*/
public RawKeyValueIterator sort() {
MergeSort m = new MergeSort(this);
int count = super.count;
if (count == 0) return null;
int [] pointers = super.pointers;
int [] pointersCopy = new int[count];
System.arraycopy(pointers, 0, pointersCopy, 0, count);
m.mergeSort(pointers, pointersCopy, 0, count);
return new MRSortResultIterator(super.keyValBuffer, pointersCopy,
super.startOffsets, super.keyLengths, super.valueLengths);
}
/** The implementation of the compare method from Comparator. Note that
* Comparator.compare takes objects as inputs and so the int values are
* wrapped in (reusable) IntWritables from the class util.MergeSort
* @param i
* @param j
* @return int as per the specification of Comparator.compare
*/
public int compare (IntWritable i, IntWritable j) {
// indicate we're making progress but do a batch update
if (progressCalls < progressUpdateFrequency) {
progressCalls++;
} else {
progressCalls = 0;
reporter.progress();
}
return comparator.compare(keyValBuffer.getData(), startOffsets[i.get()],
keyLengths[i.get()],
keyValBuffer.getData(), startOffsets[j.get()],
keyLengths[j.get()]);
}
/** Add the extra memory that will be utilized by the sort method */
public long getMemoryUtilized() {
//this is memory that will be actually utilized (considering the temp
//array that will be allocated by the sort() method (mergesort))
return super.getMemoryUtilized() + super.count * 4;
}
}
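// A minimal sketch of the pointer-sort idea described above, with made-up data;
// it is not part of the Hadoop source. It sorts an array of record indices with
// a comparator that looks at the key bytes, so the key/value buffer itself is
// never moved.
class PointerSortSketch {
  static Integer[] sortedPointers(final byte[][] keys) {
    Integer[] pointers = new Integer[keys.length];
    for (int i = 0; i < keys.length; i++) {
      pointers[i] = i;
    }
    java.util.Arrays.sort(pointers, new Comparator<Integer>() {
      public int compare(Integer a, Integer b) {
        return org.apache.hadoop.io.WritableComparator.compareBytes(
            keys[a], 0, keys[a].length, keys[b], 0, keys[b].length);
      }
    });
    return pointers;  // indices in key order; the underlying records never moved
  }
}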
| 3,566 | 43.037037 | 94 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapOutputCollector.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.Task.TaskReporter;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public interface MapOutputCollector<K, V> {
public void init(Context context
) throws IOException, ClassNotFoundException;
public void collect(K key, V value, int partition
) throws IOException, InterruptedException;
public void close() throws IOException, InterruptedException;
public void flush() throws IOException, InterruptedException,
ClassNotFoundException;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public static class Context {
private final MapTask mapTask;
private final JobConf jobConf;
private final TaskReporter reporter;
public Context(MapTask mapTask, JobConf jobConf, TaskReporter reporter) {
this.mapTask = mapTask;
this.jobConf = jobConf;
this.reporter = reporter;
}
public MapTask getMapTask() {
return mapTask;
}
public JobConf getJobConf() {
return jobConf;
}
public TaskReporter getReporter() {
return reporter;
}
}
}
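// A hypothetical collector skeleton (not part of the Hadoop source) showing the
// lifecycle a pluggable implementation follows: init once, collect per record,
// flush at the end of the map, then close. A real implementation would
// partition, sort and spill; this sketch only counts records.
class RecordCountingCollector<K, V> implements MapOutputCollector<K, V> {
  private long records;
  public void init(MapOutputCollector.Context context) {
    records = 0;
  }
  public void collect(K key, V value, int partition) {
    records++;
  }
  public void flush() {
    // nothing buffered in this sketch
  }
  public void close() {
    System.out.println("collected " + records + " records");
  }
}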
| 2,174 | 31.954545 | 77 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueAclsInfo.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
* Class to encapsulate Queue ACLs for a particular
* user.
*/
class QueueAclsInfo extends org.apache.hadoop.mapreduce.QueueAclsInfo {
/**
* Default constructor for QueueAclsInfo.
*
*/
QueueAclsInfo() {
super();
}
/**
* Construct a new QueueAclsInfo object using the queue name and the
* queue operations array
*
* @param queueName Name of the job queue
   * @param operations the queue operations allowed for the user
*
*/
QueueAclsInfo(String queueName, String[] operations) {
super(queueName, operations);
}
public static QueueAclsInfo downgrade(
org.apache.hadoop.mapreduce.QueueAclsInfo acl) {
return new QueueAclsInfo(acl.getQueueName(), acl.getOperations());
}
}
| 1,553 | 29.470588 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
/** An {@link OutputCommitter} that commits files specified
 * in the job output directory, i.e. ${mapreduce.output.fileoutputformat.outputdir}.
**/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileOutputCommitter extends OutputCommitter {
public static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.mapred.FileOutputCommitter");
/**
* Temporary directory name
*/
public static final String TEMP_DIR_NAME =
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.PENDING_DIR_NAME;
public static final String SUCCEEDED_FILE_NAME =
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.SUCCEEDED_FILE_NAME;
static final String SUCCESSFUL_JOB_OUTPUT_DIR_MARKER =
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.SUCCESSFUL_JOB_OUTPUT_DIR_MARKER;
private static Path getOutputPath(JobContext context) {
JobConf conf = context.getJobConf();
return FileOutputFormat.getOutputPath(conf);
}
private static Path getOutputPath(TaskAttemptContext context) {
JobConf conf = context.getJobConf();
return FileOutputFormat.getOutputPath(conf);
}
private org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter wrapped = null;
private org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
getWrapped(JobContext context) throws IOException {
if(wrapped == null) {
wrapped = new org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter(
getOutputPath(context), context);
}
return wrapped;
}
private org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
getWrapped(TaskAttemptContext context) throws IOException {
if(wrapped == null) {
wrapped = new org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter(
getOutputPath(context), context);
}
return wrapped;
}
/**
* Compute the path where the output of a given job attempt will be placed.
* @param context the context of the job. This is used to get the
* application attempt id.
* @return the path to store job attempt data.
*/
@Private
Path getJobAttemptPath(JobContext context) {
Path out = getOutputPath(context);
return out == null ? null :
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
.getJobAttemptPath(context, out);
}
@Private
public Path getTaskAttemptPath(TaskAttemptContext context) throws IOException {
Path out = getOutputPath(context);
return out == null ? null : getTaskAttemptPath(context, out);
}
private Path getTaskAttemptPath(TaskAttemptContext context, Path out) throws IOException {
Path workPath = FileOutputFormat.getWorkOutputPath(context.getJobConf());
if(workPath == null && out != null) {
return org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
.getTaskAttemptPath(context, out);
}
return workPath;
}
/**
* Compute the path where the output of a committed task is stored until
* the entire job is committed.
* @param context the context of the task attempt
* @return the path where the output of a committed task is stored until
* the entire job is committed.
*/
@Private
Path getCommittedTaskPath(TaskAttemptContext context) {
Path out = getOutputPath(context);
return out == null ? null :
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
.getCommittedTaskPath(context, out);
}
public Path getWorkPath(TaskAttemptContext context, Path outputPath)
throws IOException {
return outputPath == null ? null : getTaskAttemptPath(context, outputPath);
}
@Override
public void setupJob(JobContext context) throws IOException {
getWrapped(context).setupJob(context);
}
@Override
public void commitJob(JobContext context) throws IOException {
getWrapped(context).commitJob(context);
}
@Override
@Deprecated
public void cleanupJob(JobContext context) throws IOException {
getWrapped(context).cleanupJob(context);
}
@Override
public void abortJob(JobContext context, int runState)
throws IOException {
JobStatus.State state;
if(runState == JobStatus.State.RUNNING.getValue()) {
state = JobStatus.State.RUNNING;
} else if(runState == JobStatus.State.SUCCEEDED.getValue()) {
state = JobStatus.State.SUCCEEDED;
} else if(runState == JobStatus.State.FAILED.getValue()) {
state = JobStatus.State.FAILED;
} else if(runState == JobStatus.State.PREP.getValue()) {
state = JobStatus.State.PREP;
} else if(runState == JobStatus.State.KILLED.getValue()) {
state = JobStatus.State.KILLED;
} else {
throw new IllegalArgumentException(runState+" is not a valid runState.");
}
getWrapped(context).abortJob(context, state);
}
@Override
public void setupTask(TaskAttemptContext context) throws IOException {
getWrapped(context).setupTask(context);
}
@Override
public void commitTask(TaskAttemptContext context) throws IOException {
getWrapped(context).commitTask(context, getTaskAttemptPath(context));
}
@Override
public void abortTask(TaskAttemptContext context) throws IOException {
getWrapped(context).abortTask(context, getTaskAttemptPath(context));
}
@Override
public boolean needsTaskCommit(TaskAttemptContext context)
throws IOException {
return getWrapped(context).needsTaskCommit(context, getTaskAttemptPath(context));
}
@Override
@Deprecated
public boolean isRecoverySupported() {
return true;
}
@Override
public boolean isRecoverySupported(JobContext context) throws IOException {
return getWrapped(context).isRecoverySupported(context);
}
@Override
public void recoverTask(TaskAttemptContext context)
throws IOException {
getWrapped(context).recoverTask(context);
}
}
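// A hedged configuration sketch (not part of the file above): with the old API,
// FileOutputCommitter is the default committer, so declaring the output path is
// normally all that is needed for the _temporary handling and the _SUCCESS
// marker. The output path below is made up.
class FileOutputCommitterUsageSketch {
  static JobConf configure() {
    JobConf conf = new JobConf();
    conf.setOutputCommitter(FileOutputCommitter.class);  // explicit here, but also the default
    FileOutputFormat.setOutputPath(conf, new Path("/tmp/job-output"));
    return conf;
  }
}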
| 7,019 | 33.581281 | 96 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileSplit.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.DataInput;
import java.io.DataOutput;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.fs.Path;
/** A section of an input file. Returned by {@link
* InputFormat#getSplits(JobConf, int)} and passed to
* {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileSplit extends org.apache.hadoop.mapreduce.InputSplit
implements InputSplitWithLocationInfo {
org.apache.hadoop.mapreduce.lib.input.FileSplit fs;
protected FileSplit() {
fs = new org.apache.hadoop.mapreduce.lib.input.FileSplit();
}
/** Constructs a split.
* @deprecated
* @param file the file name
* @param start the position of the first byte in the file to process
* @param length the number of bytes in the file to process
*/
@Deprecated
public FileSplit(Path file, long start, long length, JobConf conf) {
this(file, start, length, (String[])null);
}
/** Constructs a split with host information
*
* @param file the file name
* @param start the position of the first byte in the file to process
* @param length the number of bytes in the file to process
* @param hosts the list of hosts containing the block, possibly null
*/
public FileSplit(Path file, long start, long length, String[] hosts) {
fs = new org.apache.hadoop.mapreduce.lib.input.FileSplit(file, start,
length, hosts);
}
/** Constructs a split with host information
*
* @param file the file name
* @param start the position of the first byte in the file to process
* @param length the number of bytes in the file to process
* @param hosts the list of hosts containing the block, possibly null
* @param inMemoryHosts the list of hosts containing the block in memory
*/
public FileSplit(Path file, long start, long length, String[] hosts,
String[] inMemoryHosts) {
fs = new org.apache.hadoop.mapreduce.lib.input.FileSplit(file, start,
length, hosts, inMemoryHosts);
}
public FileSplit(org.apache.hadoop.mapreduce.lib.input.FileSplit fs) {
this.fs = fs;
}
/** The file containing this split's data. */
public Path getPath() { return fs.getPath(); }
/** The position of the first byte in the file to process. */
public long getStart() { return fs.getStart(); }
/** The number of bytes in the file to process. */
public long getLength() { return fs.getLength(); }
public String toString() { return fs.toString(); }
////////////////////////////////////////////
// Writable methods
////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
fs.write(out);
}
public void readFields(DataInput in) throws IOException {
fs.readFields(in);
}
public String[] getLocations() throws IOException {
return fs.getLocations();
}
@Override
@Evolving
public SplitLocationInfo[] getLocationInfo() throws IOException {
return fs.getLocationInfo();
}
}
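// Illustrative only (not part of the Hadoop source): building an old-API
// FileSplit for the first 64MB of a file, naming the hosts that hold the block.
// The path and host names are made up.
class FileSplitSketch {
  static FileSplit firstBlockSplit() {
    return new FileSplit(new Path("/data/input.txt"), 0L, 64L * 1024 * 1024,
        new String[] { "datanode-1", "datanode-2" });
  }
}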
| 4,058 | 33.991379 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JvmTask.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* Task abstraction that can be serialized, implements the writable interface.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JvmTask implements Writable {
Task t;
boolean shouldDie;
public JvmTask(Task t, boolean shouldDie) {
this.t = t;
this.shouldDie = shouldDie;
}
public JvmTask() {}
public Task getTask() {
return t;
}
public boolean shouldDie() {
return shouldDie;
}
public void write(DataOutput out) throws IOException {
out.writeBoolean(shouldDie);
if (t != null) {
out.writeBoolean(true);
out.writeBoolean(t.isMapTask());
t.write(out);
} else {
out.writeBoolean(false);
}
}
public void readFields(DataInput in) throws IOException {
shouldDie = in.readBoolean();
boolean taskComing = in.readBoolean();
if (taskComing) {
boolean isMap = in.readBoolean();
if (isMap) {
t = new MapTask();
} else {
t = new ReduceTask();
}
t.readFields(in);
}
}
}
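// A minimal round-trip sketch (not part of the Hadoop source) of the Writable
// contract above: a "should die" message with no task is serialized and read
// back.
class JvmTaskRoundTripSketch {
  static boolean roundTrip() throws IOException {
    org.apache.hadoop.io.DataOutputBuffer out = new org.apache.hadoop.io.DataOutputBuffer();
    new JvmTask(null, true).write(out);
    org.apache.hadoop.io.DataInputBuffer in = new org.apache.hadoop.io.DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    JvmTask copy = new JvmTask();
    copy.readFields(in);
    return copy.shouldDie();  // true, and copy.getTask() is null
  }
}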
| 2,109 | 28.305556 | 78 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskID.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskType;
/**
* TaskID represents the immutable and unique identifier for
* a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
 * execute the Map or Reduce Task, each of which is uniquely identified by
* their TaskAttemptID.
*
* TaskID consists of 3 parts. First part is the {@link JobID}, that this
* TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r'
* representing whether the task is a map task or a reduce task.
* And the third part is the task number. <br>
* An example TaskID is :
* <code>task_200707121733_0003_m_000005</code> , which represents the
* fifth map task in the third job running at the jobtracker
* started at <code>200707121733</code>.
* <p>
 * Applications should never construct or parse TaskID strings, but rather
 * use the appropriate constructors or the {@link #forName(String)} method.
*
* @see JobID
* @see TaskAttemptID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TaskID extends org.apache.hadoop.mapreduce.TaskID {
/**
* Constructs a TaskID object from given {@link JobID}.
* @param jobId JobID that this tip belongs to
* @param isMap whether the tip is a map
* @param id the tip number
* @deprecated Use {@link #TaskID(String, int, TaskType, int)}
*/
@Deprecated
public TaskID(org.apache.hadoop.mapreduce.JobID jobId, boolean isMap,int id) {
this(jobId, isMap ? TaskType.MAP : TaskType.REDUCE, id);
}
/**
* Constructs a TaskInProgressId object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param isMap whether the tip is a map
* @param id the tip number
* @deprecated Use {@link #TaskID(org.apache.hadoop.mapreduce.JobID, TaskType,
* int)}
*/
@Deprecated
public TaskID(String jtIdentifier, int jobId, boolean isMap, int id) {
this(jtIdentifier, jobId, isMap ? TaskType.MAP : TaskType.REDUCE, id);
}
/**
* Constructs a TaskID object from given {@link JobID}.
* @param jobId JobID that this tip belongs to
* @param type the {@link TaskType}
* @param id the tip number
*/
public TaskID(org.apache.hadoop.mapreduce.JobID jobId, TaskType type,int id) {
super(jobId, type, id);
}
/**
* Constructs a TaskInProgressId object from given parts.
* @param jtIdentifier jobTracker identifier
* @param jobId job number
* @param type the {@link TaskType}
* @param id the tip number
*/
public TaskID(String jtIdentifier, int jobId, TaskType type, int id) {
this(new JobID(jtIdentifier, jobId), type, id);
}
public TaskID() {
super(new JobID(), TaskType.REDUCE, 0);
}
/**
* Downgrade a new TaskID to an old one
* @param old a new or old TaskID
* @return either old or a new TaskID build to match old
*/
public static TaskID downgrade(org.apache.hadoop.mapreduce.TaskID old) {
if (old instanceof TaskID) {
return (TaskID) old;
} else {
return new TaskID(JobID.downgrade(old.getJobID()), old.getTaskType(),
old.getId());
}
}
@Deprecated
public static TaskID read(DataInput in) throws IOException {
TaskID tipId = new TaskID();
tipId.readFields(in);
return tipId;
}
public JobID getJobID() {
return (JobID) super.getJobID();
}
/**
* Returns a regex pattern which matches task IDs. Arguments can
* be given null, in which case that part of the regex will be generic.
* For example to obtain a regex matching <i>the first map task</i>
* of <i>any jobtracker</i>, of <i>any job</i>, we would use :
* <pre>
* TaskID.getTaskIDsPattern(null, null, true, 1);
* </pre>
* which will return :
* <pre> "task_[^_]*_[0-9]*_m_000001*" </pre>
* @param jtIdentifier jobTracker identifier, or null
* @param jobId job number, or null
* @param isMap whether the tip is a map, or null
* @param taskId taskId number, or null
* @return a regex pattern matching TaskIDs
* @deprecated Use {@link TaskID#getTaskIDsPattern(String, Integer, TaskType,
* Integer)}
*/
@Deprecated
public static String getTaskIDsPattern(String jtIdentifier, Integer jobId
, Boolean isMap, Integer taskId) {
    // Guard against unboxing a null Boolean: the javadoc allows isMap == null.
    return getTaskIDsPattern(jtIdentifier, jobId,
        isMap == null ? null : (isMap ? TaskType.MAP : TaskType.REDUCE), taskId);
}
/**
* Returns a regex pattern which matches task IDs. Arguments can
* be given null, in which case that part of the regex will be generic.
* For example to obtain a regex matching <i>the first map task</i>
* of <i>any jobtracker</i>, of <i>any job</i>, we would use :
* <pre>
* TaskID.getTaskIDsPattern(null, null, true, 1);
* </pre>
* which will return :
* <pre> "task_[^_]*_[0-9]*_m_000001*" </pre>
* @param jtIdentifier jobTracker identifier, or null
* @param jobId job number, or null
* @param type the {@link TaskType}, or null
* @param taskId taskId number, or null
* @return a regex pattern matching TaskIDs
*/
@Deprecated
public static String getTaskIDsPattern(String jtIdentifier, Integer jobId
, TaskType type, Integer taskId) {
StringBuilder builder = new StringBuilder(TASK).append(SEPARATOR)
.append(getTaskIDsPatternWOPrefix(jtIdentifier, jobId, type, taskId));
return builder.toString();
}
@Deprecated
static StringBuilder getTaskIDsPatternWOPrefix(String jtIdentifier
, Integer jobId, TaskType type, Integer taskId) {
StringBuilder builder = new StringBuilder();
builder.append(JobID.getJobIDsPatternWOPrefix(jtIdentifier, jobId))
.append(SEPARATOR)
.append(type != null ?
(org.apache.hadoop.mapreduce.TaskID.getRepresentingCharacter(type)) :
org.apache.hadoop.mapreduce.TaskID.getAllTaskTypes()).
append(SEPARATOR)
.append(taskId != null ? idFormat.format(taskId) : "[0-9]*");
return builder;
}
public static TaskID forName(String str
) throws IllegalArgumentException {
return (TaskID) org.apache.hadoop.mapreduce.TaskID.forName(str);
}
}
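// Illustrative only (not part of the Hadoop source): parsing the example id
// from the class javadoc and building a matching regex for "the first map task
// of any job on any jobtracker", as described for getTaskIDsPattern.
class TaskIDSketch {
  static void demo() {
    TaskID tip = TaskID.forName("task_200707121733_0003_m_000005");
    boolean isMap = tip.getTaskType() == TaskType.MAP;  // true for this id
    String firstMapOfAnyJob = TaskID.getTaskIDsPattern(null, null, TaskType.MAP, 1);
    // firstMapOfAnyJob matches ids of the form task_<jt>_<job>_m_000001
  }
}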
| 7,161 | 34.98995 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JVMId.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.NumberFormat;
class JVMId {
boolean isMap;
final JobID jobId;
private long jvmId;
private static final String JVM = "jvm";
private static final char SEPARATOR = '_';
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
idFormat.setGroupingUsed(false);
idFormat.setMinimumIntegerDigits(6);
}
public JVMId(JobID jobId, boolean isMap, long id) {
this.jvmId = id;
this.isMap = isMap;
this.jobId = jobId;
}
public JVMId (String jtIdentifier, int jobId, boolean isMap, long id) {
this(new JobID(jtIdentifier, jobId), isMap, id);
}
public JVMId() {
jobId = new JobID();
}
public boolean isMapJVM() {
return isMap;
}
public JobID getJobId() {
return jobId;
}
@Override
public boolean equals(Object o) {
// Generated by IntelliJ IDEA 13.1.
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
JVMId jvmId1 = (JVMId) o;
if (isMap != jvmId1.isMap) {
return false;
}
if (jvmId != jvmId1.jvmId) {
return false;
}
if (!jobId.equals(jvmId1.jobId)) {
return false;
}
return true;
}
@Override
public int hashCode() {
// Generated by IntelliJ IDEA 13.1.
int result = (isMap ? 1 : 0);
result = 31 * result + jobId.hashCode();
result = 31 * result + (int) (jvmId ^ (jvmId >>> 32));
return result;
}
/**
   * Compare JVMIds by jobId first, then by jvm number. Reduce JVMs are
   * defined as greater than map JVMs.
**/
public int compareTo(JVMId that) {
int jobComp = this.jobId.compareTo(that.jobId);
if(jobComp == 0) {
if(this.isMap == that.isMap) {
return Long.valueOf(this.jvmId).compareTo(that.jvmId);
} else {
return this.isMap ? -1 : 1;
}
} else {
return jobComp;
}
}
@Override
public String toString() {
return appendTo(new StringBuilder(JVM)).toString();
}
/**
* This method does NOT override org.apache.hadoop.mapred.ID to accept 64-bit
* ID to support work-preserving RM restart.
* @return 64-bit JVM id.
*/
public long getId() {
return jvmId;
}
/**
* Add the unique id to the given StringBuilder.
* @param builder the builder to append to
* @return the passed in builder.
*/
protected StringBuilder appendTo(StringBuilder builder) {
return jobId.appendTo(builder).
append(SEPARATOR).
append(isMap ? 'm' : 'r').
append(SEPARATOR).
append(idFormat.format(jvmId));
}
public void readFields(DataInput in) throws IOException {
this.jvmId = in.readLong();
this.jobId.readFields(in);
this.isMap = in.readBoolean();
}
public void write(DataOutput out) throws IOException {
out.writeLong(jvmId);
jobId.write(out);
out.writeBoolean(isMap);
}
/** Construct a JVMId object from given string
* @return constructed JVMId object or null if the given String is null
* @throws IllegalArgumentException if the given string is malformed
*/
public static JVMId forName(String str)
throws IllegalArgumentException {
if(str == null)
return null;
try {
String[] parts = str.split("_");
if(parts.length == 5) {
if(parts[0].equals(JVM)) {
boolean isMap = false;
if(parts[3].equals("m")) isMap = true;
else if(parts[3].equals("r")) isMap = false;
else throw new Exception();
return new JVMId(parts[1], Integer.parseInt(parts[2]),
isMap, Integer.parseInt(parts[4]));
}
}
}catch (Exception ex) {//fall below
}
throw new IllegalArgumentException("TaskId string : " + str
+ " is not properly formed");
}
}
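// A small round-trip sketch (not part of the Hadoop source): JVM ids follow the
// same textual layout as task ids, with an 'm'/'r' marker and a six-digit
// number.
class JVMIdSketch {
  static boolean roundTrip() {
    JVMId id = new JVMId("200707121733", 3, true, 5);
    String text = id.toString();            // "jvm_200707121733_0003_m_000005"
    return JVMId.forName(text).equals(id);  // true: parsing restores an equal id
  }
}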
| 4,771 | 26.113636 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ShuffleConsumerPlugin.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.mapred.Task.CombineOutputCollector;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* ShuffleConsumerPlugin for serving Reducers. It may shuffle MOF files from
* either the built-in ShuffleHandler or from a 3rd party AuxiliaryService.
*
*/
@InterfaceAudience.LimitedPrivate("mapreduce")
@InterfaceStability.Unstable
public interface ShuffleConsumerPlugin<K, V> {
public void init(Context<K, V> context);
public RawKeyValueIterator run() throws IOException, InterruptedException;
public void close();
@InterfaceAudience.LimitedPrivate("mapreduce")
@InterfaceStability.Unstable
public static class Context<K,V> {
private final org.apache.hadoop.mapreduce.TaskAttemptID reduceId;
private final JobConf jobConf;
private final FileSystem localFS;
private final TaskUmbilicalProtocol umbilical;
private final LocalDirAllocator localDirAllocator;
private final Reporter reporter;
private final CompressionCodec codec;
private final Class<? extends Reducer> combinerClass;
private final CombineOutputCollector<K, V> combineCollector;
private final Counters.Counter spilledRecordsCounter;
private final Counters.Counter reduceCombineInputCounter;
private final Counters.Counter shuffledMapsCounter;
private final Counters.Counter reduceShuffleBytes;
private final Counters.Counter failedShuffleCounter;
private final Counters.Counter mergedMapOutputsCounter;
private final TaskStatus status;
private final Progress copyPhase;
private final Progress mergePhase;
private final Task reduceTask;
private final MapOutputFile mapOutputFile;
private final Map<TaskAttemptID, MapOutputFile> localMapFiles;
public Context(org.apache.hadoop.mapreduce.TaskAttemptID reduceId,
JobConf jobConf, FileSystem localFS,
TaskUmbilicalProtocol umbilical,
LocalDirAllocator localDirAllocator,
Reporter reporter, CompressionCodec codec,
Class<? extends Reducer> combinerClass,
CombineOutputCollector<K,V> combineCollector,
Counters.Counter spilledRecordsCounter,
Counters.Counter reduceCombineInputCounter,
Counters.Counter shuffledMapsCounter,
Counters.Counter reduceShuffleBytes,
Counters.Counter failedShuffleCounter,
Counters.Counter mergedMapOutputsCounter,
TaskStatus status, Progress copyPhase, Progress mergePhase,
Task reduceTask, MapOutputFile mapOutputFile,
Map<TaskAttemptID, MapOutputFile> localMapFiles) {
this.reduceId = reduceId;
this.jobConf = jobConf;
this.localFS = localFS;
      this.umbilical = umbilical;
this.localDirAllocator = localDirAllocator;
this.reporter = reporter;
this.codec = codec;
this.combinerClass = combinerClass;
this.combineCollector = combineCollector;
this.spilledRecordsCounter = spilledRecordsCounter;
this.reduceCombineInputCounter = reduceCombineInputCounter;
this.shuffledMapsCounter = shuffledMapsCounter;
this.reduceShuffleBytes = reduceShuffleBytes;
this.failedShuffleCounter = failedShuffleCounter;
this.mergedMapOutputsCounter = mergedMapOutputsCounter;
this.status = status;
this.copyPhase = copyPhase;
this.mergePhase = mergePhase;
this.reduceTask = reduceTask;
this.mapOutputFile = mapOutputFile;
this.localMapFiles = localMapFiles;
}
public org.apache.hadoop.mapreduce.TaskAttemptID getReduceId() {
return reduceId;
}
public JobConf getJobConf() {
return jobConf;
}
public FileSystem getLocalFS() {
return localFS;
}
public TaskUmbilicalProtocol getUmbilical() {
return umbilical;
}
public LocalDirAllocator getLocalDirAllocator() {
return localDirAllocator;
}
public Reporter getReporter() {
return reporter;
}
public CompressionCodec getCodec() {
return codec;
}
public Class<? extends Reducer> getCombinerClass() {
return combinerClass;
}
public CombineOutputCollector<K, V> getCombineCollector() {
return combineCollector;
}
public Counters.Counter getSpilledRecordsCounter() {
return spilledRecordsCounter;
}
public Counters.Counter getReduceCombineInputCounter() {
return reduceCombineInputCounter;
}
public Counters.Counter getShuffledMapsCounter() {
return shuffledMapsCounter;
}
public Counters.Counter getReduceShuffleBytes() {
return reduceShuffleBytes;
}
public Counters.Counter getFailedShuffleCounter() {
return failedShuffleCounter;
}
public Counters.Counter getMergedMapOutputsCounter() {
return mergedMapOutputsCounter;
}
public TaskStatus getStatus() {
return status;
}
public Progress getCopyPhase() {
return copyPhase;
}
public Progress getMergePhase() {
return mergePhase;
}
public Task getReduceTask() {
return reduceTask;
}
public MapOutputFile getMapOutputFile() {
return mapOutputFile;
}
public Map<TaskAttemptID, MapOutputFile> getLocalMapFiles() {
return localMapFiles;
}
} // end of public static class Context<K,V>
}
| 6,568 | 36.112994 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueACL.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Enum representing an AccessControlList that drives set of operations that
* can be performed on a queue.
*/
@InterfaceAudience.Private
public enum QueueACL {
SUBMIT_JOB ("acl-submit-job"),
ADMINISTER_JOBS ("acl-administer-jobs");
// Currently this ACL acl-administer-jobs is checked for the operations
// FAIL_TASK, KILL_TASK, KILL_JOB, SET_JOB_PRIORITY and VIEW_JOB.
// TODO: Add ACL for LIST_JOBS when we have ability to authenticate
// users in UI
// TODO: Add ACL for CHANGE_ACL when we have an admin tool for
// configuring queues.
private final String aclName;
QueueACL(String aclName) {
this.aclName = aclName;
}
public final String getAclName() {
return aclName;
}
}
| 1,636 | 33.104167 | 76 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/PeriodicStatsAccumulator.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
/**
*
* This abstract class that represents a bucketed series of
* measurements of a quantity being measured in a running task
* attempt.
*
* <p>The sole constructor is called with a count, which is the
* number of buckets into which we evenly divide the spectrum of
* progress from 0.0D to 1.0D . In the future we may provide for
* custom split points that don't have to be uniform.
*
* <p>A subclass determines how we fold readings for portions of a
* bucket and how we interpret the readings by overriding
* {@code extendInternal(...)} and {@code initializeInterval()}
*/
@Private
@Unstable
public abstract class PeriodicStatsAccumulator {
// The range of progress from 0.0D through 1.0D is divided into
// count "progress segments". This object accumulates an
// estimate of the effective value of a time-varying value during
// the zero-based i'th progress segment, ranging from i/count
// through (i+1)/count .
// This is an abstract class. We have two implementations: one
// for monotonically increasing time-dependent variables
// [currently, CPU time in milliseconds and wallclock time in
// milliseconds] and one for quantities that can vary arbitrarily
// over time, currently virtual and physical memory used, in
// kilobytes.
// We carry int's here. This saves a lot of JVM heap space in the
// job tracker per running task attempt [200 bytes per] but it
// has a small downside.
// No task attempt can run for more than 57 days nor occupy more
// than two terabytes of virtual memory.
protected final int count;
protected final int[] values;
static class StatsetState {
int oldValue = 0;
double oldProgress = 0.0D;
double currentAccumulation = 0.0D;
}
// We provide this level of indirection to reduce the memory
  // footprint of done task attempts. When a task's progress
  // reaches 1.0D, we delete this StatsetState object.
StatsetState state = new StatsetState();
PeriodicStatsAccumulator(int count) {
this.count = count;
this.values = new int[count];
for (int i = 0; i < count; ++i) {
values[i] = -1;
}
}
protected int[] getValues() {
return values;
}
// The concrete implementation of this abstract function
// accumulates more data into the current progress segment.
// newProgress [from the call] and oldProgress [from the object]
// must be in [or at the border of] a single progress segment.
/**
*
* adds a new reading to the current bucket.
*
* @param newProgress the endpoint of the interval this new
* reading covers
* @param newValue the value of the reading at {@code newProgress}
*
* The class has three instance variables, {@code oldProgress} and
* {@code oldValue} and {@code currentAccumulation}.
*
* {@code extendInternal} can count on three things:
*
* 1: The first time it's called in a particular instance, both
* oldXXX's will be zero.
*
* 2: oldXXX for a later call is the value of newXXX of the
* previous call. This ensures continuity in accumulation from
* one call to the next.
*
* 3: {@code currentAccumulation} is owned by
* {@code initializeInterval} and {@code extendInternal}.
*/
protected abstract void extendInternal(double newProgress, int newValue);
// What has to be done when you open a new interval
/**
* initializes the state variables to be ready for a new interval
*/
protected void initializeInterval() {
state.currentAccumulation = 0.0D;
}
// called for each new reading
/**
* This method calls {@code extendInternal} at least once. It
* divides the current progress interval [from the last call's
* {@code newProgress} to this call's {@code newProgress} ]
* into one or more subintervals by splitting at any point which
* is an interval boundary if there are any such points. It
* then calls {@code extendInternal} for each subinterval, or the
* whole interval if there are no splitting points.
*
* <p>For example, if the value was {@code 300} last time with
* {@code 0.3} progress, and count is {@code 5}, and you get a
* new reading with the variable at {@code 700} and progress at
* {@code 0.7}, you get three calls to {@code extendInternal}:
* one extending from progress {@code 0.3} to {@code 0.4} [the
* next boundary] with a value of {@code 400}, the next one
* through {@code 0.6} with a value of {@code 600}, and finally
* one at {@code 700} with a progress of {@code 0.7} .
*
* @param newProgress the endpoint of the progress range this new
* reading covers
* @param newValue the value of the reading at {@code newProgress}
*/
protected void extend(double newProgress, int newValue) {
if (state == null || newProgress < state.oldProgress) {
return;
}
    // The correctness of this code depends on (int)(1.0D * count) == count.
int oldIndex = (int)(state.oldProgress * count);
int newIndex = (int)(newProgress * count);
int originalOldValue = state.oldValue;
double fullValueDistance = (double)newValue - state.oldValue;
double fullProgressDistance = newProgress - state.oldProgress;
double originalOldProgress = state.oldProgress;
// In this loop we detect each subinterval boundary within the
// range from the old progress to the new one. Then we
// interpolate the value from the old value to the new one to
// infer what its value might have been at each such boundary.
// Lastly we make the necessary calls to extendInternal to fold
    // in the data for each trapezoid where no such trapezoid
// crosses a boundary.
for (int closee = oldIndex; closee < newIndex; ++closee) {
double interpolationProgress = (double)(closee + 1) / count;
      // In floating point, x * y / y might not equal x.
interpolationProgress = Math.min(interpolationProgress, newProgress);
double progressLength = (interpolationProgress - originalOldProgress);
double interpolationProportion = progressLength / fullProgressDistance;
double interpolationValueDistance
= fullValueDistance * interpolationProportion;
// estimates the value at the next [interpolated] subsegment boundary
int interpolationValue
= (int)interpolationValueDistance + originalOldValue;
extendInternal(interpolationProgress, interpolationValue);
advanceState(interpolationProgress, interpolationValue);
values[closee] = (int)state.currentAccumulation;
initializeInterval();
}
extendInternal(newProgress, newValue);
advanceState(newProgress, newValue);
if (newIndex == count) {
state = null;
}
}
protected void advanceState(double newProgress, int newValue) {
state.oldValue = newValue;
state.oldProgress = newProgress;
}
int getCount() {
return count;
}
int get(int index) {
return values[index];
}
}
| 8,040 | 37.109005 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/InvalidFileTypeException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Used when the file type differs from the desired file type, e.g.
 * getting a file when a directory is expected, or vice versa.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InvalidFileTypeException
extends IOException {
private static final long serialVersionUID = 1L;
public InvalidFileTypeException() {
super();
}
public InvalidFileTypeException(String msg) {
super(msg);
}
}
| 1,433 | 30.173913 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobContextImpl.java |
/* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Progressable;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobContextImpl
extends org.apache.hadoop.mapreduce.task.JobContextImpl
implements JobContext {
private JobConf job;
private Progressable progress;
public JobContextImpl(JobConf conf, org.apache.hadoop.mapreduce.JobID jobId,
Progressable progress) {
super(conf, jobId);
this.job = conf;
this.progress = progress;
}
public JobContextImpl(JobConf conf, org.apache.hadoop.mapreduce.JobID jobId) {
this(conf, jobId, Reporter.NULL);
}
/**
* Get the job Configuration
*
* @return JobConf
*/
public JobConf getJobConf() {
return job;
}
/**
* Get the progress mechanism for reporting progress.
*
* @return progress mechanism
*/
public Progressable getProgressible() {
return progress;
}
}
| 1,863 | 29.557377 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An {@link OutputFormat} that writes {@link SequenceFile}s.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileOutputFormat <K,V> extends FileOutputFormat<K, V> {
public RecordWriter<K, V> getRecordWriter(
FileSystem ignored, JobConf job,
String name, Progressable progress)
throws IOException {
// get the path of the temporary output file
Path file = FileOutputFormat.getTaskOutputPath(job, name);
FileSystem fs = file.getFileSystem(job);
CompressionCodec codec = null;
CompressionType compressionType = CompressionType.NONE;
if (getCompressOutput(job)) {
// find the kind of compression to do
compressionType = getOutputCompressionType(job);
// find the right codec
Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job,
DefaultCodec.class);
codec = ReflectionUtils.newInstance(codecClass, job);
}
final SequenceFile.Writer out =
SequenceFile.createWriter(fs, job, file,
job.getOutputKeyClass(),
job.getOutputValueClass(),
compressionType,
codec,
progress);
return new RecordWriter<K, V>() {
public void write(K key, V value)
throws IOException {
out.append(key, value);
}
public void close(Reporter reporter) throws IOException { out.close();}
};
}
/** Open the output generated by this format. */
public static SequenceFile.Reader[] getReaders(Configuration conf, Path dir)
throws IOException {
FileSystem fs = dir.getFileSystem(conf);
Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));
// sort names, so that hash partitioning works
Arrays.sort(names);
SequenceFile.Reader[] parts = new SequenceFile.Reader[names.length];
for (int i = 0; i < names.length; i++) {
parts[i] = new SequenceFile.Reader(fs, names[i], conf);
}
return parts;
}
/**
* Get the {@link CompressionType} for the output {@link SequenceFile}.
* @param conf the {@link JobConf}
* @return the {@link CompressionType} for the output {@link SequenceFile},
* defaulting to {@link CompressionType#RECORD}
*/
public static CompressionType getOutputCompressionType(JobConf conf) {
String val = conf.get(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS_TYPE, CompressionType.RECORD.toString());
return CompressionType.valueOf(val);
}
/**
* Set the {@link CompressionType} for the output {@link SequenceFile}.
* @param conf the {@link JobConf} to modify
* @param style the {@link CompressionType} for the output
* {@link SequenceFile}
*/
public static void setOutputCompressionType(JobConf conf,
CompressionType style) {
setCompressOutput(conf, true);
conf.set(org.apache.hadoop.mapreduce.lib.output.
FileOutputFormat.COMPRESS_TYPE, style.toString());
}
}
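// A hedged configuration sketch (not part of the file above): writing
// block-compressed SequenceFile output from an old-API job. The output path and
// the choice of GzipCodec are illustrative only.
class SequenceFileOutputFormatSketch {
  static void configure(JobConf conf) {
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    FileOutputFormat.setOutputPath(conf, new Path("/tmp/seq-output"));
    SequenceFileOutputFormat.setOutputCompressionType(conf, CompressionType.BLOCK);
    FileOutputFormat.setOutputCompressorClass(conf,
        org.apache.hadoop.io.compress.GzipCodec.class);
  }
}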
| 4,668 | 36.055556 | 82 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobInfo.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
 * Represents the basic information that is saved per job when the
* JobTracker receives a submitJob request. The information is saved
* so that the JobTracker can recover incomplete jobs upon restart.
*/
class JobInfo implements Writable {
private org.apache.hadoop.mapreduce.JobID id;
private Text user;
private Path jobSubmitDir;
public JobInfo() {}
public JobInfo(org.apache.hadoop.mapreduce.JobID id,
Text user,
Path jobSubmitDir) {
this.id = id;
this.user = user;
this.jobSubmitDir = jobSubmitDir;
}
/**
* Get the job id.
*/
public org.apache.hadoop.mapreduce.JobID getJobID() {
return id;
}
/**
* Get the configured job's user-name.
*/
public Text getUser() {
return user;
}
/**
* Get the job submission directory
*/
public Path getJobSubmitDir() {
return this.jobSubmitDir;
}
public void readFields(DataInput in) throws IOException {
id = new org.apache.hadoop.mapreduce.JobID();
id.readFields(in);
user = new Text();
user.readFields(in);
jobSubmitDir = new Path(WritableUtils.readString(in));
}
public void write(DataOutput out) throws IOException {
id.write(out);
user.write(out);
WritableUtils.writeString(out, jobSubmitDir.toString());
}
}
| 2,381 | 27.698795 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An {@link OutputFormat} that writes keys, values to
* {@link SequenceFile}s in binary(raw) format
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsBinaryOutputFormat
extends SequenceFileOutputFormat <BytesWritable,BytesWritable> {
/**
* Inner class used for appendRaw
*/
static protected class WritableValueBytes extends org.apache.hadoop.mapreduce
.lib.output.SequenceFileAsBinaryOutputFormat.WritableValueBytes {
public WritableValueBytes() {
super();
}
public WritableValueBytes(BytesWritable value) {
super(value);
}
}
/**
* Set the key class for the {@link SequenceFile}
* <p>This allows the user to specify the key class to be different
* from the actual class ({@link BytesWritable}) used for writing </p>
*
* @param conf the {@link JobConf} to modify
* @param theClass the SequenceFile output key class.
*/
static public void setSequenceFileOutputKeyClass(JobConf conf,
Class<?> theClass) {
conf.setClass(org.apache.hadoop.mapreduce.lib.output.
SequenceFileAsBinaryOutputFormat.KEY_CLASS, theClass, Object.class);
}
/**
* Set the value class for the {@link SequenceFile}
* <p>This allows the user to specify the value class to be different
* from the actual class ({@link BytesWritable}) used for writing </p>
*
* @param conf the {@link JobConf} to modify
 * @param theClass the SequenceFile output value class.
*/
static public void setSequenceFileOutputValueClass(JobConf conf,
Class<?> theClass) {
conf.setClass(org.apache.hadoop.mapreduce.lib.output.
SequenceFileAsBinaryOutputFormat.VALUE_CLASS, theClass, Object.class);
}
/**
* Get the key class for the {@link SequenceFile}
*
* @return the key class of the {@link SequenceFile}
*/
static public Class<? extends WritableComparable> getSequenceFileOutputKeyClass(JobConf conf) {
return conf.getClass(org.apache.hadoop.mapreduce.lib.output.
SequenceFileAsBinaryOutputFormat.KEY_CLASS,
conf.getOutputKeyClass().asSubclass(WritableComparable.class),
WritableComparable.class);
}
/**
* Get the value class for the {@link SequenceFile}
*
* @return the value class of the {@link SequenceFile}
*/
static public Class<? extends Writable> getSequenceFileOutputValueClass(JobConf conf) {
return conf.getClass(org.apache.hadoop.mapreduce.lib.output.
SequenceFileAsBinaryOutputFormat.VALUE_CLASS,
conf.getOutputValueClass().asSubclass(Writable.class), Writable.class);
}
@Override
public RecordWriter <BytesWritable, BytesWritable>
getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress)
throws IOException {
// get the path of the temporary output file
Path file = FileOutputFormat.getTaskOutputPath(job, name);
FileSystem fs = file.getFileSystem(job);
CompressionCodec codec = null;
CompressionType compressionType = CompressionType.NONE;
if (getCompressOutput(job)) {
// find the kind of compression to do
compressionType = getOutputCompressionType(job);
// find the right codec
Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job,
DefaultCodec.class);
codec = ReflectionUtils.newInstance(codecClass, job);
}
final SequenceFile.Writer out =
SequenceFile.createWriter(fs, job, file,
getSequenceFileOutputKeyClass(job),
getSequenceFileOutputValueClass(job),
compressionType,
codec,
progress);
return new RecordWriter<BytesWritable, BytesWritable>() {
private WritableValueBytes wvaluebytes = new WritableValueBytes();
public void write(BytesWritable bkey, BytesWritable bvalue)
throws IOException {
wvaluebytes.reset(bvalue);
out.appendRaw(bkey.getBytes(), 0, bkey.getLength(), wvaluebytes);
wvaluebytes.reset(null);
}
public void close(Reporter reporter) throws IOException {
out.close();
}
};
}
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job)
throws IOException {
super.checkOutputSpecs(ignored, job);
if (getCompressOutput(job) &&
getOutputCompressionType(job) == CompressionType.RECORD ){
throw new InvalidJobConfException("SequenceFileAsBinaryOutputFormat "
+ "doesn't support Record Compression" );
}
}
}
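// Illustrative only (not part of Hadoop): a sketch of wiring this output format into a
// JobConf. The use of IntWritable/Text as the declared key/value classes and BLOCK
// compression are arbitrary choices for the example.
class SequenceFileAsBinaryOutputFormatUsageSketch {
  static void configure(JobConf job) {
    job.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
    // declare the logical types of the raw bytes that will be appended
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job,
        org.apache.hadoop.io.IntWritable.class);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job,
        org.apache.hadoop.io.Text.class);
    // BLOCK compression is allowed; RECORD compression is rejected by checkOutputSpecs()
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
  }
}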
| 6,216 | 35.356725 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Progressable;
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface JobContext extends org.apache.hadoop.mapreduce.JobContext {
/**
* Get the job Configuration
*
* @return JobConf
*/
public JobConf getJobConf();
/**
* Get the progress mechanism for reporting progress.
*
* @return progress mechanism
*/
public Progressable getProgressible();
}
| 1,391 | 32.142857 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapOutputFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.fs.Path;
/**
* Manipulate the working area for the transient store for maps and reduces.
*
* This class is used by map and reduce tasks to identify the directories that
* they need to write to/read from for intermediate files. The callers of
* these methods are from child space and see mapreduce.cluster.local.dir as
 * taskTracker/jobCache/jobId/attemptId.
* This class should not be used from TaskTracker space.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class MapOutputFile implements Configurable {
private Configuration conf;
static final String MAP_OUTPUT_FILENAME_STRING = "file.out";
static final String MAP_OUTPUT_INDEX_SUFFIX_STRING = ".index";
static final String REDUCE_INPUT_FILE_FORMAT_STRING = "%s/map_%d.out";
public MapOutputFile() {
}
/**
 * Return the path to the local map output file created earlier
*
* @return path
* @throws IOException
*/
public abstract Path getOutputFile() throws IOException;
/**
* Create a local map output file name.
*
* @param size the size of the file
* @return path
* @throws IOException
*/
public abstract Path getOutputFileForWrite(long size) throws IOException;
/**
* Create a local map output file name on the same volume.
*/
public abstract Path getOutputFileForWriteInVolume(Path existing);
/**
* Return the path to a local map output index file created earlier
*
* @return path
* @throws IOException
*/
public abstract Path getOutputIndexFile() throws IOException;
/**
* Create a local map output index file name.
*
* @param size the size of the file
* @return path
* @throws IOException
*/
public abstract Path getOutputIndexFileForWrite(long size) throws IOException;
/**
* Create a local map output index file name on the same volume.
*/
public abstract Path getOutputIndexFileForWriteInVolume(Path existing);
/**
* Return a local map spill file created earlier.
*
* @param spillNumber the number
* @return path
* @throws IOException
*/
public abstract Path getSpillFile(int spillNumber) throws IOException;
/**
* Create a local map spill file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return path
* @throws IOException
*/
public abstract Path getSpillFileForWrite(int spillNumber, long size)
throws IOException;
/**
* Return a local map spill index file created earlier
*
* @param spillNumber the number
* @return path
* @throws IOException
*/
public abstract Path getSpillIndexFile(int spillNumber) throws IOException;
/**
* Create a local map spill index file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return path
* @throws IOException
*/
public abstract Path getSpillIndexFileForWrite(int spillNumber, long size)
throws IOException;
/**
* Return a local reduce input file created earlier
*
* @param mapId a map task id
* @return path
* @throws IOException
*/
public abstract Path getInputFile(int mapId) throws IOException;
/**
* Create a local reduce input file name.
*
* @param mapId a map task id
* @param size the size of the file
* @return path
* @throws IOException
*/
public abstract Path getInputFileForWrite(
org.apache.hadoop.mapreduce.TaskID mapId, long size) throws IOException;
/** Removes all of the files related to a task. */
public abstract void removeAll() throws IOException;
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
}
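// Illustrative only (not part of Hadoop): shows how the format string above maps a
// (directory, map id) pair to a reduce-side input file name. The directory value used
// by callers is an implementation detail; the one below is just an example.
class MapOutputFileNamingSketch {
  static String reduceInputFileName(String outputDir, int mapId) {
    // e.g. ("output", 3) -> "output/map_3.out"
    return String.format(MapOutputFile.REDUCE_INPUT_FILE_FORMAT_STRING, outputDir, mapId);
  }
}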
| 4,818 | 27.514793 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTaskStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
class ReduceTaskStatus extends TaskStatus {
private long shuffleFinishTime;
private long sortFinishTime;
private List<TaskAttemptID> failedFetchTasks = new ArrayList<TaskAttemptID>(1);
public ReduceTaskStatus() {}
public ReduceTaskStatus(TaskAttemptID taskid, float progress, int numSlots,
State runState, String diagnosticInfo, String stateString,
String taskTracker, Phase phase, Counters counters) {
super(taskid, progress, numSlots, runState, diagnosticInfo, stateString,
taskTracker, phase, counters);
}
@Override
public Object clone() {
ReduceTaskStatus myClone = (ReduceTaskStatus)super.clone();
myClone.failedFetchTasks = new ArrayList<TaskAttemptID>(failedFetchTasks);
return myClone;
}
@Override
public boolean getIsMap() {
return false;
}
@Override
void setFinishTime(long finishTime) {
if (shuffleFinishTime == 0) {
this.shuffleFinishTime = finishTime;
}
if (sortFinishTime == 0){
this.sortFinishTime = finishTime;
}
super.setFinishTime(finishTime);
}
@Override
public long getShuffleFinishTime() {
return shuffleFinishTime;
}
@Override
void setShuffleFinishTime(long shuffleFinishTime) {
this.shuffleFinishTime = shuffleFinishTime;
}
@Override
public long getSortFinishTime() {
return sortFinishTime;
}
@Override
void setSortFinishTime(long sortFinishTime) {
this.sortFinishTime = sortFinishTime;
if (0 == this.shuffleFinishTime){
this.shuffleFinishTime = sortFinishTime;
}
}
@Override
public long getMapFinishTime() {
throw new UnsupportedOperationException(
"getMapFinishTime() not supported for ReduceTask");
}
@Override
void setMapFinishTime(long shuffleFinishTime) {
throw new UnsupportedOperationException(
"setMapFinishTime() not supported for ReduceTask");
}
@Override
public List<TaskAttemptID> getFetchFailedMaps() {
return failedFetchTasks;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
failedFetchTasks.add(mapTaskId);
}
@Override
synchronized void statusUpdate(TaskStatus status) {
super.statusUpdate(status);
if (status.getShuffleFinishTime() != 0) {
this.shuffleFinishTime = status.getShuffleFinishTime();
}
if (status.getSortFinishTime() != 0) {
sortFinishTime = status.getSortFinishTime();
}
List<TaskAttemptID> newFetchFailedMaps = status.getFetchFailedMaps();
if (failedFetchTasks == null) {
failedFetchTasks = newFetchFailedMaps;
} else if (newFetchFailedMaps != null){
failedFetchTasks.addAll(newFetchFailedMaps);
}
}
@Override
synchronized void clearStatus() {
super.clearStatus();
failedFetchTasks.clear();
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
shuffleFinishTime = in.readLong();
sortFinishTime = in.readLong();
int noFailedFetchTasks = in.readInt();
failedFetchTasks = new ArrayList<TaskAttemptID>(noFailedFetchTasks);
for (int i=0; i < noFailedFetchTasks; ++i) {
TaskAttemptID id = new TaskAttemptID();
id.readFields(in);
failedFetchTasks.add(id);
}
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeLong(shuffleFinishTime);
out.writeLong(sortFinishTime);
out.writeInt(failedFetchTasks.size());
for (TaskAttemptID taskId : failedFetchTasks) {
taskId.write(out);
}
}
}
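// Illustrative only (not part of Hadoop): demonstrates the back-filling behaviour of
// setSortFinishTime() above - when no shuffle finish time was ever reported, it is
// assumed to coincide with the end of the sort. The timestamp is arbitrary.
class ReduceTaskStatusTimelineSketch {
  static void demo() {
    ReduceTaskStatus status = new ReduceTaskStatus();
    status.setSortFinishTime(2000L);
    // shuffleFinishTime was still 0, so it has been back-filled to 2000 as well
    assert status.getShuffleFinishTime() == 2000L;
    assert status.getSortFinishTime() == 2000L;
  }
}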
| 4,575 | 26.902439 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/QueueConfigurationParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.QueueState;
import org.apache.hadoop.security.authorize.AccessControlList;
import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName;
import org.xml.sax.SAXException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.DOMException;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.DocumentBuilder;
import java.io.IOException;
import java.io.File;
import java.io.InputStream;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Properties;
import java.util.Set;
import java.util.HashSet;
/**
 * Class for parsing mapred-queues.xml.
 * The format supports nesting of queues within queues - a feature called
 * hierarchical queues. The parser expects the queues to be defined within the
 * 'queues' tag, which is the top-level element of the XML document.
 *
 * Creates the complete queue hierarchy.
 */
class QueueConfigurationParser {
private static final Log LOG =
LogFactory.getLog(QueueConfigurationParser.class);
private boolean aclsEnabled = false;
//Default root.
protected Queue root = null;
//xml tags for mapred-queues.xml
static final String NAME_SEPARATOR = ":";
static final String QUEUE_TAG = "queue";
static final String ACL_SUBMIT_JOB_TAG = "acl-submit-job";
static final String ACL_ADMINISTER_JOB_TAG = "acl-administer-jobs";
// The value read from queues config file for this tag is not used at all.
// To enable queue acls and job acls, mapreduce.cluster.acls.enabled is
// to be set in mapred-site.xml
@Deprecated
static final String ACLS_ENABLED_TAG = "aclsEnabled";
static final String PROPERTIES_TAG = "properties";
static final String STATE_TAG = "state";
static final String QUEUE_NAME_TAG = "name";
static final String QUEUES_TAG = "queues";
static final String PROPERTY_TAG = "property";
static final String KEY_TAG = "key";
static final String VALUE_TAG = "value";
  /**
   * Default constructor for QueueConfigurationParser.
   */
QueueConfigurationParser() {
}
QueueConfigurationParser(String confFile, boolean areAclsEnabled) {
aclsEnabled = areAclsEnabled;
File file = new File(confFile).getAbsoluteFile();
if (!file.exists()) {
throw new RuntimeException("Configuration file not found at " +
confFile);
}
InputStream in = null;
try {
in = new BufferedInputStream(new FileInputStream(file));
loadFrom(in);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
} finally {
IOUtils.closeStream(in);
}
}
QueueConfigurationParser(InputStream xmlInput, boolean areAclsEnabled) {
aclsEnabled = areAclsEnabled;
loadFrom(xmlInput);
}
private void loadFrom(InputStream xmlInput) {
try {
this.root = loadResource(xmlInput);
} catch (ParserConfigurationException e) {
throw new RuntimeException(e);
} catch (SAXException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
void setAclsEnabled(boolean aclsEnabled) {
this.aclsEnabled = aclsEnabled;
}
boolean isAclsEnabled() {
return aclsEnabled;
}
Queue getRoot() {
return root;
}
void setRoot(Queue root) {
this.root = root;
}
  /**
   * Method to load the resource file and generate the root queue.
   *
   * @param resourceInput InputStream that provides the XML to parse
   * @return the root {@link Queue} of the parsed hierarchy
   * @throws ParserConfigurationException
   * @throws SAXException
   * @throws IOException
   */
protected Queue loadResource(InputStream resourceInput)
throws ParserConfigurationException, SAXException, IOException {
DocumentBuilderFactory docBuilderFactory
= DocumentBuilderFactory.newInstance();
//ignore all comments inside the xml file
docBuilderFactory.setIgnoringComments(true);
//allow includes in the xml file
docBuilderFactory.setNamespaceAware(true);
try {
docBuilderFactory.setXIncludeAware(true);
} catch (UnsupportedOperationException e) {
LOG.info(
"Failed to set setXIncludeAware(true) for parser "
+ docBuilderFactory
+ NAME_SEPARATOR + e);
}
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document doc = null;
Element queuesNode = null;
doc = builder.parse(resourceInput);
queuesNode = doc.getDocumentElement();
return this.parseResource(queuesNode);
}
private Queue parseResource(Element queuesNode) {
Queue rootNode = null;
try {
if (!QUEUES_TAG.equals(queuesNode.getTagName())) {
LOG.info("Bad conf file: top-level element not <queues>");
throw new RuntimeException("No queues defined ");
}
NamedNodeMap nmp = queuesNode.getAttributes();
Node acls = nmp.getNamedItem(ACLS_ENABLED_TAG);
if (acls != null) {
LOG.warn("Configuring " + ACLS_ENABLED_TAG + " flag in " +
QueueManager.QUEUE_CONF_FILE_NAME + " is not valid. " +
"This tag is ignored. Configure " +
MRConfig.MR_ACLS_ENABLED + " in mapred-site.xml. See the " +
" documentation of " + MRConfig.MR_ACLS_ENABLED +
", which is used for enabling job level authorization and " +
" queue level authorization.");
}
NodeList props = queuesNode.getChildNodes();
if (props == null || props.getLength() <= 0) {
LOG.info(" Bad configuration no queues defined ");
throw new RuntimeException(" No queues defined ");
}
//We have root level nodes.
for (int i = 0; i < props.getLength(); i++) {
Node propNode = props.item(i);
if (!(propNode instanceof Element)) {
continue;
}
if (!propNode.getNodeName().equals(QUEUE_TAG)) {
LOG.info("At root level only \" queue \" tags are allowed ");
throw
new RuntimeException("Malformed xml document no queue defined ");
}
Element prop = (Element) propNode;
//Add children to root.
Queue q = createHierarchy("", prop);
if(rootNode == null) {
rootNode = new Queue();
rootNode.setName("");
}
rootNode.addChild(q);
}
return rootNode;
} catch (DOMException e) {
LOG.info("Error parsing conf file: " + e);
throw new RuntimeException(e);
}
}
  /**
   * @param parent Name of the parent queue
   * @param queueNode the XML element describing this queue
   * @return the {@link Queue} built from this element, with its children attached
   */
private Queue createHierarchy(String parent, Element queueNode) {
if (queueNode == null) {
return null;
}
//Name of the current queue.
//Complete qualified queue name.
String name = "";
Queue newQueue = new Queue();
Map<String, AccessControlList> acls =
new HashMap<String, AccessControlList>();
NodeList fields = queueNode.getChildNodes();
validate(queueNode);
List<Element> subQueues = new ArrayList<Element>();
String submitKey = "";
String adminKey = "";
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element)) {
continue;
}
Element field = (Element) fieldNode;
if (QUEUE_NAME_TAG.equals(field.getTagName())) {
String nameValue = field.getTextContent();
if (field.getTextContent() == null ||
field.getTextContent().trim().equals("") ||
field.getTextContent().contains(NAME_SEPARATOR)) {
throw new RuntimeException("Improper queue name : " + nameValue);
}
if (!parent.equals("")) {
name += parent + NAME_SEPARATOR;
}
//generate the complete qualified name
//parent.child
name += nameValue;
newQueue.setName(name);
submitKey = toFullPropertyName(name,
QueueACL.SUBMIT_JOB.getAclName());
adminKey = toFullPropertyName(name,
QueueACL.ADMINISTER_JOBS.getAclName());
}
if (QUEUE_TAG.equals(field.getTagName()) && field.hasChildNodes()) {
subQueues.add(field);
}
if(isAclsEnabled()) {
if (ACL_SUBMIT_JOB_TAG.equals(field.getTagName())) {
acls.put(submitKey, new AccessControlList(field.getTextContent()));
}
if (ACL_ADMINISTER_JOB_TAG.equals(field.getTagName())) {
acls.put(adminKey, new AccessControlList(field.getTextContent()));
}
}
if (PROPERTIES_TAG.equals(field.getTagName())) {
Properties properties = populateProperties(field);
newQueue.setProperties(properties);
}
if (STATE_TAG.equals(field.getTagName())) {
String state = field.getTextContent();
newQueue.setState(QueueState.getState(state));
}
}
if (!acls.containsKey(submitKey)) {
acls.put(submitKey, new AccessControlList(" "));
}
if (!acls.containsKey(adminKey)) {
acls.put(adminKey, new AccessControlList(" "));
}
//Set acls
newQueue.setAcls(acls);
//At this point we have the queue ready at current height level.
//so we have parent name available.
for(Element field:subQueues) {
newQueue.addChild(createHierarchy(newQueue.getName(), field));
}
return newQueue;
}
  /**
   * Populate the properties for a Queue.
   *
   * @param field the <properties> element of a queue
   * @return the parsed {@link Properties}
   */
private Properties populateProperties(Element field) {
Properties props = new Properties();
NodeList propfields = field.getChildNodes();
for (int i = 0; i < propfields.getLength(); i++) {
Node prop = propfields.item(i);
//If this node is not of type element
//skip this.
if (!(prop instanceof Element)) {
continue;
}
if (PROPERTY_TAG.equals(prop.getNodeName())) {
if (prop.hasAttributes()) {
NamedNodeMap nmp = prop.getAttributes();
if (nmp.getNamedItem(KEY_TAG) != null && nmp.getNamedItem(
VALUE_TAG) != null) {
props.setProperty(
nmp.getNamedItem(KEY_TAG).getTextContent(), nmp.getNamedItem(
VALUE_TAG).getTextContent());
}
}
}
}
return props;
}
  /**
   * Checks that a NAME_TAG is present for the queue.
   *
   * Also checks that if the queue has children, it does not have acl-* or
   * state tags as siblings; otherwise an exception is thrown.
   * @param node the queue node to validate
   */
private void validate(Node node) {
NodeList fields = node.getChildNodes();
//Check if <queue> & (<acls-*> || <state>) are not siblings
//if yes throw an IOException.
Set<String> siblings = new HashSet<String>();
for (int i = 0; i < fields.getLength(); i++) {
if (!(fields.item(i) instanceof Element)) {
continue;
}
siblings.add((fields.item(i)).getNodeName());
}
if(! siblings.contains(QUEUE_NAME_TAG)) {
throw new RuntimeException(
" Malformed xml formation queue name not specified ");
}
if (siblings.contains(QUEUE_TAG) && (
siblings.contains(ACL_ADMINISTER_JOB_TAG) ||
siblings.contains(ACL_SUBMIT_JOB_TAG) ||
siblings.contains(STATE_TAG)
)) {
throw new RuntimeException(
" Malformed xml formation queue tag and acls " +
"tags or state tags are siblings ");
}
}
private static String getSimpleQueueName(String fullQName) {
int index = fullQName.lastIndexOf(NAME_SEPARATOR);
if (index < 0) {
return fullQName;
}
return fullQName.substring(index + 1, fullQName.length());
}
  /**
   * Construct an {@link Element} for a single queue, constructing the inner
   * queue <name/>, <properties/>, <state/> and the inner
   * <queue> elements recursively.
   *
   * @param document the DOM document used to create the elements
   * @param jqi the queue to serialize
   * @return the <queue> element for the given queue and its children
   */
static Element getQueueElement(Document document, JobQueueInfo jqi) {
// Queue
Element q = document.createElement(QUEUE_TAG);
// Queue-name
Element qName = document.createElement(QUEUE_NAME_TAG);
qName.setTextContent(getSimpleQueueName(jqi.getQueueName()));
q.appendChild(qName);
// Queue-properties
Properties props = jqi.getProperties();
Element propsElement = document.createElement(PROPERTIES_TAG);
if (props != null) {
Set<String> propList = props.stringPropertyNames();
for (String prop : propList) {
Element propertyElement = document.createElement(PROPERTY_TAG);
propertyElement.setAttribute(KEY_TAG, prop);
propertyElement.setAttribute(VALUE_TAG, (String) props.get(prop));
propsElement.appendChild(propertyElement);
}
}
q.appendChild(propsElement);
// Queue-state
String queueState = jqi.getState().getStateName();
if (queueState != null
&& !queueState.equals(QueueState.UNDEFINED.getStateName())) {
Element qStateElement = document.createElement(STATE_TAG);
qStateElement.setTextContent(queueState);
q.appendChild(qStateElement);
}
// Queue-children
List<JobQueueInfo> children = jqi.getChildren();
if (children != null) {
for (JobQueueInfo child : children) {
q.appendChild(getQueueElement(document, child));
}
}
return q;
}
}
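// Illustrative only (not part of Hadoop): parses a small hierarchical queue definition
// from an in-memory stream instead of mapred-queues.xml. The queue names are made up,
// and ACLs are disabled so no acl-* tags are required.
class QueueConfigurationParserSketch {
  static Queue parseSample() {
    String xml =
        "<queues>" +
        "  <queue><name>production</name><state>running</state></queue>" +
        "  <queue>" +
        "    <name>research</name>" +
        "    <queue><name>adhoc</name></queue>" +
        "    <queue><name>batch</name></queue>" +
        "  </queue>" +
        "</queues>";
    InputStream in = new java.io.ByteArrayInputStream(
        xml.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    // root has two children: "production" and "research"; "research" has two
    // children whose fully qualified names are "research:adhoc" and "research:batch"
    return new QueueConfigurationParser(in, false).getRoot();
  }
}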
| 14,570 | 29.870763 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RunningJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* <code>RunningJob</code> is the user-interface to query for details on a
* running Map-Reduce job.
*
* <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
* and then query the running-job for details such as name, configuration,
* progress etc.</p>
*
* @see JobClient
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface RunningJob {
/**
* Get the underlying job configuration
*
* @return the configuration of the job.
*/
public Configuration getConfiguration();
/**
* Get the job identifier.
*
* @return the job identifier.
*/
public JobID getID();
/** @deprecated This method is deprecated and will be removed. Applications should
* rather use {@link #getID()}.
*/
@Deprecated
public String getJobID();
/**
* Get the name of the job.
*
* @return the name of the job.
*/
public String getJobName();
/**
* Get the path of the submitted job configuration.
*
* @return the path of the submitted job configuration.
*/
public String getJobFile();
/**
* Get the URL where some job progress information will be displayed.
*
* @return the URL where some job progress information will be displayed.
*/
public String getTrackingURL();
/**
* Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
* and 1.0. When all map tasks have completed, the function returns 1.0.
*
* @return the progress of the job's map-tasks.
* @throws IOException
*/
public float mapProgress() throws IOException;
/**
* Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
* and 1.0. When all reduce tasks have completed, the function returns 1.0.
*
* @return the progress of the job's reduce-tasks.
* @throws IOException
*/
public float reduceProgress() throws IOException;
/**
* Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0
* and 1.0. When all cleanup tasks have completed, the function returns 1.0.
*
* @return the progress of the job's cleanup-tasks.
* @throws IOException
*/
public float cleanupProgress() throws IOException;
/**
* Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0
* and 1.0. When all setup tasks have completed, the function returns 1.0.
*
* @return the progress of the job's setup-tasks.
* @throws IOException
*/
public float setupProgress() throws IOException;
/**
* Check if the job is finished or not.
* This is a non-blocking call.
*
* @return <code>true</code> if the job is complete, else <code>false</code>.
* @throws IOException
*/
public boolean isComplete() throws IOException;
/**
* Check if the job completed successfully.
*
* @return <code>true</code> if the job succeeded, else <code>false</code>.
* @throws IOException
*/
public boolean isSuccessful() throws IOException;
/**
* Blocks until the job is complete.
*
* @throws IOException
*/
public void waitForCompletion() throws IOException;
/**
* Returns the current state of the Job.
* {@link JobStatus}
*
* @throws IOException
*/
public int getJobState() throws IOException;
/**
* Returns a snapshot of the current status, {@link JobStatus}, of the Job.
* Need to call again for latest information.
*
* @throws IOException
*/
public JobStatus getJobStatus() throws IOException;
/**
* Kill the running job. Blocks until all job tasks have been killed as well.
* If the job is no longer running, it simply returns.
*
* @throws IOException
*/
public void killJob() throws IOException;
/**
* Set the priority of a running job.
* @param priority the new priority for the job.
* @throws IOException
*/
public void setJobPriority(String priority) throws IOException;
/**
* Get events indicating completion (success/failure) of component tasks.
*
* @param startFrom index to start fetching events from
* @return an array of {@link TaskCompletionEvent}s
* @throws IOException
*/
public TaskCompletionEvent[] getTaskCompletionEvents(int startFrom)
throws IOException;
/**
* Kill indicated task attempt.
*
* @param taskId the id of the task to be terminated.
* @param shouldFail if true the task is failed and added to failed tasks
* list, otherwise it is just killed, w/o affecting
* job failure status.
* @throws IOException
*/
public void killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException;
/** @deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}*/
@Deprecated
public void killTask(String taskId, boolean shouldFail) throws IOException;
/**
* Gets the counters for this job.
*
* @return the counters for this job or null if the job has been retired.
* @throws IOException
*/
public Counters getCounters() throws IOException;
/**
* Gets the diagnostic messages for a given task attempt.
* @param taskid
* @return the list of diagnostic messages for the task
* @throws IOException
*/
public String[] getTaskDiagnostics(TaskAttemptID taskid) throws IOException;
/**
* Get the url where history file is archived. Returns empty string if
* history file is not available yet.
*
* @return the url where history file is archived
* @throws IOException
*/
public String getHistoryUrl() throws IOException;
/**
* Check whether the job has been removed from JobTracker memory and retired.
* On retire, the job history file is copied to a location known by
* {@link #getHistoryUrl()}
* @return <code>true</code> if the job retired, else <code>false</code>.
* @throws IOException
*/
public boolean isRetired() throws IOException;
/**
* Get failure info for the job.
* @return the failure info for the job.
* @throws IOException
*/
public String getFailureInfo() throws IOException;
}
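// Illustrative only (not part of Hadoop): a typical polling loop over a RunningJob
// obtained from JobClient.submitJob(). The 5 second sleep interval is an arbitrary
// choice for the example.
class RunningJobPollingSketch {
  static boolean runAndWait(JobConf conf) throws IOException, InterruptedException {
    JobClient client = new JobClient(conf);
    RunningJob job = client.submitJob(conf);
    while (!job.isComplete()) {
      System.out.printf("map %.0f%% reduce %.0f%%%n",
          job.mapProgress() * 100, job.reduceProgress() * 100);
      Thread.sleep(5000);
    }
    return job.isSuccessful();
  }
}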
| 7,151 | 28.8 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/RamManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.InputStream;
/**
 * <code>RamManager</code> manages a memory pool with a configured limit.
*/
interface RamManager {
/**
* Reserve memory for data coming through the given input-stream.
*
* @param requestedSize size of memory requested
* @param in input stream
* @throws InterruptedException
* @return <code>true</code> if memory was allocated immediately,
* else <code>false</code>
*/
boolean reserve(int requestedSize, InputStream in)
throws InterruptedException;
/**
* Return memory to the pool.
*
* @param requestedSize size of memory returned to the pool
*/
void unreserve(int requestedSize);
}
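// A minimal sketch (not the Hadoop implementation): a bounded pool where reserve()
// blocks until enough memory has been returned. The InputStream argument is ignored
// here; a real implementation may use it to decide to spill to disk instead of waiting.
class BoundedRamManagerSketch implements RamManager {
  private final int limit;
  private int used;
  BoundedRamManagerSketch(int limit) {
    this.limit = limit;
  }
  public synchronized boolean reserve(int requestedSize, InputStream in)
      throws InterruptedException {
    // report whether the reservation could be satisfied without waiting
    boolean immediate = (used + requestedSize <= limit);
    while (used + requestedSize > limit) {
      wait(); // woken up by unreserve()
    }
    used += requestedSize;
    return immediate;
  }
  public synchronized void unreserve(int requestedSize) {
    used -= requestedSize;
    notifyAll(); // let blocked reserve() calls re-check the available memory
  }
}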
| 1,518 | 32.755556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.IFile.Reader;
import org.apache.hadoop.mapred.IFile.Writer;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.util.PriorityQueue;
import org.apache.hadoop.util.Progress;
import org.apache.hadoop.util.Progressable;
/**
 * Merger is a utility class used by the Map and Reduce tasks for merging
* both their memory and disk segments
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class Merger {
private static final Log LOG = LogFactory.getLog(Merger.class);
// Local directories
private static LocalDirAllocator lDirAlloc =
new LocalDirAllocator(MRConfig.LOCAL_DIR);
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
Path[] inputs, boolean deleteInputs,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return
new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec, comparator,
reporter, null,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
Path[] inputs, boolean deleteInputs,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator,
Progressable reporter,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Counters.Counter mergedMapOutputsCounter,
Progress mergePhase)
throws IOException {
return
new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec, comparator,
reporter, mergedMapOutputsCounter,
TaskType.REDUCE).merge(
keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
List<Segment<K, V>> segments,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return merge(conf, fs, keyClass, valueClass, segments, mergeFactor, tmpDir,
comparator, reporter, false, readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
List<Segment<K, V>> segments,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
List<Segment<K, V>> segments,
int mergeFactor, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase,
TaskType taskType)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments, codec,
taskType).merge(keyClass, valueClass,
mergeFactor, tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
List<Segment<K, V>> segments,
int mergeFactor, int inMemSegments, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, inMemSegments,
tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
Class<K> keyClass, Class<V> valueClass,
CompressionCodec codec,
List<Segment<K, V>> segments,
int mergeFactor, int inMemSegments, Path tmpDir,
RawComparator<K> comparator, Progressable reporter,
boolean sortSegments,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
sortSegments, codec,
TaskType.REDUCE).merge(keyClass, valueClass,
mergeFactor, inMemSegments,
tmpDir,
readsCounter, writesCounter,
mergePhase);
}
public static <K extends Object, V extends Object>
void writeFile(RawKeyValueIterator records, Writer<K, V> writer,
Progressable progressable, Configuration conf)
throws IOException {
long progressBar = conf.getLong(JobContext.RECORDS_BEFORE_PROGRESS,
10000);
long recordCtr = 0;
while(records.next()) {
writer.append(records.getKey(), records.getValue());
if (((recordCtr++) % progressBar) == 0) {
progressable.progress();
}
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class Segment<K extends Object, V extends Object> {
Reader<K, V> reader = null;
final DataInputBuffer key = new DataInputBuffer();
Configuration conf = null;
FileSystem fs = null;
Path file = null;
boolean preserve = false;
CompressionCodec codec = null;
long segmentOffset = 0;
long segmentLength = -1;
long rawDataLength = -1;
Counters.Counter mapOutputsCounter = null;
public Segment(Configuration conf, FileSystem fs, Path file,
CompressionCodec codec, boolean preserve)
throws IOException {
this(conf, fs, file, codec, preserve, null);
}
public Segment(Configuration conf, FileSystem fs, Path file,
CompressionCodec codec, boolean preserve,
Counters.Counter mergedMapOutputsCounter)
throws IOException {
this(conf, fs, file, 0, fs.getFileStatus(file).getLen(), codec, preserve,
mergedMapOutputsCounter);
}
public Segment(Configuration conf, FileSystem fs, Path file,
CompressionCodec codec, boolean preserve,
Counters.Counter mergedMapOutputsCounter, long rawDataLength)
throws IOException {
this(conf, fs, file, 0, fs.getFileStatus(file).getLen(), codec, preserve,
mergedMapOutputsCounter);
this.rawDataLength = rawDataLength;
}
public Segment(Configuration conf, FileSystem fs, Path file,
long segmentOffset, long segmentLength,
CompressionCodec codec,
boolean preserve) throws IOException {
this(conf, fs, file, segmentOffset, segmentLength, codec, preserve, null);
}
public Segment(Configuration conf, FileSystem fs, Path file,
long segmentOffset, long segmentLength, CompressionCodec codec,
boolean preserve, Counters.Counter mergedMapOutputsCounter)
throws IOException {
this.conf = conf;
this.fs = fs;
this.file = file;
this.codec = codec;
this.preserve = preserve;
this.segmentOffset = segmentOffset;
this.segmentLength = segmentLength;
this.mapOutputsCounter = mergedMapOutputsCounter;
}
public Segment(Reader<K, V> reader, boolean preserve) {
this(reader, preserve, null);
}
public Segment(Reader<K, V> reader, boolean preserve, long rawDataLength) {
this(reader, preserve, null);
this.rawDataLength = rawDataLength;
}
public Segment(Reader<K, V> reader, boolean preserve,
Counters.Counter mapOutputsCounter) {
this.reader = reader;
this.preserve = preserve;
this.segmentLength = reader.getLength();
this.mapOutputsCounter = mapOutputsCounter;
}
void init(Counters.Counter readsCounter) throws IOException {
if (reader == null) {
FSDataInputStream in = fs.open(file);
in.seek(segmentOffset);
in = CryptoUtils.wrapIfNecessary(conf, in);
reader = new Reader<K, V>(conf, in,
segmentLength - CryptoUtils.cryptoPadding(conf),
codec, readsCounter);
}
if (mapOutputsCounter != null) {
mapOutputsCounter.increment(1);
}
}
boolean inMemory() {
return fs == null;
}
DataInputBuffer getKey() { return key; }
DataInputBuffer getValue(DataInputBuffer value) throws IOException {
nextRawValue(value);
return value;
}
public long getLength() {
return (reader == null) ?
segmentLength : reader.getLength();
}
public long getRawDataLength() {
return (rawDataLength > 0) ? rawDataLength : getLength();
}
boolean nextRawKey() throws IOException {
return reader.nextRawKey(key);
}
void nextRawValue(DataInputBuffer value) throws IOException {
reader.nextRawValue(value);
}
void closeReader() throws IOException {
if (reader != null) {
reader.close();
reader = null;
}
}
void close() throws IOException {
closeReader();
if (!preserve && fs != null) {
fs.delete(file, false);
}
}
public long getPosition() throws IOException {
return reader.getPosition();
}
// This method is used by BackupStore to extract the
// absolute position after a reset
long getActualPosition() throws IOException {
return segmentOffset + reader.getPosition();
}
Reader<K,V> getReader() {
return reader;
}
// This method is used by BackupStore to reinitialize the
// reader to start reading from a different segment offset
void reinitReader(int offset) throws IOException {
if (!inMemory()) {
closeReader();
segmentOffset = offset;
segmentLength = fs.getFileStatus(file).getLen() - segmentOffset;
init(null);
}
}
}
private static class MergeQueue<K extends Object, V extends Object>
extends PriorityQueue<Segment<K, V>> implements RawKeyValueIterator {
Configuration conf;
FileSystem fs;
CompressionCodec codec;
List<Segment<K, V>> segments = new ArrayList<Segment<K,V>>();
RawComparator<K> comparator;
private long totalBytesProcessed;
private float progPerByte;
private Progress mergeProgress = new Progress();
Progressable reporter;
DataInputBuffer key;
final DataInputBuffer value = new DataInputBuffer();
final DataInputBuffer diskIFileValue = new DataInputBuffer();
// Boolean variable for including/considering final merge as part of sort
// phase or not. This is true in map task, false in reduce task. It is
// used in calculating mergeProgress.
private boolean includeFinalMerge = false;
/**
* Sets the boolean variable includeFinalMerge to true. Called from
* map task before calling merge() so that final merge of map task
* is also considered as part of sort phase.
*/
private void considerFinalMergeForProgress() {
includeFinalMerge = true;
}
Segment<K, V> minSegment;
Comparator<Segment<K, V>> segmentComparator =
new Comparator<Segment<K, V>>() {
public int compare(Segment<K, V> o1, Segment<K, V> o2) {
if (o1.getLength() == o2.getLength()) {
return 0;
}
return o1.getLength() < o2.getLength() ? -1 : 1;
}
};
public MergeQueue(Configuration conf, FileSystem fs,
Path[] inputs, boolean deleteInputs,
CompressionCodec codec, RawComparator<K> comparator,
Progressable reporter)
throws IOException {
this(conf, fs, inputs, deleteInputs, codec, comparator, reporter, null,
TaskType.REDUCE);
}
public MergeQueue(Configuration conf, FileSystem fs,
Path[] inputs, boolean deleteInputs,
CompressionCodec codec, RawComparator<K> comparator,
Progressable reporter,
Counters.Counter mergedMapOutputsCounter,
TaskType taskType)
throws IOException {
this.conf = conf;
this.fs = fs;
this.codec = codec;
this.comparator = comparator;
this.reporter = reporter;
if (taskType == TaskType.MAP) {
considerFinalMergeForProgress();
}
for (Path file : inputs) {
LOG.debug("MergeQ: adding: " + file);
segments.add(new Segment<K, V>(conf, fs, file, codec, !deleteInputs,
(file.toString().endsWith(
Task.MERGED_OUTPUT_PREFIX) ?
null : mergedMapOutputsCounter)));
}
// Sort segments on file-lengths
Collections.sort(segments, segmentComparator);
}
public MergeQueue(Configuration conf, FileSystem fs,
List<Segment<K, V>> segments, RawComparator<K> comparator,
Progressable reporter) {
this(conf, fs, segments, comparator, reporter, false, TaskType.REDUCE);
}
public MergeQueue(Configuration conf, FileSystem fs,
List<Segment<K, V>> segments, RawComparator<K> comparator,
Progressable reporter, boolean sortSegments, TaskType taskType) {
this.conf = conf;
this.fs = fs;
this.comparator = comparator;
this.segments = segments;
this.reporter = reporter;
if (taskType == TaskType.MAP) {
considerFinalMergeForProgress();
}
if (sortSegments) {
Collections.sort(segments, segmentComparator);
}
}
public MergeQueue(Configuration conf, FileSystem fs,
List<Segment<K, V>> segments, RawComparator<K> comparator,
Progressable reporter, boolean sortSegments, CompressionCodec codec,
TaskType taskType) {
this(conf, fs, segments, comparator, reporter, sortSegments,
taskType);
this.codec = codec;
}
public void close() throws IOException {
Segment<K, V> segment;
while((segment = pop()) != null) {
segment.close();
}
}
public DataInputBuffer getKey() throws IOException {
return key;
}
public DataInputBuffer getValue() throws IOException {
return value;
}
private void adjustPriorityQueue(Segment<K, V> reader) throws IOException{
long startPos = reader.getReader().bytesRead;
boolean hasNext = reader.nextRawKey();
long endPos = reader.getReader().bytesRead;
totalBytesProcessed += endPos - startPos;
mergeProgress.set(totalBytesProcessed * progPerByte);
if (hasNext) {
adjustTop();
} else {
pop();
reader.close();
}
}
private void resetKeyValue() {
key = null;
value.reset(new byte[] {}, 0);
diskIFileValue.reset(new byte[] {}, 0);
}
public boolean next() throws IOException {
if (size() == 0) {
resetKeyValue();
return false;
}
if (minSegment != null) {
//minSegment is non-null for all invocations of next except the first
//one. For the first invocation, the priority queue is ready for use
//but for the subsequent invocations, first adjust the queue
adjustPriorityQueue(minSegment);
if (size() == 0) {
minSegment = null;
resetKeyValue();
return false;
}
}
minSegment = top();
long startPos = minSegment.getReader().bytesRead;
key = minSegment.getKey();
if (!minSegment.inMemory()) {
//When we load the value from an inmemory segment, we reset
//the "value" DIB in this class to the inmem segment's byte[].
//When we load the value bytes from disk, we shouldn't use
//the same byte[] since it would corrupt the data in the inmem
//segment. So we maintain an explicit DIB for value bytes
//obtained from disk, and if the current segment is a disk
//segment, we reset the "value" DIB to the byte[] in that (so
//we reuse the disk segment DIB whenever we consider
//a disk segment).
minSegment.getValue(diskIFileValue);
value.reset(diskIFileValue.getData(), diskIFileValue.getLength());
} else {
minSegment.getValue(value);
}
long endPos = minSegment.getReader().bytesRead;
totalBytesProcessed += endPos - startPos;
mergeProgress.set(totalBytesProcessed * progPerByte);
return true;
}
@SuppressWarnings("unchecked")
protected boolean lessThan(Object a, Object b) {
DataInputBuffer key1 = ((Segment<K, V>)a).getKey();
DataInputBuffer key2 = ((Segment<K, V>)b).getKey();
int s1 = key1.getPosition();
int l1 = key1.getLength() - s1;
int s2 = key2.getPosition();
int l2 = key2.getLength() - s2;
return comparator.compare(key1.getData(), s1, l1, key2.getData(), s2, l2) < 0;
}
public RawKeyValueIterator merge(Class<K> keyClass, Class<V> valueClass,
int factor, Path tmpDir,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return merge(keyClass, valueClass, factor, 0, tmpDir,
readsCounter, writesCounter, mergePhase);
}
RawKeyValueIterator merge(Class<K> keyClass, Class<V> valueClass,
int factor, int inMem, Path tmpDir,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
LOG.info("Merging " + segments.size() + " sorted segments");
      /*
       * If there are in-memory segments, they come first in the segments
       * list, followed by the sorted disk segments. Otherwise (if there are
       * only disk segments), the segments are sorted when there are more
       * than factor segments in the list.
       */
int numSegments = segments.size();
int origFactor = factor;
int passNo = 1;
if (mergePhase != null) {
mergeProgress = mergePhase;
}
long totalBytes = computeBytesInMerges(factor, inMem);
if (totalBytes != 0) {
progPerByte = 1.0f / (float)totalBytes;
}
//create the MergeStreams from the sorted map created in the constructor
//and dump the final output to a file
do {
//get the factor for this pass of merge. We assume in-memory segments
//are the first entries in the segment list and that the pass factor
//doesn't apply to them
factor = getPassFactor(factor, passNo, numSegments - inMem);
if (1 == passNo) {
factor += inMem;
}
List<Segment<K, V>> segmentsToMerge =
new ArrayList<Segment<K, V>>();
int segmentsConsidered = 0;
int numSegmentsToConsider = factor;
long startBytes = 0; // starting bytes of segments of this merge
while (true) {
//extract the smallest 'factor' number of segments
//Call cleanup on the empty segments (no key/value data)
List<Segment<K, V>> mStream =
getSegmentDescriptors(numSegmentsToConsider);
for (Segment<K, V> segment : mStream) {
// Initialize the segment at the last possible moment;
// this helps in ensuring we don't use buffers until we need them
segment.init(readsCounter);
long startPos = segment.getReader().bytesRead;
boolean hasNext = segment.nextRawKey();
long endPos = segment.getReader().bytesRead;
if (hasNext) {
startBytes += endPos - startPos;
segmentsToMerge.add(segment);
segmentsConsidered++;
}
else {
segment.close();
numSegments--; //we ignore this segment for the merge
}
}
//if we have the desired number of segments
//or looked at all available segments, we break
if (segmentsConsidered == factor ||
segments.size() == 0) {
break;
}
numSegmentsToConsider = factor - segmentsConsidered;
}
//feed the streams to the priority queue
initialize(segmentsToMerge.size());
clear();
for (Segment<K, V> segment : segmentsToMerge) {
put(segment);
}
//if we have lesser number of segments remaining, then just return the
//iterator, else do another single level merge
if (numSegments <= factor) {
if (!includeFinalMerge) { // for reduce task
// Reset totalBytesProcessed and recalculate totalBytes from the
// remaining segments to track the progress of the final merge.
// Final merge is considered as the progress of the reducePhase,
// the 3rd phase of reduce task.
totalBytesProcessed = 0;
totalBytes = 0;
for (int i = 0; i < segmentsToMerge.size(); i++) {
totalBytes += segmentsToMerge.get(i).getRawDataLength();
}
}
if (totalBytes != 0) //being paranoid
progPerByte = 1.0f / (float)totalBytes;
totalBytesProcessed += startBytes;
if (totalBytes != 0)
mergeProgress.set(totalBytesProcessed * progPerByte);
else
mergeProgress.set(1.0f); // Last pass and no segments left - we're done
LOG.info("Down to the last merge-pass, with " + numSegments +
" segments left of total size: " +
(totalBytes - totalBytesProcessed) + " bytes");
return this;
} else {
LOG.info("Merging " + segmentsToMerge.size() +
" intermediate segments out of a total of " +
(segments.size()+segmentsToMerge.size()));
long bytesProcessedInPrevMerges = totalBytesProcessed;
totalBytesProcessed += startBytes;
//we want to spread the creation of temp files on multiple disks if
//available under the space constraints
long approxOutputSize = 0;
for (Segment<K, V> s : segmentsToMerge) {
approxOutputSize += s.getLength() +
ChecksumFileSystem.getApproxChkSumLength(
s.getLength());
}
Path tmpFilename =
new Path(tmpDir, "intermediate").suffix("." + passNo);
Path outputFile = lDirAlloc.getLocalPathForWrite(
tmpFilename.toString(),
approxOutputSize, conf);
FSDataOutputStream out = fs.create(outputFile);
out = CryptoUtils.wrapIfNecessary(conf, out);
Writer<K, V> writer = new Writer<K, V>(conf, out, keyClass, valueClass,
codec, writesCounter, true);
writeFile(this, writer, reporter, conf);
writer.close();
//we finished one single level merge; now clean up the priority
//queue
this.close();
// Add the newly create segment to the list of segments to be merged
Segment<K, V> tempSegment =
new Segment<K, V>(conf, fs, outputFile, codec, false);
// Insert new merged segment into the sorted list
int pos = Collections.binarySearch(segments, tempSegment,
segmentComparator);
if (pos < 0) {
// binary search failed. So position to be inserted at is -pos-1
pos = -pos-1;
}
segments.add(pos, tempSegment);
numSegments = segments.size();
        // Subtract the difference between the expected size of the new
        // segment (inputBytesOfThisMerge) and its actual size from totalBytes.
        // Expected and actual sizes will match (almost) if no combiner is
        // called during the merge.
long inputBytesOfThisMerge = totalBytesProcessed -
bytesProcessedInPrevMerges;
totalBytes -= inputBytesOfThisMerge - tempSegment.getRawDataLength();
if (totalBytes != 0) {
progPerByte = 1.0f / (float)totalBytes;
}
passNo++;
}
      //only the first-pass merge factor differs from the configured one, so
      //reset the factor to what it originally was
factor = origFactor;
} while(true);
}
/**
* Determine the number of segments to merge in a given pass. Assuming more
* than factor segments, the first pass should attempt to bring the total
* number of segments - 1 to be divisible by the factor - 1 (each pass
* takes X segments and produces 1) to minimize the number of merges.
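   * For example (illustrative numbers): with factor 10 and 95 segments,
   * (95 - 1) % (10 - 1) = 4, so the first pass merges only 5 segments,
   * leaving 91; after that, (91 - 1) is divisible by 9 and every remaining
   * pass can merge a full 10 segments.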
*/
private int getPassFactor(int factor, int passNo, int numSegments) {
if (passNo > 1 || numSegments <= factor || factor == 1)
return factor;
int mod = (numSegments - 1) % (factor - 1);
if (mod == 0)
return factor;
return mod + 1;
}
/** Return (& remove) the requested number of segment descriptors from the
     * sorted list of segments.
*/
private List<Segment<K, V>> getSegmentDescriptors(int numDescriptors) {
if (numDescriptors > segments.size()) {
List<Segment<K, V>> subList = new ArrayList<Segment<K,V>>(segments);
segments.clear();
return subList;
}
List<Segment<K, V>> subList =
new ArrayList<Segment<K,V>>(segments.subList(0, numDescriptors));
for (int i=0; i < numDescriptors; ++i) {
segments.remove(0);
}
return subList;
}
/**
   * Compute the expected size of input bytes to all merges; this will be used
   * in calculating mergeProgress. It simulates the above merge() method and
   * tries to obtain the number of bytes that are going to be merged in all
   * merges (assuming that no combiner is called while merging).
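   * For example (an illustrative case, assuming includeFinalMerge is false):
   * 28 segments of equal raw length s with factor 10 and no in-memory
   * segments result in two intermediate merges of 10 segments each, so this
   * method would return 20 * s.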
* @param factor mapreduce.task.io.sort.factor
* @param inMem number of segments in memory to be merged
*/
long computeBytesInMerges(int factor, int inMem) {
int numSegments = segments.size();
List<Long> segmentSizes = new ArrayList<Long>(numSegments);
long totalBytes = 0;
int n = numSegments - inMem;
// factor for 1st pass
int f = getPassFactor(factor, 1, n) + inMem;
n = numSegments;
for (int i = 0; i < numSegments; i++) {
      // Empty segments are not handled here, assuming they would not affect
      // the calculation of mergeProgress much.
segmentSizes.add(segments.get(i).getRawDataLength());
}
    // If includeFinalMerge is true, allow the following while loop to iterate
    // one more time. This is to include the final merge as part of the
    // computation of expected input bytes of merges.
boolean considerFinalMerge = includeFinalMerge;
while (n > f || considerFinalMerge) {
        if (n <= f) {
considerFinalMerge = false;
}
long mergedSize = 0;
f = Math.min(f, segmentSizes.size());
for (int j = 0; j < f; j++) {
mergedSize += segmentSizes.remove(0);
}
totalBytes += mergedSize;
// insert new size into the sorted list
int pos = Collections.binarySearch(segmentSizes, mergedSize);
if (pos < 0) {
pos = -pos-1;
}
segmentSizes.add(pos, mergedSize);
n -= (f-1);
f = factor;
}
return totalBytes;
}
public Progress getProgress() {
return mergeProgress;
}
}
}
| 33,572 | 37.678571 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A general identifier, which internally stores the id
* as an integer. This is the super class of {@link JobID},
* {@link TaskID} and {@link TaskAttemptID}.
*
* @see JobID
* @see TaskID
* @see TaskAttemptID
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ID extends org.apache.hadoop.mapreduce.ID {
/** constructs an ID object from the given int */
public ID(int id) {
super(id);
}
protected ID() {
}
}
| 1,428 | 30.065217 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConfigurable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Something that may be configured with a {@link JobConf}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface JobConfigurable {
/** Initializes a new instance from a {@link JobConf}.
*
* @param job the configuration
*/
void configure(JobConf job);
}
| 1,222 | 34.970588 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.SequenceFile;
/**
* InputFormat reading keys, values from SequenceFiles in binary (raw)
* format.
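 *
 * <p>A minimal configuration sketch (hypothetical job setup; the input path
 * is a placeholder):
 * <pre>
 *   JobConf job = new JobConf();
 *   job.setInputFormat(SequenceFileAsBinaryInputFormat.class);
 *   FileInputFormat.setInputPaths(job, new Path("/data/sequence-files"));
 * </pre>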
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFileAsBinaryInputFormat
extends SequenceFileInputFormat<BytesWritable,BytesWritable> {
public SequenceFileAsBinaryInputFormat() {
super();
}
public RecordReader<BytesWritable,BytesWritable> getRecordReader(
InputSplit split, JobConf job, Reporter reporter)
throws IOException {
return new SequenceFileAsBinaryRecordReader(job, (FileSplit)split);
}
/**
* Read records from a SequenceFile as binary (raw) bytes.
*/
public static class SequenceFileAsBinaryRecordReader
implements RecordReader<BytesWritable,BytesWritable> {
private SequenceFile.Reader in;
private long start;
private long end;
private boolean done = false;
private DataOutputBuffer buffer = new DataOutputBuffer();
private SequenceFile.ValueBytes vbytes;
public SequenceFileAsBinaryRecordReader(Configuration conf, FileSplit split)
throws IOException {
Path path = split.getPath();
FileSystem fs = path.getFileSystem(conf);
this.in = new SequenceFile.Reader(fs, path, conf);
this.end = split.getStart() + split.getLength();
if (split.getStart() > in.getPosition())
in.sync(split.getStart()); // sync to start
this.start = in.getPosition();
vbytes = in.createValueBytes();
done = start >= end;
}
public BytesWritable createKey() {
return new BytesWritable();
}
public BytesWritable createValue() {
return new BytesWritable();
}
/**
* Retrieve the name of the key class for this SequenceFile.
* @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName
*/
public String getKeyClassName() {
return in.getKeyClassName();
}
/**
* Retrieve the name of the value class for this SequenceFile.
* @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName
*/
public String getValueClassName() {
return in.getValueClassName();
}
/**
* Read raw bytes from a SequenceFile.
*/
public synchronized boolean next(BytesWritable key, BytesWritable val)
throws IOException {
if (done) return false;
long pos = in.getPosition();
boolean eof = -1 == in.nextRawKey(buffer);
if (!eof) {
key.set(buffer.getData(), 0, buffer.getLength());
buffer.reset();
in.nextRawValue(vbytes);
vbytes.writeUncompressedBytes(buffer);
val.set(buffer.getData(), 0, buffer.getLength());
buffer.reset();
}
return !(done = (eof || (pos >= end && in.syncSeen())));
}
public long getPos() throws IOException {
return in.getPosition();
}
public void close() throws IOException {
in.close();
}
/**
* Return the progress within the input split
* @return 0.0 to 1.0 of the input byte range
*/
public float getProgress() throws IOException {
if (end == start) {
return 0.0f;
} else {
return Math.min(1.0f, (float)((in.getPosition() - start) /
(double)(end - start)));
}
}
}
}
| 4,511 | 31.228571 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BufferSorter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.io.OutputBuffer;
import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator;
import org.apache.hadoop.util.Progressable;
/** This class provides a generic sort interface that should be implemented
* by specific sort algorithms. The use case is the following:
* A user class writes key/value records to a buffer, and finally wants to
* sort the buffer. This interface defines methods by which the user class
* can update the interface implementation with the offsets of the records
* and the lengths of the keys/values. The user class gives a reference to
* the buffer when the latter wishes to sort the records written to the buffer
* so far. Typically, the user class decides the point at which sort should
* happen based on the memory consumed so far by the buffer and the data
* structures maintained by an implementation of this interface. That is why
* a method is provided to get the memory consumed so far by the datastructures
* in the interface implementation.
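 *
 * <p>A minimal usage sketch (hypothetical caller; buffer and threshold names
 * are illustrative):
 * <pre>
 *   sorter.setProgressable(reporter);
 *   sorter.setInputBuffer(outputBuffer);
 *   sorter.addKeyValue(recordOffset, keyLength, valLength);
 *   if (sorter.getMemoryUtilized() + bufferedBytes > spillThreshold) {
 *     RawKeyValueIterator it = sorter.sort();  // spill the sorted run, then
 *     sorter.close();
 *   }
 * </pre>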
*/
interface BufferSorter extends JobConfigurable {
/** Pass the Progressable object so that sort can call progress while it is sorting
* @param reporter the Progressable object reference
*/
public void setProgressable(Progressable reporter);
/** When a key/value is added at a particular offset in the key/value buffer,
* this method is invoked by the user class so that the impl of this sort
* interface can update its datastructures.
* @param recordOffset the offset of the key in the buffer
* @param keyLength the length of the key
* @param valLength the length of the val in the buffer
*/
public void addKeyValue(int recordoffset, int keyLength, int valLength);
/** The user class invokes this method to set the buffer that the specific
* sort algorithm should "indirectly" sort (generally, sort algorithm impl
* should access this buffer via comparators and sort offset-indices to the
* buffer).
* @param buffer the map output buffer
*/
public void setInputBuffer(OutputBuffer buffer);
/** The framework invokes this method to get the memory consumed so far
* by an implementation of this interface.
* @return memoryUsed in bytes
*/
public long getMemoryUtilized();
/** Framework decides when to actually sort
*/
public RawKeyValueIterator sort();
/** Framework invokes this to signal the sorter to cleanup
*/
public void close();
}
| 3,304 | 43.066667 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Charsets;
/**
 * <code>JobQueueClient</code> is the interface provided to the user in order
 * to get JobQueue-related information from the {@link JobTracker}.
 *
 * It provides the facility to list the JobQueues present and the ability to
 * view the list of jobs within a specific JobQueue.
*
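 * Example invocations (illustrative, matching the usage printed by
 * displayUsage):
 * <pre>
 *   queue -list
 *   queue -info <job-queue-name> -showJobs
 *   queue -showacls
 * </pre>
 *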
**/
class JobQueueClient extends Configured implements Tool {
JobClient jc;
public JobQueueClient() {
}
public JobQueueClient(JobConf conf) throws IOException {
setConf(conf);
}
private void init(JobConf conf) throws IOException {
setConf(conf);
jc = new JobClient(conf);
}
@Override
public int run(String[] argv) throws Exception {
int exitcode = -1;
if (argv.length < 1) {
displayUsage("");
return exitcode;
}
String cmd = argv[0];
boolean displayQueueList = false;
boolean displayQueueInfoWithJobs = false;
boolean displayQueueInfoWithoutJobs = false;
boolean displayQueueAclsInfoForCurrentUser = false;
if ("-list".equals(cmd)) {
displayQueueList = true;
} else if ("-showacls".equals(cmd)) {
displayQueueAclsInfoForCurrentUser = true;
} else if ("-info".equals(cmd)) {
if (argv.length == 2 && !(argv[1].equals("-showJobs"))) {
displayQueueInfoWithoutJobs = true;
} else if (argv.length == 3) {
if (argv[2].equals("-showJobs")) {
displayQueueInfoWithJobs = true;
} else {
displayUsage(cmd);
return exitcode;
}
} else {
displayUsage(cmd);
return exitcode;
}
} else {
displayUsage(cmd);
return exitcode;
}
JobConf conf = new JobConf(getConf());
init(conf);
if (displayQueueList) {
displayQueueList();
exitcode = 0;
} else if (displayQueueInfoWithoutJobs) {
displayQueueInfo(argv[1], false);
exitcode = 0;
} else if (displayQueueInfoWithJobs) {
displayQueueInfo(argv[1], true);
exitcode = 0;
} else if (displayQueueAclsInfoForCurrentUser) {
this.displayQueueAclsInfoForCurrentUser();
exitcode = 0;
}
return exitcode;
}
// format and print information about the passed in job queue.
void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer)
throws IOException {
printJobQueueInfo(jobQueueInfo, writer, "");
}
// format and print information about the passed in job queue.
@SuppressWarnings("deprecation")
void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer,
String prefix) throws IOException {
if (jobQueueInfo == null) {
writer.write("No queue found.\n");
writer.flush();
return;
}
writer.write(String.format(prefix + "======================\n"));
writer.write(String.format(prefix + "Queue Name : %s \n",
jobQueueInfo.getQueueName()));
writer.write(String.format(prefix + "Queue State : %s \n",
jobQueueInfo.getQueueState()));
writer.write(String.format(prefix + "Scheduling Info : %s \n",
jobQueueInfo.getSchedulingInfo()));
List<JobQueueInfo> childQueues = jobQueueInfo.getChildren();
if (childQueues != null && childQueues.size() > 0) {
for (int i = 0; i < childQueues.size(); i++) {
printJobQueueInfo(childQueues.get(i), writer, " " + prefix);
}
}
writer.flush();
}
private void displayQueueList() throws IOException {
JobQueueInfo[] rootQueues = jc.getRootQueues();
for (JobQueueInfo queue : rootQueues) {
printJobQueueInfo(queue, new PrintWriter(new OutputStreamWriter(
System.out, Charsets.UTF_8)));
}
}
/**
* Expands the hierarchy of queues and gives the list of all queues in
* depth-first order
* @param rootQueues the top-level queues
* @return the list of all the queues in depth-first order.
*/
List<JobQueueInfo> expandQueueList(JobQueueInfo[] rootQueues) {
List<JobQueueInfo> allQueues = new ArrayList<JobQueueInfo>();
for (JobQueueInfo queue : rootQueues) {
allQueues.add(queue);
if (queue.getChildren() != null) {
JobQueueInfo[] childQueues
= queue.getChildren().toArray(new JobQueueInfo[0]);
allQueues.addAll(expandQueueList(childQueues));
}
}
return allQueues;
}
/**
   * Method used to display information pertaining to a single JobQueue
   * registered with the {@link QueueManager}. Whether the jobs are displayed
   * is determined by the boolean showJobs.
   *
   * @throws IOException
   * @throws InterruptedException
*/
private void displayQueueInfo(String queue, boolean showJobs)
throws IOException, InterruptedException {
JobQueueInfo jobQueueInfo = jc.getQueueInfo(queue);
if (jobQueueInfo == null) {
System.out.println("Queue \"" + queue + "\" does not exist.");
return;
}
printJobQueueInfo(jobQueueInfo, new PrintWriter(new OutputStreamWriter(
System.out, Charsets.UTF_8)));
if (showJobs && (jobQueueInfo.getChildren() == null ||
jobQueueInfo.getChildren().size() == 0)) {
JobStatus[] jobs = jobQueueInfo.getJobStatuses();
if (jobs == null)
jobs = new JobStatus[0];
jc.displayJobList(jobs);
}
}
private void displayQueueAclsInfoForCurrentUser() throws IOException {
QueueAclsInfo[] queueAclsInfoList = jc.getQueueAclsForCurrentUser();
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
if (queueAclsInfoList.length > 0) {
System.out.println("Queue acls for user : " + ugi.getShortUserName());
System.out.println("\nQueue Operations");
System.out.println("=====================");
for (QueueAclsInfo queueInfo : queueAclsInfoList) {
System.out.print(queueInfo.getQueueName() + " ");
String[] ops = queueInfo.getOperations();
Arrays.sort(ops);
int max = ops.length - 1;
for (int j = 0; j < ops.length; j++) {
System.out.print(ops[j].replaceFirst("acl-", ""));
if (j < max) {
System.out.print(",");
}
}
System.out.println();
}
} else {
System.out.println("User " + ugi.getShortUserName()
+ " does not have access to any queue. \n");
}
}
private void displayUsage(String cmd) {
String prefix = "Usage: queue ";
if ("-queueinfo".equals(cmd)) {
System.err.println(prefix + "[" + cmd + "<job-queue-name> [-showJobs]]");
} else {
System.err.printf(prefix + "<command> <args>%n");
System.err.printf("\t[-list]%n");
System.err.printf("\t[-info <job-queue-name> [-showJobs]]%n");
System.err.printf("\t[-showacls] %n%n");
ToolRunner.printGenericCommandUsage(System.out);
}
}
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new JobQueueClient(), argv);
System.exit(res);
}
}
| 8,117 | 32.134694 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTaskCompletionEventsUpdate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* A class that represents the communication between the tasktracker and child
* tasks w.r.t the map task completion events. It also indicates whether the
* child task should reset its events index.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MapTaskCompletionEventsUpdate implements Writable {
TaskCompletionEvent[] events;
boolean reset;
public MapTaskCompletionEventsUpdate() { }
public MapTaskCompletionEventsUpdate(TaskCompletionEvent[] events,
boolean reset) {
this.events = events;
this.reset = reset;
}
public boolean shouldReset() {
return reset;
}
public TaskCompletionEvent[] getMapTaskCompletionEvents() {
return events;
}
public void write(DataOutput out) throws IOException {
out.writeBoolean(reset);
out.writeInt(events.length);
for (TaskCompletionEvent event : events) {
event.write(out);
}
}
public void readFields(DataInput in) throws IOException {
reset = in.readBoolean();
events = new TaskCompletionEvent[in.readInt()];
for (int i = 0; i < events.length; ++i) {
events[i] = new TaskCompletionEvent();
events[i].readFields(in);
}
}
}
| 2,276 | 30.625 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IndexRecord.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.LimitedPrivate({"MapReduce"})
@InterfaceStability.Unstable
public class IndexRecord {
public long startOffset;
public long rawLength;
public long partLength;
public IndexRecord() { }
public IndexRecord(long startOffset, long rawLength, long partLength) {
this.startOffset = startOffset;
this.rawLength = rawLength;
this.partLength = partLength;
}
}
| 1,356 | 34.710526 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Clock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
* A clock class - can be mocked out for testing.
*/
class Clock {
long getTime() {
return System.currentTimeMillis();
}
}
| 978 | 33.964286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/StatePeriodicStats.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
*
* This class is a concrete PeriodicStatsAccumulator that deals with
* measurements where the raw data are a measurement of a
* time-varying quantity. The result in each bucket is the estimate
* of the progress-weighted mean value of that quantity over the
* progress range covered by the bucket.
*
* <p>An easy-to-understand example of this kind of quantity would be
* a temperature. It makes sense to consider the mean temperature
* over a progress range.
*
*/
class StatePeriodicStats extends PeriodicStatsAccumulator {
StatePeriodicStats(int count) {
super(count);
}
/**
*
* accumulates a new reading by keeping a running account of the
* area under the piecewise linear curve marked by pairs of
* {@code newProgress, newValue} .
*/
@Override
protected void extendInternal(double newProgress, int newValue) {
if (state == null) {
return;
}
// the effective height of this trapezoid if rectangularized
double mean = ((double)newValue + (double)state.oldValue)/2.0D;
// conceptually mean * (newProgress - state.oldProgress) / (1 / count)
state.currentAccumulation += mean * (newProgress - state.oldProgress) * count;
}
}
| 2,060 | 34.534483 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.jobcontrol;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
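/**
 * A usage sketch for the job-control driver loop (illustrative; job1 and job2
 * are placeholder {@link Job} instances and error handling is omitted):
 * <pre>
 *   JobControl control = new JobControl("my-group");
 *   control.addJob(job1);
 *   control.addJob(job2);              // job2 may list job1 as a dependency
 *   Thread runner = new Thread(control);
 *   runner.start();
 *   while (!control.allFinished()) {
 *     Thread.sleep(500);
 *   }
 *   control.stop();
 * </pre>
 */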
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobControl extends
org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl {
/**
* Construct a job control for a group of jobs.
* @param groupName a name identifying this group
*/
public JobControl(String groupName) {
super(groupName);
}
static ArrayList<Job> castToJobList(List<ControlledJob> cjobs) {
ArrayList<Job> ret = new ArrayList<Job>();
for (ControlledJob job : cjobs) {
ret.add((Job)job);
}
return ret;
}
/**
* @return the jobs in the waiting state
*/
public ArrayList<Job> getWaitingJobs() {
return castToJobList(super.getWaitingJobList());
}
/**
* @return the jobs in the running state
*/
public ArrayList<Job> getRunningJobs() {
return castToJobList(super.getRunningJobList());
}
/**
* @return the jobs in the ready state
*/
public ArrayList<Job> getReadyJobs() {
return castToJobList(super.getReadyJobsList());
}
/**
* @return the jobs in the success state
*/
public ArrayList<Job> getSuccessfulJobs() {
return castToJobList(super.getSuccessfulJobList());
}
public ArrayList<Job> getFailedJobs() {
return castToJobList(super.getFailedJobList());
}
/**
* Add a collection of jobs
*
* @param jobs
*/
public void addJobs(Collection <Job> jobs) {
for (Job job : jobs) {
addJob(job);
}
}
/**
* @return the thread state
*/
public int getState() {
ThreadState state = super.getThreadState();
if (state == ThreadState.RUNNING) {
return 0;
}
if (state == ThreadState.SUSPENDED) {
return 1;
}
if (state == ThreadState.STOPPED) {
return 2;
}
if (state == ThreadState.STOPPING) {
return 3;
}
if (state == ThreadState.READY ) {
return 4;
}
return -1;
}
}
| 3,005 | 24.692308 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.jobcontrol;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Job extends ControlledJob {
static final Log LOG = LogFactory.getLog(Job.class);
final public static int SUCCESS = 0;
final public static int WAITING = 1;
final public static int RUNNING = 2;
final public static int READY = 3;
final public static int FAILED = 4;
final public static int DEPENDENT_FAILED = 5;
/**
* Construct a job.
* @param jobConf a mapred job configuration representing a job to be executed.
* @param dependingJobs an array of jobs the current job depends on
*/
@SuppressWarnings("unchecked")
public Job(JobConf jobConf, ArrayList<?> dependingJobs) throws IOException {
super(org.apache.hadoop.mapreduce.Job.getInstance(jobConf),
(List<ControlledJob>) dependingJobs);
}
public Job(JobConf conf) throws IOException {
super(conf);
}
/**
* @return the mapred ID of this job as assigned by the mapred framework.
*/
public JobID getAssignedJobID() {
org.apache.hadoop.mapreduce.JobID temp = super.getMapredJobId();
if (temp == null) {
return null;
}
return JobID.downgrade(temp);
}
/**
* @deprecated setAssignedJobID should not be called.
* JOBID is set by the framework.
*/
@Deprecated
public void setAssignedJobID(JobID mapredJobID) {
// do nothing
}
/**
* @return the mapred job conf of this job
*/
public synchronized JobConf getJobConf() {
return new JobConf(super.getJob().getConfiguration());
}
/**
* Set the mapred job conf for this job.
* @param jobConf the mapred job conf for this job.
*/
public synchronized void setJobConf(JobConf jobConf) {
try {
super.setJob(org.apache.hadoop.mapreduce.Job.getInstance(jobConf));
} catch (IOException ioe) {
LOG.info("Exception" + ioe);
}
}
/**
* @return the state of this job
*/
public synchronized int getState() {
State state = super.getJobState();
if (state == State.SUCCESS) {
return SUCCESS;
}
if (state == State.WAITING) {
return WAITING;
}
if (state == State.RUNNING) {
return RUNNING;
}
if (state == State.READY) {
return READY;
}
if (state == State.FAILED ) {
return FAILED;
}
if (state == State.DEPENDENT_FAILED ) {
return DEPENDENT_FAILED;
}
return -1;
}
/**
   * This is a no-op method. Its behavior changed from 1.x: the state can no
   * longer be changed from the job.
*
* @param state
* the new state for this job.
*/
@Deprecated
protected synchronized void setState(int state) {
    // No-op, we don't want to change the state
}
/**
* Add a job to this jobs' dependency list.
* Dependent jobs can only be added while a Job
* is waiting to run, not during or afterwards.
*
* @param dependingJob Job that this Job depends on.
* @return <tt>true</tt> if the Job was added.
*/
public synchronized boolean addDependingJob(Job dependingJob) {
return super.addDependingJob(dependingJob);
}
/**
* @return the job client of this job
*/
public JobClient getJobClient() {
try {
return new JobClient(super.getJob().getConfiguration());
} catch (IOException ioe) {
return null;
}
}
/**
* @return the depending jobs of this job
*/
public ArrayList<Job> getDependingJobs() {
return JobControl.castToJobList(super.getDependentJobs());
}
/**
* @return the mapred ID of this job as assigned by the mapred framework.
*/
public synchronized String getMapredJobID() {
if (super.getMapredJobId() != null) {
return super.getMapredJobId().toString();
}
return null;
}
/**
   * This is a no-op method kept for backward compatibility. Its behavior
   * changed from 1.x: job IDs can no longer be changed from the job.
*
* @param mapredJobID
* the mapred job ID for this job.
*/
@Deprecated
public synchronized void setMapredJobID(String mapredJobID) {
setAssignedJobID(JobID.forName(mapredJobID));
}
}
| 5,401 | 27.135417 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A RecordReader that can effect joins of RecordReaders sharing a common key
* type and partitioning.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CompositeRecordReader<
K extends WritableComparable, // key type
V extends Writable, // accepts RecordReader<K,V> as children
X extends Writable> // emits Writables of this type
implements Configurable {
private int id;
private Configuration conf;
private final ResetableIterator<X> EMPTY = new ResetableIterator.EMPTY<X>();
private WritableComparator cmp;
private Class<? extends WritableComparable> keyclass;
private PriorityQueue<ComposableRecordReader<K,?>> q;
protected final JoinCollector jc;
protected final ComposableRecordReader<K,? extends V>[] kids;
protected abstract boolean combine(Object[] srcs, TupleWritable value);
/**
* Create a RecordReader with <tt>capacity</tt> children to position
* <tt>id</tt> in the parent reader.
* The id of a root CompositeRecordReader is -1 by convention, but relying
* on this is not recommended.
*/
@SuppressWarnings("unchecked") // Generic array assignment
public CompositeRecordReader(int id, int capacity,
Class<? extends WritableComparator> cmpcl)
throws IOException {
assert capacity > 0 : "Invalid capacity";
this.id = id;
if (null != cmpcl) {
cmp = ReflectionUtils.newInstance(cmpcl, null);
q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
new Comparator<ComposableRecordReader<K,?>>() {
public int compare(ComposableRecordReader<K,?> o1,
ComposableRecordReader<K,?> o2) {
return cmp.compare(o1.key(), o2.key());
}
});
}
jc = new JoinCollector(capacity);
kids = new ComposableRecordReader[capacity];
}
/**
* Return the position in the collector this class occupies.
*/
public int id() {
return id;
}
/**
* {@inheritDoc}
*/
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* {@inheritDoc}
*/
public Configuration getConf() {
return conf;
}
/**
* Return sorted list of RecordReaders for this composite.
*/
protected PriorityQueue<ComposableRecordReader<K,?>> getRecordReaderQueue() {
return q;
}
/**
* Return comparator defining the ordering for RecordReaders in this
* composite.
*/
protected WritableComparator getComparator() {
return cmp;
}
/**
* Add a RecordReader to this collection.
* The id() of a RecordReader determines where in the Tuple its
* entry will appear. Adding RecordReaders with the same id has
* undefined behavior.
*/
public void add(ComposableRecordReader<K,? extends V> rr) throws IOException {
kids[rr.id()] = rr;
if (null == q) {
cmp = WritableComparator.get(rr.createKey().getClass(), conf);
q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
new Comparator<ComposableRecordReader<K,?>>() {
public int compare(ComposableRecordReader<K,?> o1,
ComposableRecordReader<K,?> o2) {
return cmp.compare(o1.key(), o2.key());
}
});
}
if (rr.hasNext()) {
q.add(rr);
}
}
/**
* Collector for join values.
* This accumulates values for a given key from the child RecordReaders. If
* one or more child RR contain duplicate keys, this will emit the cross
* product of the associated values until exhausted.
*/
class JoinCollector {
private K key;
private ResetableIterator<X>[] iters;
private int pos = -1;
private boolean first = true;
/**
* Construct a collector capable of handling the specified number of
* children.
*/
@SuppressWarnings("unchecked") // Generic array assignment
public JoinCollector(int card) {
iters = new ResetableIterator[card];
for (int i = 0; i < iters.length; ++i) {
iters[i] = EMPTY;
}
}
/**
* Register a given iterator at position id.
*/
public void add(int id, ResetableIterator<X> i)
throws IOException {
iters[id] = i;
}
/**
* Return the key associated with this collection.
*/
public K key() {
return key;
}
/**
* Codify the contents of the collector to be iterated over.
* When this is called, all RecordReaders registered for this
* key should have added ResetableIterators.
*/
public void reset(K key) {
this.key = key;
first = true;
pos = iters.length - 1;
for (int i = 0; i < iters.length; ++i) {
iters[i].reset();
}
}
/**
* Clear all state information.
*/
public void clear() {
key = null;
pos = -1;
for (int i = 0; i < iters.length; ++i) {
iters[i].clear();
iters[i] = EMPTY;
}
}
/**
* Returns false if exhausted or if reset(K) has not been called.
*/
protected boolean hasNext() {
return !(pos < 0);
}
/**
* Populate Tuple from iterators.
* It should be the case that, given iterators i_1...i_n over values from
* sources s_1...s_n sharing key k, repeated calls to next should yield
     * the cross product i_1 x ... x i_n.
*/
@SuppressWarnings("unchecked") // No static typeinfo on Tuples
protected boolean next(TupleWritable val) throws IOException {
if (first) {
int i = -1;
for (pos = 0; pos < iters.length; ++pos) {
if (iters[pos].hasNext() && iters[pos].next((X)val.get(pos))) {
i = pos;
val.setWritten(i);
}
}
pos = i;
first = false;
if (pos < 0) {
clear();
return false;
}
return true;
}
while (0 <= pos && !(iters[pos].hasNext() &&
iters[pos].next((X)val.get(pos)))) {
--pos;
}
if (pos < 0) {
clear();
return false;
}
val.setWritten(pos);
for (int i = 0; i < pos; ++i) {
if (iters[i].replay((X)val.get(i))) {
val.setWritten(i);
}
}
while (pos + 1 < iters.length) {
++pos;
iters[pos].reset();
if (iters[pos].hasNext() && iters[pos].next((X)val.get(pos))) {
val.setWritten(pos);
}
}
return true;
}
/**
* Replay the last Tuple emitted.
*/
@SuppressWarnings("unchecked") // No static typeinfo on Tuples
public boolean replay(TupleWritable val) throws IOException {
// The last emitted tuple might have drawn on an empty source;
// it can't be cleared prematurely, b/c there may be more duplicate
// keys in iterator positions < pos
assert !first;
boolean ret = false;
for (int i = 0; i < iters.length; ++i) {
if (iters[i].replay((X)val.get(i))) {
val.setWritten(i);
ret = true;
}
}
return ret;
}
/**
* Close all child iterators.
*/
public void close() throws IOException {
for (int i = 0; i < iters.length; ++i) {
iters[i].close();
}
}
/**
* Write the next value into key, value as accepted by the operation
* associated with this set of RecordReaders.
*/
public boolean flush(TupleWritable value) throws IOException {
while (hasNext()) {
value.clearWritten();
if (next(value) && combine(kids, value)) {
return true;
}
}
return false;
}
}
/**
* Return the key for the current join or the value at the top of the
* RecordReader heap.
*/
public K key() {
if (jc.hasNext()) {
return jc.key();
}
if (!q.isEmpty()) {
return q.peek().key();
}
return null;
}
/**
* Clone the key at the top of this RR into the given object.
*/
public void key(K key) throws IOException {
WritableUtils.cloneInto(key, key());
}
/**
* Return true if it is possible that this could emit more values.
*/
public boolean hasNext() {
return jc.hasNext() || !q.isEmpty();
}
/**
* Pass skip key to child RRs.
*/
public void skip(K key) throws IOException {
ArrayList<ComposableRecordReader<K,?>> tmp =
new ArrayList<ComposableRecordReader<K,?>>();
while (!q.isEmpty() && cmp.compare(q.peek().key(), key) <= 0) {
tmp.add(q.poll());
}
for (ComposableRecordReader<K,?> rr : tmp) {
rr.skip(key);
if (rr.hasNext()) {
q.add(rr);
}
}
}
/**
* Obtain an iterator over the child RRs apropos of the value type
* ultimately emitted from this join.
*/
protected abstract ResetableIterator<X> getDelegate();
/**
* If key provided matches that of this Composite, give JoinCollector
* iterator over values it may emit.
*/
@SuppressWarnings("unchecked") // No values from static EMPTY class
public void accept(CompositeRecordReader.JoinCollector jc, K key)
throws IOException {
if (hasNext() && 0 == cmp.compare(key, key())) {
fillJoinCollector(createKey());
jc.add(id, getDelegate());
return;
}
jc.add(id, EMPTY);
}
/**
* For all child RRs offering the key provided, obtain an iterator
* at that position in the JoinCollector.
*/
protected void fillJoinCollector(K iterkey) throws IOException {
if (!q.isEmpty()) {
q.peek().key(iterkey);
while (0 == cmp.compare(q.peek().key(), iterkey)) {
ComposableRecordReader<K,?> t = q.poll();
t.accept(jc, iterkey);
if (t.hasNext()) {
q.add(t);
} else if (q.isEmpty()) {
return;
}
}
}
}
/**
* Implement Comparable contract (compare key of join or head of heap
* with that of another).
*/
public int compareTo(ComposableRecordReader<K,?> other) {
return cmp.compare(key(), other.key());
}
/**
* Create a new key value common to all child RRs.
* @throws ClassCastException if key classes differ.
*/
@SuppressWarnings("unchecked") // Explicit check for key class agreement
public K createKey() {
if (null == keyclass) {
final Class<?> cls = kids[0].createKey().getClass();
for (RecordReader<K,? extends Writable> rr : kids) {
if (!cls.equals(rr.createKey().getClass())) {
throw new ClassCastException("Child key classes fail to agree");
}
}
keyclass = cls.asSubclass(WritableComparable.class);
}
return (K) ReflectionUtils.newInstance(keyclass, getConf());
}
/**
* Create a value to be used internally for joins.
*/
protected TupleWritable createInternalValue() {
Writable[] vals = new Writable[kids.length];
for (int i = 0; i < vals.length; ++i) {
vals[i] = kids[i].createValue();
}
return new TupleWritable(vals);
}
/**
* Unsupported (returns zero in all cases).
*/
public long getPos() throws IOException {
return 0;
}
/**
* Close all child RRs.
*/
public void close() throws IOException {
if (kids != null) {
for (RecordReader<K,? extends Writable> rr : kids) {
rr.close();
}
}
if (jc != null) {
jc.close();
}
}
/**
* Report progress as the minimum of all child RR progress.
*/
public float getProgress() throws IOException {
float ret = 1.0f;
for (RecordReader<K,? extends Writable> rr : kids) {
ret = Math.min(ret, rr.getProgress());
}
return ret;
}
}
| 13,024 | 27.010753 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/StreamBackedIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* This class provides an implementation of ResetableIterator. This
* implementation uses a byte array to store elements added to it.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class StreamBackedIterator<X extends Writable>
extends org.apache.hadoop.mapreduce.lib.join.StreamBackedIterator<X>
implements ResetableIterator<X> {
}
| 1,368 | 39.264706 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
/**
* Prefer the "rightmost" data source for this key.
* For example, <tt>override(S1,S2,S3)</tt> will prefer values
* from S3 over S2, and values from S2 over S1 for all keys
* emitted from all sources.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class OverrideRecordReader<K extends WritableComparable,
V extends Writable>
extends MultiFilterRecordReader<K,V> {
OverrideRecordReader(int id, JobConf conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, conf, capacity, cmpcl);
}
/**
* Emit the value with the highest position in the tuple.
*/
@SuppressWarnings("unchecked") // No static typeinfo on Tuples
protected V emit(TupleWritable dst) {
return (V) dst.iterator().next();
}
/**
* Instead of filling the JoinCollector with iterators from all
* data sources, fill only the rightmost for this key.
* This not only saves space by discarding the other sources, but
* it also emits the number of key-value pairs in the preferred
* RecordReader instead of repeating that stream n times, where
* n is the cardinality of the cross product of the discarded
* streams for the given key.
*/
protected void fillJoinCollector(K iterkey) throws IOException {
final PriorityQueue<ComposableRecordReader<K,?>> q = getRecordReaderQueue();
if (!q.isEmpty()) {
int highpos = -1;
ArrayList<ComposableRecordReader<K,?>> list =
new ArrayList<ComposableRecordReader<K,?>>(kids.length);
q.peek().key(iterkey);
final WritableComparator cmp = getComparator();
while (0 == cmp.compare(q.peek().key(), iterkey)) {
ComposableRecordReader<K,?> t = q.poll();
if (-1 == highpos || list.get(highpos).id() < t.id()) {
highpos = list.size();
}
list.add(t);
if (q.isEmpty())
break;
}
ComposableRecordReader<K,?> t = list.remove(highpos);
t.accept(jc, iterkey);
for (ComposableRecordReader<K,?> rr : list) {
rr.skip(iterkey);
}
list.add(t);
for (ComposableRecordReader<K,?> rr : list) {
if (rr.hasNext()) {
q.add(rr);
}
}
}
}
}
| 3,500 | 34.72449 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/InnerJoinRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
/**
* Full inner join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InnerJoinRecordReader<K extends WritableComparable>
extends JoinRecordReader<K> {
InnerJoinRecordReader(int id, JobConf conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, conf, capacity, cmpcl);
}
/**
* Return true iff the tuple is full (all data sources contain this key).
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
for (int i = 0; i < srcs.length; ++i) {
if (!dst.has(i)) {
return false;
}
}
return true;
}
}
| 1,806 | 31.854545 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
/**
* An InputFormat capable of performing joins over a set of data sources sorted
* and partitioned the same way.
*
* A user may define new join types by setting the property
* <tt>mapred.join.define.<ident></tt> to a classname. In the expression
* <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
* ComposableRecordReader.
* <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
* in the join.
* @see #setFormat
* @see JoinRecordReader
* @see MultiFilterRecordReader
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CompositeInputFormat<K extends WritableComparable>
implements ComposableInputFormat<K,TupleWritable> {
// expression parse tree to which IF requests are proxied
private Parser.Node root;
public CompositeInputFormat() { }
/**
* Interpret a given string as a composite expression.
* {@code
* func ::= <ident>([<func>,]*<func>)
* func ::= tbl(<class>,"<path>")
* class ::= @see java.lang.Class#forName(java.lang.String)
* path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
* }
* Reads expression from the <tt>mapred.join.expr</tt> property and
* user-supplied join types from <tt>mapred.join.define.<ident></tt>
* types. Paths supplied to <tt>tbl</tt> are given as input paths to the
* InputFormat class listed.
* @see #compose(java.lang.String, java.lang.Class, java.lang.String...)
*/
public void setFormat(JobConf job) throws IOException {
addDefaults();
addUserIdentifiers(job);
root = Parser.parse(job.get("mapred.join.expr", null), job);
}
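/* Example (paths are hypothetical): a two-way outer join over SequenceFile
 * inputs can be written directly in the grammar above and handed to this
 * method through the configuration:
 *
 *   job.set("mapred.join.expr",
 *       "outer(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,\"/data/a\"),"
 *       + "tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,\"/data/b\"))");
 *
 * setFormat(job) then parses the expression into the node tree rooted at root.
 */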
/**
* Adds the default set of identifiers to the parser.
*/
protected void addDefaults() {
try {
Parser.CNode.addIdentifier("inner", InnerJoinRecordReader.class);
Parser.CNode.addIdentifier("outer", OuterJoinRecordReader.class);
Parser.CNode.addIdentifier("override", OverrideRecordReader.class);
Parser.WNode.addIdentifier("tbl", WrappedRecordReader.class);
} catch (NoSuchMethodException e) {
throw new RuntimeException("FATAL: Failed to init defaults", e);
}
}
/**
* Inform the parser of user-defined types.
*/
private void addUserIdentifiers(JobConf job) throws IOException {
Pattern x = Pattern.compile("^mapred\\.join\\.define\\.(\\w+)$");
for (Map.Entry<String,String> kv : job) {
Matcher m = x.matcher(kv.getKey());
if (m.matches()) {
try {
Parser.CNode.addIdentifier(m.group(1),
job.getClass(m.group(0), null, ComposableRecordReader.class));
} catch (NoSuchMethodException e) {
throw (IOException)new IOException(
"Invalid define for " + m.group(1)).initCause(e);
}
}
}
}
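/* Sketch of a user-defined join type (class and identifier names are
 * hypothetical): any reader with the (int, JobConf, int, Class) constructor
 * expected by the parser, e.g. a subclass of JoinRecordReader, can be
 * registered and then referenced from the expression:
 *
 *   job.setClass("mapred.join.define.myjoin",
 *                MyJoinRecordReader.class, ComposableRecordReader.class);
 *   job.set("mapred.join.expr", CompositeInputFormat.compose(
 *       "myjoin", SequenceFileInputFormat.class, "/data/a", "/data/b"));
 */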
/**
* Build a CompositeInputSplit from the child InputFormats by assigning the
* ith split from each child to the ith composite split.
*/
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
setFormat(job);
job.setLong("mapred.min.split.size", Long.MAX_VALUE);
return root.getSplits(job, numSplits);
}
/**
* Construct a CompositeRecordReader for the children of this InputFormat
* as defined in the init expression.
* The outermost join need only be composable, not necessarily a composite.
* Mandating TupleWritable isn't strictly correct.
*/
@SuppressWarnings("unchecked") // child types unknown
public ComposableRecordReader<K,TupleWritable> getRecordReader(
InputSplit split, JobConf job, Reporter reporter) throws IOException {
setFormat(job);
return root.getRecordReader(split, job, reporter);
}
/**
* Convenience method for constructing composite formats.
* Given InputFormat class (inf), path (p) return:
* {@code tbl(<inf>, <p>) }
*/
public static String compose(Class<? extends InputFormat> inf, String path) {
return compose(inf.getName().intern(), path, new StringBuffer()).toString();
}
/**
* Convenience method for constructing composite formats.
* Given operation (op), Object class (inf), set of paths (p) return:
* {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }
*/
public static String compose(String op, Class<? extends InputFormat> inf,
String... path) {
final String infname = inf.getName();
StringBuffer ret = new StringBuffer(op + '(');
for (String p : path) {
compose(infname, p, ret);
ret.append(',');
}
ret.setCharAt(ret.length() - 1, ')');
return ret.toString();
}
/**
* Convenience method for constructing composite formats.
* Given operation (op), Object class (inf), set of paths (p) return:
* {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }
*/
public static String compose(String op, Class<? extends InputFormat> inf,
Path... path) {
ArrayList<String> tmp = new ArrayList<String>(path.length);
for (Path p : path) {
tmp.add(p.toString());
}
return compose(op, inf, tmp.toArray(new String[0]));
}
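/* Typical driver usage (driver class and paths are hypothetical): the compose()
 * overloads above build the mapred.join.expr string so a driver never writes
 * the grammar by hand:
 *
 *   JobConf job = new JobConf(MyJoinDriver.class);
 *   job.setInputFormat(CompositeInputFormat.class);
 *   job.set("mapred.join.expr", CompositeInputFormat.compose(
 *       "inner", SequenceFileInputFormat.class,
 *       new Path("/data/a"), new Path("/data/b")));
 *
 * which yields (wrapped here for readability):
 *   inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/data/a"),
 *         tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/data/b"))
 */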
private static StringBuffer compose(String inf, String path,
StringBuffer sb) {
sb.append("tbl(" + inf + ",\"");
sb.append(path);
sb.append("\")");
return sb;
}
}
| 6,636 | 34.682796 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
/**
* Refinement of InputFormat requiring implementors to provide
* ComposableRecordReader instead of RecordReader.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ComposableInputFormat<K extends WritableComparable,
V extends Writable>
extends InputFormat<K,V> {
ComposableRecordReader<K,V> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException;
}
| 1,714 | 37.111111 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OuterJoinRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobConf;
/**
* Full outer join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class OuterJoinRecordReader<K extends WritableComparable>
extends JoinRecordReader<K> {
OuterJoinRecordReader(int id, JobConf conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, conf, capacity, cmpcl);
}
/**
* Emit everything from the collector.
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
return true;
}
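/* Worked illustration (hypothetical records): unlike the inner join, a tuple is
 * emitted for every key seen by any source; unmatched positions stay unset, so
 * consumers should guard with TupleWritable.has(i).
 *
 *   A: (k1, a1), (k2, a2)        B: (k1, b1), (k3, b3)
 *   outer join emits:  k1 -> [a1, b1],  k2 -> [a2, -],  k3 -> [-, b3]
 */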
}
| 1,666 | 32.34 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.RecordReader;
/**
* Additional operations required of a RecordReader to participate in a join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ComposableRecordReader<K extends WritableComparable,
V extends Writable>
extends RecordReader<K,V>, Comparable<ComposableRecordReader<K,?>> {
/**
* Return the position in the collector this class occupies.
*/
int id();
/**
* Return the key this RecordReader would supply on a call to next(K,V)
*/
K key();
/**
* Clone the key at the head of this RecordReader into the object provided.
*/
void key(K key) throws IOException;
/**
* Returns true if the stream is not empty, but provides no guarantee that
* a call to next(K,V) will succeed.
*/
boolean hasNext();
/**
* Skip key-value pairs with keys less than or equal to the key provided.
*/
void skip(K key) throws IOException;
/**
* While key-value pairs from this RecordReader match the given key, register
* them with the JoinCollector provided.
*/
void accept(CompositeRecordReader.JoinCollector jc, K key) throws IOException;
}
| 2,276 | 31.528571 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ArrayListBackedIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* This class provides an implementation of ResetableIterator. The
* implementation uses an {@link java.util.ArrayList} to store elements
* added to it, replaying them as requested.
* Prefer {@link StreamBackedIterator}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ArrayListBackedIterator<X extends Writable> extends
org.apache.hadoop.mapreduce.lib.join.ArrayListBackedIterator<X>
implements ResetableIterator<X> {
public ArrayListBackedIterator() {
super();
}
public ArrayListBackedIterator(ArrayList<X> data) {
super(data);
}
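/* Usage sketch (values are hypothetical; assumes a context that may throw
 * IOException): elements added to the iterator can be consumed and then
 * replayed after a reset, which is how the join framework revisits the values
 * collected for a key.
 *
 *   ResetableIterator<Text> it = new ArrayListBackedIterator<Text>();
 *   it.add(new Text("a"));
 *   it.add(new Text("b"));
 *   it.reset();                    // rewind to the first element
 *   Text val = new Text();
 *   while (it.next(val)) { ... }   // yields "a", then "b"
 *   it.reset();
 *   while (it.next(val)) { ... }   // replays the same two values
 *   it.close();
 */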
}
| 1,624 | 34.326087 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.StreamTokenizer;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Stack;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Very simple shift-reduce parser for join expressions.
*
* This should be sufficient for the user extension permitted now, but ought to
* be replaced with a parser generator if more complex grammars are supported.
* In particular, this "shift-reduce" parser has no states. Each set
* of formals requires a different internal node type, which is responsible for
* interpreting the list of tokens it receives. This is sufficient for the
* current grammar, but it has several annoying properties that might inhibit
* extension. In particular, parentheses are always function calls; an
* algebraic or filter grammar would not only require a node type, but must
* also work around the internals of this parser.
*
* For most other cases, adding classes to the hierarchy, particularly by
* extending JoinRecordReader and MultiFilterRecordReader, is fairly
* straightforward. One need only override the relevant method(s) (usually only
* {@link CompositeRecordReader#combine}) and include a property to map its
* value to an identifier in the parser.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Parser {
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum TType { CIF, IDENT, COMMA, LPAREN, RPAREN, QUOT, NUM, }
/**
* Tagged-union type for tokens from the join expression.
* @see Parser.TType
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class Token {
private TType type;
Token(TType type) {
this.type = type;
}
public TType getType() { return type; }
public Node getNode() throws IOException {
throw new IOException("Expected nodetype");
}
public double getNum() throws IOException {
throw new IOException("Expected numtype");
}
public String getStr() throws IOException {
throw new IOException("Expected strtype");
}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class NumToken extends Token {
private double num;
public NumToken(double num) {
super(TType.NUM);
this.num = num;
}
public double getNum() { return num; }
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class NodeToken extends Token {
private Node node;
NodeToken(Node node) {
super(TType.CIF);
this.node = node;
}
public Node getNode() {
return node;
}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static class StrToken extends Token {
private String str;
public StrToken(TType type, String str) {
super(type);
this.str = str;
}
public String getStr() {
return str;
}
}
/**
* Simple lexer wrapping a StreamTokenizer.
* This encapsulates the creation of tagged-union Tokens and initializes the
* StreamTokenizer.
*/
private static class Lexer {
private StreamTokenizer tok;
Lexer(String s) {
tok = new StreamTokenizer(new CharArrayReader(s.toCharArray()));
tok.quoteChar('"');
tok.parseNumbers();
tok.ordinaryChar(',');
tok.ordinaryChar('(');
tok.ordinaryChar(')');
tok.wordChars('$','$');
tok.wordChars('_','_');
}
Token next() throws IOException {
int type = tok.nextToken();
switch (type) {
case StreamTokenizer.TT_EOF:
case StreamTokenizer.TT_EOL:
return null;
case StreamTokenizer.TT_NUMBER:
return new NumToken(tok.nval);
case StreamTokenizer.TT_WORD:
return new StrToken(TType.IDENT, tok.sval);
case '"':
return new StrToken(TType.QUOT, tok.sval);
default:
switch (type) {
case ',':
return new Token(TType.COMMA);
case '(':
return new Token(TType.LPAREN);
case ')':
return new Token(TType.RPAREN);
default:
throw new IOException("Unexpected: " + type);
}
}
}
}
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract static class Node implements ComposableInputFormat {
/**
* Return the node type registered for the particular identifier.
* By default, this is a CNode for any composite node and a WNode
* for "wrapped" nodes. User nodes will likely be composite
* nodes.
* @see #addIdentifier(java.lang.String, java.lang.Class[], java.lang.Class, java.lang.Class)
* @see CompositeInputFormat#setFormat(org.apache.hadoop.mapred.JobConf)
*/
static Node forIdent(String ident) throws IOException {
try {
if (!nodeCstrMap.containsKey(ident)) {
throw new IOException("No nodetype for " + ident);
}
return nodeCstrMap.get(ident).newInstance(ident);
} catch (IllegalAccessException e) {
throw (IOException)new IOException().initCause(e);
} catch (InstantiationException e) {
throw (IOException)new IOException().initCause(e);
} catch (InvocationTargetException e) {
throw (IOException)new IOException().initCause(e);
}
}
private static final Class<?>[] ncstrSig = { String.class };
private static final
Map<String,Constructor<? extends Node>> nodeCstrMap =
new HashMap<String,Constructor<? extends Node>>();
protected static final
Map<String,Constructor<? extends ComposableRecordReader>> rrCstrMap =
new HashMap<String,Constructor<? extends ComposableRecordReader>>();
/**
* For a given identifier, add a mapping to the nodetype for the parse
* tree and to the ComposableRecordReader to be created, including the
* formals required to invoke the constructor.
* The nodetype and constructor signature should be filled in from the
* child node.
*/
protected static void addIdentifier(String ident, Class<?>[] mcstrSig,
Class<? extends Node> nodetype,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Constructor<? extends Node> ncstr =
nodetype.getDeclaredConstructor(ncstrSig);
ncstr.setAccessible(true);
nodeCstrMap.put(ident, ncstr);
Constructor<? extends ComposableRecordReader> mcstr =
cl.getDeclaredConstructor(mcstrSig);
mcstr.setAccessible(true);
rrCstrMap.put(ident, mcstr);
}
// inst
protected int id = -1;
protected String ident;
protected Class<? extends WritableComparator> cmpcl;
protected Node(String ident) {
this.ident = ident;
}
protected void setID(int id) {
this.id = id;
}
protected void setKeyComparator(Class<? extends WritableComparator> cmpcl) {
this.cmpcl = cmpcl;
}
abstract void parse(List<Token> args, JobConf job) throws IOException;
}
/**
* Nodetype in the parse tree for "wrapped" InputFormats.
*/
static class WNode extends Node {
private static final Class<?>[] cstrSig =
{ Integer.TYPE, RecordReader.class, Class.class };
static void addIdentifier(String ident,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Node.addIdentifier(ident, cstrSig, WNode.class, cl);
}
private String indir;
private InputFormat inf;
public WNode(String ident) {
super(ident);
}
/**
* Let the first actual define the InputFormat and the second define
* the <tt>mapred.input.dir</tt> property.
*/
public void parse(List<Token> ll, JobConf job) throws IOException {
StringBuilder sb = new StringBuilder();
Iterator<Token> i = ll.iterator();
while (i.hasNext()) {
Token t = i.next();
if (TType.COMMA.equals(t.getType())) {
try {
inf = (InputFormat)ReflectionUtils.newInstance(
job.getClassByName(sb.toString()),
job);
} catch (ClassNotFoundException e) {
throw (IOException)new IOException().initCause(e);
} catch (IllegalArgumentException e) {
throw (IOException)new IOException().initCause(e);
}
break;
}
sb.append(t.getStr());
}
if (!i.hasNext()) {
throw new IOException("Parse error");
}
Token t = i.next();
if (!TType.QUOT.equals(t.getType())) {
throw new IOException("Expected quoted string");
}
indir = t.getStr();
// no check for ll.isEmpty() to permit extension
}
private JobConf getConf(JobConf job) {
JobConf conf = new JobConf(job);
FileInputFormat.setInputPaths(conf, indir);
conf.setClassLoader(job.getClassLoader());
return conf;
}
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
return inf.getSplits(getConf(job), numSplits);
}
public ComposableRecordReader getRecordReader(
InputSplit split, JobConf job, Reporter reporter) throws IOException {
try {
if (!rrCstrMap.containsKey(ident)) {
throw new IOException("No RecordReader for " + ident);
}
return rrCstrMap.get(ident).newInstance(id,
inf.getRecordReader(split, getConf(job), reporter), cmpcl);
} catch (IllegalAccessException e) {
throw (IOException)new IOException().initCause(e);
} catch (InstantiationException e) {
throw (IOException)new IOException().initCause(e);
} catch (InvocationTargetException e) {
throw (IOException)new IOException().initCause(e);
}
}
public String toString() {
return ident + "(" + inf.getClass().getName() + ",\"" + indir + "\")";
}
}
/**
* Internal nodetype for "composite" InputFormats.
*/
static class CNode extends Node {
private static final Class<?>[] cstrSig =
{ Integer.TYPE, JobConf.class, Integer.TYPE, Class.class };
static void addIdentifier(String ident,
Class<? extends ComposableRecordReader> cl)
throws NoSuchMethodException {
Node.addIdentifier(ident, cstrSig, CNode.class, cl);
}
// inst
private ArrayList<Node> kids = new ArrayList<Node>();
public CNode(String ident) {
super(ident);
}
public void setKeyComparator(Class<? extends WritableComparator> cmpcl) {
super.setKeyComparator(cmpcl);
for (Node n : kids) {
n.setKeyComparator(cmpcl);
}
}
/**
* Combine InputSplits from child InputFormats into a
* {@link CompositeInputSplit}.
*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
InputSplit[][] splits = new InputSplit[kids.size()][];
for (int i = 0; i < kids.size(); ++i) {
final InputSplit[] tmp = kids.get(i).getSplits(job, numSplits);
if (null == tmp) {
throw new IOException("Error gathering splits from child RReader");
}
if (i > 0 && splits[i-1].length != tmp.length) {
throw new IOException("Inconsistent split cardinality from child " +
i + " (" + splits[i-1].length + "/" + tmp.length + ")");
}
splits[i] = tmp;
}
final int size = splits[0].length;
CompositeInputSplit[] ret = new CompositeInputSplit[size];
for (int i = 0; i < size; ++i) {
ret[i] = new CompositeInputSplit(splits.length);
for (int j = 0; j < splits.length; ++j) {
ret[i].add(splits[j][i]);
}
}
return ret;
}
@SuppressWarnings("unchecked") // child types unknowable
public ComposableRecordReader getRecordReader(
InputSplit split, JobConf job, Reporter reporter) throws IOException {
if (!(split instanceof CompositeInputSplit)) {
throw new IOException("Invalid split type:" +
split.getClass().getName());
}
final CompositeInputSplit spl = (CompositeInputSplit)split;
final int capacity = kids.size();
CompositeRecordReader ret = null;
try {
if (!rrCstrMap.containsKey(ident)) {
throw new IOException("No RecordReader for " + ident);
}
ret = (CompositeRecordReader)
rrCstrMap.get(ident).newInstance(id, job, capacity, cmpcl);
} catch (IllegalAccessException e) {
throw (IOException)new IOException().initCause(e);
} catch (InstantiationException e) {
throw (IOException)new IOException().initCause(e);
} catch (InvocationTargetException e) {
throw (IOException)new IOException().initCause(e);
}
for (int i = 0; i < capacity; ++i) {
ret.add(kids.get(i).getRecordReader(spl.get(i), job, reporter));
}
return (ComposableRecordReader)ret;
}
/**
* Parse a list of comma-separated nodes.
*/
public void parse(List<Token> args, JobConf job) throws IOException {
ListIterator<Token> i = args.listIterator();
while (i.hasNext()) {
Token t = i.next();
t.getNode().setID(i.previousIndex() >> 1);
kids.add(t.getNode());
if (i.hasNext() && !TType.COMMA.equals(i.next().getType())) {
throw new IOException("Expected ','");
}
}
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(ident + "(");
for (Node n : kids) {
sb.append(n.toString() + ",");
}
sb.setCharAt(sb.length() - 1, ')');
return sb.toString();
}
}
private static Token reduce(Stack<Token> st, JobConf job) throws IOException {
LinkedList<Token> args = new LinkedList<Token>();
while (!st.isEmpty() && !TType.LPAREN.equals(st.peek().getType())) {
args.addFirst(st.pop());
}
if (st.isEmpty()) {
throw new IOException("Unmatched ')'");
}
st.pop();
if (st.isEmpty() || !TType.IDENT.equals(st.peek().getType())) {
throw new IOException("Identifier expected");
}
Node n = Node.forIdent(st.pop().getStr());
n.parse(args, job);
return new NodeToken(n);
}
/**
* Given an expression and an optional comparator, build a tree of
* InputFormats using the comparator to sort keys.
*/
static Node parse(String expr, JobConf job) throws IOException {
if (null == expr) {
throw new IOException("Expression is null");
}
Class<? extends WritableComparator> cmpcl =
job.getClass("mapred.join.keycomparator", null, WritableComparator.class);
Lexer lex = new Lexer(expr);
Stack<Token> st = new Stack<Token>();
Token tok;
while ((tok = lex.next()) != null) {
if (TType.RPAREN.equals(tok.getType())) {
st.push(reduce(st, job));
} else {
st.push(tok);
}
}
if (st.size() == 1 && TType.CIF.equals(st.peek().getType())) {
Node ret = st.pop().getNode();
if (cmpcl != null) {
ret.setKeyComparator(cmpcl);
}
return ret;
}
throw new IOException("Missing ')'");
}
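/* Configuration sketch (comparator class is hypothetical): a custom key order
 * can be supplied through the property read above; it is propagated to every
 * node in the tree via setKeyComparator(), so all child readers compare their
 * head keys consistently.
 *
 *   job.setClass("mapred.join.keycomparator",
 *                MyKeyComparator.class, WritableComparator.class);
 */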
}
| 16,835 | 32.404762 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/JoinRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.JobConf;
/**
* Base class for Composite joins returning Tuples of arbitrary Writables.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class JoinRecordReader<K extends WritableComparable>
extends CompositeRecordReader<K,Writable,TupleWritable>
implements ComposableRecordReader<K,TupleWritable> {
public JoinRecordReader(int id, JobConf conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, capacity, cmpcl);
setConf(conf);
}
/**
* Emit the next set of key, value pairs as defined by the child
* RecordReaders and operation associated with this composite RR.
*/
public boolean next(K key, TupleWritable value) throws IOException {
if (jc.flush(value)) {
WritableUtils.cloneInto(key, jc.key());
return true;
}
jc.clear();
K iterkey = createKey();
final PriorityQueue<ComposableRecordReader<K,?>> q = getRecordReaderQueue();
while (!q.isEmpty()) {
fillJoinCollector(iterkey);
jc.reset(iterkey);
if (jc.flush(value)) {
WritableUtils.cloneInto(key, jc.key());
return true;
}
jc.clear();
}
return false;
}
/** {@inheritDoc} */
public TupleWritable createValue() {
return createInternalValue();
}
/**
* Return an iterator wrapping the JoinCollector.
*/
protected ResetableIterator<TupleWritable> getDelegate() {
return new JoinDelegationIterator();
}
/**
* Since the JoinCollector is effecting our operation, we need only
* provide an iterator proxy wrapping its operation.
*/
protected class JoinDelegationIterator
implements ResetableIterator<TupleWritable> {
public boolean hasNext() {
return jc.hasNext();
}
public boolean next(TupleWritable val) throws IOException {
return jc.flush(val);
}
public boolean replay(TupleWritable val) throws IOException {
return jc.replay(val);
}
public void reset() {
jc.reset(jc.key());
}
public void add(TupleWritable item) throws IOException {
throw new UnsupportedOperationException();
}
public void close() throws IOException {
jc.close();
}
public void clear() {
jc.clear();
}
}
}
| 3,510 | 28.504202 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/TupleWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
*
* This is *not* a general-purpose tuple type. In almost all cases, users are
* encouraged to implement their own serializable types, which can perform
* better validation and provide more efficient encodings than this class is
* capable of. TupleWritable relies on the join framework for type safety and
* assumes its instances will rarely be persisted, assumptions that do not
* hold in the general case.
*
* @see org.apache.hadoop.io.Writable
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TupleWritable
extends org.apache.hadoop.mapreduce.lib.join.TupleWritable {
/**
* Create an empty tuple with no allocated storage for writables.
*/
public TupleWritable() {
super();
}
/**
* Initialize tuple with storage; unknown whether any of them contain
* "written" values.
*/
public TupleWritable(Writable[] vals) {
super(vals);
}
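/* Consumption sketch (key/value types and the output scheme are illustrative):
 * a map task run over a CompositeInputFormat receives one TupleWritable per
 * joined key, with one slot per child source. Slots may be empty for outer
 * joins, so guard with has(i):
 *
 *   public void map(Text key, TupleWritable value,
 *       OutputCollector<Text, Text> out, Reporter reporter) throws IOException {
 *     for (int i = 0; i < value.size(); ++i) {
 *       if (value.has(i)) {
 *         out.collect(key, new Text(value.get(i).toString()));
 *       }
 *     }
 *   }
 */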
/**
* Record that the tuple contains an element at the position provided.
*/
void setWritten(int i) {
written.set(i);
}
/**
* Record that the tuple does not contain an element at the position
* provided.
*/
void clearWritten(int i) {
written.clear(i);
}
/**
* Clear any record of which writables have been written to, without
* releasing storage.
*/
void clearWritten() {
written.clear();
}
}
| 2,469 | 29.121951 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.RecordReader;
/**
* Proxy class for a RecordReader participating in the join framework.
* This class keeps track of the "head" key-value pair for the
* provided RecordReader and keeps a store of values matching a key when
* this source is participating in a join.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class WrappedRecordReader<K extends WritableComparable,
U extends Writable>
implements ComposableRecordReader<K,U>, Configurable {
private boolean empty = false;
private RecordReader<K,U> rr;
private int id; // index at which values will be inserted in collector
private K khead; // key at the top of this RR
private U vhead; // value assoc with khead
private WritableComparator cmp;
private Configuration conf;
private ResetableIterator<U> vjoin;
/**
* For a given RecordReader rr, occupy position id in collector.
*/
WrappedRecordReader(int id, RecordReader<K,U> rr,
Class<? extends WritableComparator> cmpcl) throws IOException {
this(id, rr, cmpcl, null);
}
WrappedRecordReader(int id, RecordReader<K,U> rr,
Class<? extends WritableComparator> cmpcl,
Configuration conf) throws IOException {
this.id = id;
this.rr = rr;
this.conf = (conf == null) ? new Configuration() : conf;
khead = rr.createKey();
vhead = rr.createValue();
try {
cmp = (null == cmpcl)
? WritableComparator.get(khead.getClass(), this.conf)
: cmpcl.newInstance();
} catch (InstantiationException e) {
throw (IOException)new IOException().initCause(e);
} catch (IllegalAccessException e) {
throw (IOException)new IOException().initCause(e);
}
vjoin = new StreamBackedIterator<U>();
next();
}
/** {@inheritDoc} */
public int id() {
return id;
}
/**
* Return the key at the head of this RR.
*/
public K key() {
return khead;
}
/**
* Clone the key at the head of this RR into the object supplied.
*/
public void key(K qkey) throws IOException {
WritableUtils.cloneInto(qkey, khead);
}
/**
* Return true if the RR, including the k,v pair stored in this object,
* is not yet exhausted.
*/
public boolean hasNext() {
return !empty;
}
/**
* Skip key-value pairs with keys less than or equal to the key provided.
*/
public void skip(K key) throws IOException {
if (hasNext()) {
while (cmp.compare(khead, key) <= 0 && next());
}
}
/**
* Read the next k,v pair into the head of this object; return true iff
* the RR and this are not yet exhausted.
*/
protected boolean next() throws IOException {
empty = !rr.next(khead, vhead);
return hasNext();
}
/**
* Add an iterator to the collector at the position occupied by this
* RecordReader over the values in this stream paired with the key
* provided (i.e. register a stream of values from this source matching K
* with a collector).
*/
// JoinCollector comes from parent, which has
@SuppressWarnings("unchecked") // no static type for the slot this sits in
public void accept(CompositeRecordReader.JoinCollector i, K key)
throws IOException {
vjoin.clear();
if (0 == cmp.compare(key, khead)) {
do {
vjoin.add(vhead);
} while (next() && 0 == cmp.compare(key, khead));
}
i.add(id, vjoin);
}
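/* Worked trace (hypothetical data): if this reader's head is (k2, v2) and
 * accept(jc, k2) is called, v2 and all immediately following values whose key
 * equals k2 are drained into vjoin and registered with the collector at slot
 * id; if the head key differs from k2, an empty iterator is registered instead.
 */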
/**
* Write key-value pair at the head of this stream to the objects provided;
* get next key-value pair from proxied RR.
*/
public boolean next(K key, U value) throws IOException {
if (hasNext()) {
WritableUtils.cloneInto(key, khead);
WritableUtils.cloneInto(value, vhead);
next();
return true;
}
return false;
}
/**
* Request new key from proxied RR.
*/
public K createKey() {
return rr.createKey();
}
/**
* Request new value from proxied RR.
*/
public U createValue() {
return rr.createValue();
}
/**
* Request progress from proxied RR.
*/
public float getProgress() throws IOException {
return rr.getProgress();
}
/**
* Request position from proxied RR.
*/
public long getPos() throws IOException {
return rr.getPos();
}
/**
* Forward close request to proxied RR.
*/
public void close() throws IOException {
rr.close();
}
/**
* Implement Comparable contract (compare key at head of proxied RR
* with that of another).
*/
public int compareTo(ComposableRecordReader<K,?> other) {
return cmp.compare(key(), other.key());
}
/**
* Return true iff compareTo(other) returns 0.
*/
@SuppressWarnings("unchecked") // Explicit type check prior to cast
public boolean equals(Object other) {
return other instanceof ComposableRecordReader
&& 0 == compareTo((ComposableRecordReader)other);
}
public int hashCode() {
assert false : "hashCode not designed";
return 42;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
}
| 6,443 | 27.017391 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/MultiFilterRecordReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.IOException;
import java.util.PriorityQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
/**
* Base class for Composite join returning values derived from multiple
* sources, but generally not tuples.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class MultiFilterRecordReader<K extends WritableComparable,
V extends Writable>
extends CompositeRecordReader<K,V,V>
implements ComposableRecordReader<K,V> {
private Class<? extends Writable> valueclass;
private TupleWritable ivalue;
public MultiFilterRecordReader(int id, JobConf conf, int capacity,
Class<? extends WritableComparator> cmpcl) throws IOException {
super(id, capacity, cmpcl);
setConf(conf);
}
/**
* For each tuple emitted, return a value (typically one of the values
* in the tuple).
* Modifying the Writables in the tuple is permitted and unlikely to affect
* join behavior in most cases, but it is not recommended. It's safer to
* clone first.
*/
protected abstract V emit(TupleWritable dst) throws IOException;
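/* Sketch of a concrete emit() (hypothetical subclass): forward the value from
 * the lowest-numbered populated source in the tuple.
 *
 *   @SuppressWarnings("unchecked")
 *   protected V emit(TupleWritable dst) throws IOException {
 *     return (V) dst.iterator().next();   // first written value in the tuple
 *   }
 *
 * The OverrideRecordReader shipped with this package instead prefers the
 * rightmost source for each key; this sketch only shows the shape of an
 * implementation.
 */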
/**
* Default implementation offers {@link #emit} every Tuple from the
* collector (the outer join of child RRs).
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
return true;
}
/** {@inheritDoc} */
public boolean next(K key, V value) throws IOException {
if (jc.flush(ivalue)) {
WritableUtils.cloneInto(key, jc.key());
WritableUtils.cloneInto(value, emit(ivalue));
return true;
}
jc.clear();
K iterkey = createKey();
final PriorityQueue<ComposableRecordReader<K,?>> q = getRecordReaderQueue();
while (!q.isEmpty()) {
fillJoinCollector(iterkey);
jc.reset(iterkey);
if (jc.flush(ivalue)) {
WritableUtils.cloneInto(key, jc.key());
WritableUtils.cloneInto(value, emit(ivalue));
return true;
}
jc.clear();
}
return false;
}
/** {@inheritDoc} */
@SuppressWarnings("unchecked") // Explicit check for value class agreement
public V createValue() {
if (null == valueclass) {
final Class<?> cls = kids[0].createValue().getClass();
for (RecordReader<K,? extends V> rr : kids) {
if (!cls.equals(rr.createValue().getClass())) {
throw new ClassCastException("Child value classes fail to agree");
}
}
valueclass = cls.asSubclass(Writable.class);
ivalue = createInternalValue();
}
return (V) ReflectionUtils.newInstance(valueclass, null);
}
/**
* Return an iterator returning a single value from the tuple.
* @see MultiFilterDelegationIterator
*/
protected ResetableIterator<V> getDelegate() {
return new MultiFilterDelegationIterator();
}
/**
* Proxy the JoinCollector, but include callback to emit.
*/
protected class MultiFilterDelegationIterator
implements ResetableIterator<V> {
public boolean hasNext() {
return jc.hasNext();
}
public boolean next(V val) throws IOException {
boolean ret;
if (ret = jc.flush(ivalue)) {
WritableUtils.cloneInto(val, emit(ivalue));
}
return ret;
}
public boolean replay(V val) throws IOException {
WritableUtils.cloneInto(val, emit(ivalue));
return true;
}
public void reset() {
jc.reset(jc.key());
}
public void add(V item) throws IOException {
throw new UnsupportedOperationException();
}
public void close() throws IOException {
jc.close();
}
public void clear() {
jc.clear();
}
}
}
| 4,868 | 29.622642 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ResetableIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/**
* This defines an interface to a stateful Iterator that can replay elements
* added to it directly.
* Note that this does not extend {@link java.util.Iterator}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ResetableIterator<T extends Writable>
extends org.apache.hadoop.mapreduce.lib.join.ResetableIterator<T> {
public static class EMPTY<U extends Writable>
extends org.apache.hadoop.mapreduce.lib.join.ResetableIterator.EMPTY<U>
implements ResetableIterator<U> {
}
}
| 1,531 | 38.282051 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.join;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This InputSplit contains a set of child InputSplits. Any InputSplit inserted
* into this collection must have a public default constructor.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CompositeInputSplit implements InputSplit {
private int fill = 0;
private long totsize = 0L;
private InputSplit[] splits;
public CompositeInputSplit() { }
public CompositeInputSplit(int capacity) {
splits = new InputSplit[capacity];
}
/**
* Add an InputSplit to this collection.
* @throws IOException If capacity was not specified during construction
* or if capacity has been reached.
*/
public void add(InputSplit s) throws IOException {
if (null == splits) {
throw new IOException("Uninitialized InputSplit");
}
if (fill == splits.length) {
throw new IOException("Too many splits");
}
splits[fill++] = s;
totsize += s.getLength();
}
/**
* Get ith child InputSplit.
*/
public InputSplit get(int i) {
return splits[i];
}
/**
* Return the aggregate length of all child InputSplits currently added.
*/
public long getLength() throws IOException {
return totsize;
}
/**
* Get the length of ith child InputSplit.
*/
public long getLength(int i) throws IOException {
return splits[i].getLength();
}
/**
* Collect a set of hosts from all child InputSplits.
*/
public String[] getLocations() throws IOException {
HashSet<String> hosts = new HashSet<String>();
for (InputSplit s : splits) {
String[] hints = s.getLocations();
if (hints != null && hints.length > 0) {
for (String host : hints) {
hosts.add(host);
}
}
}
return hosts.toArray(new String[hosts.size()]);
}
/**
* getLocations from ith InputSplit.
*/
public String[] getLocation(int i) throws IOException {
return splits[i].getLocations();
}
/**
* Write splits in the following format.
* {@code
* <count><class1><class2>...<classn><split1><split2>...<splitn>
* }
*/
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, splits.length);
for (InputSplit s : splits) {
Text.writeString(out, s.getClass().getName());
}
for (InputSplit s : splits) {
s.write(out);
}
}
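/* Worked layout (for a hypothetical composite of two FileSplit children):
 * write() above produces
 *
 *   VInt(2),
 *   "org.apache.hadoop.mapred.FileSplit", "org.apache.hadoop.mapred.FileSplit",
 *   <serialized split 0>, <serialized split 1>
 *
 * and readFields() relies on this ordering to re-instantiate each child by
 * class name before delegating to its readFields().
 */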
/**
* {@inheritDoc}
* @throws IOException If the child InputSplit cannot be read, typically
*                     for failing access checks.
*/
@SuppressWarnings("unchecked") // Generic array assignment
public void readFields(DataInput in) throws IOException {
int card = WritableUtils.readVInt(in);
if (splits == null || splits.length != card) {
splits = new InputSplit[card];
}
Class<? extends InputSplit>[] cls = new Class[card];
try {
for (int i = 0; i < card; ++i) {
cls[i] =
Class.forName(Text.readString(in)).asSubclass(InputSplit.class);
}
for (int i = 0; i < card; ++i) {
splits[i] = ReflectionUtils.newInstance(cls[i], null);
splits[i].readFields(in);
}
} catch (ClassNotFoundException e) {
throw (IOException)new IOException("Failed split init").initCause(e);
}
}
}
| 4,495 | 28.194805 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.IOException;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapRunner;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapreduce.MRJobConfig;
/**
* An adaptor to run a C++ mapper.
*/
class PipesMapRunner<K1 extends WritableComparable, V1 extends Writable,
K2 extends WritableComparable, V2 extends Writable>
extends MapRunner<K1, V1, K2, V2> {
private JobConf job;
/**
* Get the new configuration.
* @param job the job's configuration
*/
public void configure(JobConf job) {
this.job = job;
//disable the auto increment of the counter. For pipes, the number of processed
//records could be less than or equal to the number of records input.
SkipBadRecords.setAutoIncrMapperProcCount(job, false);
}
/**
* Run the map task.
* @param input the set of inputs
* @param output the object to collect the outputs of the map
* @param reporter the object to update with status
*/
@SuppressWarnings("unchecked")
public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
Reporter reporter) throws IOException {
Application<K1, V1, K2, V2> application = null;
try {
RecordReader<FloatWritable, NullWritable> fakeInput =
(!Submitter.getIsJavaRecordReader(job) &&
!Submitter.getIsJavaMapper(job)) ?
(RecordReader<FloatWritable, NullWritable>) input : null;
application = new Application<K1, V1, K2, V2>(job, fakeInput, output,
reporter,
(Class<? extends K2>) job.getOutputKeyClass(),
(Class<? extends V2>) job.getOutputValueClass());
} catch (InterruptedException ie) {
throw new RuntimeException("interrupted", ie);
}
DownwardProtocol<K1, V1> downlink = application.getDownlink();
boolean isJavaInput = Submitter.getIsJavaRecordReader(job);
downlink.runMap(reporter.getInputSplit(),
job.getNumReduceTasks(), isJavaInput);
boolean skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
try {
if (isJavaInput) {
// allocate key & value instances that are re-used for all entries
K1 key = input.createKey();
V1 value = input.createValue();
downlink.setInputTypes(key.getClass().getName(),
value.getClass().getName());
while (input.next(key, value)) {
// map pair to output
downlink.mapItem(key, value);
if(skipping) {
//flush the streams on every record input if running in skip mode
//so that we don't buffer other records surrounding a bad record.
downlink.flush();
}
}
downlink.endOfInput();
}
application.waitForFinish();
} catch (Throwable t) {
application.abort(t);
} finally {
application.cleanup();
}
}
}
| 4,116 | 36.770642 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapreduce.MRJobConfig;
import java.io.IOException;
import java.util.Iterator;
/**
* This class is used to talk to a C++ reduce task.
*/
class PipesReducer<K2 extends WritableComparable, V2 extends Writable,
K3 extends WritableComparable, V3 extends Writable>
implements Reducer<K2, V2, K3, V3> {
private static final Log LOG= LogFactory.getLog(PipesReducer.class.getName());
private JobConf job;
private Application<K2, V2, K3, V3> application = null;
private DownwardProtocol<K2, V2> downlink = null;
private boolean isOk = true;
private boolean skipping = false;
public void configure(JobConf job) {
this.job = job;
//disable the auto increment of the counter. For pipes, the number of processed
//records could be less than or equal to the number of records input.
SkipBadRecords.setAutoIncrReducerProcCount(job, false);
skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false);
}
/**
* Process all of the keys and values. Start up the application if we haven't
* started it yet.
*/
public void reduce(K2 key, Iterator<V2> values,
OutputCollector<K3, V3> output, Reporter reporter
) throws IOException {
isOk = false;
startApplication(output, reporter);
downlink.reduceKey(key);
while (values.hasNext()) {
downlink.reduceValue(values.next());
}
if(skipping) {
//flush the streams on every record input if running in skip mode
//so that we don't buffer other records surrounding a bad record.
downlink.flush();
}
isOk = true;
}
@SuppressWarnings("unchecked")
private void startApplication(OutputCollector<K3, V3> output, Reporter reporter) throws IOException {
if (application == null) {
try {
LOG.info("starting application");
application =
new Application<K2, V2, K3, V3>(
job, null, output, reporter,
(Class<? extends K3>) job.getOutputKeyClass(),
(Class<? extends V3>) job.getOutputValueClass());
downlink = application.getDownlink();
} catch (InterruptedException ie) {
throw new RuntimeException("interrupted", ie);
}
int reduce=0;
downlink.runReduce(reduce, Submitter.getIsJavaRecordWriter(job));
}
}
/**
* Handle the end of the input by closing down the application.
*/
public void close() throws IOException {
// if we haven't started the application, we have nothing to do
if (isOk) {
OutputCollector<K3, V3> nullCollector = new OutputCollector<K3, V3>() {
public void collect(K3 key,
V3 value) throws IOException {
// NULL
}
};
startApplication(nullCollector, Reporter.NULL);
}
try {
if (isOk) {
application.getDownlink().endOfInput();
} else {
// send the abort to the application and let it clean up
application.getDownlink().abort();
}
LOG.info("waiting for finish");
application.waitForFinish();
LOG.info("got done");
} catch (Throwable t) {
application.abort(t);
} finally {
application.cleanup();
}
}
}
| 4,469 | 34.19685 | 103 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.StringTokenizer;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.Parser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.lib.HashPartitioner;
import org.apache.hadoop.mapred.lib.LazyOutputFormat;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
/**
 * The main entry point and job submitter. It may be used either as a
 * command-line tool or as an API to launch Pipes jobs.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Submitter extends Configured implements Tool {
protected static final Log LOG = LogFactory.getLog(Submitter.class);
public static final String PRESERVE_COMMANDFILE =
"mapreduce.pipes.commandfile.preserve";
public static final String EXECUTABLE = "mapreduce.pipes.executable";
public static final String INTERPRETOR =
"mapreduce.pipes.executable.interpretor";
public static final String IS_JAVA_MAP = "mapreduce.pipes.isjavamapper";
public static final String IS_JAVA_RR = "mapreduce.pipes.isjavarecordreader";
public static final String IS_JAVA_RW = "mapreduce.pipes.isjavarecordwriter";
public static final String IS_JAVA_REDUCE = "mapreduce.pipes.isjavareducer";
public static final String PARTITIONER = "mapreduce.pipes.partitioner";
public static final String INPUT_FORMAT = "mapreduce.pipes.inputformat";
public static final String PORT = "mapreduce.pipes.command.port";
public Submitter() {
this(new Configuration());
}
public Submitter(Configuration conf) {
setConf(conf);
}
/**
* Get the URI of the application's executable.
* @param conf
* @return the URI where the application's executable is located
*/
public static String getExecutable(JobConf conf) {
return conf.get(Submitter.EXECUTABLE);
}
/**
* Set the URI for the application's executable. Normally this is a hdfs:
* location.
* @param conf
* @param executable The URI of the application's executable.
*/
public static void setExecutable(JobConf conf, String executable) {
conf.set(Submitter.EXECUTABLE, executable);
}
/**
* Set whether the job is using a Java RecordReader.
* @param conf the configuration to modify
* @param value the new value
*/
public static void setIsJavaRecordReader(JobConf conf, boolean value) {
conf.setBoolean(Submitter.IS_JAVA_RR, value);
}
/**
* Check whether the job is using a Java RecordReader
* @param conf the configuration to check
* @return is it a Java RecordReader?
*/
public static boolean getIsJavaRecordReader(JobConf conf) {
return conf.getBoolean(Submitter.IS_JAVA_RR, false);
}
/**
* Set whether the Mapper is written in Java.
* @param conf the configuration to modify
* @param value the new value
*/
public static void setIsJavaMapper(JobConf conf, boolean value) {
conf.setBoolean(Submitter.IS_JAVA_MAP, value);
}
/**
* Check whether the job is using a Java Mapper.
* @param conf the configuration to check
* @return is it a Java Mapper?
*/
public static boolean getIsJavaMapper(JobConf conf) {
return conf.getBoolean(Submitter.IS_JAVA_MAP, false);
}
/**
* Set whether the Reducer is written in Java.
* @param conf the configuration to modify
* @param value the new value
*/
public static void setIsJavaReducer(JobConf conf, boolean value) {
conf.setBoolean(Submitter.IS_JAVA_REDUCE, value);
}
/**
* Check whether the job is using a Java Reducer.
* @param conf the configuration to check
* @return is it a Java Reducer?
*/
public static boolean getIsJavaReducer(JobConf conf) {
return conf.getBoolean(Submitter.IS_JAVA_REDUCE, false);
}
/**
* Set whether the job will use a Java RecordWriter.
* @param conf the configuration to modify
* @param value the new value to set
*/
public static void setIsJavaRecordWriter(JobConf conf, boolean value) {
conf.setBoolean(Submitter.IS_JAVA_RW, value);
}
/**
* Will the reduce use a Java RecordWriter?
* @param conf the configuration to check
* @return true, if the output of the job will be written by Java
*/
public static boolean getIsJavaRecordWriter(JobConf conf) {
return conf.getBoolean(Submitter.IS_JAVA_RW, false);
}
/**
* Set the configuration, if it doesn't already have a value for the given
* key.
* @param conf the configuration to modify
* @param key the key to set
* @param value the new "default" value to set
*/
private static void setIfUnset(JobConf conf, String key, String value) {
if (conf.get(key) == null) {
conf.set(key, value);
}
}
/**
* Save away the user's original partitioner before we override it.
* @param conf the configuration to modify
* @param cls the user's partitioner class
*/
static void setJavaPartitioner(JobConf conf, Class cls) {
conf.set(Submitter.PARTITIONER, cls.getName());
}
/**
* Get the user's original partitioner.
* @param conf the configuration to look in
* @return the class that the user submitted
*/
static Class<? extends Partitioner> getJavaPartitioner(JobConf conf) {
return conf.getClass(Submitter.PARTITIONER,
HashPartitioner.class,
Partitioner.class);
}
/**
* Does the user want to keep the command file for debugging? If this is
* true, pipes will write a copy of the command data to a file in the
* task directory named "downlink.data", which may be used to run the C++
* program under the debugger. You probably also want to set
* JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
* being deleted.
* To run using the data file, set the environment variable
* "mapreduce.pipes.commandfile" to point to the file.
* @param conf the configuration to check
* @return will the framework save the command file?
*/
public static boolean getKeepCommandFile(JobConf conf) {
return conf.getBoolean(Submitter.PRESERVE_COMMANDFILE, false);
}
/**
* Set whether to keep the command file for debugging
* @param conf the configuration to modify
* @param keep the new value
*/
public static void setKeepCommandFile(JobConf conf, boolean keep) {
conf.setBoolean(Submitter.PRESERVE_COMMANDFILE, keep);
}
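  /**
   * A minimal sketch (added for this write-up, not part of the original API) of
   * the debugging setup described above: keep the serialized downlink commands
   * and keep the failed task's directory so the "downlink.data" file survives
   * for running the C++ program under a debugger. The helper name is illustrative.
   */
  private static void exampleKeepCommandFileForDebugging(JobConf conf) {
    setKeepCommandFile(conf, true);
    conf.setKeepFailedTaskFiles(true);
  }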
/**
* Submit a job to the map/reduce cluster. All of the necessary modifications
* to the job to run under pipes are made to the configuration.
* @param conf the job to submit to the cluster (MODIFIED)
* @throws IOException
* @deprecated Use {@link Submitter#runJob(JobConf)}
*/
@Deprecated
public static RunningJob submitJob(JobConf conf) throws IOException {
return runJob(conf);
}
/**
* Submit a job to the map/reduce cluster. All of the necessary modifications
* to the job to run under pipes are made to the configuration.
* @param conf the job to submit to the cluster (MODIFIED)
* @throws IOException
*/
public static RunningJob runJob(JobConf conf) throws IOException {
setupPipesJob(conf);
return JobClient.runJob(conf);
}
/**
* Submit a job to the Map-Reduce framework.
* This returns a handle to the {@link RunningJob} which can be used to track
* the running-job.
*
* @param conf the job configuration.
* @return a handle to the {@link RunningJob} which can be used to track the
* running-job.
* @throws IOException
*/
public static RunningJob jobSubmit(JobConf conf) throws IOException {
setupPipesJob(conf);
return new JobClient(conf).submitJob(conf);
}
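  /**
   * A minimal sketch (added for this write-up, not part of the original API) of
   * an API-based submission using the setters above. The paths, the executable
   * URI and the helper name are assumptions for illustration only.
   */
  private static RunningJob exampleApiSubmission(Configuration base) throws IOException {
    JobConf conf = new JobConf(base);
    setExecutable(conf, "hdfs:///apps/pipes/wordcount-pipes#wordcount-pipes");
    setIsJavaRecordReader(conf, true);   // read the input with a Java RecordReader
    setIsJavaRecordWriter(conf, true);   // write the output with a Java RecordWriter
    FileInputFormat.setInputPaths(conf, new Path("/user/example/in"));
    FileOutputFormat.setOutputPath(conf, new Path("/user/example/out"));
    return jobSubmit(conf);              // setupPipesJob() below fills in the rest
  }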
private static void setupPipesJob(JobConf conf) throws IOException {
// default map output types to Text
if (!getIsJavaMapper(conf)) {
conf.setMapRunnerClass(PipesMapRunner.class);
// Save the user's partitioner and hook in our's.
setJavaPartitioner(conf, conf.getPartitionerClass());
conf.setPartitionerClass(PipesPartitioner.class);
}
if (!getIsJavaReducer(conf)) {
conf.setReducerClass(PipesReducer.class);
if (!getIsJavaRecordWriter(conf)) {
conf.setOutputFormat(NullOutputFormat.class);
}
}
String textClassname = Text.class.getName();
setIfUnset(conf, MRJobConfig.MAP_OUTPUT_KEY_CLASS, textClassname);
setIfUnset(conf, MRJobConfig.MAP_OUTPUT_VALUE_CLASS, textClassname);
setIfUnset(conf, MRJobConfig.OUTPUT_KEY_CLASS, textClassname);
setIfUnset(conf, MRJobConfig.OUTPUT_VALUE_CLASS, textClassname);
// Use PipesNonJavaInputFormat if necessary to handle progress reporting
// from C++ RecordReaders ...
if (!getIsJavaRecordReader(conf) && !getIsJavaMapper(conf)) {
conf.setClass(Submitter.INPUT_FORMAT,
conf.getInputFormat().getClass(), InputFormat.class);
conf.setInputFormat(PipesNonJavaInputFormat.class);
}
String exec = getExecutable(conf);
if (exec == null) {
throw new IllegalArgumentException("No application program defined.");
}
// add default debug script only when executable is expressed as
// <path>#<executable>
if (exec.contains("#")) {
// set default gdb commands for map and reduce task
String defScript = "$HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-script";
setIfUnset(conf, MRJobConfig.MAP_DEBUG_SCRIPT,defScript);
setIfUnset(conf, MRJobConfig.REDUCE_DEBUG_SCRIPT,defScript);
}
URI[] fileCache = DistributedCache.getCacheFiles(conf);
if (fileCache == null) {
fileCache = new URI[1];
} else {
URI[] tmp = new URI[fileCache.length+1];
System.arraycopy(fileCache, 0, tmp, 1, fileCache.length);
fileCache = tmp;
}
try {
fileCache[0] = new URI(exec);
} catch (URISyntaxException e) {
      IOException ie = new IOException("Problem parsing executable URI " + exec);
ie.initCause(e);
throw ie;
}
DistributedCache.setCacheFiles(fileCache, conf);
}
/**
* A command line parser for the CLI-based Pipes job submitter.
*/
static class CommandLineParser {
private Options options = new Options();
void addOption(String longName, boolean required, String description,
String paramName) {
Option option = OptionBuilder.withArgName(paramName).hasArgs(1).withDescription(description).isRequired(required).create(longName);
options.addOption(option);
}
void addArgument(String name, boolean required, String description) {
Option option = OptionBuilder.withArgName(name).hasArgs(1).withDescription(description).isRequired(required).create();
options.addOption(option);
}
Parser createParser() {
Parser result = new BasicParser();
return result;
}
void printUsage() {
// The CLI package should do this for us, but I can't figure out how
// to make it print something reasonable.
System.out.println("Usage: pipes ");
System.out.println(" [-input <path>] // Input directory");
System.out.println(" [-output <path>] // Output directory");
System.out.println(" [-jar <jar file> // jar filename");
System.out.println(" [-inputformat <class>] // InputFormat class");
System.out.println(" [-map <class>] // Java Map class");
System.out.println(" [-partitioner <class>] // Java Partitioner");
System.out.println(" [-reduce <class>] // Java Reduce class");
System.out.println(" [-writer <class>] // Java RecordWriter");
System.out.println(" [-program <executable>] // executable URI");
System.out.println(" [-reduces <num>] // number of reduces");
System.out.println(" [-lazyOutput <true/false>] // createOutputLazily");
System.out.println();
GenericOptionsParser.printGenericCommandUsage(System.out);
}
}
private static <InterfaceType>
Class<? extends InterfaceType> getClass(CommandLine cl, String key,
JobConf conf,
Class<InterfaceType> cls
) throws ClassNotFoundException {
return conf.getClassByName(cl.getOptionValue(key)).asSubclass(cls);
}
@Override
public int run(String[] args) throws Exception {
CommandLineParser cli = new CommandLineParser();
if (args.length == 0) {
cli.printUsage();
return 1;
}
cli.addOption("input", false, "input path to the maps", "path");
cli.addOption("output", false, "output path from the reduces", "path");
cli.addOption("jar", false, "job jar file", "path");
cli.addOption("inputformat", false, "java classname of InputFormat",
"class");
//cli.addArgument("javareader", false, "is the RecordReader in Java");
cli.addOption("map", false, "java classname of Mapper", "class");
cli.addOption("partitioner", false, "java classname of Partitioner",
"class");
cli.addOption("reduce", false, "java classname of Reducer", "class");
cli.addOption("writer", false, "java classname of OutputFormat", "class");
cli.addOption("program", false, "URI to application executable", "class");
cli.addOption("reduces", false, "number of reduces", "num");
cli.addOption("jobconf", false,
"\"n1=v1,n2=v2,..\" (Deprecated) Optional. Add or override a JobConf property.",
"key=val");
cli.addOption("lazyOutput", false, "Optional. Create output lazily",
"boolean");
Parser parser = cli.createParser();
try {
GenericOptionsParser genericParser = new GenericOptionsParser(getConf(), args);
CommandLine results = parser.parse(cli.options, genericParser.getRemainingArgs());
JobConf job = new JobConf(getConf());
if (results.hasOption("input")) {
FileInputFormat.setInputPaths(job, results.getOptionValue("input"));
}
if (results.hasOption("output")) {
FileOutputFormat.setOutputPath(job,
new Path(results.getOptionValue("output")));
}
if (results.hasOption("jar")) {
job.setJar(results.getOptionValue("jar"));
}
if (results.hasOption("inputformat")) {
setIsJavaRecordReader(job, true);
job.setInputFormat(getClass(results, "inputformat", job,
InputFormat.class));
}
if (results.hasOption("javareader")) {
setIsJavaRecordReader(job, true);
}
if (results.hasOption("map")) {
setIsJavaMapper(job, true);
job.setMapperClass(getClass(results, "map", job, Mapper.class));
}
if (results.hasOption("partitioner")) {
job.setPartitionerClass(getClass(results, "partitioner", job,
Partitioner.class));
}
if (results.hasOption("reduce")) {
setIsJavaReducer(job, true);
job.setReducerClass(getClass(results, "reduce", job, Reducer.class));
}
if (results.hasOption("reduces")) {
job.setNumReduceTasks(Integer.parseInt(
results.getOptionValue("reduces")));
}
if (results.hasOption("writer")) {
setIsJavaRecordWriter(job, true);
job.setOutputFormat(getClass(results, "writer", job,
OutputFormat.class));
}
if (results.hasOption("lazyOutput")) {
if (Boolean.parseBoolean(results.getOptionValue("lazyOutput"))) {
LazyOutputFormat.setOutputFormatClass(job,
job.getOutputFormat().getClass());
}
}
if (results.hasOption("program")) {
setExecutable(job, results.getOptionValue("program"));
}
if (results.hasOption("jobconf")) {
LOG.warn("-jobconf option is deprecated, please use -D instead.");
String options = results.getOptionValue("jobconf");
StringTokenizer tokenizer = new StringTokenizer(options, ",");
while (tokenizer.hasMoreTokens()) {
String keyVal = tokenizer.nextToken().trim();
String[] keyValSplit = keyVal.split("=");
job.set(keyValSplit[0], keyValSplit[1]);
}
}
// if they gave us a jar file, include it into the class path
String jarFile = job.getJar();
if (jarFile != null) {
final URL[] urls = new URL[]{ FileSystem.getLocal(job).
pathToFile(new Path(jarFile)).toURL()};
//FindBugs complains that creating a URLClassLoader should be
//in a doPrivileged() block.
ClassLoader loader =
AccessController.doPrivileged(
new PrivilegedAction<ClassLoader>() {
public ClassLoader run() {
return new URLClassLoader(urls);
}
}
);
job.setClassLoader(loader);
}
runJob(job);
return 0;
} catch (ParseException pe) {
LOG.info("Error : " + pe);
cli.printUsage();
return 1;
}
}
/**
* Submit a pipes job based on the command line arguments.
* @param args
*/
public static void main(String[] args) throws Exception {
int exitCode = new Submitter().run(args);
ExitUtil.terminate(exitCode);
}
}
| 19,628 | 36.531549 | 137 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * Dummy input format used when a non-Java {@link RecordReader} is used by
 * the Pipes application.
 *
 * The only useful thing this does is set up the Map-Reduce job to get the
 * {@link PipesDummyRecordReader}; everything else is left to the 'actual'
 * InputFormat specified by the user, which is given by
 * <i>mapreduce.pipes.inputformat</i>.
*/
class PipesNonJavaInputFormat
implements InputFormat<FloatWritable, NullWritable> {
public RecordReader<FloatWritable, NullWritable> getRecordReader(
InputSplit genericSplit, JobConf job, Reporter reporter)
throws IOException {
return new PipesDummyRecordReader(job, genericSplit);
}
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
// Delegate the generation of input splits to the 'original' InputFormat
return ReflectionUtils.newInstance(
job.getClass(Submitter.INPUT_FORMAT,
TextInputFormat.class,
InputFormat.class), job).getSplits(job, numSplits);
}
/**
* A dummy {@link org.apache.hadoop.mapred.RecordReader} to help track the
* progress of Hadoop Pipes' applications when they are using a non-Java
* <code>RecordReader</code>.
*
* The <code>PipesDummyRecordReader</code> is informed of the 'progress' of
* the task by the {@link OutputHandler#progress(float)} which calls the
* {@link #next(FloatWritable, NullWritable)} with the progress as the
* <code>key</code>.
*/
static class PipesDummyRecordReader implements RecordReader<FloatWritable, NullWritable> {
float progress = 0.0f;
public PipesDummyRecordReader(Configuration job, InputSplit split)
throws IOException{
}
public FloatWritable createKey() {
return null;
}
public NullWritable createValue() {
return null;
}
public synchronized void close() throws IOException {}
public synchronized long getPos() throws IOException {
return 0;
}
public float getProgress() {
return progress;
}
public synchronized boolean next(FloatWritable key, NullWritable value)
throws IOException {
progress = key.get();
return true;
}
}
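  /**
   * A tiny illustration (added for this write-up, not part of the original
   * class) of the progress handshake described above: the caller reports
   * progress by passing it as the "key" of next(), and the dummy reader simply
   * remembers it. The 0.5f value is arbitrary.
   */
  private static float exampleProgressHandshake() throws IOException {
    PipesDummyRecordReader reader =
      new PipesDummyRecordReader(new Configuration(), null);
    reader.next(new FloatWritable(0.5f), NullWritable.get());
    return reader.getProgress();   // 0.5f
  }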
}
| 3,536 | 33.676471 | 92 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/PipesPartitioner.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * This partitioner either uses a partition that was set manually for the
 * current record or falls back to the Java partitioner that was set by the user.
*/
class PipesPartitioner<K extends WritableComparable,
V extends Writable>
implements Partitioner<K, V> {
private static final ThreadLocal<Integer> CACHE = new ThreadLocal<Integer>();
private Partitioner<K, V> part = null;
@SuppressWarnings("unchecked")
public void configure(JobConf conf) {
part =
ReflectionUtils.newInstance(Submitter.getJavaPartitioner(conf), conf);
}
/**
* Set the next key to have the given partition.
* @param newValue the next partition value
*/
static void setNextPartition(int newValue) {
CACHE.set(newValue);
}
/**
* If a partition result was set manually, return it. Otherwise, we call
* the Java partitioner.
* @param key the key to partition
* @param value the value to partition
* @param numPartitions the number of reduces
*/
public int getPartition(K key, V value,
int numPartitions) {
Integer result = CACHE.get();
if (result == null) {
return part.getPartition(key, value, numPartitions);
} else {
return result;
}
}
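  /**
   * An illustrative sketch (added for this write-up, not part of the original
   * class): once a partition has been pinned with setNextPartition(), this
   * partitioner returns it for the current thread regardless of the key,
   * bypassing the Java partitioner. The values 3 and 10 are arbitrary.
   */
  static <K extends WritableComparable, V extends Writable>
      int examplePinnedPartition(PipesPartitioner<K, V> partitioner, K key, V value) {
    setNextPartition(3);
    return partitioner.getPartition(key, value, 10);   // always 3, not a hash of key
  }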
}
| 2,326 | 32.242857 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/UpwardProtocol.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
/**
* The interface for the messages that can come up from the child. All of these
* calls are asynchronous and return before the message has been processed.
*/
interface UpwardProtocol<K extends WritableComparable, V extends Writable> {
/**
* Output a record from the child.
* @param key the record's key
* @param value the record's value
* @throws IOException
*/
void output(K key, V value) throws IOException;
/**
* Map functions where the application has defined a partition function
* output records along with their partition.
* @param reduce the reduce to send this record to
* @param key the record's key
* @param value the record's value
* @throws IOException
*/
void partitionedOutput(int reduce, K key,
V value) throws IOException;
/**
* Update the task's status message
* @param msg the string to display to the user
* @throws IOException
*/
void status(String msg) throws IOException;
/**
* Report making progress (and the current progress)
* @param progress the current progress (0.0 to 1.0)
* @throws IOException
*/
void progress(float progress) throws IOException;
/**
* Report that the application has finished processing all inputs
* successfully.
* @throws IOException
*/
void done() throws IOException;
/**
   * Report that the application, or more likely the communication, failed.
* @param e
*/
void failed(Throwable e);
  /**
   * Register a counter with the given id and group/name.
   * @param id the id to associate with the counter
   * @param group counter group
   * @param name counter name
   * @throws IOException
   */
void registerCounter(int id, String group, String name) throws IOException;
/**
* Increment the value of a registered counter.
* @param id counter id of the registered counter
* @param amount increment for the counter value
* @throws IOException
*/
void incrementCounter(int id, long amount) throws IOException;
/**
* Handles authentication response from client.
* It must notify the threads waiting for authentication response.
* @param digest
* @return true if authentication is successful
* @throws IOException
*/
boolean authenticate(String digest) throws IOException;
}
| 3,231 | 30.686275 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.File;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
/**
* This class is responsible for launching and communicating with the child
* process.
*/
class Application<K1 extends WritableComparable, V1 extends Writable,
K2 extends WritableComparable, V2 extends Writable> {
private static final Log LOG = LogFactory.getLog(Application.class.getName());
private ServerSocket serverSocket;
private Process process;
private Socket clientSocket;
private OutputHandler<K2, V2> handler;
private DownwardProtocol<K1, V1> downlink;
static final boolean WINDOWS
= System.getProperty("os.name").startsWith("Windows");
/**
* Start the child process to handle the task for us.
* @param conf the task's configuration
* @param recordReader the fake record reader to update progress with
* @param output the collector to send output to
* @param reporter the reporter for the task
* @param outputKeyClass the class of the output keys
* @param outputValueClass the class of the output values
* @throws IOException
* @throws InterruptedException
*/
Application(JobConf conf,
RecordReader<FloatWritable, NullWritable> recordReader,
OutputCollector<K2,V2> output, Reporter reporter,
Class<? extends K2> outputKeyClass,
Class<? extends V2> outputValueClass
) throws IOException, InterruptedException {
serverSocket = new ServerSocket(0);
Map<String, String> env = new HashMap<String,String>();
// add TMPDIR environment variable with the value of java.io.tmpdir
env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
env.put(Submitter.PORT,
Integer.toString(serverSocket.getLocalPort()));
//Add token to the environment if security is enabled
Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
.getCredentials());
    // This password is used as a shared secret key between this application
    // and the child pipes process.
byte[] password = jobToken.getPassword();
String localPasswordFile = new File(".") + Path.SEPARATOR
+ "jobTokenPassword";
writePasswordToLocalFile(localPasswordFile, password, conf);
env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
List<String> cmd = new ArrayList<String>();
String interpretor = conf.get(Submitter.INTERPRETOR);
if (interpretor != null) {
cmd.add(interpretor);
}
String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
if (!FileUtil.canExecute(new File(executable))) {
// LinuxTaskController sets +x permissions on all distcache files already.
// In case of DefaultTaskController, set permissions here.
FileUtil.chmod(executable, "u+x");
}
cmd.add(executable);
    // wrap the command in a stdout/stderr capture
    // We are starting a map/reduce task of the pipes job; this is not a
    // cleanup attempt.
TaskAttemptID taskid =
TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
long logLength = TaskLog.getTaskLogLength(conf);
cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
false);
process = runClient(cmd, env);
clientSocket = serverSocket.accept();
String challenge = getSecurityChallenge();
String digestToSend = createDigest(password, challenge);
String digestExpected = createDigest(password, digestToSend);
handler = new OutputHandler<K2, V2>(output, reporter, recordReader,
digestExpected);
K2 outputKey = (K2)
ReflectionUtils.newInstance(outputKeyClass, conf);
V2 outputValue = (V2)
ReflectionUtils.newInstance(outputValueClass, conf);
downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler,
outputKey, outputValue, conf);
downlink.authenticate(digestToSend, challenge);
waitForAuthentication();
LOG.debug("Authentication succeeded");
downlink.start();
downlink.setJobConf(conf);
}
private String getSecurityChallenge() {
Random rand = new Random(System.currentTimeMillis());
//Use 4 random integers so as to have 16 random bytes.
StringBuilder strBuilder = new StringBuilder();
strBuilder.append(rand.nextInt(0x7fffffff));
strBuilder.append(rand.nextInt(0x7fffffff));
strBuilder.append(rand.nextInt(0x7fffffff));
strBuilder.append(rand.nextInt(0x7fffffff));
return strBuilder.toString();
}
private void writePasswordToLocalFile(String localPasswordFile,
byte[] password, JobConf conf) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
Path localPath = new Path(localPasswordFile);
FSDataOutputStream out = FileSystem.create(localFs, localPath,
new FsPermission("400"));
out.write(password);
out.close();
}
/**
* Get the downward protocol object that can send commands down to the
* application.
* @return the downlink proxy
*/
DownwardProtocol<K1, V1> getDownlink() {
return downlink;
}
/**
* Wait for authentication response.
* @throws IOException
* @throws InterruptedException
*/
void waitForAuthentication() throws IOException,
InterruptedException {
downlink.flush();
LOG.debug("Waiting for authentication response");
handler.waitForAuthentication();
}
/**
* Wait for the application to finish
* @return did the application finish correctly?
* @throws Throwable
*/
boolean waitForFinish() throws Throwable {
downlink.flush();
return handler.waitForFinish();
}
/**
* Abort the application and wait for it to finish.
* @param t the exception that signalled the problem
* @throws IOException A wrapper around the exception that was passed in
*/
void abort(Throwable t) throws IOException {
LOG.info("Aborting because of " + StringUtils.stringifyException(t));
try {
downlink.abort();
downlink.flush();
} catch (IOException e) {
// IGNORE cleanup problems
}
try {
handler.waitForFinish();
} catch (Throwable ignored) {
process.destroy();
}
IOException wrapper = new IOException("pipe child exception");
wrapper.initCause(t);
throw wrapper;
}
/**
   * Clean up the child process and socket.
* @throws IOException
*/
void cleanup() throws IOException {
serverSocket.close();
try {
downlink.close();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
/**
* Run a given command in a subprocess, including threads to copy its stdout
* and stderr to our stdout and stderr.
* @param command the command and its arguments
* @param env the environment to run the process in
* @return a handle on the process
* @throws IOException
*/
static Process runClient(List<String> command,
Map<String, String> env) throws IOException {
ProcessBuilder builder = new ProcessBuilder(command);
if (env != null) {
builder.environment().putAll(env);
}
Process result = builder.start();
return result;
}
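  /**
   * An illustrative sketch (added for this write-up, not part of the original
   * class) of how the constructor uses runClient(): build a command line,
   * inject the environment (here only the port variable), and wait for the
   * child. The no-op "true" command is an assumption and is POSIX-only.
   */
  static int exampleRunClient() throws IOException, InterruptedException {
    List<String> cmd = new ArrayList<String>();
    cmd.add("true");                           // stand-in for the pipes executable
    Map<String, String> env = new HashMap<String, String>();
    env.put(Submitter.PORT, "0");              // a real child would connect back on this port
    return runClient(cmd, env).waitFor();      // 0 on success
  }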
public static String createDigest(byte[] password, String data)
throws IOException {
SecretKey key = JobTokenSecretManager.createSecretKey(password);
return SecureShuffleUtils.hashFromString(data, key);
}
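  /**
   * An illustrative sketch (added for this write-up, not part of the original
   * class) of the challenge/response set up in the constructor: both sides
   * hold the job token password, the child answers with
   * digest(password, challenge), and the parent expects
   * digest(password, thatAnswer) back on the uplink.
   */
  static boolean exampleHandshake(byte[] password, String challenge) throws IOException {
    String digestToSend = createDigest(password, challenge);      // sent down to the child
    String digestExpected = createDigest(password, digestToSend); // what the parent will accept
    String childReply = createDigest(password, digestToSend);     // a correct child's reply
    return digestExpected.equals(childReply);                     // true
  }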
}
| 9,913 | 35.855019 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/OutputHandler.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
/**
* Handles the upward (C++ to Java) messages from the application.
*/
class OutputHandler<K extends WritableComparable,
V extends Writable>
implements UpwardProtocol<K, V> {
private Reporter reporter;
private OutputCollector<K, V> collector;
private float progressValue = 0.0f;
private boolean done = false;
private Throwable exception = null;
RecordReader<FloatWritable,NullWritable> recordReader = null;
private Map<Integer, Counters.Counter> registeredCounters =
new HashMap<Integer, Counters.Counter>();
private String expectedDigest = null;
private boolean digestReceived = false;
  /**
   * Create a handler that will handle any records output from the application.
   * @param collector the "real" collector that takes the output
   * @param reporter the reporter for reporting progress
   * @param recordReader the dummy record reader used to relay progress, or null
   * @param expectedDigest the digest expected from the child during authentication
   */
public OutputHandler(OutputCollector<K, V> collector, Reporter reporter,
RecordReader<FloatWritable,NullWritable> recordReader,
String expectedDigest) {
this.reporter = reporter;
this.collector = collector;
this.recordReader = recordReader;
this.expectedDigest = expectedDigest;
}
/**
* The task output a normal record.
*/
public void output(K key, V value) throws IOException {
collector.collect(key, value);
}
/**
* The task output a record with a partition number attached.
*/
public void partitionedOutput(int reduce, K key,
V value) throws IOException {
PipesPartitioner.setNextPartition(reduce);
collector.collect(key, value);
}
/**
* Update the status message for the task.
*/
public void status(String msg) {
reporter.setStatus(msg);
}
private FloatWritable progressKey = new FloatWritable(0.0f);
private NullWritable nullValue = NullWritable.get();
/**
* Update the amount done and call progress on the reporter.
*/
public void progress(float progress) throws IOException {
progressValue = progress;
reporter.progress();
if (recordReader != null) {
progressKey.set(progress);
recordReader.next(progressKey, nullValue);
}
}
/**
* The task finished successfully.
*/
public void done() throws IOException {
synchronized (this) {
done = true;
notify();
}
}
/**
* Get the current amount done.
* @return a float between 0.0 and 1.0
*/
public float getProgress() {
return progressValue;
}
/**
* The task failed with an exception.
*/
public void failed(Throwable e) {
synchronized (this) {
exception = e;
notify();
}
}
/**
* Wait for the task to finish or abort.
* @return did the task finish correctly?
* @throws Throwable
*/
public synchronized boolean waitForFinish() throws Throwable {
while (!done && exception == null) {
wait();
}
if (exception != null) {
throw exception;
}
return done;
}
public void registerCounter(int id, String group, String name) throws IOException {
Counters.Counter counter = reporter.getCounter(group, name);
registeredCounters.put(id, counter);
}
public void incrementCounter(int id, long amount) throws IOException {
    Counters.Counter counter = registeredCounters.get(id);
    if (counter != null) {
      counter.increment(amount);
    } else {
      throw new IOException("Invalid counter with id: " + id);
    }
}
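  /**
   * An illustrative sketch (added for this write-up, not part of the original
   * class) of the two-step counter protocol handled above: the child first
   * registers an id for a group/name pair and then only ever sends
   * (id, amount) increments. The group and counter names are illustrative.
   */
  static void exampleCounterFlow(UpwardProtocol<?, ?> uplink) throws IOException {
    uplink.registerCounter(0, "ExampleGroup", "RECORDS_SEEN");
    uplink.incrementCounter(0, 1L);
  }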
public synchronized boolean authenticate(String digest) throws IOException {
boolean success = true;
if (!expectedDigest.equals(digest)) {
      exception = new IOException("Authentication Failed: Expected digest="
          + expectedDigest + ", received=" + digest);
success = false;
}
digestReceived = true;
notify();
return success;
}
/**
* This is called by Application and blocks the thread until
* authentication response is received.
* @throws IOException
* @throws InterruptedException
*/
synchronized void waitForAuthentication()
throws IOException, InterruptedException {
while (digestReceived == false && exception == null) {
wait();
}
if (exception != null) {
throw new IOException(exception.getMessage());
}
}
}
| 5,593 | 28.442105 | 85 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/DownwardProtocol.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
/**
* The abstract description of the downward (from Java to C++) Pipes protocol.
* All of these calls are asynchronous and return before the message has been
* processed.
*/
interface DownwardProtocol<K extends WritableComparable, V extends Writable> {
/**
* request authentication
* @throws IOException
*/
void authenticate(String digest, String challenge) throws IOException;
/**
* Start communication
* @throws IOException
*/
void start() throws IOException;
/**
* Set the JobConf for the task.
* @param conf
* @throws IOException
*/
void setJobConf(JobConf conf) throws IOException;
/**
* Set the input types for Maps.
* @param keyType the name of the key's type
* @param valueType the name of the value's type
* @throws IOException
*/
void setInputTypes(String keyType, String valueType) throws IOException;
/**
* Run a map task in the child.
* @param split The input split for this map.
* @param numReduces The number of reduces for this job.
* @param pipedInput Is the input coming from Java?
* @throws IOException
*/
void runMap(InputSplit split, int numReduces,
boolean pipedInput) throws IOException;
/**
   * For maps with pipedInput, the key/value pairs are sent via this message.
* @param key The record's key
* @param value The record's value
* @throws IOException
*/
void mapItem(K key, V value) throws IOException;
/**
* Run a reduce task in the child
* @param reduce the index of the reduce (0 .. numReduces - 1)
* @param pipedOutput is the output being sent to Java?
* @throws IOException
*/
void runReduce(int reduce, boolean pipedOutput) throws IOException;
/**
* The reduce should be given a new key
* @param key the new key
* @throws IOException
*/
void reduceKey(K key) throws IOException;
/**
* The reduce should be given a new value
* @param value the new value
* @throws IOException
*/
void reduceValue(V value) throws IOException;
/**
   * The task has no more input coming, but it should finish processing its
   * input.
* @throws IOException
*/
void endOfInput() throws IOException;
/**
* The task should stop as soon as possible, because something has gone wrong.
* @throws IOException
*/
void abort() throws IOException;
/**
* Flush the data through any buffers.
*/
void flush() throws IOException;
/**
* Close the connection.
*/
void close() throws IOException, InterruptedException;
}
| 3,622 | 28.217742 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.pipes;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.StringUtils;
/**
* This protocol is a binary implementation of the Pipes protocol.
*/
class BinaryProtocol<K1 extends WritableComparable, V1 extends Writable,
K2 extends WritableComparable, V2 extends Writable>
implements DownwardProtocol<K1, V1> {
public static final int CURRENT_PROTOCOL_VERSION = 0;
/**
* The buffer size for the command socket
*/
private static final int BUFFER_SIZE = 128*1024;
private DataOutputStream stream;
private DataOutputBuffer buffer = new DataOutputBuffer();
private static final Log LOG =
LogFactory.getLog(BinaryProtocol.class.getName());
private UplinkReaderThread uplink;
/**
* The integer codes to represent the different messages. These must match
* the C++ codes or massive confusion will result.
*/
private static enum MessageType { START(0),
SET_JOB_CONF(1),
SET_INPUT_TYPES(2),
RUN_MAP(3),
MAP_ITEM(4),
RUN_REDUCE(5),
REDUCE_KEY(6),
REDUCE_VALUE(7),
CLOSE(8),
ABORT(9),
AUTHENTICATION_REQ(10),
OUTPUT(50),
PARTITIONED_OUTPUT(51),
STATUS(52),
PROGRESS(53),
DONE(54),
REGISTER_COUNTER(55),
INCREMENT_COUNTER(56),
AUTHENTICATION_RESP(57);
final int code;
MessageType(int code) {
this.code = code;
}
}
private static class UplinkReaderThread<K2 extends WritableComparable,
V2 extends Writable>
extends Thread {
private DataInputStream inStream;
private UpwardProtocol<K2, V2> handler;
private K2 key;
private V2 value;
private boolean authPending = true;
public UplinkReaderThread(InputStream stream,
UpwardProtocol<K2, V2> handler,
K2 key, V2 value) throws IOException{
inStream = new DataInputStream(new BufferedInputStream(stream,
BUFFER_SIZE));
this.handler = handler;
this.key = key;
this.value = value;
}
public void closeConnection() throws IOException {
inStream.close();
}
public void run() {
while (true) {
try {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException();
}
int cmd = WritableUtils.readVInt(inStream);
LOG.debug("Handling uplink command " + cmd);
if (cmd == MessageType.AUTHENTICATION_RESP.code) {
String digest = Text.readString(inStream);
authPending = !handler.authenticate(digest);
} else if (authPending) {
LOG.warn("Message " + cmd + " received before authentication is "
+ "complete. Ignoring");
continue;
} else if (cmd == MessageType.OUTPUT.code) {
readObject(key);
readObject(value);
handler.output(key, value);
} else if (cmd == MessageType.PARTITIONED_OUTPUT.code) {
int part = WritableUtils.readVInt(inStream);
readObject(key);
readObject(value);
handler.partitionedOutput(part, key, value);
} else if (cmd == MessageType.STATUS.code) {
handler.status(Text.readString(inStream));
} else if (cmd == MessageType.PROGRESS.code) {
handler.progress(inStream.readFloat());
} else if (cmd == MessageType.REGISTER_COUNTER.code) {
int id = WritableUtils.readVInt(inStream);
String group = Text.readString(inStream);
String name = Text.readString(inStream);
handler.registerCounter(id, group, name);
} else if (cmd == MessageType.INCREMENT_COUNTER.code) {
int id = WritableUtils.readVInt(inStream);
long amount = WritableUtils.readVLong(inStream);
handler.incrementCounter(id, amount);
} else if (cmd == MessageType.DONE.code) {
LOG.debug("Pipe child done");
handler.done();
return;
} else {
throw new IOException("Bad command code: " + cmd);
}
} catch (InterruptedException e) {
return;
} catch (Throwable e) {
LOG.error(StringUtils.stringifyException(e));
handler.failed(e);
return;
}
}
}
private void readObject(Writable obj) throws IOException {
int numBytes = WritableUtils.readVInt(inStream);
byte[] buffer;
      // For BytesWritable and Text, use the specified length to set the data
      // directly; this makes the "obvious" translations work, so that a string
      // "abc" emitted from C++ shows up here as "abc".
if (obj instanceof BytesWritable) {
buffer = new byte[numBytes];
inStream.readFully(buffer);
((BytesWritable) obj).set(buffer, 0, numBytes);
} else if (obj instanceof Text) {
buffer = new byte[numBytes];
inStream.readFully(buffer);
((Text) obj).set(buffer);
} else {
obj.readFields(inStream);
}
}
}
/**
* An output stream that will save a copy of the data into a file.
*/
private static class TeeOutputStream extends FilterOutputStream {
private OutputStream file;
TeeOutputStream(String filename, OutputStream base) throws IOException {
super(base);
file = new FileOutputStream(filename);
}
public void write(byte b[], int off, int len) throws IOException {
file.write(b,off,len);
out.write(b,off,len);
}
public void write(int b) throws IOException {
file.write(b);
out.write(b);
}
public void flush() throws IOException {
file.flush();
out.flush();
}
public void close() throws IOException {
flush();
file.close();
out.close();
}
}
/**
* Create a proxy object that will speak the binary protocol on a socket.
   * Upward messages are passed to the specified handler, and downward
   * messages are sent via public methods on this object.
* @param sock The socket to communicate on.
* @param handler The handler for the received messages.
* @param key The object to read keys into.
* @param value The object to read values into.
* @param config The job's configuration
* @throws IOException
*/
public BinaryProtocol(Socket sock,
UpwardProtocol<K2, V2> handler,
K2 key,
V2 value,
JobConf config) throws IOException {
OutputStream raw = sock.getOutputStream();
// If we are debugging, save a copy of the downlink commands to a file
if (Submitter.getKeepCommandFile(config)) {
raw = new TeeOutputStream("downlink.data", raw);
}
stream = new DataOutputStream(new BufferedOutputStream(raw,
BUFFER_SIZE)) ;
uplink = new UplinkReaderThread<K2, V2>(sock.getInputStream(),
handler, key, value);
uplink.setName("pipe-uplink-handler");
uplink.start();
}
/**
* Close the connection and shutdown the handler thread.
* @throws IOException
* @throws InterruptedException
*/
public void close() throws IOException, InterruptedException {
LOG.debug("closing connection");
stream.close();
uplink.closeConnection();
uplink.interrupt();
uplink.join();
}
public void authenticate(String digest, String challenge)
throws IOException {
LOG.debug("Sending AUTHENTICATION_REQ, digest=" + digest + ", challenge="
+ challenge);
WritableUtils.writeVInt(stream, MessageType.AUTHENTICATION_REQ.code);
Text.writeString(stream, digest);
Text.writeString(stream, challenge);
}
public void start() throws IOException {
LOG.debug("starting downlink");
WritableUtils.writeVInt(stream, MessageType.START.code);
WritableUtils.writeVInt(stream, CURRENT_PROTOCOL_VERSION);
}
public void setJobConf(JobConf job) throws IOException {
WritableUtils.writeVInt(stream, MessageType.SET_JOB_CONF.code);
List<String> list = new ArrayList<String>();
for(Map.Entry<String, String> itm: job) {
list.add(itm.getKey());
list.add(itm.getValue());
}
WritableUtils.writeVInt(stream, list.size());
for(String entry: list){
Text.writeString(stream, entry);
}
}
public void setInputTypes(String keyType,
String valueType) throws IOException {
WritableUtils.writeVInt(stream, MessageType.SET_INPUT_TYPES.code);
Text.writeString(stream, keyType);
Text.writeString(stream, valueType);
}
public void runMap(InputSplit split, int numReduces,
boolean pipedInput) throws IOException {
WritableUtils.writeVInt(stream, MessageType.RUN_MAP.code);
writeObject(split);
WritableUtils.writeVInt(stream, numReduces);
WritableUtils.writeVInt(stream, pipedInput ? 1 : 0);
}
public void mapItem(WritableComparable key,
Writable value) throws IOException {
WritableUtils.writeVInt(stream, MessageType.MAP_ITEM.code);
writeObject(key);
writeObject(value);
}
public void runReduce(int reduce, boolean pipedOutput) throws IOException {
WritableUtils.writeVInt(stream, MessageType.RUN_REDUCE.code);
WritableUtils.writeVInt(stream, reduce);
WritableUtils.writeVInt(stream, pipedOutput ? 1 : 0);
}
public void reduceKey(WritableComparable key) throws IOException {
WritableUtils.writeVInt(stream, MessageType.REDUCE_KEY.code);
writeObject(key);
}
public void reduceValue(Writable value) throws IOException {
WritableUtils.writeVInt(stream, MessageType.REDUCE_VALUE.code);
writeObject(value);
}
public void endOfInput() throws IOException {
WritableUtils.writeVInt(stream, MessageType.CLOSE.code);
LOG.debug("Sent close command");
}
public void abort() throws IOException {
WritableUtils.writeVInt(stream, MessageType.ABORT.code);
LOG.debug("Sent abort command");
}
public void flush() throws IOException {
stream.flush();
}
/**
* Write the given object to the stream. If it is a Text or BytesWritable,
* write it directly. Otherwise, write it to a buffer and then write the
* length and data to the stream.
* @param obj the object to write
* @throws IOException
*/
private void writeObject(Writable obj) throws IOException {
// For Text and BytesWritable, encode them directly, so that they end up
// in C++ as the natural translations.
if (obj instanceof Text) {
Text t = (Text) obj;
int len = t.getLength();
WritableUtils.writeVInt(stream, len);
stream.write(t.getBytes(), 0, len);
} else if (obj instanceof BytesWritable) {
BytesWritable b = (BytesWritable) obj;
int len = b.getLength();
WritableUtils.writeVInt(stream, len);
stream.write(b.getBytes(), 0, len);
} else {
buffer.reset();
obj.write(buffer);
int length = buffer.getLength();
WritableUtils.writeVInt(stream, length);
stream.write(buffer.getData(), 0, length);
}
}
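  /**
   * An illustrative sketch (added for this write-up, not part of the original
   * class) of the framing writeObject() produces for a Text value: a vint
   * length followed by the raw bytes, which is why "abc" emitted on one side
   * arrives on the other side as the literal string "abc".
   */
  static byte[] exampleTextFraming(String s) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    Text t = new Text(s);
    WritableUtils.writeVInt(out, t.getLength());
    out.write(t.getBytes(), 0, t.getLength());
    return java.util.Arrays.copyOf(out.getData(), out.getLength());
  }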
}
| 13,483 | 34.957333 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.Iterator;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.util.Progressable;
/**
 * This abstract class extends FileOutputFormat, allowing the output data to be
 * written to different output files. There are three basic use cases for
* this class.
*
* Case one: This class is used for a map reduce job with at least one reducer.
* The reducer wants to write data to different files depending on the actual
* keys. It is assumed that a key (or value) encodes the actual key (value)
* and the desired location for the actual key (value).
*
* Case two: This class is used for a map only job. The job wants to use an
* output file name that is either a part of the input file name of the input
* data, or some derivation of it.
*
* Case three: This class is used for a map only job. The job wants to use an
 * output file name that depends on both the keys and the input file name.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class MultipleOutputFormat<K, V>
extends FileOutputFormat<K, V> {
/**
* Create a composite record writer that can write key/value data to different
* output files
*
* @param fs
* the file system to use
* @param job
* the job conf for the job
   * @param name
   *          the leaf file name for the output file (such as "part-00000")
* @param arg3
* a progressable for reporting progress.
* @return a composite record writer
* @throws IOException
*/
public RecordWriter<K, V> getRecordWriter(FileSystem fs, JobConf job,
String name, Progressable arg3) throws IOException {
final FileSystem myFS = fs;
final String myName = generateLeafFileName(name);
final JobConf myJob = job;
final Progressable myProgressable = arg3;
return new RecordWriter<K, V>() {
// a cache storing the record writers for different output files.
TreeMap<String, RecordWriter<K, V>> recordWriters = new TreeMap<String, RecordWriter<K, V>>();
public void write(K key, V value) throws IOException {
// get the file name based on the key
String keyBasedPath = generateFileNameForKeyValue(key, value, myName);
// get the file name based on the input file name
String finalPath = getInputFileBasedOutputFileName(myJob, keyBasedPath);
// get the actual key
K actualKey = generateActualKey(key, value);
V actualValue = generateActualValue(key, value);
RecordWriter<K, V> rw = this.recordWriters.get(finalPath);
if (rw == null) {
          // if we don't have the record writer yet for the final path,
          // create one and add it to the cache
rw = getBaseRecordWriter(myFS, myJob, finalPath, myProgressable);
this.recordWriters.put(finalPath, rw);
}
rw.write(actualKey, actualValue);
      }
public void close(Reporter reporter) throws IOException {
Iterator<String> keys = this.recordWriters.keySet().iterator();
while (keys.hasNext()) {
RecordWriter<K, V> rw = this.recordWriters.get(keys.next());
rw.close(reporter);
}
this.recordWriters.clear();
      }
};
}
/**
* Generate the leaf name for the output file name. The default behavior does
* not change the leaf file name (such as part-00000)
*
* @param name
* the leaf file name for the output file
* @return the given leaf file name
*/
protected String generateLeafFileName(String name) {
return name;
}
/**
   * Generate the output file name based on the given key/value and the leaf
   * file name. The default behavior is that the file name does not depend on
   * the key or the value.
   *
   * @param key
   *          the key of the output data
   * @param value
   *          the value of the output data
   * @param name
   *          the leaf file name
* @return generated file name
*/
protected String generateFileNameForKeyValue(K key, V value, String name) {
return name;
}
/**
* Generate the actual key from the given key/value. The default behavior is that
* the actual key is equal to the given key
*
* @param key
* the key of the output data
* @param value
* the value of the output data
* @return the actual key derived from the given key/value
*/
protected K generateActualKey(K key, V value) {
return key;
}
/**
* Generate the actual value from the given key and value. The default behavior is that
* the actual value is equal to the given value
*
* @param key
* the key of the output data
* @param value
* the value of the output data
* @return the actual value derived from the given key/value
*/
protected V generateActualValue(K key, V value) {
return value;
}
/**
   * Generate the output file name based on a given name and the input file
   * name. If {@link JobContext#MAP_INPUT_FILE} does not exist (i.e. this is
   * not for a map only job), the given name is returned unchanged. If the
   * config value for "mapred.outputformat.numOfTrailingLegs" is not set, or is
   * set to 0 or a negative value, the given name is returned unchanged.
   * Otherwise, return a file name consisting of the N trailing legs of the
   * input file name, where N is the config value for
   * "mapred.outputformat.numOfTrailingLegs".
*
* @param job
* the job config
* @param name
* the output file name
   * @return the output file name based on the given name and the input file name.
*/
protected String getInputFileBasedOutputFileName(JobConf job, String name) {
String infilepath = job.get(MRJobConfig.MAP_INPUT_FILE);
if (infilepath == null) {
      // if the {@link JobContext#MAP_INPUT_FILE} does not exist,
// then return the given name
return name;
}
int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
if (numOfTrailingLegsToUse <= 0) {
return name;
}
Path infile = new Path(infilepath);
Path parent = infile.getParent();
String midName = infile.getName();
Path outPath = new Path(midName);
for (int i = 1; i < numOfTrailingLegsToUse; i++) {
if (parent == null) break;
midName = parent.getName();
if (midName.length() == 0) break;
parent = parent.getParent();
outPath = new Path(midName, outPath);
}
return outPath.toString();
}
  /**
   * Construct a record writer over the given file name.
   *
* @param fs
* the file system to use
* @param job
* a job conf object
* @param name
* the name of the file over which a record writer object will be
* constructed
* @param arg3
* a progressable object
* @return A RecordWriter object over the given file
* @throws IOException
*/
abstract protected RecordWriter<K, V> getBaseRecordWriter(FileSystem fs,
JobConf job, String name, Progressable arg3) throws IOException;
}
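// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Hadoop source): a hypothetical concrete
// subclass showing use case one from the class javadoc above -- the key
// encodes the desired output location, each record is routed to a key-specific
// leaf file, and the routing key is dropped from the written data. The class
// name and the Text key/value types are assumptions for illustration only.
// ---------------------------------------------------------------------------
class PerKeyTextOutputFormatSketch extends
    MultipleTextOutputFormat<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text> {
  @Override
  protected String generateFileNameForKeyValue(org.apache.hadoop.io.Text key,
      org.apache.hadoop.io.Text value, String name) {
    // e.g. key "fr" and leaf "part-00000" become "part-00000-fr"
    return name + "-" + key.toString();
  }
  @Override
  protected org.apache.hadoop.io.Text generateActualKey(
      org.apache.hadoop.io.Text key, org.apache.hadoop.io.Text value) {
    // the key only carried routing information, so do not write it out
    return null;
  }
}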
| 8,275 | 34.367521 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/CombineFileRecordReaderWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
/**
* A wrapper class for a record reader that handles a single file split. It
* delegates most of the methods to the wrapped instance. A concrete subclass
* needs to provide a constructor that calls this parent constructor with the
* appropriate input format. The subclass constructor must satisfy the specific
* constructor signature that is required by
* <code>CombineFileRecordReader</code>.
*
* Subclassing is needed to get a concrete record reader wrapper because of the
* constructor requirement.
*
* @see CombineFileRecordReader
* @see CombineFileInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class CombineFileRecordReaderWrapper<K,V>
implements RecordReader<K,V> {
private final RecordReader<K,V> delegate;
protected CombineFileRecordReaderWrapper(FileInputFormat<K,V> inputFormat,
CombineFileSplit split, Configuration conf, Reporter reporter, Integer idx)
throws IOException {
FileSplit fileSplit = new FileSplit(split.getPath(idx),
split.getOffset(idx),
split.getLength(idx),
split.getLocations());
delegate = inputFormat.getRecordReader(fileSplit, (JobConf)conf, reporter);
}
public boolean next(K key, V value) throws IOException {
return delegate.next(key, value);
}
public K createKey() {
return delegate.createKey();
}
public V createValue() {
return delegate.createValue();
}
public long getPos() throws IOException {
return delegate.getPos();
}
public void close() throws IOException {
delegate.close();
}
public float getProgress() throws IOException {
return delegate.getProgress();
}
}
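// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Hadoop source): a hypothetical concrete
// wrapper around the plain-text record reader, showing the constructor
// signature (split, conf, reporter, index) that CombineFileRecordReader
// expects, similar in spirit to the wrapper used by CombineTextInputFormat.
// The class name is an assumption for illustration only.
// ---------------------------------------------------------------------------
class TextRecordReaderWrapperSketch extends
    CombineFileRecordReaderWrapper<org.apache.hadoop.io.LongWritable,
                                   org.apache.hadoop.io.Text> {
  public TextRecordReaderWrapperSketch(CombineFileSplit split,
      Configuration conf, Reporter reporter, Integer idx) throws IOException {
    // delegate to a per-file TextInputFormat reader for the idx-th path of the split
    super(new org.apache.hadoop.mapred.TextInputFormat(), split, conf, reporter, idx);
  }
}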
| 2,918 | 32.94186 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultipleTextOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.Progressable;
/**
 * This class extends MultipleOutputFormat, allowing the output data to be
 * written to different output files in Text output format.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultipleTextOutputFormat<K, V>
extends MultipleOutputFormat<K, V> {
private TextOutputFormat<K, V> theTextOutputFormat = null;
@Override
protected RecordWriter<K, V> getBaseRecordWriter(FileSystem fs, JobConf job,
String name, Progressable arg3) throws IOException {
if (theTextOutputFormat == null) {
theTextOutputFormat = new TextOutputFormat<K, V>();
}
return theTextOutputFormat.getRecordWriter(fs, job, name, arg3);
}
}
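// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Hadoop source): hypothetical driver-side
// wiring for this output format. Without overriding any of the generate* hooks
// of MultipleOutputFormat it behaves like a plain TextOutputFormat; see the
// per-key subclass sketch after MultipleOutputFormat above. The class and
// method names below are assumptions for illustration only.
// ---------------------------------------------------------------------------
class MultipleTextOutputUsageSketch {
  static void enable(JobConf conf) {
    conf.setOutputFormat(MultipleTextOutputFormat.class);
    conf.setOutputKeyClass(org.apache.hadoop.io.Text.class);
    conf.setOutputValueClass(org.apache.hadoop.io.Text.class);
  }
}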
| 1,888 | 36.039216 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.MapRunnable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.util.concurrent.*;
/**
* Multithreaded implementation for {@link MapRunnable}.
* <p>
 * It can be used instead of the default implementation,
 * {@link org.apache.hadoop.mapred.MapRunner}, when the Map
 * operation is not CPU bound, in order to improve throughput.
* <p>
* Map implementations using this MapRunnable must be thread-safe.
* <p>
 * The Map-Reduce job has to be configured to use this MapRunnable class (using
 * the JobConf.setMapRunnerClass method). The number of threads the thread-pool
 * can use is set with the
 * <code>mapred.map.multithreadedrunner.threads</code> property; its default
 * value is 10 threads.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultithreadedMapRunner<K1, V1, K2, V2>
implements MapRunnable<K1, V1, K2, V2> {
private static final Log LOG =
LogFactory.getLog(MultithreadedMapRunner.class.getName());
private JobConf job;
private Mapper<K1, V1, K2, V2> mapper;
private ExecutorService executorService;
private volatile IOException ioException;
private volatile RuntimeException runtimeException;
private boolean incrProcCount;
@SuppressWarnings("unchecked")
public void configure(JobConf jobConf) {
int numberOfThreads =
jobConf.getInt(MultithreadedMapper.NUM_THREADS, 10);
if (LOG.isDebugEnabled()) {
LOG.debug("Configuring jobConf " + jobConf.getJobName() +
" to use " + numberOfThreads + " threads");
}
this.job = jobConf;
//increment processed counter only if skipping feature is enabled
this.incrProcCount = SkipBadRecords.getMapperMaxSkipRecords(job)>0 &&
SkipBadRecords.getAutoIncrMapperProcCount(job);
this.mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(),
jobConf);
// Creating a threadpool of the configured size to execute the Mapper
// map method in parallel.
    executorService = new ThreadPoolExecutor(numberOfThreads, numberOfThreads,
                                             0L, TimeUnit.MILLISECONDS,
                                             new BlockingArrayQueue(numberOfThreads));
}
/**
   * A blocking array queue that replaces offer and add, which fail or throw on
   * a full queue, with put, which waits until space becomes available.
*/
private static class BlockingArrayQueue extends ArrayBlockingQueue<Runnable> {
private static final long serialVersionUID = 1L;
public BlockingArrayQueue(int capacity) {
super(capacity);
}
public boolean offer(Runnable r) {
return add(r);
}
public boolean add(Runnable r) {
try {
put(r);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
return true;
}
}
private void checkForExceptionsFromProcessingThreads()
throws IOException, RuntimeException {
// Checking if a Mapper.map within a Runnable has generated an
// IOException. If so we rethrow it to force an abort of the Map
// operation thus keeping the semantics of the default
// implementation.
if (ioException != null) {
throw ioException;
}
// Checking if a Mapper.map within a Runnable has generated a
// RuntimeException. If so we rethrow it to force an abort of the Map
// operation thus keeping the semantics of the default
// implementation.
if (runtimeException != null) {
throw runtimeException;
}
}
public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
Reporter reporter)
throws IOException {
try {
      // allocate key & value instances; these objects will not be reused
      // because execution of Mapper.map is not serialized.
K1 key = input.createKey();
V1 value = input.createValue();
while (input.next(key, value)) {
executorService.execute(new MapperInvokeRunable(key, value, output,
reporter));
checkForExceptionsFromProcessingThreads();
// Allocate new key & value instances as mapper is running in parallel
key = input.createKey();
value = input.createValue();
}
if (LOG.isDebugEnabled()) {
        LOG.debug("Finished dispatching all Mapper.map calls, job "
+ job.getJobName());
}
      // Graceful shutdown of the thread pool; it lets all scheduled
      // Runnables run to completion.
executorService.shutdown();
try {
// Now waiting for all Runnables to end.
while (!executorService.awaitTermination(100, TimeUnit.MILLISECONDS)) {
if (LOG.isDebugEnabled()) {
            LOG.debug("Awaiting all running Mapper.map calls to finish, job "
+ job.getJobName());
}
          // NOTE: while Mapper.map dispatching has concluded, there may still
          // be map calls in progress that can throw exceptions.
checkForExceptionsFromProcessingThreads();
}
        // NOTE: it could be that a map call threw an exception after the
        // call to awaitTermination() returned true. An edge case, but it
        // could happen.
checkForExceptionsFromProcessingThreads();
} catch (IOException ioEx) {
        // Force a shutdown of all threads in the thread pool and rethrow
        // the IOException
executorService.shutdownNow();
throw ioEx;
} catch (InterruptedException iEx) {
throw new RuntimeException(iEx);
}
} finally {
mapper.close();
}
}
/**
* Runnable to execute a single Mapper.map call from a forked thread.
*/
private class MapperInvokeRunable implements Runnable {
private K1 key;
private V1 value;
private OutputCollector<K2, V2> output;
private Reporter reporter;
/**
* Collecting all required parameters to execute a Mapper.map call.
* <p>
*
* @param key
* @param value
* @param output
* @param reporter
*/
public MapperInvokeRunable(K1 key, V1 value,
OutputCollector<K2, V2> output,
Reporter reporter) {
this.key = key;
this.value = value;
this.output = output;
this.reporter = reporter;
}
/**
* Executes a Mapper.map call with the given Mapper and parameters.
* <p>
* This method is called from the thread-pool thread.
*
*/
public void run() {
try {
// map pair to output
MultithreadedMapRunner.this.mapper.map(key, value, output, reporter);
if(incrProcCount) {
reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1);
}
} catch (IOException ex) {
// If there is an IOException during the call it is set in an instance
// variable of the MultithreadedMapRunner from where it will be
// rethrown.
synchronized (MultithreadedMapRunner.this) {
if (MultithreadedMapRunner.this.ioException == null) {
MultithreadedMapRunner.this.ioException = ex;
}
}
} catch (RuntimeException ex) {
// If there is a RuntimeException during the call it is set in an
// instance variable of the MultithreadedMapRunner from where it will be
// rethrown.
synchronized (MultithreadedMapRunner.this) {
if (MultithreadedMapRunner.this.runtimeException == null) {
MultithreadedMapRunner.this.runtimeException = ex;
}
}
}
}
}
}
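// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Hadoop source): hypothetical driver-side
// configuration enabling this runner and sizing the pool through the same
// MultithreadedMapper.NUM_THREADS key that configure() above reads. The class
// and method names below are assumptions for illustration; the Mapper supplied
// to such a job must be thread-safe.
// ---------------------------------------------------------------------------
class MultithreadedRunnerUsageSketch {
  static void enable(JobConf conf) {
    conf.setMapRunnerClass(MultithreadedMapRunner.class); // replace the default MapRunner
    conf.setInt(MultithreadedMapper.NUM_THREADS, 20);     // pool of 20 threads (default is 10)
  }
}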
| 9,124 | 33.564394 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/NullOutputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable;
/**
* Consume all outputs and put them in /dev/null.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NullOutputFormat<K, V> implements OutputFormat<K, V> {
public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
String name, Progressable progress) {
return new RecordWriter<K, V>(){
public void write(K key, V value) { }
public void close(Reporter reporter) { }
};
}
public void checkOutputSpecs(FileSystem ignored, JobConf job) { }
}
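// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Hadoop source): typical use is a job
// whose map or reduce code writes its real output somewhere else (for example
// an external store), so the framework-collected output is simply discarded.
// The class and method names below are assumptions for illustration only.
// ---------------------------------------------------------------------------
class NullOutputUsageSketch {
  static void enable(JobConf conf) {
    conf.setOutputFormat(NullOutputFormat.class); // discard all collected output
  }
}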
| 1,781 | 36.914894 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/InputSampler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InputSampler<K,V> extends
org.apache.hadoop.mapreduce.lib.partition.InputSampler<K, V> {
private static final Log LOG = LogFactory.getLog(InputSampler.class);
public InputSampler(JobConf conf) {
super(conf);
}
public static <K,V> void writePartitionFile(JobConf job, Sampler<K,V> sampler)
throws IOException, ClassNotFoundException, InterruptedException {
writePartitionFile(Job.getInstance(job), sampler);
}
/**
* Interface to sample using an {@link org.apache.hadoop.mapred.InputFormat}.
*/
public interface Sampler<K,V> extends
org.apache.hadoop.mapreduce.lib.partition.InputSampler.Sampler<K, V> {
/**
* For a given job, collect and return a subset of the keys from the
* input data.
*/
K[] getSample(InputFormat<K,V> inf, JobConf job) throws IOException;
}
/**
* Samples the first n records from s splits.
* Inexpensive way to sample random data.
*/
public static class SplitSampler<K,V> extends
org.apache.hadoop.mapreduce.lib.partition.InputSampler.SplitSampler<K, V>
implements Sampler<K,V> {
/**
* Create a SplitSampler sampling <em>all</em> splits.
* Takes the first numSamples / numSplits records from each split.
* @param numSamples Total number of samples to obtain from all selected
* splits.
*/
public SplitSampler(int numSamples) {
this(numSamples, Integer.MAX_VALUE);
}
/**
* Create a new SplitSampler.
* @param numSamples Total number of samples to obtain from all selected
* splits.
* @param maxSplitsSampled The maximum number of splits to examine.
*/
public SplitSampler(int numSamples, int maxSplitsSampled) {
super(numSamples, maxSplitsSampled);
}
/**
* From each split sampled, take the first numSamples / numSplits records.
*/
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, JobConf job) throws IOException {
InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks());
ArrayList<K> samples = new ArrayList<K>(numSamples);
int splitsToSample = Math.min(maxSplitsSampled, splits.length);
int splitStep = splits.length / splitsToSample;
int samplesPerSplit = numSamples / splitsToSample;
long records = 0;
for (int i = 0; i < splitsToSample; ++i) {
RecordReader<K,V> reader = inf.getRecordReader(splits[i * splitStep],
job, Reporter.NULL);
K key = reader.createKey();
V value = reader.createValue();
while (reader.next(key, value)) {
samples.add(key);
key = reader.createKey();
++records;
if ((i+1) * samplesPerSplit <= records) {
break;
}
}
reader.close();
}
return (K[])samples.toArray();
}
}
/**
* Sample from random points in the input.
* General-purpose sampler. Takes numSamples / maxSplitsSampled inputs from
* each split.
*/
public static class RandomSampler<K,V> extends
org.apache.hadoop.mapreduce.lib.partition.InputSampler.RandomSampler<K, V>
implements Sampler<K,V> {
/**
* Create a new RandomSampler sampling <em>all</em> splits.
* This will read every split at the client, which is very expensive.
* @param freq Probability with which a key will be chosen.
* @param numSamples Total number of samples to obtain from all selected
* splits.
*/
public RandomSampler(double freq, int numSamples) {
this(freq, numSamples, Integer.MAX_VALUE);
}
/**
* Create a new RandomSampler.
* @param freq Probability with which a key will be chosen.
* @param numSamples Total number of samples to obtain from all selected
* splits.
* @param maxSplitsSampled The maximum number of splits to examine.
*/
public RandomSampler(double freq, int numSamples, int maxSplitsSampled) {
super(freq, numSamples, maxSplitsSampled);
}
/**
* Randomize the split order, then take the specified number of keys from
* each split sampled, where each key is selected with the specified
* probability and possibly replaced by a subsequently selected key when
* the quota of keys from that split is satisfied.
*/
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, JobConf job) throws IOException {
InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks());
ArrayList<K> samples = new ArrayList<K>(numSamples);
int splitsToSample = Math.min(maxSplitsSampled, splits.length);
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
LOG.debug("seed: " + seed);
// shuffle splits
for (int i = 0; i < splits.length; ++i) {
InputSplit tmp = splits[i];
int j = r.nextInt(splits.length);
splits[i] = splits[j];
splits[j] = tmp;
}
// our target rate is in terms of the maximum number of sample splits,
// but we accept the possibility of sampling additional splits to hit
// the target sample keyset
for (int i = 0; i < splitsToSample ||
(i < splits.length && samples.size() < numSamples); ++i) {
RecordReader<K,V> reader = inf.getRecordReader(splits[i], job,
Reporter.NULL);
K key = reader.createKey();
V value = reader.createValue();
while (reader.next(key, value)) {
if (r.nextDouble() <= freq) {
if (samples.size() < numSamples) {
samples.add(key);
} else {
// When exceeding the maximum number of samples, replace a
// random element with this one, then adjust the frequency
// to reflect the possibility of existing elements being
// pushed out
int ind = r.nextInt(numSamples);
if (ind != numSamples) {
samples.set(ind, key);
}
freq *= (numSamples - 1) / (double) numSamples;
}
key = reader.createKey();
}
}
reader.close();
}
return (K[])samples.toArray();
}
}
/**
* Sample from s splits at regular intervals.
* Useful for sorted data.
*/
public static class IntervalSampler<K,V> extends
org.apache.hadoop.mapreduce.lib.partition.InputSampler.IntervalSampler<K, V>
implements Sampler<K,V> {
/**
* Create a new IntervalSampler sampling <em>all</em> splits.
* @param freq The frequency with which records will be emitted.
*/
public IntervalSampler(double freq) {
this(freq, Integer.MAX_VALUE);
}
/**
* Create a new IntervalSampler.
* @param freq The frequency with which records will be emitted.
* @param maxSplitsSampled The maximum number of splits to examine.
* @see #getSample
*/
public IntervalSampler(double freq, int maxSplitsSampled) {
super(freq, maxSplitsSampled);
}
/**
* For each split sampled, emit when the ratio of the number of records
* retained to the total record count is less than the specified
* frequency.
*/
@SuppressWarnings("unchecked") // ArrayList::toArray doesn't preserve type
public K[] getSample(InputFormat<K,V> inf, JobConf job) throws IOException {
InputSplit[] splits = inf.getSplits(job, job.getNumMapTasks());
ArrayList<K> samples = new ArrayList<K>();
int splitsToSample = Math.min(maxSplitsSampled, splits.length);
int splitStep = splits.length / splitsToSample;
long records = 0;
long kept = 0;
for (int i = 0; i < splitsToSample; ++i) {
RecordReader<K,V> reader = inf.getRecordReader(splits[i * splitStep],
job, Reporter.NULL);
K key = reader.createKey();
V value = reader.createValue();
while (reader.next(key, value)) {
++records;
if ((double) kept / records < freq) {
++kept;
samples.add(key);
key = reader.createKey();
}
}
reader.close();
}
return (K[])samples.toArray();
}
}
}
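// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Hadoop source): a hypothetical driver
// fragment showing the usual way these samplers are used -- sample the input,
// write the partition file, and let TotalOrderPartitioner consume it for a
// total-order sort. The frequency, sample count and split cap are arbitrary
// illustration values; Text keys are assumed.
// ---------------------------------------------------------------------------
class InputSamplerUsageSketch {
  static void samplePartitions(JobConf conf)
      throws IOException, ClassNotFoundException, InterruptedException {
    // keep each key with probability 0.01, up to 10000 samples from at most 10 splits
    InputSampler.RandomSampler<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text> sampler =
        new InputSampler.RandomSampler<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>(
            0.01, 10000, 10);
    InputSampler.writePartitionFile(conf, sampler);
    // the partition file written above is read by the partitioner at runtime
    conf.setPartitionerClass(TotalOrderPartitioner.class);
  }
}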
| 9,819 | 35.779026 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/ChainReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapred.*;
import java.io.IOException;
import java.util.Iterator;
/**
 * The ChainReducer class allows chaining multiple Mapper classes after a
 * Reducer within the Reducer task.
* <p>
* For each record output by the Reducer, the Mapper classes are invoked in a
* chained (or piped) fashion, the output of the first becomes the input of the
* second, and so on until the last Mapper, the output of the last Mapper will
* be written to the task's output.
* <p>
* The key functionality of this feature is that the Mappers in the chain do not
* need to be aware that they are executed after the Reducer or in a chain.
* This enables having reusable specialized Mappers that can be combined to
* perform composite operations within a single task.
* <p>
* Special care has to be taken when creating chains that the key/values output
* by a Mapper are valid for the following Mapper in the chain. It is assumed
 * all Mappers and the Reducer in the chain use matching output and input key
 * and value classes, as no conversion is done by the chaining code.
* <p>
 * Using the ChainMapper and the ChainReducer classes it is possible to compose
 * Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
 * immediate benefit of this pattern is a dramatic reduction in disk IO.
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainReducer, this is done by the setReducer or the addMapper for the last
* element in the chain.
* <p>
* ChainReducer usage pattern:
* <p>
* <pre>
* ...
* conf.setJobName("chain");
* conf.setInputFormat(TextInputFormat.class);
* conf.setOutputFormat(TextOutputFormat.class);
*
* JobConf mapAConf = new JobConf(false);
* ...
* ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
* Text.class, Text.class, true, mapAConf);
*
* JobConf mapBConf = new JobConf(false);
* ...
* ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
* LongWritable.class, Text.class, false, mapBConf);
*
* JobConf reduceConf = new JobConf(false);
* ...
* ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
* Text.class, Text.class, true, reduceConf);
*
* ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
* LongWritable.class, Text.class, false, null);
*
* ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
* LongWritable.class, LongWritable.class, true, null);
*
* FileInputFormat.setInputPaths(conf, inDir);
* FileOutputFormat.setOutputPath(conf, outDir);
* ...
*
* JobClient jc = new JobClient(conf);
* RunningJob job = jc.submitJob(conf);
* ...
* </pre>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ChainReducer implements Reducer {
/**
* Sets the Reducer class to the chain job's JobConf.
* <p>
* It has to be specified how key and values are passed from one element of
   * the chain to the next, by value or by reference. If a Reducer relies on the
   * assumed semantics that the key and values are not modified by the collector,
   * 'by value' must be used. If the Reducer does not rely on these semantics,
   * 'by reference' can be used as an optimization to avoid serialization and
   * deserialization.
* <p>
* For the added Reducer the configuration given for it,
   * <code>reducerConf</code>, has precedence over the job's JobConf. This
* precedence is in effect when the task is running.
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainReducer, this is done by the setReducer or the addMapper for the last
* element in the chain.
*
* @param job job's JobConf to add the Reducer class.
* @param klass the Reducer class to add.
* @param inputKeyClass reducer input key class.
* @param inputValueClass reducer input value class.
* @param outputKeyClass reducer output key class.
* @param outputValueClass reducer output value class.
* @param byValue indicates if key/values should be passed by value
* to the next Mapper in the chain, if any.
* @param reducerConf a JobConf with the configuration for the Reducer
* class. It is recommended to use a JobConf without default values using the
* <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.
*/
public static <K1, V1, K2, V2> void setReducer(JobConf job,
Class<? extends Reducer<K1, V1, K2, V2>> klass,
Class<? extends K1> inputKeyClass,
Class<? extends V1> inputValueClass,
Class<? extends K2> outputKeyClass,
Class<? extends V2> outputValueClass,
boolean byValue, JobConf reducerConf) {
job.setReducerClass(ChainReducer.class);
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.setReducer(job, klass, inputKeyClass, inputValueClass, outputKeyClass,
outputValueClass, byValue, reducerConf);
}
/**
* Adds a Mapper class to the chain job's JobConf.
* <p>
* It has to be specified how key and values are passed from one element of
   * the chain to the next, by value or by reference. If a Mapper relies on the
   * assumed semantics that the key and values are not modified by the collector,
   * 'by value' must be used. If the Mapper does not rely on these semantics,
   * 'by reference' can be used as an optimization to avoid serialization and
   * deserialization.
* <p>
* For the added Mapper the configuration given for it,
   * <code>mapperConf</code>, has precedence over the job's JobConf. This
* precedence is in effect when the task is running.
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
   * ChainMapper, this is done by the addMapper for the last mapper in the chain.
*
* @param job chain job's JobConf to add the Mapper class.
* @param klass the Mapper class to add.
* @param inputKeyClass mapper input key class.
* @param inputValueClass mapper input value class.
* @param outputKeyClass mapper output key class.
* @param outputValueClass mapper output value class.
* @param byValue indicates if key/values should be passed by value
* to the next Mapper in the chain, if any.
* @param mapperConf a JobConf with the configuration for the Mapper
* class. It is recommended to use a JobConf without default values using the
* <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.
*/
public static <K1, V1, K2, V2> void addMapper(JobConf job,
Class<? extends Mapper<K1, V1, K2, V2>> klass,
Class<? extends K1> inputKeyClass,
Class<? extends V1> inputValueClass,
Class<? extends K2> outputKeyClass,
Class<? extends V2> outputValueClass,
boolean byValue, JobConf mapperConf) {
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.addMapper(false, job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, byValue, mapperConf);
}
private Chain chain;
/**
* Constructor.
*/
public ChainReducer() {
chain = new Chain(false);
}
/**
* Configures the ChainReducer, the Reducer and all the Mappers in the chain.
* <p>
   * If this method is overridden, <code>super.configure(...)</code> should be
   * invoked at the beginning of the overriding method.
*/
public void configure(JobConf job) {
chain.configure(job);
}
/**
* Chains the <code>reduce(...)</code> method of the Reducer with the
* <code>map(...) </code> methods of the Mappers in the chain.
*/
@SuppressWarnings({"unchecked"})
public void reduce(Object key, Iterator values, OutputCollector output,
Reporter reporter) throws IOException {
Reducer reducer = chain.getReducer();
if (reducer != null) {
reducer.reduce(key, values, chain.getReducerCollector(output, reporter),
reporter);
}
}
/**
* Closes the ChainReducer, the Reducer and all the Mappers in the chain.
* <p>
   * If this method is overridden, <code>super.close()</code> should be
   * invoked at the end of the overriding method.
*/
public void close() throws IOException {
chain.close();
}
}
| 9,576 | 41.189427 | 80 |
java
|